author    Amar Tumballi <amar@gluster.com>	2010-06-21 07:00:04 +0000
committer Anand V. Avati <avati@dev.gluster.com>	2010-06-21 20:21:10 -0700
commit    fdd20492638fe98a62b5e6d5e82f18cf4799fd1a (patch)
tree      98082d7bfdc66157f40666f2070d3a45b582327a
parent    b9b8734a9496ccf5f8ed5527dc7714930a59948b (diff)

rpc protocol

Signed-off-by: Amar Tumballi <amar@gluster.com>
Signed-off-by: Raghavendra G <raghavendra@gluster.com>
Signed-off-by: Anand V. Avati <avati@dev.gluster.com>
BUG: 875 (Implement a new protocol to provide proper backward/forward compatibility)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=875
-rw-r--r--  configure.ac                                               |   10
-rw-r--r--  doc/rpc-for-glusterfs.changes-done.txt                     |   18
-rw-r--r--  libglusterfs/src/iatt.h                                    |    1
-rw-r--r--  xlators/protocol/Makefile.am                               |    4
-rw-r--r--  xlators/protocol/auth/Makefile.am                          |    2
-rw-r--r--  xlators/protocol/auth/addr/Makefile.am                     |    2
-rw-r--r--  xlators/protocol/auth/addr/src/Makefile.am                 |    2
-rw-r--r--  xlators/protocol/auth/login/Makefile.am                    |    2
-rw-r--r--  xlators/protocol/auth/login/src/Makefile.am                |    2
-rw-r--r--  xlators/protocol/client/Makefile.am                        |    1
-rw-r--r--  xlators/protocol/client/src/Makefile.am                    |   16
-rw-r--r--  xlators/protocol/client/src/client-handshake.c             |  738
-rw-r--r--  xlators/protocol/client/src/client-helpers.c               |  109
-rw-r--r--  xlators/protocol/client/src/client-mem-types.h             |   32
-rw-r--r--  xlators/protocol/client/src/client.c                       | 1791
-rw-r--r--  xlators/protocol/client/src/client.h                       |  125
-rw-r--r--  xlators/protocol/client/src/client3_1-fops.c               | 4662
-rw-r--r--  xlators/protocol/lib/Makefile.am                           |    2
-rw-r--r--  xlators/protocol/lib/src/Makefile.am                       |   11
-rw-r--r--  xlators/protocol/lib/src/authenticate.c                    |   40
-rw-r--r--  xlators/protocol/lib/src/authenticate.h                    |    6
-rw-r--r--  xlators/protocol/lib/src/glusterfs-xdr.c                   | 1847
-rw-r--r--  xlators/protocol/lib/src/glusterfs-xdr.h                   | 1337
-rw-r--r--  xlators/protocol/lib/src/glusterfs3.x                      |  755
-rw-r--r--  xlators/protocol/lib/src/msg-xdr.c                         | 1264
-rw-r--r--  xlators/protocol/lib/src/msg-xdr.h                         |  536
-rw-r--r--  xlators/protocol/lib/src/protocol-common.c                 |  109
-rw-r--r--  xlators/protocol/lib/src/protocol-common.h                 |  104
-rw-r--r--  xlators/protocol/rpc/Makefile.am                           |    1
-rw-r--r--  xlators/protocol/rpc/rpc-lib/Makefile.am                   |    1
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/Makefile.am               |   15
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/auth-glusterfs.c          |  112
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/auth-null.c               |   70
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/auth-unix.c               |   90
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpc-clnt.c                | 1281
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpc-clnt.h                |  174
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpc-transport.c           | 1300
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpc-transport.h           |  287
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpcsvc-auth.c             |  409
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpcsvc-common.h           |   83
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpcsvc.c                  | 2010
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/rpcsvc.h                  |  584
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/xdr-common.h              |   48
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/xdr-rpc.c                 |  189
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/xdr-rpc.h                 |   74
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.c             |  131
-rw-r--r--  xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.h             |   51
-rw-r--r--  xlators/protocol/rpc/rpc-transport/Makefile.am             |    1
-rw-r--r--  xlators/protocol/rpc/rpc-transport/socket/Makefile.am      |    1
-rw-r--r--  xlators/protocol/rpc/rpc-transport/socket/src/Makefile.am  |   15
-rw-r--r--  xlators/protocol/rpc/rpc-transport/socket/src/name.c       |  737
-rw-r--r--  xlators/protocol/rpc/rpc-transport/socket/src/name.h       |   44
-rw-r--r--  xlators/protocol/rpc/rpc-transport/socket/src/socket.c     | 2307
-rw-r--r--  xlators/protocol/rpc/rpc-transport/socket/src/socket.h     |  190
-rw-r--r--  xlators/protocol/server/Makefile.am                        |    1
-rw-r--r--  xlators/protocol/server/src/Makefile.am                    |   22
-rw-r--r--  xlators/protocol/server/src/server-handshake.c             |  689
-rw-r--r--  xlators/protocol/server/src/server-helpers.c               | 1392
-rw-r--r--  xlators/protocol/server/src/server-helpers.h               |   89
-rw-r--r--  xlators/protocol/server/src/server-mem-types.h             |   37
-rw-r--r--  xlators/protocol/server/src/server-resolve.c               |  655
-rw-r--r--  xlators/protocol/server/src/server.c                       |  687
-rw-r--r--  xlators/protocol/server/src/server.h                       |  203
-rw-r--r--  xlators/protocol/server/src/server3_1-fops.c               | 4839
64 files changed, 32304 insertions, 43 deletions
diff --git a/configure.ac b/configure.ac
index 6ef8188a621..0347ca59a9d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -84,6 +84,16 @@ AC_CONFIG_FILES([Makefile
xlators/protocol/auth/addr/src/Makefile
xlators/protocol/auth/login/Makefile
xlators/protocol/auth/login/src/Makefile
+ xlators/protocol/rpc/Makefile
+ xlators/protocol/rpc/rpc-lib/Makefile
+ xlators/protocol/rpc/rpc-lib/src/Makefile
+ xlators/protocol/rpc/rpc-transport/Makefile
+ xlators/protocol/rpc/rpc-transport/socket/Makefile
+ xlators/protocol/rpc/rpc-transport/socket/src/Makefile
+ xlators/protocol/client/Makefile
+ xlators/protocol/client/src/Makefile
+ xlators/protocol/server/Makefile
+ xlators/protocol/server/src/Makefile
xlators/features/Makefile
xlators/features/locks/Makefile
xlators/features/locks/src/Makefile
diff --git a/doc/rpc-for-glusterfs.changes-done.txt b/doc/rpc-for-glusterfs.changes-done.txt
new file mode 100644
index 00000000000..6bbbca78826
--- /dev/null
+++ b/doc/rpc-for-glusterfs.changes-done.txt
@@ -0,0 +1,18 @@
+This document serves as a basic coding standard/practice for further
+development now that a proper protocol layer is implemented.
+
+With this release we introduce an abstraction between xlator-driven
+operations and protocol-driven operations; i.e., all client-side (fuse)
+operations are xlator-driven operations and carry an 'op' value taken
+from 'libglusterfs/'.
+
+All server-side protocol-driven operations are driven by whichever
+version of the protocol is in use.
+
+All currently implemented fops remain; 'getspec', which is generated at the
+top level and passes through the translator graph, is treated as an 'fop'.
+
+All new 'gluster' and 'glusterd' related calls will be _mgmt_ calls instead
+of fops. 'release', 'releasedir' and 'forget' are treated as fops (but they
+are not required to use STACK_WIND and STACK_UNWIND).
+
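
To make the split described in the document above concrete: every xlator-driven fop in the new client translator looks up the negotiated protocol program (conf->fops), invokes its actor for that fop, and unwinds locally with ENOTCONN when no program has been negotiated yet. The minimal sketch below mirrors the client_stat() implementation added later in this change (clnt_conf_t, clnt_args_t, GF_FOP_STAT and STACK_UNWIND_STRICT are all as defined by this patch); it is illustrative only, not an additional hunk.

    /* Sketch only: the dispatch convention used throughout client.c below. */
    int32_t
    client_stat (call_frame_t *frame, xlator_t *this, loc_t *loc)
    {
            int                   ret  = -1;
            clnt_conf_t          *conf = this->private;
            rpc_clnt_procedure_t *proc = NULL;
            clnt_args_t           args = {0,};

            if (!conf->fops)                     /* protocol not negotiated yet */
                    goto out;

            args.loc = loc;
            proc = &conf->fops->actor[GF_FOP_STAT];   /* protocol-driven actor */
            if (proc->fn)
                    ret = proc->fn (frame, this, conf->fops, &args);
    out:
            if (ret)                             /* fail the xlator-driven op */
                    STACK_UNWIND_STRICT (stat, frame, -1, ENOTCONN, NULL);
            return 0;
    }
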
diff --git a/libglusterfs/src/iatt.h b/libglusterfs/src/iatt.h
index 92d679f4c17..ef64f9afc95 100644
--- a/libglusterfs/src/iatt.h
+++ b/libglusterfs/src/iatt.h
@@ -30,6 +30,7 @@
#include <sys/stat.h> /* for iatt <--> stat conversions */
#include <unistd.h>
+#include "compat.h"
typedef enum {
IA_INVAL = 0,
diff --git a/xlators/protocol/Makefile.am b/xlators/protocol/Makefile.am
index 0e1eca0c8e2..b38506d473e 100644
--- a/xlators/protocol/Makefile.am
+++ b/xlators/protocol/Makefile.am
@@ -1,3 +1 @@
-SUBDIRS = lib auth legacy
-
-CLEANFILES =
+SUBDIRS = lib auth legacy rpc client server
diff --git a/xlators/protocol/auth/Makefile.am b/xlators/protocol/auth/Makefile.am
index 6bd54eee38f..e9e0ba97e14 100644
--- a/xlators/protocol/auth/Makefile.am
+++ b/xlators/protocol/auth/Makefile.am
@@ -1,3 +1 @@
SUBDIRS = addr login
-
-CLEANFILES =
diff --git a/xlators/protocol/auth/addr/Makefile.am b/xlators/protocol/auth/addr/Makefile.am
index d471a3f9243..af437a64d6d 100644
--- a/xlators/protocol/auth/addr/Makefile.am
+++ b/xlators/protocol/auth/addr/Makefile.am
@@ -1,3 +1 @@
SUBDIRS = src
-
-CLEANFILES =
diff --git a/xlators/protocol/auth/addr/src/Makefile.am b/xlators/protocol/auth/addr/src/Makefile.am
index 2c94c94e4fb..ebf20b38a84 100644
--- a/xlators/protocol/auth/addr/src/Makefile.am
+++ b/xlators/protocol/auth/addr/src/Makefile.am
@@ -10,5 +10,3 @@ addr_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
-I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS) \
-I$(top_srcdir)/xlators/protocol/lib/src
-
-CLEANFILES =
diff --git a/xlators/protocol/auth/login/Makefile.am b/xlators/protocol/auth/login/Makefile.am
index d471a3f9243..af437a64d6d 100644
--- a/xlators/protocol/auth/login/Makefile.am
+++ b/xlators/protocol/auth/login/Makefile.am
@@ -1,3 +1 @@
SUBDIRS = src
-
-CLEANFILES =
diff --git a/xlators/protocol/auth/login/src/Makefile.am b/xlators/protocol/auth/login/src/Makefile.am
index ef99a965be6..b3b625b6e52 100644
--- a/xlators/protocol/auth/login/src/Makefile.am
+++ b/xlators/protocol/auth/login/src/Makefile.am
@@ -11,5 +11,3 @@ login_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
-I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS) \
-I$(top_srcdir)/xlators/protocol/lib/src
-
-CLEANFILES =
diff --git a/xlators/protocol/client/Makefile.am b/xlators/protocol/client/Makefile.am
new file mode 100644
index 00000000000..af437a64d6d
--- /dev/null
+++ b/xlators/protocol/client/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = src
diff --git a/xlators/protocol/client/src/Makefile.am b/xlators/protocol/client/src/Makefile.am
new file mode 100644
index 00000000000..40281467e17
--- /dev/null
+++ b/xlators/protocol/client/src/Makefile.am
@@ -0,0 +1,16 @@
+
+xlator_LTLIBRARIES = client.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/alpha/protocol
+
+client_la_LDFLAGS = -module -avoidversion
+
+client_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/xlators/protocol/rpc/rpc-lib/src/libgfrpc.la \
+ $(top_builddir)/xlators/protocol/lib/src/libgfproto1.la
+
+client_la_SOURCES = client.c client-helpers.c client3_1-fops.c client-handshake.c
+noinst_HEADERS = client.h client-mem-types.h
+
+AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS) \
+ -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS) \
+ -I$(top_srcdir)/xlators/protocol/lib/src -I$(top_srcdir)/xlators/protocol/rpc/rpc-lib/src/
diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c
new file mode 100644
index 00000000000..79cabd106b2
--- /dev/null
+++ b/xlators/protocol/client/src/client-handshake.c
@@ -0,0 +1,738 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "client.h"
+#include "xlator.h"
+#include "defaults.h"
+#include "glusterfs.h"
+#include "msg-xdr.h"
+#include "statedump.h"
+#include "compat-errno.h"
+
+extern rpc_clnt_prog_t clnt3_1_fop_prog;
+extern rpc_clnt_prog_t clnt3_1_mgmt_prog;
+
+/* Handshake */
+
+void
+rpc_client_ping_timer_expired (void *data)
+{
+ rpc_transport_t *trans = NULL;
+ rpc_clnt_connection_t *conn = NULL;
+ int disconnect = 0;
+ int transport_activity = 0;
+ struct timeval timeout = {0, };
+ struct timeval current = {0, };
+ struct rpc_clnt *clnt = NULL;
+ xlator_t *this = NULL;
+ clnt_conf_t *conf = NULL;
+
+ if (!data) {
+ goto out;
+ }
+
+        this = data;
+        conf = this->private;
+        clnt = conf->rpc;
+        if (!clnt)
+                goto out;
+
+        conn = &clnt->conn;
+        trans = conn->trans;
+
+        if (!trans) {
+                goto out;
+        }
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ if (conn->ping_timer)
+ gf_timer_call_cancel (this->ctx,
+ conn->ping_timer);
+ gettimeofday (&current, NULL);
+
+ if (((current.tv_sec - conn->last_received.tv_sec) <
+ conf->opt.ping_timeout)
+ || ((current.tv_sec - conn->last_sent.tv_sec) <
+ conf->opt.ping_timeout)) {
+ transport_activity = 1;
+ }
+
+ if (transport_activity) {
+ gf_log (trans->name, GF_LOG_TRACE,
+ "ping timer expired but transport activity "
+ "detected - not bailing transport");
+ timeout.tv_sec = conf->opt.ping_timeout;
+ timeout.tv_usec = 0;
+
+ conn->ping_timer =
+ gf_timer_call_after (this->ctx, timeout,
+ rpc_client_ping_timer_expired,
+ (void *) this);
+ if (conn->ping_timer == NULL)
+ gf_log (trans->name, GF_LOG_DEBUG,
+ "unable to setup timer");
+
+ } else {
+ conn->ping_started = 0;
+ conn->ping_timer = NULL;
+ disconnect = 1;
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ if (disconnect) {
+ gf_log (trans->name, GF_LOG_ERROR,
+ "Server %s has not responded in the last %d "
+ "seconds, disconnecting.",
+ conn->trans->peerinfo.identifier,
+ conf->opt.ping_timeout);
+
+ rpc_transport_disconnect (conn->trans);
+ }
+
+out:
+ return;
+}
+
+
+void
+client_start_ping (void *data)
+{
+ xlator_t *this = NULL;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_connection_t *conn = NULL;
+ int32_t ret = -1;
+ struct timeval timeout = {0, };
+ call_frame_t *frame = NULL;
+ int frame_count = 0;
+ rpc_transport_t *trans = NULL;
+
+ this = data;
+ conf = this->private;
+
+ conn = &conf->rpc->conn;
+ trans = conn->trans;
+
+ if (conf->opt.ping_timeout == 0)
+ return;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ if (conn->ping_timer)
+ gf_timer_call_cancel (this->ctx, conn->ping_timer);
+
+ conn->ping_timer = NULL;
+ conn->ping_started = 0;
+
+ if (conn->saved_frames)
+ /* treat the case where conn->saved_frames is NULL
+ as no pending frames */
+ frame_count = conn->saved_frames->count;
+
+ if ((frame_count == 0) || !conn->connected) {
+ /* using goto looked ugly here,
+ * hence getting out this way */
+ /* unlock */
+ pthread_mutex_unlock (&conn->lock);
+ return;
+ }
+
+ if (frame_count < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "saved_frames->count is %"PRId64,
+ conn->saved_frames->count);
+ conn->saved_frames->count = 0;
+ }
+
+ timeout.tv_sec = conf->opt.ping_timeout;
+ timeout.tv_usec = 0;
+
+ conn->ping_timer =
+ gf_timer_call_after (this->ctx, timeout,
+ rpc_client_ping_timer_expired,
+ (void *) this);
+
+ if (conn->ping_timer == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "unable to setup timer");
+ } else {
+ conn->ping_started = 1;
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame)
+ goto fail;
+
+ ret = client_submit_request (this, NULL, frame, conf->handshake,
+ GF_HNDSK_PING, NULL, NULL);
+
+ return;
+fail:
+
+ if (frame) {
+ STACK_DESTROY (frame->root);
+ }
+
+ return;
+}
+
+
+int
+client_ping_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ xlator_t *this = NULL;
+ rpc_clnt_connection_t *conn = NULL;
+ struct timeval timeout = {0, };
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+
+ frame = myframe;
+
+ this = frame->this;
+ conf = this->private;
+ conn = &conf->rpc->conn;
+
+ if (req->rpc_status == -1) {
+ /* timer expired and transport bailed out */
+ gf_log (this->name, GF_LOG_DEBUG, "timer must have expired");
+ goto out;
+ }
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ timeout.tv_sec = conf->opt.ping_timeout;
+ timeout.tv_usec = 0;
+
+ gf_timer_call_cancel (this->ctx,
+ conn->ping_timer);
+
+ conn->ping_timer =
+ gf_timer_call_after (this->ctx, timeout,
+ client_start_ping, (void *)this);
+
+ if (conn->ping_timer == NULL)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "gf_timer_call_after() returned NULL");
+ }
+ pthread_mutex_unlock (&conn->lock);
+out:
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+
+
+int
+client_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe)
+{
+ gf_getspec_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+ int ret = 0;
+ char spec[(32*1024)] = {0,};
+
+ frame = myframe;
+ conf = frame->this->private;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ rsp.spec = spec;
+ ret = xdr_to_dump_version_rsp (*iov, &rsp);
+ if (ret < 0) {
+                gf_log (frame->this->name, GF_LOG_ERROR,
+                        "XDR decoding of getspec reply failed");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to get the 'volume file' from server");
+ goto out;
+ }
+
+out:
+ /* no _STRICT for mops */
+ STACK_UNWIND_STRICT (getspec, frame, rsp.op_ret, rsp.op_errno, rsp.spec);
+ return 0;
+}
+
+
+int
+client_post_handshake (call_frame_t *frame, xlator_t *this)
+{
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *tmp = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ xlator_list_t *parent = NULL;
+ struct list_head reopen_head;
+
+ if (!this || !this->private)
+ goto out;
+
+ conf = this->private;
+ INIT_LIST_HEAD (&reopen_head);
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ list_for_each_entry_safe (fdctx, tmp, &conf->saved_fds,
+ sfd_pos) {
+ if (fdctx->remote_fd != -1)
+ continue;
+
+ list_del_init (&fdctx->sfd_pos);
+ list_add_tail (&fdctx->sfd_pos, &reopen_head);
+ }
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ list_for_each_entry_safe (fdctx, tmp, &reopen_head, sfd_pos) {
+ list_del_init (&fdctx->sfd_pos);
+
+ if (fdctx->is_dir)
+ protocol_client_reopendir (this, fdctx);
+ else
+ protocol_client_reopen (this, fdctx);
+ }
+
+ parent = this->parents;
+
+ while (parent) {
+ xlator_notify (parent->xlator, GF_EVENT_CHILD_UP,
+ this);
+ parent = parent->next;
+ }
+
+out:
+ return 0;
+}
+
+int
+client_setvolume_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe)
+{
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ dict_t *reply = NULL;
+ xlator_list_t *parent = NULL;
+ char *process_uuid = NULL;
+ char *remote_error = NULL;
+ char *remote_subvol = NULL;
+ rpc_transport_t *peer_trans = NULL;
+ gf_setvolume_rsp rsp = {0,};
+ uint64_t peertrans_int = 0;
+ int ret = 0;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ frame = myframe;
+ this = frame->this;
+ conf = this->private;
+
+ if (-1 == req->rpc_status) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_setvolume_rsp (*iov, &rsp);
+ if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "XDR decoding of setvolume reply failed");
+ op_errno = EINVAL;
+ op_ret = -1;
+ goto out;
+ }
+ op_ret = rsp.op_ret;
+ op_errno = gf_error_to_errno (rsp.op_errno);
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_WARNING,
+ "failed to set the volume");
+ }
+
+ reply = dict_new ();
+ if (!reply)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len, &reply);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "failed to unserialize buffer to dict");
+ goto out;
+ }
+ }
+
+ ret = dict_get_str (reply, "ERROR", &remote_error);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get ERROR string from reply dict");
+ }
+
+ ret = dict_get_str (reply, "process-uuid", &process_uuid);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get 'process-uuid' from reply dict");
+ }
+
+ if (op_ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "SETVOLUME on remote-host failed: %s",
+ remote_error ? remote_error : strerror (op_errno));
+ errno = op_errno;
+ if (op_errno == ESTALE) {
+ parent = this->parents;
+ while (parent) {
+ xlator_notify (parent->xlator,
+ GF_EVENT_VOLFILE_MODIFIED,
+ this);
+ parent = parent->next;
+ }
+ }
+ goto out;
+ }
+ ret = dict_get_str (this->options, "remote-subvolume",
+ &remote_subvol);
+ if (!remote_subvol)
+ goto out;
+
+ if (process_uuid &&
+ !strcmp (this->ctx->process_uuid, process_uuid)) {
+ ret = dict_get_uint64 (reply, "transport-ptr",
+ &peertrans_int);
+
+ peer_trans = (void *) (long) (peertrans_int);
+
+ gf_log (this->name, GF_LOG_WARNING,
+ "attaching to the local volume '%s'",
+ remote_subvol);
+
+ if (req->conn) {
+ /* TODO: Some issues with this logic at present */
+ //rpc_transport_setpeer (req->conn->trans, peer_trans);
+ }
+ }
+
+ gf_log (this->name, GF_LOG_NORMAL,
+ "Connected to %s, attached to remote volume '%s'.",
+ conf->rpc->conn.trans->peerinfo.identifier,
+ remote_subvol);
+
+ rpc_clnt_set_connected (&conf->rpc->conn);
+
+ op_ret = 0;
+ conf->connecting = 0;
+
+ /* TODO: more to test */
+ client_post_handshake (frame, frame->this);
+
+out:
+
+ if (-1 == op_ret) {
+ /* Let the connection/re-connection happen in
+ * background, for now, don't hang here,
+ * tell the parents that i am all ok..
+ */
+ parent = this->parents;
+ while (parent) {
+ xlator_notify (parent->xlator,
+ GF_EVENT_CHILD_CONNECTING, this);
+ parent = parent->next;
+ }
+
+ conf->connecting= 1;
+ }
+
+ if (rsp.dict.dict_val)
+ free (rsp.dict.dict_val);
+
+ STACK_DESTROY (frame->root);
+
+ if (reply)
+ dict_unref (reply);
+
+ return 0;
+}
+
+int
+client_setvolume (xlator_t *this, struct rpc_clnt *rpc)
+{
+ int ret = 0;
+ gf_setvolume_req req = {0,};
+ call_frame_t *fr = NULL;
+ char *process_uuid_xl = NULL;
+ clnt_conf_t *conf = NULL;
+ dict_t *options = NULL;
+
+ options = this->options;
+ conf = this->private;
+
+        if (conf->fops) {
+ ret = dict_set_int32 (options, "fops-version",
+ conf->fops->prognum);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set version-fops(%d) in handshake msg",
+ conf->fops->prognum);
+ goto fail;
+ }
+ }
+
+ if (conf->mgmt) {
+ ret = dict_set_int32 (options, "mgmt-version", conf->mgmt->prognum);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set version-mgmt(%d) in handshake msg",
+ conf->mgmt->prognum);
+ goto fail;
+ }
+ }
+
+ ret = gf_asprintf (&process_uuid_xl, "%s-%s", this->ctx->process_uuid,
+ this->name);
+ if (-1 == ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "asprintf failed while setting process_uuid");
+ goto fail;
+ }
+ ret = dict_set_dynstr (options, "process-uuid", process_uuid_xl);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set process-uuid(%s) in handshake msg",
+ process_uuid_xl);
+ goto fail;
+ }
+
+ if (this->ctx->cmd_args.volfile_server) {
+ if (this->ctx->cmd_args.volfile_id)
+ ret = dict_set_str (options, "volfile-key",
+ this->ctx->cmd_args.volfile_id);
+ ret = dict_set_uint32 (options, "volfile-checksum",
+ this->graph->volfile_checksum);
+ }
+
+ req.dict.dict_len = dict_serialized_length (options);
+ if (req.dict.dict_len < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ ret = -1;
+ goto fail;
+ }
+ req.dict.dict_val = GF_CALLOC (1, req.dict.dict_len, 0);
+ ret = dict_serialize (options, req.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to serialize dictionary");
+ goto fail;
+ }
+
+ fr = create_frame (this, this->ctx->pool);
+ if (!fr)
+ goto fail;
+
+ ret = client_submit_request (this, &req, fr, conf->handshake,
+ GF_HNDSK_SETVOLUME, NULL, xdr_from_setvolume_req);
+
+fail:
+ if (req.dict.dict_val)
+ GF_FREE (req.dict.dict_val);
+
+ return ret;
+}
+
+int
+select_server_supported_programs (xlator_t *this, char *msg)
+{
+ clnt_conf_t *conf = NULL;
+ char *tmp_str = NULL;
+ char *prog_str = NULL;
+ char *dup_str = NULL;
+ char *tmp_str1 = NULL;
+ char *tmp_msg = NULL;
+ char *progname = NULL;
+ char *progver_str = NULL;
+ char *prognum_str = NULL;
+ int ret = -1;
+ int progver = 0;
+ int prognum = 0;
+
+ if (!this || !msg)
+ goto out;
+
+ conf = this->private;
+
+ /* Reply in "Name:Program-Number:Program-Version,..." format */
+ tmp_msg = gf_strdup (msg);
+ prog_str = strtok_r (tmp_msg, ",", &tmp_str);
+ while (prog_str) {
+ dup_str = gf_strdup (prog_str);
+
+ progname = strtok_r (dup_str, ":", &tmp_str1);
+ prognum_str = strtok_r (NULL, ":", &tmp_str1);
+ if (!prognum_str) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Supported versions not formatted");
+ goto out;
+ }
+ sscanf (prognum_str, "%d", &prognum);
+ progver_str = strtok_r (NULL, ":", &tmp_str1);
+ if (!progver_str) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Supported versions not formatted");
+ goto out;
+ }
+ sscanf (progver_str, "%d", &progver);
+
+ /* Select 'programs' */
+ if ((clnt3_1_fop_prog.prognum == prognum) &&
+ (clnt3_1_fop_prog.progver == progver)) {
+ conf->fops = &clnt3_1_fop_prog;
+ gf_log (this->name, GF_LOG_INFO,
+ "Using Program %s, Num (%s), Version (%s)",
+ progname, prognum_str, progver_str);
+ ret = 0;
+ }
+ if ((clnt3_1_mgmt_prog.prognum == prognum) &&
+ (clnt3_1_mgmt_prog.progver == progver)) {
+ conf->mgmt = &clnt3_1_mgmt_prog;
+ gf_log (this->name, GF_LOG_INFO,
+ "Using Program %s, Num (%s), Version (%s)",
+ progname, prognum_str, progver_str);
+ ret = 0;
+ }
+
+ prog_str = strtok_r (NULL, ",", &tmp_str);
+ GF_FREE (dup_str);
+ }
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "none of the server versions are supported by client");
+ }
+ ret = 0;
+out:
+ if (tmp_msg)
+ GF_FREE (tmp_msg);
+ return ret;
+}
+
+int
+client_dump_version_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe)
+{
+ gf_dump_version_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+ int ret = 0;
+
+ frame = myframe;
+ conf = frame->this->private;
+
+ if (-1 == req->rpc_status) {
+                gf_log (frame->this->name, GF_LOG_ERROR,
+                        "failed to get version from server; will retry on reconnect");
+ goto out;
+ }
+
+ ret = xdr_to_dump_version_rsp (*iov, &rsp);
+ if (ret < 0) {
+                gf_log (frame->this->name, GF_LOG_ERROR,
+                        "XDR decoding of version reply failed");
+ goto out;
+ }
+ if (-1 == rsp.op_ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "failed to get the 'versions' from server");
+ goto out;
+ }
+
+ /* Check for the proper version string */
+ /* Reply in "Name:Program-Number:Program-Version,..." format */
+ ret = select_server_supported_programs (frame->this,
+ rsp.msg.msg_val);
+ if (ret) {
+ gf_log (frame->this->name, GF_LOG_ERROR,
+ "Server versions are not present in this "
+ "release (%s)", rsp.msg.msg_val);
+ goto out;
+ }
+
+ client_setvolume (frame->this, conf->rpc);
+
+out:
+ /* don't use GF_FREE, buffer was allocated by libc */
+ if (rsp.msg.msg_val) {
+ free (rsp.msg.msg_val);
+ }
+
+ STACK_DESTROY (frame->root);
+ return ret;
+}
+
+int
+client_handshake (xlator_t *this, struct rpc_clnt *rpc)
+{
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+ gf_dump_version_req req = {0,};
+ int ret = 0;
+
+ conf = this->private;
+ if (!conf->handshake)
+ goto out;
+
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame)
+ goto out;
+
+ req.key = "fop-handshake";
+ req.gfs_id = 123456;
+ ret = client_submit_request (this, &req, frame, conf->handshake,
+ GF_HNDSK_DUMP_VERSION,
+ NULL, xdr_from_dump_version_req);
+
+out:
+ return ret;
+}
+
+
+/* */
+/* This table should ideally remain same irrespective of versions */
+static rpc_clnt_procedure_t clnt_handshake_actors[] = {
+ [GF_HNDSK_NULL] = { "NULL", NULL, NULL},
+ [GF_HNDSK_DUMP_VERSION] = { "VERSION", NULL, client_dump_version_cbk},
+ [GF_HNDSK_SETVOLUME] = { "SETVOLUME", NULL, client_setvolume_cbk},
+ [GF_HNDSK_GETSPEC] = { "GETSPEC", NULL, client_getspec_cbk },
+ [GF_HNDSK_PING] = { "PING", NULL, client_ping_cbk },
+};
+
+rpc_clnt_prog_t clnt_handshake_prog = {
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .actor = clnt_handshake_actors,
+        .numproc   = (sizeof (clnt_handshake_actors) /
+                      sizeof (rpc_clnt_procedure_t)),
+};
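
A worked example of the version negotiation handled by client_dump_version_cbk() and select_server_supported_programs() above: the server's DUMP_VERSION reply is a comma-separated list in "Name:Program-Number:Program-Version" form, which the client tokenizes and compares against its compiled-in clnt3_1_fop_prog and clnt3_1_mgmt_prog. The stand-alone sketch below uses made-up program names and numbers purely to illustrate the parsing; it relies only on the same strtok_r()/sscanf() calls the callback uses and is not part of the patch.

    /* Stand-alone sketch; the program names and numbers are placeholders. */
    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
            char  msg[]   = "GlusterFS-FOPS:123451:310,GlusterFS-MGMT:123452:310";
            char *saveptr = NULL;
            char *prog    = strtok_r (msg, ",", &saveptr);

            while (prog) {
                    char *inner  = NULL;
                    char *name   = strtok_r (prog, ":", &inner);
                    char *numstr = strtok_r (NULL, ":", &inner);
                    char *verstr = strtok_r (NULL, ":", &inner);
                    int   num    = 0, ver = 0;

                    if (!name || !numstr || !verstr)
                            break;        /* reply not formatted as expected */

                    sscanf (numstr, "%d", &num);
                    sscanf (verstr, "%d", &ver);
                    printf ("program %s, num %d, version %d\n", name, num, ver);

                    prog = strtok_r (NULL, ",", &saveptr);
            }
            return 0;
    }
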
diff --git a/xlators/protocol/client/src/client-helpers.c b/xlators/protocol/client/src/client-helpers.c
new file mode 100644
index 00000000000..ae091ed1d8d
--- /dev/null
+++ b/xlators/protocol/client/src/client-helpers.c
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "client.h"
+#include "fd.h"
+
+
+clnt_fd_ctx_t *
+this_fd_del_ctx (fd_t *file, xlator_t *this)
+{
+ int dict_ret = -1;
+ uint64_t ctxaddr = 0;
+
+ GF_VALIDATE_OR_GOTO ("client", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, file, out);
+
+ dict_ret = fd_ctx_del (file, this, &ctxaddr);
+
+ if (dict_ret < 0) {
+ ctxaddr = 0;
+ }
+
+out:
+ return (clnt_fd_ctx_t *)(unsigned long)ctxaddr;
+}
+
+
+clnt_fd_ctx_t *
+this_fd_get_ctx (fd_t *file, xlator_t *this)
+{
+ int dict_ret = -1;
+ uint64_t ctxaddr = 0;
+
+ GF_VALIDATE_OR_GOTO ("client", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, file, out);
+
+ dict_ret = fd_ctx_get (file, this, &ctxaddr);
+
+ if (dict_ret < 0) {
+ ctxaddr = 0;
+ }
+
+out:
+ return (clnt_fd_ctx_t *)(unsigned long)ctxaddr;
+}
+
+
+void
+this_fd_set_ctx (fd_t *file, xlator_t *this, loc_t *loc, clnt_fd_ctx_t *ctx)
+{
+ uint64_t oldaddr = 0;
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("client", this, out);
+ GF_VALIDATE_OR_GOTO (this->name, file, out);
+
+ ret = fd_ctx_get (file, this, &oldaddr);
+ if (ret >= 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%s (%"PRId64"): trying duplicate remote fd set. ",
+ loc->path, loc->inode->ino);
+ }
+
+ ret = fd_ctx_set (file, this, (uint64_t)(unsigned long)ctx);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%s (%"PRId64"): failed to set remote fd",
+ loc->path, loc->inode->ino);
+ }
+out:
+ return;
+}
+
+
+int
+client_local_wipe (clnt_local_t *local)
+{
+ if (local) {
+ loc_wipe (&local->loc);
+
+ if (local->fd)
+ fd_unref (local->fd);
+
+ GF_FREE (local);
+ }
+
+ return 0;
+}
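
A brief usage sketch of the helpers above, assuming the caller has already allocated and filled a clnt_fd_ctx_t (as the open/opendir callbacks elsewhere in this patch do): the ctx pointer travels through the fd's context slot as a uint64_t, and the caller takes ownership back on delete. example_track_fd() is a hypothetical helper for illustration, not part of the patch.

    /* Illustration only: round-tripping a clnt_fd_ctx_t through the fd ctx. */
    static void
    example_track_fd (xlator_t *this, fd_t *fd, loc_t *loc, clnt_fd_ctx_t *fdctx)
    {
            clnt_fd_ctx_t *found = NULL;

            this_fd_set_ctx (fd, this, loc, fdctx);   /* attach to the fd */

            found = this_fd_get_ctx (fd, this);       /* look up; fd keeps it */
            if (found != fdctx)
                    gf_log (this->name, GF_LOG_DEBUG, "unexpected fd context");

            found = this_fd_del_ctx (fd, this);       /* detach; caller owns it */
            if (found)
                    GF_FREE (found);
    }
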
diff --git a/xlators/protocol/client/src/client-mem-types.h b/xlators/protocol/client/src/client-mem-types.h
new file mode 100644
index 00000000000..5fadfafc9e1
--- /dev/null
+++ b/xlators/protocol/client/src/client-mem-types.h
@@ -0,0 +1,32 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef __CLIENT_MEM_TYPES_H__
+#define __CLIENT_MEM_TYPES_H__
+
+#include "mem-types.h"
+
+enum gf_client_mem_types_ {
+ gf_client_mt_clnt_conf_t = gf_common_mt_end + 1,
+ gf_client_mt_clnt_local_t,
+ gf_client_mt_clnt_fdctx_t,
+ gf_client_mt_end,
+};
+#endif /* __CLIENT_MEM_TYPES_H__ */
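
The enum above gives this translator its own memory-accounting types, starting just past gf_common_mt_end so they do not collide with the common ones. A hedged sketch of how such a type is passed as the third argument of GF_CALLOC (the same three-argument form used elsewhere in this patch); the surrounding error handling is illustrative only:

    /* Sketch only: per-type accounted allocation of the client's private data. */
    clnt_conf_t *conf = GF_CALLOC (1, sizeof (*conf), gf_client_mt_clnt_conf_t);
    if (!conf)
            return -1;
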
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
new file mode 100644
index 00000000000..49e88bfdea3
--- /dev/null
+++ b/xlators/protocol/client/src/client.c
@@ -0,0 +1,1791 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "client.h"
+#include "xlator.h"
+#include "defaults.h"
+#include "glusterfs.h"
+#include "msg-xdr.h"
+#include "statedump.h"
+#include "compat-errno.h"
+
+extern rpc_clnt_prog_t clnt_handshake_prog;
+int
+client_handshake (xlator_t *this, struct rpc_clnt *rpc);
+
+void
+client_start_ping (void *data);
+
+int
+client_submit_request (xlator_t *this, void *req,
+ call_frame_t *frame, rpc_clnt_prog_t *prog, int procnum,
+ struct iobref *iobref, gfs_serialize_t sfunc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ int count = 0;
+ char new_iobref = 0, start_ping = 0;
+
+ conf = this->private;
+
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ goto out;
+ };
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = 128 * GF_UNIT_KB;
+
+ /* Create the xdr payload */
+ if (req && sfunc) {
+ ret = sfunc (iov, req);
+ if (ret == -1) {
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+ /* Send the msg */
+ ret = rpc_clnt_submit (conf->rpc, prog, procnum, &iov, count, NULL, 0,
+ iobref, frame);
+
+ if (ret == 0) {
+ pthread_mutex_lock (&conf->rpc->conn.lock);
+ {
+ if (!conf->rpc->conn.ping_started) {
+ start_ping = 1;
+ }
+ }
+ pthread_mutex_unlock (&conf->rpc->conn.lock);
+ }
+
+ if (start_ping)
+ client_start_ping ((void *) this);
+
+ ret = 0;
+out:
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ iobuf_unref (iobuf);
+
+ return ret;
+}
+
+
+int32_t
+client_forget (xlator_t *this, inode_t *inode)
+{
+ /* Nothing here */
+ return 0;
+}
+
+int32_t
+client_releasedir (xlator_t *this, fd_t *fd)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+ call_frame_t *frame = NULL;
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+
+ proc = &conf->fops->actor[GF_FOP_RELEASEDIR];
+ if (proc->fn) {
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+ ret = proc->fn (frame, this, conf->fops, &args);
+ }
+out:
+ return 0;
+}
+
+int32_t
+client_release (xlator_t *this, fd_t *fd)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+ call_frame_t *frame = NULL;
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ proc = &conf->fops->actor[GF_FOP_RELEASE];
+ if (proc->fn) {
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+ ret = proc->fn (frame, this, conf->fops, &args);
+ }
+out:
+ return 0;
+}
+
+
+int32_t
+client_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xattr_req)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.dict = xattr_req;
+
+ proc = &conf->fops->actor[GF_FOP_LOOKUP];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ /* think of avoiding a missing frame */
+ if (ret)
+ STACK_UNWIND_STRICT (lookup, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+int32_t
+client_stat (call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_STAT];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (stat, frame, -1, ENOTCONN, NULL);
+
+
+ return 0;
+}
+
+
+int32_t
+client_truncate (call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.offset = offset;
+
+ proc = &conf->fops->actor[GF_FOP_TRUNCATE];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (truncate, frame, -1, ENOTCONN, NULL, NULL);
+
+
+ return 0;
+}
+
+
+int32_t
+client_ftruncate (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.offset = offset;
+
+ proc = &conf->fops->actor[GF_FOP_FTRUNCATE];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (ftruncate, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_access (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.mask = mask;
+
+ proc = &conf->fops->actor[GF_FOP_ACCESS];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (access, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+
+int32_t
+client_readlink (call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.size = size;
+
+ proc = &conf->fops->actor[GF_FOP_READLINK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (readlink, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_mknod (call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t rdev)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.mode = mode;
+ args.rdev = rdev;
+
+ proc = &conf->fops->actor[GF_FOP_MKNOD];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (mknod, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_mkdir (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ mode_t mode)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.mode = mode;
+
+ proc = &conf->fops->actor[GF_FOP_MKDIR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (mkdir, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_unlink (call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_UNLINK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (unlink, frame, -1, ENOTCONN,
+ NULL, NULL);
+
+ return 0;
+}
+
+int32_t
+client_rmdir (call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_RMDIR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ /* think of avoiding a missing frame */
+ if (ret)
+ STACK_UNWIND_STRICT (rmdir, frame, -1, ENOTCONN,
+ NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_symlink (call_frame_t *frame, xlator_t *this, const char *linkpath,
+ loc_t *loc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.linkname = linkpath;
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_SYMLINK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (symlink, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_rename (call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.oldloc = oldloc;
+ args.newloc = newloc;
+ proc = &conf->fops->actor[GF_FOP_RENAME];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (rename, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_link (call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.oldloc = oldloc;
+ args.newloc = newloc;
+
+ proc = &conf->fops->actor[GF_FOP_LINK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (link, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_create (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, mode_t mode, fd_t *fd)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.flags = flags;
+ args.mode = mode;
+ args.fd = fd;
+
+ proc = &conf->fops->actor[GF_FOP_CREATE];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (create, frame, -1, ENOTCONN,
+ NULL, NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_open (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, fd_t *fd, int32_t wbflags)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.flags = flags;
+ args.fd = fd;
+ args.wbflags = wbflags;
+
+ proc = &conf->fops->actor[GF_FOP_OPEN];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (open, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_readv (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.size = size;
+ args.offset = offset;
+
+ proc = &conf->fops->actor[GF_FOP_READ];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (readv, frame, -1, ENOTCONN,
+ NULL, 0, NULL, NULL);
+
+ return 0;
+}
+
+
+
+
+int32_t
+client_writev (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t off,
+ struct iobref *iobref)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.vector = vector;
+ args.count = count;
+ args.offset = off;
+ args.iobref = iobref;
+
+ proc = &conf->fops->actor[GF_FOP_WRITE];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (writev, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_flush (call_frame_t *frame, xlator_t *this, fd_t *fd)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+
+ proc = &conf->fops->actor[GF_FOP_FLUSH];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (flush, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+int32_t
+client_fsync (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int32_t flags)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.flags = flags;
+
+ proc = &conf->fops->actor[GF_FOP_FSYNC];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fsync, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_fstat (call_frame_t *frame, xlator_t *this, fd_t *fd)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+
+ proc = &conf->fops->actor[GF_FOP_FSTAT];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fstat, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_opendir (call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.fd = fd;
+
+ proc = &conf->fops->actor[GF_FOP_OPENDIR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (opendir, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_fsyncdir (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.flags = flags;
+
+ proc = &conf->fops->actor[GF_FOP_FSYNCDIR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fsyncdir, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+int32_t
+client_statfs (call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_STATFS];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (statfs, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_setxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.dict = dict;
+ args.flags = flags;
+
+ proc = &conf->fops->actor[GF_FOP_SETXATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (setxattr, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+int32_t
+client_fsetxattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ dict_t *dict, int32_t flags)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.dict = dict;
+ args.flags = flags;
+
+ proc = &conf->fops->actor[GF_FOP_FSETXATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fsetxattr, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+
+int32_t
+client_fgetxattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.name = name;
+
+ proc = &conf->fops->actor[GF_FOP_FGETXATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fgetxattr, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.name = name;
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_GETXATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (getxattr, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_xattrop (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t flags, dict_t *dict)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.flags = flags;
+ args.dict = dict;
+
+ proc = &conf->fops->actor[GF_FOP_XATTROP];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (xattrop, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_fxattrop (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t flags, dict_t *dict)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.flags = flags;
+ args.dict = dict;
+
+ proc = &conf->fops->actor[GF_FOP_FXATTROP];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fxattrop, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_removexattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.name = name;
+ args.loc = loc;
+
+ proc = &conf->fops->actor[GF_FOP_REMOVEXATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (removexattr, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+int32_t
+client_lk (call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct flock *lock)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.cmd = cmd;
+ args.flock = lock;
+
+ proc = &conf->fops->actor[GF_FOP_LK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (lk, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+int32_t
+client_inodelk (call_frame_t *frame, xlator_t *this, const char *volume,
+ loc_t *loc, int32_t cmd, struct flock *lock)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.cmd = cmd;
+ args.flock = lock;
+ args.volume = volume;
+
+ proc = &conf->fops->actor[GF_FOP_INODELK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (inodelk, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+int32_t
+client_finodelk (call_frame_t *frame, xlator_t *this, const char *volume,
+ fd_t *fd, int32_t cmd, struct flock *lock)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.cmd = cmd;
+ args.flock = lock;
+ args.volume = volume;
+
+ proc = &conf->fops->actor[GF_FOP_FINODELK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (finodelk, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+int32_t
+client_entrylk (call_frame_t *frame, xlator_t *this, const char *volume,
+ loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.basename = basename;
+ args.type = type;
+ args.volume = volume;
+ args.cmd_entrylk = cmd;
+
+ proc = &conf->fops->actor[GF_FOP_ENTRYLK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (entrylk, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+
+int32_t
+client_fentrylk (call_frame_t *frame, xlator_t *this, const char *volume,
+ fd_t *fd, const char *basename, entrylk_cmd cmd,
+ entrylk_type type)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.basename = basename;
+ args.type = type;
+ args.volume = volume;
+ args.cmd_entrylk = cmd;
+
+ proc = &conf->fops->actor[GF_FOP_FENTRYLK];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fentrylk, frame, -1, ENOTCONN);
+
+ return 0;
+}
+
+
+int32_t
+client_checksum (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flag)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.flags = flag;
+
+ proc = &conf->fops->actor[GF_FOP_CHECKSUM];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (checksum, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+
+
+int32_t
+client_rchecksum (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ int32_t len)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.offset = offset;
+ args.len = len;
+
+ proc = &conf->fops->actor[GF_FOP_RCHECKSUM];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (rchecksum, frame, -1, ENOTCONN, 0, NULL);
+
+ return 0;
+}
+
+int32_t
+client_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t off)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.size = size;
+ args.offset = off;
+
+ proc = &conf->fops->actor[GF_FOP_READDIR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (readdir, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+int32_t
+client_readdirp (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t off)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.size = size;
+ args.offset = off;
+
+ proc = &conf->fops->actor[GF_FOP_READDIRP];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (readdirp, frame, -1, ENOTCONN, NULL);
+
+ return 0;
+}
+
+
+int32_t
+client_setattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.loc = loc;
+ args.stbuf = stbuf;
+ args.valid = valid;
+
+ proc = &conf->fops->actor[GF_FOP_SETATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (setattr, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+int32_t
+client_fsetattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iatt *stbuf, int32_t valid)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ clnt_args_t args = {0,};
+
+ conf = this->private;
+ if (!conf->fops)
+ goto out;
+
+ args.fd = fd;
+ args.stbuf = stbuf;
+ args.valid = valid;
+
+ proc = &conf->fops->actor[GF_FOP_FSETATTR];
+ if (proc->fn)
+ ret = proc->fn (frame, this, conf->fops, &args);
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (fsetattr, frame, -1, ENOTCONN, NULL, NULL);
+
+ return 0;
+}
+
+/////////////////
+int32_t
+client_getspec (call_frame_t *frame, xlator_t *this, const char *key,
+ int32_t flags)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+ gf_getspec_req req = {0,};
+
+ conf = this->private;
+ if (!conf->handshake)
+ goto out;
+
+ req.key = (char *)key;
+ req.flags = flags;
+
+ client_submit_request (this, &req, frame, conf->handshake,
+ GF_HNDSK_GETSPEC, NULL, xdr_from_getspec_req);
+ ret = 0;
+out:
+ if (ret)
+ STACK_UNWIND_STRICT (getspec, frame, -1, EINVAL, NULL);
+
+ return 0;
+}
+
+
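+/* Mark every saved fd context as bad (remote_fd = -1) so that fd-based
+ * fops fail with EBADFD until the fds are reopened after reconnect. */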
+int
+client_mark_fd_bad (xlator_t *this)
+{
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *tmp = NULL, *fdctx = NULL;
+
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ list_for_each_entry_safe (fdctx, tmp, &conf->saved_fds,
+ sfd_pos) {
+ fdctx->remote_fd = -1;
+ }
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ return 0;
+}
+
+
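+/* RPC connection event handler: on CONNECT, start the protocol handshake
+ * unless the 'disable-handshake' option is set to "on", in which case
+ * CHILD_UP is propagated immediately; on DISCONNECT, mark all saved fds
+ * bad and propagate CHILD_DOWN. */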
+int
+client_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ char *handshake = NULL;
+ clnt_conf_t *conf = NULL;
+ int ret = 0;
+
+ this = mydata;
+ conf = this->private;
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ {
+ // connect happened, initiate the handshake (unless disabled)
+ ret = dict_get_str (this->options, "disable-handshake",
+ &handshake);
+
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
+
+ if ((ret < 0) || (strcasecmp (handshake, "on"))) {
+ ret = client_handshake (this, conf->rpc);
+
+ } else {
+ //conf->rpc->connected = 1;
+ ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
+ }
+ break;
+ }
+ case RPC_CLNT_DISCONNECT:
+
+ client_mark_fd_bad (this);
+
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
+
+ default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+
+ break;
+ }
+
+ return 0;
+}
+
+
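+/* Translator event handler: GF_EVENT_PARENT_UP triggers the first connect
+ * attempt on the underlying transport; everything else is passed on via
+ * default_notify (). */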
+int
+notify (xlator_t *this, int32_t event, void *data, ...)
+{
+ clnt_conf_t *conf = NULL;
+ void *trans = NULL;
+
+ conf = this->private;
+
+ switch (event) {
+ case GF_EVENT_PARENT_UP:
+ {
+ if (conf->rpc)
+ trans = conf->rpc->conn.trans;
+
+ if (!trans) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "transport init failed");
+ return 0;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "got GF_EVENT_PARENT_UP, attempting connect "
+ "on transport");
+
+ rpc_clnt_reconnect (trans);
+ }
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "got %d, calling default_notify ()", event);
+
+ default_notify (this, event, data);
+ break;
+ }
+
+ return 0;
+}
+
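+/* Fill clnt_conf_t from the volume options. 'remote-subvolume' is
+ * mandatory; frame-timeout defaults to 1800 seconds, remote-port to
+ * GF_PROTOCOL_DEFAULT_PORT and ping-timeout to 42 seconds. */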
+int
+build_client_config (xlator_t *this, clnt_conf_t *conf)
+{
+ int ret = 0;
+
+ ret = dict_get_str (this->options, "remote-subvolume",
+ &conf->opt.remote_subvolume);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "option 'remote-subvolume' not given");
+ goto out;
+ }
+
+ ret = dict_get_int32 (this->options, "frame-timeout",
+ &conf->rpc_conf.rpc_timeout);
+ if (ret >= 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "setting frame-timeout to %d",
+ conf->rpc_conf.rpc_timeout);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "defaulting frame-timeout to 30mins");
+ conf->rpc_conf.rpc_timeout = 1800;
+ }
+
+ ret = dict_get_int32 (this->options, "remote-port",
+ &conf->rpc_conf.remote_port);
+ if (ret >= 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "remote-port is %d", conf->rpc_conf.remote_port);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "defaulting remote-port to %d",
+ GF_PROTOCOL_DEFAULT_PORT);
+ conf->rpc_conf.remote_port = GF_PROTOCOL_DEFAULT_PORT;
+ }
+
+ ret = dict_get_int32 (this->options, "ping-timeout",
+ &conf->opt.ping_timeout);
+ if (ret >= 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "setting ping-timeout to %d", conf->opt.ping_timeout);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "defaulting ping-timeout to 42");
+ conf->opt.ping_timeout = GF_UNIVERSAL_ANSWER;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+static int32_t
+mem_acct_init (xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init (this, gf_client_mt_end + 1);
+
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Memory accounting init "
+ "failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int
+init (xlator_t *this)
+{
+ int ret = -1;
+ clnt_conf_t *conf = NULL;
+
+ if (this->children) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "FATAL: client protocol translator cannot have any "
+ "subvolumes");
+ goto out;
+ }
+
+ if (!this->parents) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Volume is dangling. ");
+ }
+
+ ret = mem_acct_init (this);
+ if (ret)
+ goto out;
+
+ conf = GF_CALLOC (1, sizeof (*conf), gf_client_mt_clnt_conf_t);
+ if (!conf)
+ goto out;
+
+ pthread_mutex_init (&conf->lock, NULL);
+ INIT_LIST_HEAD (&conf->saved_fds);
+
+ ret = build_client_config (this, conf);
+ if (ret)
+ goto out;
+
+ conf->rpc = rpc_clnt_init (&conf->rpc_conf, this->options, this->ctx,
+ this->name);
+ if (!conf->rpc)
+ goto out;
+ conf->rpc->xid = 42; /* It should be random enough every time :O */
+ ret = rpc_clnt_register_notify (conf->rpc, client_rpc_notify, this);
+ if (ret)
+ goto out;
+
+ conf->handshake = &clnt_handshake_prog;
+ this->private = conf;
+
+ ret = 0;
+out:
+ if (ret)
+ this->fini (this);
+
+ return ret;
+}
+
+void
+fini (xlator_t *this)
+{
+ clnt_conf_t *conf = NULL;
+
+ conf = this->private;
+ this->private = NULL;
+
+ if (conf) {
+ if (conf->rpc)
+ rpc_clnt_destroy (conf->rpc);
+
+ /* Saved Fds */
+ /* TODO: */
+
+ pthread_mutex_destroy (&conf->lock);
+
+ GF_FREE (conf);
+ }
+ return;
+}
+
+int
+client_priv_dump (xlator_t *this)
+{
+ clnt_conf_t *conf = NULL;
+ int ret = -1;
+ clnt_fd_ctx_t *tmp = NULL;
+ int i = 0;
+ char key[GF_DUMP_MAX_BUF_LEN];
+ char key_prefix[GF_DUMP_MAX_BUF_LEN];
+
+ if (!this)
+ return -1;
+
+ conf = this->private;
+ if (!conf) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "conf null in xlator");
+ return -1;
+ }
+
+ ret = pthread_mutex_trylock(&conf->lock);
+ if (ret) {
+ gf_log("", GF_LOG_WARNING, "Unable to lock client %s"
+ " errno: %d", this->name, errno);
+ return -1;
+ }
+
+ gf_proc_dump_build_key(key_prefix, "xlator.protocol.client",
+ "%s.priv", this->name);
+
+ gf_proc_dump_add_section(key_prefix);
+
+ list_for_each_entry(tmp, &conf->saved_fds, sfd_pos) {
+ gf_proc_dump_build_key(key, key_prefix,
+ "fd.%d.remote_fd", ++i);
+ gf_proc_dump_write(key, "%d", tmp->remote_fd);
+ }
+
+ gf_proc_dump_build_key(key, key_prefix, "connecting");
+ gf_proc_dump_write(key, "%d", conf->connecting);
+ gf_proc_dump_build_key(key, key_prefix, "last_sent");
+ gf_proc_dump_write(key, "%s", ctime(&conf->last_sent.tv_sec));
+ gf_proc_dump_build_key(key, key_prefix, "last_received");
+ gf_proc_dump_write(key, "%s", ctime(&conf->last_received.tv_sec));
+
+ pthread_mutex_unlock(&conf->lock);
+
+ return 0;
+
+}
+
+int32_t
+client_inodectx_dump (xlator_t *this, inode_t *inode)
+{
+ uint64_t par = 0;
+ uint64_t gen = 0;
+ int ret = -1;
+ char key[GF_DUMP_MAX_BUF_LEN];
+
+ if (!inode)
+ return -1;
+
+ if (!this)
+ return -1;
+
+ ret = inode_ctx_get2 (inode, this, &par, &gen);
+
+ if (ret != 0)
+ return ret;
+
+ gf_proc_dump_build_key(key, "xlator.protocol.client",
+ "%s.inode.%ld.par",
+ this->name,inode->ino);
+ gf_proc_dump_write(key, "%ld, %ld", par, gen);
+
+ return 0;
+}
+
+
+
+
+struct xlator_cbks cbks = {
+ .forget = client_forget,
+ .release = client_release,
+ .releasedir = client_releasedir
+};
+
+struct xlator_fops fops = {
+ .stat = client_stat,
+ .readlink = client_readlink,
+ .mknod = client_mknod,
+ .mkdir = client_mkdir,
+ .unlink = client_unlink,
+ .rmdir = client_rmdir,
+ .symlink = client_symlink,
+ .rename = client_rename,
+ .link = client_link,
+ .truncate = client_truncate,
+ .open = client_open,
+ .readv = client_readv,
+ .writev = client_writev,
+ .statfs = client_statfs,
+ .flush = client_flush,
+ .fsync = client_fsync,
+ .setxattr = client_setxattr,
+ .getxattr = client_getxattr,
+ .fsetxattr = client_fsetxattr,
+ .fgetxattr = client_fgetxattr,
+ .removexattr = client_removexattr,
+ .opendir = client_opendir,
+ .readdir = client_readdir,
+ .readdirp = client_readdirp,
+ .fsyncdir = client_fsyncdir,
+ .access = client_access,
+ .ftruncate = client_ftruncate,
+ .fstat = client_fstat,
+ .create = client_create,
+ .lk = client_lk,
+ .inodelk = client_inodelk,
+ .finodelk = client_finodelk,
+ .entrylk = client_entrylk,
+ .fentrylk = client_fentrylk,
+ .lookup = client_lookup,
+ .checksum = client_checksum,
+ .rchecksum = client_rchecksum,
+ .xattrop = client_xattrop,
+ .fxattrop = client_fxattrop,
+ .setattr = client_setattr,
+ .fsetattr = client_fsetattr,
+ .getspec = client_getspec,
+};
+
+
+struct xlator_dumpops dumpops = {
+ .priv = client_priv_dump,
+ .inodectx = client_inodectx_dump,
+};
+
+
+struct volume_options options[] = {
+ { .key = {"username"},
+ .type = GF_OPTION_TYPE_ANY
+ },
+ { .key = {"password"},
+ .type = GF_OPTION_TYPE_ANY
+ },
+ { .key = {"transport-type"},
+ .value = {"tcp", "socket", "ib-verbs", "unix", "ib-sdp",
+ "tcp/client", "ib-verbs/client"},
+ .type = GF_OPTION_TYPE_STR
+ },
+ { .key = {"remote-host"},
+ .type = GF_OPTION_TYPE_INTERNET_ADDRESS
+ },
+ { .key = {"remote-subvolume"},
+ .type = GF_OPTION_TYPE_ANY
+ },
+ { .key = {"frame-timeout",
+ "rpc-timeout" },
+ .type = GF_OPTION_TYPE_TIME,
+ .min = 0,
+ .max = 86400,
+ },
+ { .key = {"ping-timeout"},
+ .type = GF_OPTION_TYPE_TIME,
+ .min = 1,
+ .max = 1013,
+ },
+ { .key = {NULL} },
+};
diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h
new file mode 100644
index 00000000000..9d713bed204
--- /dev/null
+++ b/xlators/protocol/client/src/client.h
@@ -0,0 +1,125 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CLIENT_H
+#define _CLIENT_H
+
+#include <pthread.h>
+#include <stdint.h>
+
+#include "rpc-clnt.h"
+#include "list.h"
+#include "inode.h"
+#include "client-mem-types.h"
+#include "protocol-common.h"
+
+struct clnt_options {
+ char *remote_subvolume;
+ int ping_timeout;
+};
+
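+/* Per-connection private state of the client translator: the rpc-clnt
+ * handle, the list of saved fd contexts (protected by 'lock') and the
+ * fops/mgmt/handshake program tables. */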
+typedef struct clnt_conf {
+ struct rpc_clnt *rpc;
+ struct clnt_options opt;
+ struct rpc_clnt_config rpc_conf;
+ struct list_head saved_fds;
+ pthread_mutex_t lock;
+ int connecting;
+ struct timeval last_sent;
+ struct timeval last_received;
+
+ rpc_clnt_prog_t *fops;
+ rpc_clnt_prog_t *mgmt;
+ rpc_clnt_prog_t *handshake;
+} clnt_conf_t;
+
+typedef struct _client_fd_ctx {
+ struct list_head sfd_pos; /* Stores the reference to this
+ fd's position in the saved_fds list.
+ */
+ int64_t remote_fd;
+ inode_t *inode;
+ uint64_t ino;
+ uint64_t gen;
+ char is_dir;
+ char released;
+ int32_t flags;
+ int32_t wbflags;
+} clnt_fd_ctx_t;
+
+typedef struct client_local {
+ loc_t loc;
+ loc_t loc2;
+ fd_t *fd;
+ clnt_fd_ctx_t *fdctx;
+ uint32_t flags;
+ uint32_t wbflags;
+ fop_cbk_fn_t op;
+} clnt_local_t;
+
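+/* Container for every possible fop argument; filled by the fop entry
+ * points in client.c and consumed by the version-specific actors. */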
+typedef struct client_args {
+ loc_t *loc;
+ fd_t *fd;
+ dict_t *xattr_req;
+ const char *linkname;
+ struct iobref *iobref;
+ struct iovec *vector;
+ dict_t *xattr;
+ struct iatt *stbuf;
+ dict_t *dict;
+ loc_t *oldloc;
+ loc_t *newloc;
+ const char *name;
+ struct flock *flock;
+ const char *volume;
+ const char *basename;
+ off_t offset;
+ int32_t mask;
+ int32_t cmd;
+ size_t size;
+ mode_t mode;
+ dev_t rdev;
+ int32_t flags;
+ int32_t wbflags;
+ int32_t count;
+ int32_t datasync;
+ entrylk_cmd cmd_entrylk;
+ entrylk_type type;
+ gf_xattrop_flags_t optype;
+ int32_t valid;
+ int32_t len;
+} clnt_args_t;
+
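+/* Serializer callback: encodes 'args' into the buffer described by
+ * 'outmsg' and returns the encoded length, or -1 on error. */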
+typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *args);
+
+clnt_fd_ctx_t *this_fd_get_ctx (fd_t *file, xlator_t *this);
+clnt_fd_ctx_t *this_fd_del_ctx (fd_t *file, xlator_t *this);
+void this_fd_set_ctx (fd_t *file, xlator_t *this, loc_t *loc,
+ clnt_fd_ctx_t *ctx);
+
+int client_local_wipe (clnt_local_t *local);
+int client_submit_request (xlator_t *this, void *req,
+ call_frame_t *frame, rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ gfs_serialize_t sfunc);
+
+int protocol_client_reopendir (xlator_t *this, clnt_fd_ctx_t *fdctx);
+int protocol_client_reopen (xlator_t *this, clnt_fd_ctx_t *fdctx);
+
+#endif /* !_CLIENT_H */
diff --git a/xlators/protocol/client/src/client3_1-fops.c b/xlators/protocol/client/src/client3_1-fops.c
new file mode 100644
index 00000000000..01b78436c6c
--- /dev/null
+++ b/xlators/protocol/client/src/client3_1-fops.c
@@ -0,0 +1,4662 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "client.h"
+#include "glusterfs-xdr.h"
+#include "msg-xdr.h"
+#include "compat-errno.h"
+
+void client_start_ping (void *data);
+rpc_clnt_prog_t clnt3_1_fop_prog;
+
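+/* Serialize the request header into a fresh iobuf and submit it together
+ * with the caller-supplied payload vectors (used by writev); the ping
+ * timer is started after the first successful submission. */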
+int
+client_submit_vec_request (xlator_t *this, void *req,
+ call_frame_t *frame, rpc_clnt_prog_t *prog, int procnum,
+ struct iovec *payload, int payloadcnt,
+ struct iobref *iobref, gfs_serialize_t sfunc)
+{
+ int ret = 0;
+ clnt_conf_t *conf = NULL;
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ int count = 0;
+ char new_iobref = 0;
+ int start_ping = 0;
+
+ start_ping = 0;
+
+ conf = this->private;
+
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ goto out;
+ }
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = 128 * GF_UNIT_KB;
+
+ /* Create the xdr payload */
+ if (req && sfunc) {
+ ret = sfunc (iov, req);
+ if (ret == -1) {
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+ /* Send the msg */
+ ret = rpc_clnt_submit (conf->rpc, prog, procnum, &iov, count,
+ payload, payloadcnt, iobref, frame);
+
+ if (ret == 0) {
+ pthread_mutex_lock (&conf->rpc->conn.lock);
+ {
+ if (!conf->rpc->conn.ping_started) {
+ start_ping = 1;
+ }
+ }
+ pthread_mutex_unlock (&conf->rpc->conn.lock);
+ }
+
+ if (start_ping)
+ client_start_ping ((void *) this);
+
+out:
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ if (iobuf)
+ iobuf_unref (iobuf);
+
+ return 0;
+}
+
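+/* Destroy an fd context; if a remote fd is still associated with it,
+ * inform the server first with a RELEASE or RELEASEDIR request. */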
+int
+client_fdctx_destroy (xlator_t *this, clnt_fd_ctx_t *fdctx)
+{
+ call_frame_t *fr = NULL;
+ int32_t ret = -1;
+
+ if (!fdctx)
+ goto out;
+
+ if (fdctx->remote_fd == -1)
+ goto out;
+
+ fr = create_frame (this, this->ctx->pool);
+
+ if (fdctx->is_dir) {
+ gfs3_releasedir_req req = {0,};
+ req.fd = fdctx->remote_fd;
+ client_submit_request (this, &req, fr, &clnt3_1_fop_prog,
+ GFS3_OP_RELEASEDIR, NULL,
+ xdr_from_releasedir_req);
+ } else {
+ gfs3_release_req req = {0,};
+ req.fd = fdctx->remote_fd;
+ client_submit_request (this, &req, fr, &clnt3_1_fop_prog,
+ GFS3_OP_RELEASE, NULL,
+ xdr_from_release_req);
+ }
+
+out:
+ if (fdctx) {
+ fdctx->remote_fd = -1;
+ inode_unref (fdctx->inode);
+ GF_FREE (fdctx);
+ }
+
+ return ret;
+}
+
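+/* Callback for the re-open issued after a reconnect: on success the new
+ * remote fd is recorded and the context is put back on saved_fds, unless
+ * the fd was released meanwhile, in which case the context is destroyed. */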
+int
+client3_1_reopen_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ int32_t ret = -1;
+ gfs3_open_rsp rsp = {0,};
+ clnt_local_t *local = NULL;
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ call_frame_t *frame = NULL;
+
+ frame = myframe;
+
+ local = frame->local;
+ conf = frame->this->private;
+ fdctx = local->fdctx;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_open_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "reopen on %s returned %d (%"PRId64")",
+ local->loc.path, rsp.op_ret, rsp.fd);
+
+ if (-1 != rsp.op_ret) {
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx->remote_fd = rsp.fd;
+
+ if (!fdctx->released) {
+ list_add_tail (&fdctx->sfd_pos, &conf->saved_fds);
+ fdctx = NULL;
+ }
+ }
+ pthread_mutex_unlock (&conf->lock);
+ }
+
+out:
+ if (fdctx)
+ client_fdctx_destroy (frame->this, fdctx);
+
+ frame->local = NULL;
+ STACK_DESTROY (frame->root);
+
+ client_local_wipe (local);
+
+ return 0;
+}
+
+int
+client3_1_reopendir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ int32_t ret = -1;
+ gfs3_open_rsp rsp = {0,};
+ clnt_local_t *local = NULL;
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ call_frame_t *frame = NULL;
+
+ frame = myframe;
+ if (!frame || !frame->this)
+ goto out;
+
+ local = frame->local;
+ frame->local = NULL;
+ conf = frame->this->private;
+ fdctx = local->fdctx;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_opendir_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "reopendir on %s returned %d (%"PRId64")",
+ local->loc.path, rsp.op_ret, rsp.fd);
+
+ if (fdctx) {
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx->remote_fd = rsp.fd;
+
+ if (!fdctx->released) {
+ list_add_tail (&fdctx->sfd_pos, &conf->saved_fds);
+ fdctx = NULL;
+ }
+ }
+ pthread_mutex_unlock (&conf->lock);
+ }
+
+out:
+ if (fdctx)
+ client_fdctx_destroy (frame->this, fdctx);
+
+ frame->local = NULL;
+ STACK_DESTROY (frame->root);
+
+ client_local_wipe (local);
+
+ return 0;
+}
+
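+/* Re-open a directory fd on the server after a reconnect, using the path
+ * resolved from the inode. */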
+int
+protocol_client_reopendir (xlator_t *this, clnt_fd_ctx_t *fdctx)
+{
+ int ret = -1;
+ gfs3_opendir_req req = {0,};
+ clnt_local_t *local = NULL;
+ inode_t *inode = NULL;
+ char *path = NULL;
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+
+ if (!this || !fdctx)
+ goto out;
+
+ inode = fdctx->inode;
+ conf = this->private;
+
+ ret = inode_path (inode, NULL, &path);
+ if (ret < 0) {
+ goto out;
+ }
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ goto out;
+ }
+
+ local->fdctx = fdctx;
+ local->op = client3_1_reopendir_cbk;
+ local->loc.path = path;
+ path = NULL;
+
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+
+ req.ino = fdctx->ino;
+ req.gen = fdctx->gen;
+ req.path = (char *)local->loc.path;
+
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "attempting reopen on %s", local->loc.path);
+
+ frame->local = local;
+ local = NULL;
+
+ client_submit_request (this, &req, frame, conf->fops,
+ GFS3_OP_OPENDIR, NULL, xdr_from_opendir_req);
+
+ return ret;
+
+out:
+ if (frame) {
+ frame->local = NULL;
+ STACK_DESTROY (frame->root);
+ }
+
+ if (local)
+ client_local_wipe (local);
+
+ if (path)
+ GF_FREE (path);
+
+ return 0;
+
+}
+
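+/* Re-open a regular fd on the server after a reconnect, replaying the
+ * original open flags and wbflags. */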
+int
+protocol_client_reopen (xlator_t *this, clnt_fd_ctx_t *fdctx)
+{
+ int ret = -1;
+ gfs3_open_req req = {0,};
+ clnt_local_t *local = NULL;
+ inode_t *inode = NULL;
+ char *path = NULL;
+ call_frame_t *frame = NULL;
+ clnt_conf_t *conf = NULL;
+
+ if (!this || !fdctx)
+ goto out;
+
+ inode = fdctx->inode;
+ conf = this->private;
+
+ ret = inode_path (inode, NULL, &path);
+ if (ret < 0) {
+ goto out;
+ }
+
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ goto out;
+ }
+
+ local->fdctx = fdctx;
+ local->op = client3_1_reopen_cbk;
+ local->loc.path = path;
+ path = NULL;
+ frame->local = local;
+
+ req.ino = fdctx->ino;
+ req.gen = fdctx->gen;
+ req.flags = gf_flags_from_flags (fdctx->flags);
+ req.wbflags = fdctx->wbflags;
+ req.path = (char *)local->loc.path;
+
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "attempting reopen on %s", local->loc.path);
+
+ local = NULL;
+ client_submit_request (this, &req, frame, conf->fops,
+ GFS3_OP_OPEN, NULL, xdr_from_open_req);
+
+ return ret;
+
+out:
+ if (frame) {
+ frame->local = NULL;
+ STACK_DESTROY (frame->root);
+ }
+
+ if (local)
+ client_local_wipe (local);
+
+ if (path)
+ GF_FREE (path);
+
+ return 0;
+
+}
+
+
+
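+/* Detach the fd context from saved_fds under conf->lock; if a remote fd
+ * exists, send RELEASEDIR to the server and free the context locally.
+ * remote_fd == -1 means a re-open is still in flight, so only mark the
+ * context released and let the re-open callback clean up. */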
+int32_t
+client3_1_releasedir (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_releasedir_req req = {0,};
+ int64_t remote_fd = -1;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_del_ctx (args->fd, this);
+ if (fdctx != NULL) {
+ remote_fd = fdctx->remote_fd;
+
+ /* fdctx->remote_fd == -1 indicates a reopen attempt
+ in progress. Just mark ->released = 1 and let
+ reopen_cbk handle releasing
+ */
+
+ if (remote_fd != -1)
+ list_del_init (&fdctx->sfd_pos);
+
+ fdctx->released = 1;
+ }
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (remote_fd != -1) {
+ req.fd = remote_fd;
+ client_submit_request (this, &req, frame, prog,
+ GFS3_OP_RELEASEDIR,
+ NULL, xdr_from_releasedir_req);
+ inode_unref (fdctx->inode);
+ GF_FREE (fdctx);
+ }
+
+ return 0;
+unwind:
+ return 0;
+}
+
+int32_t
+client3_1_release (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ int64_t remote_fd = -1;
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_release_req req = {0,};
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_del_ctx (args->fd, this);
+ if (fdctx != NULL) {
+ remote_fd = fdctx->remote_fd;
+
+ /* fdctx->remote_fd == -1 indicates a reopen attempt
+ in progress. Just mark ->released = 1 and let
+ reopen_cbk handle releasing
+ */
+
+ if (remote_fd != -1)
+ list_del_init (&fdctx->sfd_pos);
+
+ fdctx->released = 1;
+ }
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (remote_fd != -1) {
+ req.fd = remote_fd;
+ client_submit_request (this, &req, frame, prog,
+ GFS3_OP_RELEASE, NULL,
+ xdr_from_release_req);
+ inode_unref (fdctx->inode);
+ GF_FREE (fdctx);
+ }
+ return 0;
+unwind:
+ return 0;
+}
+
+
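+/* LOOKUP: the root (ino 1) needs no resolution; otherwise the parent's
+ * remote inode number is read from the inode context. Any request
+ * dictionary is serialized into the wire request. */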
+int32_t
+client3_1_lookup (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_lookup_req req = {0,};
+ int ret = 0;
+ size_t dict_len = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ if (args->loc->ino != 1 && args->loc->parent) {
+ ret = inode_ctx_get2 (args->loc->parent, this,
+ &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "LOOKUP %"PRId64"/%s (%s): failed to get "
+ "remote inode number for parent",
+ args->loc->parent->ino, args->loc->name,
+ args->loc->path);
+ goto unwind;
+ }
+ GF_VALIDATE_OR_GOTO (this->name, args->loc->name, unwind);
+ } else {
+ req.ino = 1;
+ }
+
+ if (args->dict) {
+ ret = dict_allocate_and_serialize (args->dict,
+ &req.dict.dict_val,
+ &dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of dict");
+ op_errno = EINVAL;
+ goto unwind;
+ }
+ }
+
+ req.path = (char *)args->loc->path;
+ req.bname = (char *)args->loc->name;
+ req.dict.dict_len = dict_len;
+
+ client_submit_request (this, &req, frame,prog,
+ GFS3_OP_LOOKUP, NULL, xdr_from_lookup_req);
+
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+
+ return 0;
+
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+
+ if (local)
+ client_local_wipe (local);
+
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+
+ return 0;
+}
+
+
+
+int32_t
+client3_1_stat (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_stat_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_STAT,
+ NULL, xdr_from_stat_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (stat, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+int32_t
+client3_1_truncate (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_truncate_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.offset = args->offset;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_TRUNCATE,
+ NULL, xdr_from_truncate_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (truncate, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+int32_t
+client3_1_ftruncate (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_ftruncate_req req = {0,};
+ int op_errno = EINVAL;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.offset = args->offset;
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FTRUNCATE,
+ NULL, xdr_from_ftruncate_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (ftruncate, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_access (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_access_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.mask = args->mask;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_ACCESS,
+ NULL, xdr_from_access_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (access, frame, -1, op_errno);
+ return 0;
+}
+
+int32_t
+client3_1_readlink (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_readlink_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.size = args->size;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_READLINK,
+ NULL, xdr_from_readlink_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (readlink, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+
+int32_t
+client3_1_unlink (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_unlink_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->parent, this, &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64"/%s (%s): "
+ "failed to get remote inode number for parent",
+ args->loc->parent->ino, args->loc->name, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.bname = (char *)args->loc->name;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_UNLINK,
+ NULL, xdr_from_unlink_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (unlink, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_rmdir (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_rmdir_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->parent, this, &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "RMDIR %"PRId64"/%s (%s): "
+ "failed to get remote inode number for parent",
+ args->loc->parent->ino, args->loc->name, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.bname = (char *)args->loc->name;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_RMDIR,
+ NULL, xdr_from_rmdir_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (rmdir, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_symlink (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_symlink_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->loc->parent, this, &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "SYMLINK %"PRId64"/%s (%s): failed to get remote inode"
+ " number parent",
+ args->loc->parent->ino, args->loc->name,
+ args->loc->path);
+ goto unwind;
+ }
+
+ req.path = (char *)args->loc->path;
+ req.linkname = (char *)args->linkname;
+ req.bname = (char *)args->loc->name;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_SYMLINK,
+ NULL, xdr_from_symlink_req);
+
+ return 0;
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (symlink, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ if (local)
+ client_local_wipe (local);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_rename (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_rename_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->oldloc->parent, this,
+ &req.oldpar, &req.oldgen);
+ if (args->oldloc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "RENAME %"PRId64"/%s (%s): failed to get remote inode "
+ "number for source parent", args->oldloc->parent->ino,
+ args->oldloc->name, args->oldloc->path);
+ goto unwind;
+ }
+
+ ret = inode_ctx_get2 (args->newloc->parent, this, &req.newpar,
+ &req.newgen);
+ if (args->newloc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "CREATE %"PRId64"/%s (%s): failed to get remote inode "
+ "number for destination parent",
+ args->newloc->parent->ino, args->newloc->name,
+ args->newloc->path);
+ goto unwind;
+ }
+
+ req.oldpath = (char *)args->oldloc->path;
+ req.oldbname = (char *)args->oldloc->name;
+ req.newpath = (char *)args->newloc->path;
+ req.newbname = (char *)args->newloc->name;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_RENAME,
+ NULL, xdr_from_rename_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (rename, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_link (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_link_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ loc_copy (&local->loc, args->oldloc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->oldloc->inode, this,
+ &req.oldino, &req.oldgen);
+ if (args->oldloc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "LINK %"PRId64"/%s (%s): failed to get remote inode "
+ "number for source inode", args->oldloc->inode->ino,
+ args->oldloc->name, args->oldloc->path);
+ goto unwind;
+ }
+
+ ret = inode_ctx_get2 (args->newloc->parent, this, &req.newpar,
+ &req.newgen);
+ if (args->newloc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "CREATE %"PRId64"/%s (%s): failed to get remote inode "
+ "number for destination parent",
+ args->newloc->parent->ino, args->newloc->name,
+ args->newloc->path);
+ goto unwind;
+ }
+
+ req.oldpath = (char *)args->oldloc->path;
+ req.newpath = (char *)args->newloc->path;
+ req.newbname = (char *)args->newloc->name;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_LINK,
+ NULL, xdr_from_link_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (link, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_mknod (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_mknod_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->loc->parent, this, &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "SYMLINK %"PRId64"/%s (%s): failed to get remote inode"
+ " number parent",
+ args->loc->parent->ino, args->loc->name,
+ args->loc->path);
+ goto unwind;
+ }
+
+ req.path = (char *)args->loc->path;
+ req.bname = (char *)args->loc->name;
+ req.mode = args->mode;
+ req.dev = args->rdev;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_MKNOD,
+ NULL, xdr_from_mknod_req);
+
+ return 0;
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (mknod, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ if (local)
+ client_local_wipe (local);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_mkdir (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_mkdir_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->loc->parent, this, &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "SYMLINK %"PRId64"/%s (%s): failed to get remote inode"
+ " number parent",
+ args->loc->parent->ino, args->loc->name,
+ args->loc->path);
+ goto unwind;
+ }
+
+ req.path = (char *)args->loc->path;
+ req.bname = (char *)args->loc->name;
+ req.mode = args->mode;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_MKDIR,
+ NULL, xdr_from_mkdir_req);
+
+ return 0;
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (mkdir, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+ if (local)
+ client_local_wipe (local);
+ return 0;
+}
+
+
+int32_t
+client3_1_create (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_create_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ local->fd = fd_ref (args->fd);
+ local->flags = args->flags;
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->loc->parent, this, &req.par, &req.gen);
+ if (args->loc->parent->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "SYMLINK %"PRId64"/%s (%s): failed to get remote inode"
+ " number parent",
+ args->loc->parent->ino, args->loc->name,
+ args->loc->path);
+ goto unwind;
+ }
+
+ req.path = (char *)args->loc->path;
+ req.bname = (char *)args->loc->name;
+ req.mode = args->mode;
+ req.flags = gf_flags_from_flags (args->flags);
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_CREATE,
+ NULL, xdr_from_create_req);
+
+ return 0;
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (create, frame, -1, op_errno, NULL, NULL, NULL, NULL, NULL);
+ if (local)
+ client_local_wipe (local);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_open (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_open_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ local->fd = fd_ref (args->fd);
+ local->flags = args->flags;
+ local->wbflags = args->wbflags;
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "OPEN %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.flags = gf_flags_from_flags (args->flags);
+ req.wbflags = args->wbflags;
+ req.path = (char *)args->loc->path;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_OPEN,
+ NULL, xdr_from_open_req);
+
+ return 0;
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (open, frame, -1, op_errno, NULL);
+ if (local)
+ client_local_wipe (local);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_readv (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+ gfs3_read_req req = {0,};
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.size = args->size;
+ req.offset = args->offset;
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_READ,
+ NULL, xdr_from_readv_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (readv, frame, -1, op_errno, NULL, 0, NULL, NULL);
+ return 0;
+}
+
+
+int32_t
+client3_1_writev (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_write_req req = {0,};
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.size = args->size;
+ req.offset = args->offset;
+ req.fd = fdctx->remote_fd;
+
+ /* TODO: Buffer */
+
+ client_submit_vec_request (this, &req, frame,prog, GFS3_OP_WRITE,
+ args->vector, args->count,
+ args->iobref, xdr_from_writev_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (writev, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_flush (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_flush_req req = {0,};
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FLUSH,
+ NULL, xdr_from_flush_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (flush, frame, -1, op_errno);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_fsync (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_fsync_req req = {0,};
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.data = args->flags;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FSYNC,
+ NULL, xdr_from_fsync_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fsync, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_fstat (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_fstat_req req = {0,};
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FSTAT,
+ NULL, xdr_from_fstat_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fstat, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_opendir (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_local_t *local = NULL;
+ clnt_args_t *args = NULL;
+ gfs3_opendir_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ local = GF_CALLOC (1, sizeof (*local), gf_client_mt_clnt_local_t);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ local->fd = fd_ref (args->fd);
+ loc_copy (&local->loc, args->loc);
+ frame->local = local;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "OPEN %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_OPENDIR,
+ NULL, xdr_from_opendir_req);
+
+ return 0;
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (opendir, frame, -1, op_errno, NULL);
+ if (local)
+ client_local_wipe (local);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_fsyncdir (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+ gfs3_fsyncdir_req req = {0,};
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.data = args->flags;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FSYNCDIR,
+ NULL, xdr_from_fsyncdir_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fsyncdir, frame, -1, op_errno);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_statfs (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_statfs_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ if (args->loc->inode) {
+ ret = inode_ctx_get2 (args->loc->inode, this,
+ &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STATFS %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ }
+ req.path = (char *)args->loc->path;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_STATFS,
+ NULL, xdr_from_statfs_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (statfs, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_setxattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_setxattr_req req = {0,};
+ int ret = 0;
+ size_t dict_len = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "SETXATTR %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ if (args->dict) {
+ ret = dict_allocate_and_serialize (args->dict,
+ &req.dict.dict_val,
+ &dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized dict");
+ op_errno = EINVAL;
+ goto unwind;
+ }
+ req.dict.dict_len = dict_len;
+ }
+ req.flags = args->flags;
+ req.path = (char *)args->loc->path;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_SETXATTR,
+ NULL, xdr_from_setxattr_req);
+
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (setxattr, frame, -1, op_errno);
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+ return 0;
+}
+
+
+
+int32_t
+client3_1_fsetxattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_fsetxattr_req req = {0,};
+ int op_errno = ESTALE;
+ int ret = 0;
+ size_t dict_len = 0;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.flags = args->flags;
+ req.ino = args->fd->inode->ino;
+
+ if (args->dict) {
+ ret = dict_allocate_and_serialize (args->dict,
+ &req.dict.dict_val,
+ &dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized dict");
+ goto unwind;
+ }
+ req.dict.dict_len = dict_len;
+ }
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FSETXATTR,
+ NULL, xdr_from_fsetxattr_req);
+
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fsetxattr, frame, -1, op_errno);
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+ return 0;
+}
+
+
+
+
+int32_t
+client3_1_fgetxattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_fgetxattr_req req = {0,};
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.namelen = 1; /* Use it as a flag */
+ req.fd = fdctx->remote_fd;
+ req.name = (char *)args->name;
+ if (!req.name) {
+ req.name = "";
+ req.namelen = 0;
+ }
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FGETXATTR,
+ NULL, xdr_from_fgetxattr_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fgetxattr, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_getxattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_getxattr_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+
+ req.namelen = 1; /* Use it as a flag */
+ req.path = (char *)args->loc->path;
+ req.name = (char *)args->name;
+ if (!req.name) {
+ req.name = "";
+ req.namelen = 0;
+ }
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_GETXATTR,
+ NULL, xdr_from_getxattr_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (getxattr, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_xattrop (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_xattrop_req req = {0,};
+ int ret = 0;
+ size_t dict_len = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "SETXATTR %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ if (args->dict) {
+ ret = dict_allocate_and_serialize (args->dict,
+ &req.dict.dict_val,
+ &dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized dict");
+ op_errno = EINVAL;
+ goto unwind;
+ }
+ req.dict.dict_len = dict_len;
+ }
+ req.flags = args->flags;
+ req.path = (char *)args->loc->path;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_XATTROP,
+ NULL, xdr_from_xattrop_req);
+
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (xattrop, frame, -1, op_errno, NULL);
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+ return 0;
+}
+
+
+
+int32_t
+client3_1_fxattrop (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_fxattrop_req req = {0,};
+ int op_errno = ESTALE;
+ int ret = 0;
+ size_t dict_len = 0;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.flags = args->flags;
+ req.ino = args->fd->inode->ino;
+
+ if (args->dict) {
+ ret = dict_allocate_and_serialize (args->dict,
+ &req.dict.dict_val,
+ &dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized dict");
+ goto unwind;
+ }
+ req.dict.dict_len = dict_len;
+ }
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_FXATTROP,
+ NULL, xdr_from_fxattrop_req);
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fxattrop, frame, -1, op_errno, NULL);
+ if (req.dict.dict_val) {
+ GF_FREE (req.dict.dict_val);
+ }
+ return 0;
+}
+
+
+
+int32_t
+client3_1_removexattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_removexattr_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "REMOVEXATTR %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.name = (char *)args->name;
+
+ client_submit_request (this, &req, frame,prog, GFS3_OP_REMOVEXATTR,
+ NULL, xdr_from_removexattr_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (removexattr, frame, -1, op_errno);
+ return 0;
+}
+
+
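+/* Map the fcntl lock command and lock type to their protocol-level
+ * GF_LK_* equivalents before sending the LK request. */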
+int32_t
+client3_1_lk (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_lk_req req = {0,};
+ int32_t gf_cmd = 0;
+ int32_t gf_type = 0;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (args->cmd == F_GETLK || args->cmd == F_GETLK64)
+ gf_cmd = GF_LK_GETLK;
+ else if (args->cmd == F_SETLK || args->cmd == F_SETLK64)
+ gf_cmd = GF_LK_SETLK;
+ else if (args->cmd == F_SETLKW || args->cmd == F_SETLKW64)
+ gf_cmd = GF_LK_SETLKW;
+ else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unknown cmd (%d)!", args->cmd);
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ switch (args->flock->l_type) {
+ case F_RDLCK:
+ gf_type = GF_LK_F_RDLCK;
+ break;
+ case F_WRLCK:
+ gf_type = GF_LK_F_WRLCK;
+ break;
+ case F_UNLCK:
+ gf_type = GF_LK_F_UNLCK;
+ break;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.cmd = gf_cmd;
+ req.type = gf_type;
+ gf_flock_from_flock (&req.flock, args->flock);
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_LK,
+ NULL, xdr_from_lk_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (lk, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+int32_t
+client3_1_inodelk (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_inodelk_req req = {0,};
+ int ret = 0;
+ int32_t gf_cmd = 0;
+ int32_t gf_type = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "INODELK %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+
+ if (args->cmd == F_GETLK || args->cmd == F_GETLK64)
+ gf_cmd = GF_LK_GETLK;
+ else if (args->cmd == F_SETLK || args->cmd == F_SETLK64)
+ gf_cmd = GF_LK_SETLK;
+ else if (args->cmd == F_SETLKW || args->cmd == F_SETLKW64)
+ gf_cmd = GF_LK_SETLKW;
+ else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unknown cmd (%d)!", gf_cmd);
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ switch (args->flock->l_type) {
+ case F_RDLCK:
+ gf_type = GF_LK_F_RDLCK;
+ break;
+ case F_WRLCK:
+ gf_type = GF_LK_F_WRLCK;
+ break;
+ case F_UNLCK:
+ gf_type = GF_LK_F_UNLCK;
+ break;
+ }
+
+ req.path = (char *)args->loc->path;
+ req.volume = (char *)args->volume;
+ req.cmd = gf_cmd;
+ req.type = gf_type;
+ gf_flock_from_flock (&req.flock, args->flock);
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_INODELK,
+ NULL, xdr_from_inodelk_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (inodelk, frame, -1, op_errno);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_finodelk (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_finodelk_req req = {0,};
+ int32_t gf_cmd = 0;
+ int32_t gf_type = 0;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (args->cmd == F_GETLK || args->cmd == F_GETLK64)
+ gf_cmd = GF_LK_GETLK;
+ else if (args->cmd == F_SETLK || args->cmd == F_SETLK64)
+ gf_cmd = GF_LK_SETLK;
+ else if (args->cmd == F_SETLKW || args->cmd == F_SETLKW64)
+ gf_cmd = GF_LK_SETLKW;
+ else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unknown cmd (%d)!", args->cmd);
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ switch (args->flock->l_type) {
+ case F_RDLCK:
+ gf_type = GF_LK_F_RDLCK;
+ break;
+ case F_WRLCK:
+ gf_type = GF_LK_F_WRLCK;
+ break;
+ case F_UNLCK:
+ gf_type = GF_LK_F_UNLCK;
+ break;
+ }
+
+ req.volume = (char *)args->volume;
+ req.fd = fdctx->remote_fd;
+ req.cmd = gf_cmd;
+ req.type = gf_type;
+ gf_flock_from_flock (&req.flock, args->flock);
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_FINODELK,
+ NULL, xdr_from_finodelk_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (finodelk, frame, -1, op_errno);
+ return 0;
+}
+
+
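+/* entrylk/fentrylk lock a name inside a directory: req.name defaults to
+ * "" and req.namelen acts as a presence flag, set to 1 only when a
+ * basename was actually passed in. */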
+int32_t
+client3_1_entrylk (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_entrylk_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.cmd = args->cmd_entrylk;
+ req.type = args->type;
+ req.volume = (char *)args->volume;
+ req.name = "";
+ if (args->basename) {
+ req.name = (char *)args->basename;
+ req.namelen = 1;
+ }
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_ENTRYLK,
+ NULL, xdr_from_entrylk_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (entrylk, frame, -1, op_errno);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_fentrylk (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_fentrylk_req req = {0,};
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.cmd = args->cmd_entrylk;
+ req.type = args->type;
+ req.volume = (char *)args->volume;
+ req.name = "";
+ if (args->basename) {
+ req.name = (char *)args->basename;
+ req.namelen = 1;
+ }
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_FENTRYLK,
+ NULL, xdr_from_fentrylk_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fentrylk, frame, -1, op_errno);
+ return 0;
+}
+
+
+
+
+int32_t
+client3_1_checksum (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_checksum_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.flag = args->flags;
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_CHECKSUM,
+ NULL, xdr_from_checksum_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (checksum, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_rchecksum (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_rchecksum_req req = {0,};
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.len = args->len;
+ req.offset = args->offset;
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_RCHECKSUM,
+ NULL, xdr_from_rchecksum_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (rchecksum, frame, -1, op_errno, 0, NULL);
+ return 0;
+}
+
+
+
+int32_t
+client3_1_readdir (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_readdir_req req = {0,};
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.size = args->size;
+ req.offset = args->offset;
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_READDIR,
+ NULL, xdr_from_readdir_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (readdir, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+int32_t
+client3_1_readdirp (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_readdirp_req req = {0,};
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.size = args->size;
+ req.offset = args->offset;
+ req.fd = fdctx->remote_fd;
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_READDIRP,
+ NULL, xdr_from_readdirp_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (readdirp, frame, -1, op_errno, NULL);
+ return 0;
+}
+
+
+int32_t
+client3_1_setattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ gfs3_setattr_req req = {0,};
+ int ret = 0;
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+
+ ret = inode_ctx_get2 (args->loc->inode, this, &req.ino, &req.gen);
+ if (args->loc->inode->ino && ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "STAT %"PRId64" (%s): "
+ "failed to get remote inode number",
+ args->loc->inode->ino, args->loc->path);
+ goto unwind;
+ }
+ req.path = (char *)args->loc->path;
+ req.valid = args->valid;
+ gf_stat_from_iatt (&req.stbuf, args->stbuf);
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_SETATTR,
+ NULL, xdr_from_setattr_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (setattr, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+client3_1_fsetattr (call_frame_t *frame, xlator_t *this, rpc_clnt_prog_t *prog,
+ void *data)
+{
+ clnt_args_t *args = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ clnt_conf_t *conf = NULL;
+ gfs3_fsetattr_req req = {0,};
+ int op_errno = ESTALE;
+
+ if (!frame || !this || !prog || !data)
+ goto unwind;
+
+ args = data;
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ fdctx = this_fd_get_ctx (args->fd, this);
+ }
+ pthread_mutex_unlock (&conf->lock);
+
+ if (fdctx == NULL) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "(%"PRId64"): failed to get fd ctx. EBADFD",
+ args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ if (fdctx->remote_fd == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "(%"PRId64"): failed to get"
+ " fd ctx. EBADFD", args->fd->inode->ino);
+ op_errno = EBADFD;
+ goto unwind;
+ }
+
+ req.fd = fdctx->remote_fd;
+ req.valid = args->valid;
+ gf_stat_from_iatt (&req.stbuf, args->stbuf);
+
+ client_submit_request (this, &req, frame, prog, GFS3_OP_FSETATTR,
+ NULL, xdr_from_fsetattr_req);
+
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT (fsetattr, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+/* CBK */
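+/* Every *_cbk below follows the same shape: treat rpc_status == -1 as
+ * ENOTCONN, XDR-decode the reply into the matching gfs3_*_rsp, convert
+ * the wire gf_stat/gf_flock values back into iatt/flock structures and
+ * STACK_UNWIND the result to the parent translator. */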
+int
+client3_1_release_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+
+ frame = myframe;
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+int
+client3_1_releasedir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+
+ frame = myframe;
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+
+
+int
+client3_1_symlink_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_symlink_rsp rsp = {0,};
+ struct iatt stbuf = {0,};
+ struct iatt preparent = {0,};
+ struct iatt postparent = {0,};
+ int ret = 0;
+ clnt_local_t *local = NULL;
+ inode_t *inode = NULL;
+
+ frame = myframe;
+
+ local = frame->local;
+ frame->local = NULL;
+ inode = local->loc.inode;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_symlink_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ ret = inode_ctx_put2 (inode, frame->this,
+ stbuf.ia_ino, stbuf.ia_gen);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "SYMLINK %"PRId64"/%s (%s): failed to set "
+ "remote inode number to inode ctx",
+ local->loc.parent->ino, local->loc.name,
+ local->loc.path);
+ }
+
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (symlink, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), inode, &stbuf,
+ &preparent, &postparent);
+
+ if (local)
+ client_local_wipe (local);
+
+ return 0;
+}
+
+
+int
+client3_1_mknod_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_mknod_rsp rsp = {0,};
+ struct iatt stbuf = {0,};
+ struct iatt preparent = {0,};
+ struct iatt postparent = {0,};
+ int ret = 0;
+ clnt_local_t *local = NULL;
+ inode_t *inode = NULL;
+
+ frame = myframe;
+
+ local = frame->local;
+ frame->local = NULL;
+ inode = local->loc.inode;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_mknod_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ ret = inode_ctx_put2 (inode, frame->this,
+ stbuf.ia_ino, stbuf.ia_gen);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "MKNOD %"PRId64"/%s (%s): failed to set "
+ "remote inode number to inode ctx",
+ local->loc.parent->ino, local->loc.name,
+ local->loc.path);
+ }
+
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (mknod, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), inode,
+ &stbuf, &preparent, &postparent);
+
+ if (local)
+ client_local_wipe (local);
+
+ return 0;
+}
+
+int
+client3_1_mkdir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_mkdir_rsp rsp = {0,};
+ struct iatt stbuf = {0,};
+ struct iatt preparent = {0,};
+ struct iatt postparent = {0,};
+ int ret = 0;
+ clnt_local_t *local = NULL;
+ inode_t *inode = NULL;
+
+ frame = myframe;
+
+ local = frame->local;
+ frame->local = NULL;
+ inode = local->loc.inode;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_mkdir_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ ret = inode_ctx_put2 (inode, frame->this,
+ stbuf.ia_ino, stbuf.ia_gen);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "MKDIR %"PRId64"/%s (%s): failed to set "
+ "remote inode number to inode ctx",
+ local->loc.parent->ino, local->loc.name,
+ local->loc.path);
+ }
+
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (mkdir, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), inode,
+ &stbuf, &preparent, &postparent);
+
+ if (local)
+ client_local_wipe (local);
+
+ return 0;
+}
+
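+/* open, opendir and create callbacks cache the remote fd: a
+ * clnt_fd_ctx_t records the remote fd number and the inode's ino/gen,
+ * is attached as the fd's context and is linked into conf->saved_fds
+ * under conf->lock. */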
+int
+client3_1_open_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ clnt_local_t *local = NULL;
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ call_frame_t *frame = NULL;
+ fd_t *fd = NULL;
+ ino_t ino = 0;
+ uint64_t gen = 0;
+ int ret = 0;
+ gfs3_open_rsp rsp = {0,};
+
+ frame = myframe;
+ local = frame->local;
+
+ if (local->op) {
+ local->op (req, iov, 1, myframe);
+ return 0;
+ }
+
+ frame->local = NULL;
+ conf = frame->this->private;
+ fd = local->fd;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_open_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ fdctx = GF_CALLOC (1, sizeof (*fdctx),
+ gf_client_mt_clnt_fdctx_t);
+ if (!fdctx) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOMEM;
+ goto out;
+ }
+
+ inode_ctx_get2 (fd->inode, frame->this, &ino, &gen);
+
+ fdctx->remote_fd = rsp.fd;
+ fdctx->inode = inode_ref (fd->inode);
+ fdctx->ino = ino;
+ fdctx->gen = gen;
+ fdctx->flags = local->flags;
+ fdctx->wbflags = local->wbflags;
+
+ INIT_LIST_HEAD (&fdctx->sfd_pos);
+
+ this_fd_set_ctx (fd, frame->this, &local->loc, fdctx);
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ list_add_tail (&fdctx->sfd_pos, &conf->saved_fds);
+ }
+ pthread_mutex_unlock (&conf->lock);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (open, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), fd);
+
+ client_local_wipe (local);
+
+ return 0;
+}
+
+
+int
+client3_1_stat_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_stat_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt iatt = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_stat_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &iatt);
+ }
+
+out:
+ STACK_UNWIND_STRICT (stat, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &iatt);
+
+ return 0;
+}
+
+int
+client3_1_readlink_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_readlink_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt iatt = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_readlink_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.buf, &iatt);
+ }
+
+out:
+ STACK_UNWIND_STRICT (readlink, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), rsp.path, &iatt);
+
+ /* This is allocated by the libc while decoding RPC msg */
+ /* Hence no 'GF_FREE', but just 'free' */
+ if (rsp.path)
+ free (rsp.path);
+
+ return 0;
+}
+
+int
+client3_1_unlink_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_unlink_rsp rsp = {0,};
+ struct iatt preparent = {0,};
+ struct iatt postparent = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_unlink_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+ }
+
+out:
+ STACK_UNWIND_STRICT (unlink, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &preparent,
+ &postparent);
+
+ return 0;
+}
+
+int
+client3_1_rmdir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_rmdir_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt preparent = {0,};
+ struct iatt postparent = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_rmdir_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+ }
+
+out:
+ STACK_UNWIND_STRICT (rmdir, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &preparent,
+ &postparent);
+
+ return 0;
+}
+
+
+int
+client3_1_truncate_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_truncate_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt prestat = {0,};
+ struct iatt poststat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_truncate_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.prestat, &prestat);
+ gf_stat_to_iatt (&rsp.poststat, &poststat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (truncate, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &prestat,
+ &poststat);
+
+ return 0;
+}
+
+
+int
+client3_1_statfs_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_statfs_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct statvfs statfs = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_statfs_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_statfs_to_statfs (&rsp.statfs, &statfs);
+ }
+
+out:
+ STACK_UNWIND_STRICT (statfs, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &statfs);
+
+ return 0;
+}
+
+
+int
+client3_1_writev_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_write_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt prestat = {0,};
+ struct iatt poststat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_truncate_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.prestat, &prestat);
+ gf_stat_to_iatt (&rsp.poststat, &poststat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (writev, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &prestat,
+ &poststat);
+
+ return 0;
+}
+
+int
+client3_1_flush_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (flush, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_fsync_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_fsync_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt prestat = {0,};
+ struct iatt poststat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_truncate_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.prestat, &prestat);
+ gf_stat_to_iatt (&rsp.poststat, &poststat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (fsync, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &prestat,
+ &poststat);
+
+ return 0;
+}
+
+int
+client3_1_setxattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (setxattr, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
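+/* Callbacks that return xattrs rebuild a dict_t from the serialized
+ * blob in the reply: the blob is memdup'd, unserialized, and the copy is
+ * handed to the dict via extra_free so it is released together with it.
+ * The rsp buffer itself was allocated by libc while decoding the RPC
+ * message, hence the plain free() further down. */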
+int
+client3_1_getxattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+ char *buf = NULL;
+ int dict_len = 0;
+ int op_ret = 0;
+ int op_errno = 0;
+ gfs3_getxattr_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ op_ret = -1;
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_getxattr_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ op_errno = gf_error_to_errno (rsp.op_errno);
+ op_ret = rsp.op_ret;
+ if (-1 != op_ret) {
+ op_ret = -1;
+ dict_len = rsp.dict.dict_len;
+
+ if (dict_len > 0) {
+ dict = dict_new();
+ buf = memdup (rsp.dict.dict_val, rsp.dict.dict_len);
+
+ GF_VALIDATE_OR_GOTO (frame->this->name, dict, out);
+ GF_VALIDATE_OR_GOTO (frame->this->name, buf, out);
+
+ ret = dict_unserialize (buf, dict_len, &dict);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "failed to unserialize xattr dict");
+ op_errno = EINVAL;
+ goto out;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+ }
+ op_ret = 0;
+ }
+
+out:
+ STACK_UNWIND_STRICT (getxattr, frame, op_ret, op_errno, dict);
+
+ if (rsp.dict.dict_val) {
+ /* don't use GF_FREE, this memory was allocated by libc
+ */
+ free (rsp.dict.dict_val);
+ rsp.dict.dict_val = NULL;
+ }
+
+ if (buf)
+ GF_FREE (buf);
+
+ if (dict)
+ dict_unref (dict);
+
+ return 0;
+}
+
+int
+client3_1_fgetxattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ char *buf = NULL;
+ dict_t *dict = NULL;
+ gfs3_fgetxattr_rsp rsp = {0,};
+ int ret = 0;
+ int dict_len = 0;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ op_ret = -1;
+ op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_fgetxattr_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ op_errno = gf_error_to_errno (rsp.op_errno);
+ op_ret = rsp.op_ret;
+ if (-1 != op_ret) {
+ op_ret = -1;
+ dict_len = rsp.dict.dict_len;
+
+ if (dict_len > 0) {
+ dict = dict_new();
+ GF_VALIDATE_OR_GOTO (frame->this->name, dict, out);
+ buf = memdup (rsp.dict.dict_val, rsp.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (frame->this->name, buf, out);
+
+ ret = dict_unserialize (buf, dict_len, &dict);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "failed to unserialize xattr dict");
+ op_errno = EINVAL;
+ goto out;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+ }
+ op_ret = 0;
+ }
+out:
+ STACK_UNWIND_STRICT (fgetxattr, frame, op_ret, op_errno, dict);
+ if (rsp.dict.dict_val) {
+ /* don't use GF_FREE, this memory was allocated by libc
+ */
+ free (rsp.dict.dict_val);
+ rsp.dict.dict_val = NULL;
+ }
+
+ if (buf)
+ GF_FREE (buf);
+
+ if (dict)
+ dict_unref (dict);
+
+ return 0;
+}
+
+int
+client3_1_removexattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (removexattr, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_fsyncdir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (fsyncdir, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_access_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (access, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+
+int
+client3_1_ftruncate_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_ftruncate_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt prestat = {0,};
+ struct iatt poststat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_ftruncate_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.prestat, &prestat);
+ gf_stat_to_iatt (&rsp.poststat, &poststat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (ftruncate, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &prestat,
+ &poststat);
+
+ return 0;
+}
+
+int
+client3_1_fstat_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gfs3_fstat_rsp rsp = {0,};
+ call_frame_t *frame = NULL;
+ struct iatt stat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_fstat_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (fstat, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &stat);
+
+ return 0;
+}
+
+
+int
+client3_1_inodelk_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (inodelk, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_finodelk_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (finodelk, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_entrylk_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+
+ STACK_UNWIND_STRICT (entrylk, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_fentrylk_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (fentrylk, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_xattrop_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+ char *buf = NULL;
+ gfs3_xattrop_rsp rsp = {0,};
+ int ret = 0;
+ int op_ret = 0;
+ int dict_len = 0;
+ int op_errno = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ op_ret = -1;
+ op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_xattrop_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ op_errno = gf_error_to_errno (rsp.op_errno);
+ op_ret = rsp.op_ret;
+ if (-1 != op_ret) {
+ op_ret = -1;
+ dict_len = rsp.dict.dict_len;
+
+ if (dict_len > 0) {
+ dict = dict_new();
+ GF_VALIDATE_OR_GOTO (frame->this->name, dict, out);
+
+ buf = memdup (rsp.dict.dict_val, rsp.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (frame->this->name, buf, out);
+ op_ret = dict_unserialize (buf, dict_len, &dict);
+ if (op_ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "failed to unserialize xattr dict");
+ op_errno = EINVAL;
+ goto out;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+ }
+ op_ret = 0;
+ }
+
+out:
+
+ STACK_UNWIND_STRICT (xattrop, frame, op_ret, op_errno, dict);
+
+ if (rsp.dict.dict_val) {
+ /* don't use GF_FREE, this memory was allocated by libc
+ */
+ free (rsp.dict.dict_val);
+ rsp.dict.dict_val = NULL;
+ }
+
+ if (buf)
+ GF_FREE (buf);
+
+ if (dict)
+ dict_unref (dict);
+
+ return 0;
+}
+
+int
+client3_1_fxattrop_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+ char *buf = NULL;
+ gfs3_fxattrop_rsp rsp = {0,};
+ int ret = 0;
+ int op_ret = 0;
+ int dict_len = 0;
+ int op_errno = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ op_ret = -1;
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_fxattrop_rsp (*iov, &rsp);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ gf_log ("", GF_LOG_ERROR, "error");
+ goto out;
+ }
+
+ op_errno = gf_error_to_errno (rsp.op_errno);
+ op_ret = rsp.op_ret;
+ if (-1 != op_ret) {
+ op_ret = -1;
+ dict_len = rsp.dict.dict_len;
+
+ if (dict_len > 0) {
+ dict = dict_new();
+ GF_VALIDATE_OR_GOTO (frame->this->name, dict, out);
+
+ buf = memdup (rsp.dict.dict_val, rsp.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (frame->this->name, buf, out);
+ op_ret = dict_unserialize (buf, dict_len, &dict);
+ if (op_ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "failed to unserialize xattr dict");
+ op_errno = EINVAL;
+ goto out;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+ }
+ op_ret = 0;
+ }
+
+out:
+
+ STACK_UNWIND_STRICT (fxattrop, frame, op_ret, op_errno, dict);
+
+ if (rsp.dict.dict_val) {
+ /* don't use GF_FREE, this memory was allocated by libc
+ */
+ free (rsp.dict.dict_val);
+ rsp.dict.dict_val = NULL;
+ }
+
+ if (buf)
+ GF_FREE (buf);
+
+ if (dict)
+ dict_unref (dict);
+
+ return 0;
+}
+
+int
+client3_1_fsetxattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gf_common_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_common_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (fsetxattr, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno));
+
+ return 0;
+}
+
+int
+client3_1_fsetattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_fsetattr_rsp rsp = {0,};
+ struct iatt prestat = {0,};
+ struct iatt poststat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+ ret = xdr_to_fsetattr_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.statpre, &prestat);
+ gf_stat_to_iatt (&rsp.statpost, &poststat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (fsetattr, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &prestat,
+ &poststat);
+
+ return 0;
+}
+
+
+int
+client3_1_setattr_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_setattr_rsp rsp = {0,};
+ struct iatt prestat = {0,};
+ struct iatt poststat = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_setattr_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.statpre, &prestat);
+ gf_stat_to_iatt (&rsp.statpost, &poststat);
+ }
+
+out:
+ STACK_UNWIND_STRICT (setattr, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &prestat,
+ &poststat);
+
+ return 0;
+}
+
+int
+client3_1_create_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ fd_t *fd = NULL;
+ inode_t *inode = NULL;
+ struct iatt stbuf = {0, };
+ struct iatt preparent = {0, };
+ struct iatt postparent = {0, };
+ int32_t ret = -1;
+ clnt_local_t *local = NULL;
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ gfs3_create_rsp rsp = {0,};
+
+ frame = myframe;
+ local = frame->local; frame->local = NULL;
+ conf = frame->this->private;
+ fd = local->fd;
+ inode = local->loc.inode;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_create_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ ret = inode_ctx_put2 (inode, frame->this,
+ stbuf.ia_ino, stbuf.ia_gen);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "CREATE %"PRId64"/%s (%s): failed to set "
+ "remote inode number to inode ctx",
+ local->loc.parent->ino, local->loc.name,
+ local->loc.path);
+ }
+
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+
+ fdctx = GF_CALLOC (1, sizeof (*fdctx),
+ gf_client_mt_clnt_fdctx_t);
+ if (!fdctx) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOMEM;
+ goto out;
+ }
+
+ fdctx->remote_fd = rsp.fd;
+ fdctx->inode = inode_ref (inode);
+ fdctx->ino = stbuf.ia_ino;
+ fdctx->gen = stbuf.ia_gen;
+ fdctx->flags = local->flags;
+
+ INIT_LIST_HEAD (&fdctx->sfd_pos);
+
+ this_fd_set_ctx (fd, frame->this, &local->loc, fdctx);
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ list_add_tail (&fdctx->sfd_pos, &conf->saved_fds);
+ }
+ pthread_mutex_unlock (&conf->lock);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (create, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), fd, inode,
+ &stbuf, &preparent, &postparent);
+
+ client_local_wipe (local);
+ return 0;
+}
+
+
+int
+client3_1_rchecksum_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_rchecksum_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_rchecksum_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (rchecksum, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno),
+ rsp.weak_checksum,
+ (uint8_t *)rsp.strong_checksum.strong_checksum_val);
+
+ if (rsp.strong_checksum.strong_checksum_val) {
+ /* This is allocated by the libc while decoding RPC msg */
+ /* Hence no 'GF_FREE', but just 'free' */
+ free (rsp.strong_checksum.strong_checksum_val);
+ }
+
+ return 0;
+}
+
+int
+client3_1_checksum_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_checksum_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_checksum_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ STACK_UNWIND_STRICT (checksum, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno),
+ (uint8_t *)rsp.fchecksum.fchecksum_val,
+ (uint8_t *)rsp.dchecksum.dchecksum_val);
+
+ /* This is allocated by the libc while decoding RPC msg */
+ /* Hence no 'GF_FREE', but just 'free' */
+ if (rsp.fchecksum.fchecksum_val) {
+ free (rsp.fchecksum.fchecksum_val);
+ }
+ if (rsp.dchecksum.dchecksum_val) {
+ free (rsp.dchecksum.dchecksum_val);
+ }
+ return 0;
+}
+
+int
+client3_1_lk_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ struct flock lock = {0,};
+ gfs3_lk_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_lk_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (rsp.op_ret >= 0) {
+ gf_flock_to_flock (&rsp.flock, &lock);
+ }
+
+out:
+ STACK_UNWIND_STRICT (lk, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &lock);
+
+ return 0;
+}
+
+int
+client3_1_readdir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_readdir_rsp rsp = {0,};
+ int32_t ret = 0;
+ gf_dirent_t entries;
+
+ frame = myframe;
+
+ INIT_LIST_HEAD (&entries.list);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_readdir_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (rsp.op_ret > 0) {
+ gf_dirent_unserialize (&entries, rsp.buf.buf_val,
+ rsp.buf.buf_len);
+ }
+
+out:
+ STACK_UNWIND_STRICT (readdir, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &entries);
+
+ if (rsp.op_ret != -1) {
+ gf_dirent_free (&entries);
+ }
+
+ /* This is allocated by the libc while decoding RPC msg */
+ /* Hence no 'GF_FREE', but just 'free' */
+ if (rsp.buf.buf_val)
+ free (rsp.buf.buf_val);
+
+ return 0;
+}
+
+
+int
+client3_1_readdirp_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_readdirp_rsp rsp = {0,};
+ int32_t ret = 0;
+ gf_dirent_t entries;
+
+ frame = myframe;
+
+ INIT_LIST_HEAD (&entries.list);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_readdirp_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (rsp.op_ret > 0) {
+ gf_dirent_unserialize (&entries, rsp.buf.buf_val,
+ rsp.buf.buf_len);
+ }
+
+out:
+ STACK_UNWIND_STRICT (readdirp, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &entries);
+
+ if (rsp.op_ret != -1) {
+ gf_dirent_free (&entries);
+ }
+
+ if (rsp.buf.buf_val) {
+ /* don't use GF_FREE as this memory was allocated by libc */
+ free (rsp.buf.buf_val);
+ }
+
+ return 0;
+}
+
+
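+/* rename returns five iatts: the renamed entry itself plus pre/post
+ * attributes of both the old and the new parent directory. */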
+int
+client3_1_rename_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_rename_rsp rsp = {0,};
+ struct iatt stbuf = {0,};
+ struct iatt preoldparent = {0,};
+ struct iatt postoldparent = {0,};
+ struct iatt prenewparent = {0,};
+ struct iatt postnewparent = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_rename_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ gf_stat_to_iatt (&rsp.preoldparent, &preoldparent);
+ gf_stat_to_iatt (&rsp.postoldparent, &postoldparent);
+
+ gf_stat_to_iatt (&rsp.prenewparent, &prenewparent);
+ gf_stat_to_iatt (&rsp.postnewparent, &postnewparent);
+ }
+
+out:
+ STACK_UNWIND_STRICT (rename, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno),
+ &stbuf, &preoldparent, &postoldparent,
+ &prenewparent, &postnewparent);
+
+ return 0;
+}
+
+int
+client3_1_link_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ gfs3_link_rsp rsp = {0,};
+ struct iatt stbuf = {0,};
+ struct iatt preparent = {0,};
+ struct iatt postparent = {0,};
+ int ret = 0;
+ clnt_local_t *local = NULL;
+ inode_t *inode = NULL;
+
+ frame = myframe;
+
+ local = frame->local;
+ frame->local = NULL;
+ inode = local->loc.inode;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_link_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ gf_stat_to_iatt (&rsp.preparent, &preparent);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (link, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), inode,
+ &stbuf, &preparent, &postparent);
+
+ client_local_wipe (local);
+ return 0;
+}
+
+
+int
+client3_1_opendir_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ clnt_local_t *local = NULL;
+ clnt_conf_t *conf = NULL;
+ clnt_fd_ctx_t *fdctx = NULL;
+ ino_t ino = 0;
+ uint64_t gen = 0;
+ call_frame_t *frame = NULL;
+ fd_t *fd = NULL;
+ int ret = 0;
+ gfs3_opendir_rsp rsp = {0,};
+
+ frame = myframe;
+ local = frame->local;
+
+ if (local->op) {
+ local->op (req, iov, 1, myframe);
+ return 0;
+ }
+
+ frame->local = NULL;
+ conf = frame->this->private;
+ fd = local->fd;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_opendir_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 != rsp.op_ret) {
+ fdctx = GF_CALLOC (1, sizeof (*fdctx),
+ gf_client_mt_clnt_fdctx_t);
+ if (!fdctx) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOMEM;
+ goto out;
+ }
+
+ inode_ctx_get2 (fd->inode, frame->this, &ino, &gen);
+
+ fdctx->remote_fd = rsp.fd;
+ fdctx->inode = inode_ref (fd->inode);
+ fdctx->ino = ino;
+ fdctx->gen = gen;
+
+ fdctx->is_dir = 1;
+
+ INIT_LIST_HEAD (&fdctx->sfd_pos);
+
+ this_fd_set_ctx (fd, frame->this, &local->loc, fdctx);
+
+ pthread_mutex_lock (&conf->lock);
+ {
+ list_add_tail (&fdctx->sfd_pos, &conf->saved_fds);
+ }
+ pthread_mutex_unlock (&conf->lock);
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (opendir, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), fd);
+
+ client_local_wipe (local);
+
+ return 0;
+}
+
+
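+/* lookup_cbk is where stale inodes are caught: the (ino, gen) pair
+ * cached in the inode ctx is compared with the server's reply, and a
+ * mismatch on an inode we already knew about is turned into ESTALE. */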
+int
+client3_1_lookup_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ clnt_local_t *local = NULL;
+ call_frame_t *frame = NULL;
+ int ret = 0;
+ gfs3_lookup_rsp rsp = {0,};
+ struct iatt stbuf = {0,};
+ struct iatt postparent = {0,};
+ int op_errno = 0;
+ ino_t oldino = 0;
+ uint64_t oldgen = 0;
+ dict_t *xattr = NULL;
+ inode_t *inode = NULL;
+ char *buf = NULL;
+
+ frame = myframe;
+ local = frame->local;
+ inode = local->loc.inode;
+ frame->local = NULL;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_lookup_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ op_errno = gf_error_to_errno (rsp.op_errno);
+ gf_stat_to_iatt (&rsp.postparent, &postparent);
+
+ if (rsp.op_ret == 0) {
+ rsp.op_ret = -1;
+ gf_stat_to_iatt (&rsp.stat, &stbuf);
+
+ ret = inode_ctx_get2 (inode, frame->this, &oldino, &oldgen);
+ if (oldino != stbuf.ia_ino || oldgen != stbuf.ia_gen) {
+ if (oldino) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "LOOKUP %"PRId64"/%s (%s): "
+ "inode number changed from "
+ "{%"PRId64",%"PRId64"} to {%"PRId64",%"PRId64"}",
+ local->loc.parent ?
+ local->loc.parent->ino : (uint64_t) 0,
+ local->loc.name,
+ local->loc.path,
+ oldgen, oldino, stbuf.ia_gen, stbuf.ia_ino);
+ op_errno = ESTALE;
+ goto out;
+ }
+
+ ret = inode_ctx_put2 (inode, frame->this,
+ stbuf.ia_ino, stbuf.ia_gen);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "LOOKUP %"PRId64"/%s (%s) : "
+ "failed to set remote inode "
+ "number to inode ctx",
+ local->loc.parent ?
+ local->loc.parent->ino : (uint64_t) 0,
+ local->loc.name,
+ local->loc.path);
+ }
+ }
+
+ if (rsp.dict.dict_len > 0) {
+ xattr = dict_new();
+ GF_VALIDATE_OR_GOTO (frame->this->name, xattr, out);
+
+ buf = memdup (rsp.dict.dict_val, rsp.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (frame->this->name, buf, out);
+
+ ret = dict_unserialize (buf, rsp.dict.dict_len, &xattr);
+ if (ret < 0) {
+ gf_log (frame->this->name, GF_LOG_DEBUG,
+ "%s (%"PRId64"): failed to "
+ "unserialize dictionary",
+ local->loc.path, inode->ino);
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ xattr->extra_free = buf;
+ buf = NULL;
+ }
+
+ rsp.op_ret = 0;
+ }
+
+out:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT (lookup, frame, rsp.op_ret, op_errno, inode,
+ &stbuf, xattr, &postparent);
+
+ client_local_wipe (local);
+
+ if (xattr)
+ dict_unref (xattr);
+
+ if (rsp.dict.dict_val) {
+ /* don't use GF_FREE, this memory was allocated by libc
+ */
+ free (rsp.dict.dict_val);
+ rsp.dict.dict_val = NULL;
+ }
+
+ if (buf) {
+ GF_FREE (buf);
+ }
+
+ return 0;
+}
+
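+/* readv replies carry the file data outside the XDR body: the payload
+ * iobuf (req->rsp_procpayload) is wrapped in an iobref and passed up as
+ * a single iovec whose length is the op_ret byte count. */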
+int
+client3_1_readv_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ call_frame_t *frame = NULL;
+ struct iobref *iobref = NULL;
+ struct iovec vector = {0,};
+ struct iatt stat = {0,};
+ gfs3_read_rsp rsp = {0,};
+ int ret = 0;
+
+ frame = myframe;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_readv_rsp (*iov, &rsp);
+ if (ret < 0) {
+ gf_log ("", GF_LOG_ERROR, "error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (rsp.op_ret != -1) {
+ iobref = iobref_new ();
+ gf_stat_to_iatt (&rsp.stat, &stat);
+ vector.iov_len = rsp.op_ret;
+
+ if (rsp.op_ret > 0) {
+ vector.iov_base = req->rsp_procpayload->ptr;
+ iobref_add (iobref, req->rsp_procpayload);
+ }
+ }
+out:
+ STACK_UNWIND_STRICT (readv, frame, rsp.op_ret,
+ gf_error_to_errno (rsp.op_errno), &vector, 1,
+ &stat, iobref);
+
+ if (iobref) {
+ iobref_unref (iobref);
+ }
+
+ return 0;
+}
+
+/* Table Specific to FOPS */
+
+
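+/* Dispatch table for the 3.1 fop program: each GF_FOP_* entry names the
+ * procedure and pairs the request builder with its reply callback. */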
+rpc_clnt_procedure_t clnt3_1_fop_actors[GF_FOP_MAXVALUE] = {
+ [GF_FOP_NULL] = {"NULL", NULL, NULL },
+ [GF_FOP_STAT] = { "STAT", client3_1_stat, client3_1_stat_cbk },
+ [GF_FOP_READLINK] = { "READLINK", client3_1_readlink, client3_1_readlink_cbk },
+ [GF_FOP_MKNOD] = { "MKNOD", client3_1_mknod, client3_1_mknod_cbk },
+ [GF_FOP_MKDIR] = { "MKDIR", client3_1_mkdir, client3_1_mkdir_cbk },
+ [GF_FOP_UNLINK] = { "UNLINK", client3_1_unlink, client3_1_unlink_cbk },
+ [GF_FOP_RMDIR] = { "RMDIR", client3_1_rmdir, client3_1_rmdir_cbk },
+ [GF_FOP_SYMLINK] = { "SYMLINK", client3_1_symlink, client3_1_symlink_cbk },
+ [GF_FOP_RENAME] = { "RENAME", client3_1_rename, client3_1_rename_cbk },
+ [GF_FOP_LINK] = { "LINK", client3_1_link, client3_1_link_cbk },
+ [GF_FOP_TRUNCATE] = { "TRUNCATE", client3_1_truncate, client3_1_truncate_cbk },
+ [GF_FOP_OPEN] = { "OPEN", client3_1_open, client3_1_open_cbk },
+ [GF_FOP_READ] = { "READ", client3_1_readv, client3_1_readv_cbk },
+ [GF_FOP_WRITE] = { "WRITE", client3_1_writev, client3_1_writev_cbk },
+ [GF_FOP_STATFS] = { "STATFS", client3_1_statfs, client3_1_statfs_cbk },
+ [GF_FOP_FLUSH] = { "FLUSH", client3_1_flush, client3_1_flush_cbk },
+ [GF_FOP_FSYNC] = { "FSYNC", client3_1_fsync, client3_1_fsync_cbk },
+ [GF_FOP_SETXATTR] = { "SETXATTR", client3_1_setxattr, client3_1_setxattr_cbk },
+ [GF_FOP_GETXATTR] = { "GETXATTR", client3_1_getxattr, client3_1_getxattr_cbk },
+ [GF_FOP_REMOVEXATTR] = { "REMOVEXATTR", client3_1_removexattr, client3_1_removexattr_cbk },
+ [GF_FOP_OPENDIR] = { "OPENDIR", client3_1_opendir, client3_1_opendir_cbk },
+ [GF_FOP_FSYNCDIR] = { "FSYNCDIR", client3_1_fsyncdir, client3_1_fsyncdir_cbk },
+ [GF_FOP_ACCESS] = { "ACCESS", client3_1_access, client3_1_access_cbk },
+ [GF_FOP_CREATE] = { "CREATE", client3_1_create, client3_1_create_cbk },
+ [GF_FOP_FTRUNCATE] = { "FTRUNCATE", client3_1_ftruncate, client3_1_ftruncate_cbk },
+ [GF_FOP_FSTAT] = { "FSTAT", client3_1_fstat, client3_1_fstat_cbk },
+ [GF_FOP_LK] = { "LK", client3_1_lk, client3_1_lk_cbk },
+ [GF_FOP_LOOKUP] = { "LOOKUP", client3_1_lookup, client3_1_lookup_cbk },
+ [GF_FOP_READDIR] = { "READDIR", client3_1_readdir, client3_1_readdir_cbk },
+ [GF_FOP_INODELK] = { "INODELK", client3_1_inodelk, client3_1_inodelk_cbk },
+ [GF_FOP_FINODELK] = { "FINODELK", client3_1_finodelk, client3_1_finodelk_cbk },
+ [GF_FOP_ENTRYLK] = { "ENTRYLK", client3_1_entrylk, client3_1_entrylk_cbk },
+ [GF_FOP_FENTRYLK] = { "FENTRYLK", client3_1_fentrylk, client3_1_fentrylk_cbk },
+ [GF_FOP_CHECKSUM] = { "CHECKSUM", client3_1_checksum, client3_1_checksum_cbk },
+ [GF_FOP_XATTROP] = { "XATTROP", client3_1_xattrop, client3_1_xattrop_cbk },
+ [GF_FOP_FXATTROP] = { "FXATTROP", client3_1_fxattrop, client3_1_fxattrop_cbk },
+ [GF_FOP_FGETXATTR] = { "FGETXATTR", client3_1_fgetxattr, client3_1_fgetxattr_cbk },
+ [GF_FOP_FSETXATTR] = { "FSETXATTR", client3_1_fsetxattr, client3_1_fsetxattr_cbk },
+ [GF_FOP_RCHECKSUM] = { "RCHECKSUM", client3_1_rchecksum, client3_1_rchecksum_cbk },
+ [GF_FOP_SETATTR] = { "SETATTR", client3_1_setattr, client3_1_setattr_cbk },
+ [GF_FOP_FSETATTR] = { "FSETATTR", client3_1_fsetattr, client3_1_fsetattr_cbk },
+ [GF_FOP_READDIRP] = { "READDIRP", client3_1_readdirp, client3_1_readdirp_cbk },
+ [GF_FOP_FORGET] = { "FORGET", NULL, client3_1_release_cbk },
+ [GF_FOP_RELEASE] = { "RELEASE", client3_1_release, client3_1_release_cbk },
+ [GF_FOP_RELEASEDIR] = { "RELEASEDIR", client3_1_releasedir, client3_1_releasedir_cbk },
+// [GF_FOP_GETSPEC] = { "GETSPEC", client_getspec, client_getspec_cbk },
+};
+
+rpc_clnt_prog_t clnt3_1_fop_prog = {
+ .progname = "GlusterFS 3.1",
+ .prognum = GLUSTER3_1_FOP_PROGRAM,
+ .progver = GLUSTER3_1_FOP_VERSION,
+ .numproc = GLUSTER3_1_FOP_PROCCNT,
+ .actor = clnt3_1_fop_actors,
+};
+
+rpc_clnt_procedure_t clnt3_1_mgmt_actors[] = {
+ {0,}
+};
+
+rpc_clnt_prog_t clnt3_1_mgmt_prog = {
+ .progname = "Mgmt 3.1",
+ .prognum = GLUSTER1_MGMT_PROGRAM,
+ .progver = GLUSTER1_MGMT_VERSION,
+ .actor = clnt3_1_mgmt_actors,
+ .numproc = (sizeof (*clnt3_1_mgmt_actors) /
+ sizeof (rpc_clnt_procedure_t)),
+};
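
The table above is the heart of the client side of the new protocol: each GF_FOP_* slot carries a procedure name, the function that builds and submits the request, and the callback that decodes the reply, and clnt3_1_fop_prog / clnt3_1_mgmt_prog then publish those tables under a program number and version so the RPC layer can pick an actor by procedure number. A minimal, self-contained sketch of that dispatch-table pattern — using hypothetical stand-in names (proc_entry_t, prog_t, submit) rather than the GlusterFS types, so it compiles on its own and makes no claim about the real rpc-clnt API — might look like:

/* Illustrative sketch only: mirrors the {name, request-fn, reply-cbk}
 * dispatch-table pattern above with locally defined stand-in types. */
#include <stdio.h>
#include <stddef.h>

typedef int (*proc_fn_t) (const char *arg);

typedef struct {
        const char *procname;
        proc_fn_t   fn;         /* request builder, may be NULL */
        proc_fn_t   cbk;        /* reply handler, may be NULL   */
} proc_entry_t;

enum { PROC_NULL = 0, PROC_STAT, PROC_READ, PROC_MAXVALUE };

static int stat_fn  (const char *arg) { printf ("STAT(%s)\n", arg);      return 0; }
static int stat_cbk (const char *arg) { printf ("STAT(%s) done\n", arg); return 0; }

/* Designated initializers keep unset slots zeroed. */
static proc_entry_t fop_actors[PROC_MAXVALUE] = {
        [PROC_NULL] = { "NULL", NULL,    NULL     },
        [PROC_STAT] = { "STAT", stat_fn, stat_cbk },
};

typedef struct {
        const char   *progname;
        int           prognum;
        int           progver;
        size_t        numproc;
        proc_entry_t *actors;
} prog_t;

static prog_t fop_prog = {
        .progname = "example fops",
        .prognum  = 1234567,
        .progver  = 1,
        .numproc  = sizeof (fop_actors) / sizeof (fop_actors[0]),
        .actors   = fop_actors,
};

/* Dispatch by procedure number, treating a NULL fn as "not implemented". */
static int
submit (prog_t *prog, int procnum, const char *arg)
{
        if (procnum < 0 || (size_t) procnum >= prog->numproc)
                return -1;
        if (!prog->actors[procnum].fn)
                return -1;
        prog->actors[procnum].fn (arg);
        return prog->actors[procnum].cbk ? prog->actors[procnum].cbk (arg) : 0;
}

int
main (void)
{
        return submit (&fop_prog, PROC_STAT, "/tmp/file");
}

Because the table uses designated initializers indexed by the fop enum, any slot without an entry is zero-filled, which is why a dispatcher of this shape can safely treat a NULL function pointer as an unimplemented procedure.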
diff --git a/xlators/protocol/lib/Makefile.am b/xlators/protocol/lib/Makefile.am
index d471a3f9243..af437a64d6d 100644
--- a/xlators/protocol/lib/Makefile.am
+++ b/xlators/protocol/lib/Makefile.am
@@ -1,3 +1 @@
SUBDIRS = src
-
-CLEANFILES =
diff --git a/xlators/protocol/lib/src/Makefile.am b/xlators/protocol/lib/src/Makefile.am
index 6e0b5c4e0c4..ece59ef905a 100644
--- a/xlators/protocol/lib/src/Makefile.am
+++ b/xlators/protocol/lib/src/Makefile.am
@@ -3,12 +3,13 @@ lib_LTLIBRARIES = libgfproto1.la
libgfproto1_la_CFLAGS = -fPIC -Wall -g -shared -nostartfiles $(GF_CFLAGS) $(GF_DARWIN_LIBGLUSTERFS_CFLAGS)
libgfproto1_la_CPPFLAGS = -D_FILE_OFFSET_BITS=64 -D__USE_FILE_OFFSET64 -D_GNU_SOURCE \
- -D$(GF_HOST_OS) -DLIBDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/auth\" \
- -DTRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/transport\" \
- -I$(CONTRIBDIR)/rbtree -I$(top_srcdir)/libglusterfs/src/
+ -D$(GF_HOST_OS) -DLIBDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/auth\" \
+ -DTRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/transport\" \
+ -I$(CONTRIBDIR)/rbtree -I$(top_srcdir)/libglusterfs/src/ \
+ -I$(top_srcdir)/xlators/protocol/rpc/rpc-lib/src/
libgfproto1_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-libgfproto1_la_SOURCES = authenticate.c
+libgfproto1_la_SOURCES = authenticate.c protocol-common.c msg-xdr.c glusterfs-xdr.c
-noinst_HEADERS = authenticate.h
+noinst_HEADERS = authenticate.h protocol-common.h msg-xdr.h glusterfs-xdr.h
diff --git a/xlators/protocol/lib/src/authenticate.c b/xlators/protocol/lib/src/authenticate.c
index ff2b58162b7..5205b54df61 100644
--- a/xlators/protocol/lib/src/authenticate.c
+++ b/xlators/protocol/lib/src/authenticate.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2007-2009 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
This file is part of GlusterFS.
GlusterFS is free software; you can redistribute it and/or modify
@@ -68,7 +68,7 @@ init (dict_t *this,
handle = dlopen (auth_file, RTLD_LAZY);
if (!handle) {
- gf_log ("authenticate", GF_LOG_ERROR, "dlopen(%s): %s\n",
+ gf_log ("authenticate", GF_LOG_ERROR, "dlopen(%s): %s\n",
auth_file, dlerror ());
dict_set (this, key, data_from_dynptr (NULL, 0));
GF_FREE (auth_file);
@@ -76,7 +76,7 @@ init (dict_t *this,
return;
}
GF_FREE (auth_file);
-
+
authenticate = dlsym (handle, "gf_auth");
if (!authenticate) {
gf_log ("authenticate", GF_LOG_ERROR,
@@ -100,12 +100,12 @@ init (dict_t *this,
if (auth_handle->vol_opt->given_opt == NULL) {
gf_log ("authenticate", GF_LOG_DEBUG,
"volume option validation not specified");
- }
+ }
auth_handle->authenticate = authenticate;
auth_handle->handle = handle;
- dict_set (this, key,
+ dict_set (this, key,
data_from_dynptr (auth_handle, sizeof (*auth_handle)));
}
@@ -133,10 +133,10 @@ gf_auth_init (xlator_t *xl, dict_t *auth_modules)
while (pair) {
handle = data_to_ptr (pair->value);
if (handle) {
- list_add_tail (&(handle->vol_opt->list),
+ list_add_tail (&(handle->vol_opt->list),
&(xl->volume_options));
- if (-1 ==
- validate_xlator_volume_options (xl,
+ if (-1 ==
+ validate_xlator_volume_options (xl,
handle->vol_opt->given_opt)) {
gf_log ("authenticate", GF_LOG_ERROR,
"volume option validation "
@@ -158,7 +158,7 @@ gf_auth_init (xlator_t *xl, dict_t *auth_modules)
static dict_t *__input_params;
static dict_t *__config_params;
-void
+void
map (dict_t *this,
char *key,
data_t *value,
@@ -168,17 +168,17 @@ map (dict_t *this,
auth_fn_t authenticate;
auth_handle_t *handle = NULL;
- if (value && (handle = data_to_ptr (value)) &&
+ if (value && (handle = data_to_ptr (value)) &&
(authenticate = handle->authenticate)) {
- dict_set (res, key,
- int_to_data (authenticate (__input_params,
+ dict_set (res, key,
+ int_to_data (authenticate (__input_params,
__config_params)));
} else {
dict_set (res, key, int_to_data (AUTH_DONT_CARE));
}
}
-void
+void
reduce (dict_t *this,
char *key,
data_t *value,
@@ -206,11 +206,11 @@ reduce (dict_t *this,
}
}
-
-auth_result_t
-gf_authenticate (dict_t *input_params,
- dict_t *config_params,
- dict_t *auth_modules)
+
+auth_result_t
+gf_authenticate (dict_t *input_params,
+ dict_t *config_params,
+ dict_t *auth_modules)
{
dict_t *results = NULL;
int64_t result = AUTH_DONT_CARE;
@@ -235,12 +235,12 @@ gf_authenticate (dict_t *input_params,
"accepting remote-client %s", name);
result = AUTH_REJECT;
}
-
+
dict_destroy (results);
return result;
}
-void
+void
gf_auth_fini (dict_t *auth_modules)
{
int32_t dummy;
diff --git a/xlators/protocol/lib/src/authenticate.h b/xlators/protocol/lib/src/authenticate.h
index e777146524c..93d73741b0a 100644
--- a/xlators/protocol/lib/src/authenticate.h
+++ b/xlators/protocol/lib/src/authenticate.h
@@ -42,7 +42,7 @@ typedef enum {
AUTH_DONT_CARE
} auth_result_t;
-typedef auth_result_t (*auth_fn_t) (dict_t *input_params,
+typedef auth_result_t (*auth_fn_t) (dict_t *input_params,
dict_t *config_params);
typedef struct {
@@ -51,8 +51,8 @@ typedef struct {
volume_opt_list_t *vol_opt;
} auth_handle_t;
-auth_result_t gf_authenticate (dict_t *input_params,
- dict_t *config_params,
+auth_result_t gf_authenticate (dict_t *input_params,
+ dict_t *config_params,
dict_t *auth_modules);
int32_t gf_auth_init (xlator_t *xl, dict_t *auth_modules);
void gf_auth_fini (dict_t *auth_modules);
diff --git a/xlators/protocol/lib/src/glusterfs-xdr.c b/xlators/protocol/lib/src/glusterfs-xdr.c
new file mode 100644
index 00000000000..2eccff29c6e
--- /dev/null
+++ b/xlators/protocol/lib/src/glusterfs-xdr.c
@@ -0,0 +1,1847 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "glusterfs-xdr.h"
+#include "iatt.h"
+
+bool_t
+xdr_gf_statfs (XDR *xdrs, gf_statfs *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->bsize))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->frsize))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->blocks))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->bfree))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->bavail))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->files))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ffree))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->favail))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->fsid))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->flag))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->namemax))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_flock (XDR *xdrs, gf_flock *objp)
+{
+
+ if (!xdr_u_int (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->whence))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->start))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->len))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->pid))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_iatt (XDR *xdrs, gf_iatt *objp)
+{
+ register int32_t *buf;
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->ia_ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_gen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_dev))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 4 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_u_int (xdrs, &objp->mode))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_nlink))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_uid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_gid))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_U_LONG(buf, objp->mode);
+ IXDR_PUT_U_LONG(buf, objp->ia_nlink);
+ IXDR_PUT_U_LONG(buf, objp->ia_uid);
+ IXDR_PUT_U_LONG(buf, objp->ia_gid);
+ }
+ if (!xdr_u_quad_t (xdrs, &objp->ia_rdev))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_size))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_blksize))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_blocks))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 6 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_u_int (xdrs, &objp->ia_atime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_atime_nsec))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_mtime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_mtime_nsec))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_ctime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_ctime_nsec))
+ return FALSE;
+ } else {
+ IXDR_PUT_U_LONG(buf, objp->ia_atime);
+ IXDR_PUT_U_LONG(buf, objp->ia_atime_nsec);
+ IXDR_PUT_U_LONG(buf, objp->ia_mtime);
+ IXDR_PUT_U_LONG(buf, objp->ia_mtime_nsec);
+ IXDR_PUT_U_LONG(buf, objp->ia_ctime);
+ IXDR_PUT_U_LONG(buf, objp->ia_ctime_nsec);
+ }
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->ia_ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_gen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_dev))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 4 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_u_int (xdrs, &objp->mode))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_nlink))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_uid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_gid))
+ return FALSE;
+
+ } else {
+ objp->mode = IXDR_GET_U_LONG(buf);
+ objp->ia_nlink = IXDR_GET_U_LONG(buf);
+ objp->ia_uid = IXDR_GET_U_LONG(buf);
+ objp->ia_gid = IXDR_GET_U_LONG(buf);
+ }
+ if (!xdr_u_quad_t (xdrs, &objp->ia_rdev))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_size))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_blksize))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_blocks))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 6 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_u_int (xdrs, &objp->ia_atime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_atime_nsec))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_mtime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_mtime_nsec))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_ctime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_ctime_nsec))
+ return FALSE;
+ } else {
+ objp->ia_atime = IXDR_GET_U_LONG(buf);
+ objp->ia_atime_nsec = IXDR_GET_U_LONG(buf);
+ objp->ia_mtime = IXDR_GET_U_LONG(buf);
+ objp->ia_mtime_nsec = IXDR_GET_U_LONG(buf);
+ objp->ia_ctime = IXDR_GET_U_LONG(buf);
+ objp->ia_ctime_nsec = IXDR_GET_U_LONG(buf);
+ }
+ return TRUE;
+ }
+
+ if (!xdr_u_quad_t (xdrs, &objp->ia_ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_gen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_dev))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->mode))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_nlink))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_uid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_gid))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_rdev))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_size))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_blksize))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ia_blocks))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_atime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_atime_nsec))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_mtime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_mtime_nsec))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_ctime))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ia_ctime_nsec))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_stat_req (XDR *xdrs, gfs3_stat_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_stat_rsp (XDR *xdrs, gfs3_stat_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_readlink_req (XDR *xdrs, gfs3_readlink_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->size))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_readlink_rsp (XDR *xdrs, gfs3_readlink_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->buf))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_mknod_req (XDR *xdrs, gfs3_mknod_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->dev))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->mode))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_mknod_rsp (XDR *xdrs, gfs3_mknod_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_mkdir_req (XDR *xdrs, gfs3_mkdir_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->mode))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_mkdir_rsp (XDR *xdrs, gfs3_mkdir_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_unlink_req (XDR *xdrs, gfs3_unlink_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_unlink_rsp (XDR *xdrs, gfs3_unlink_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_rmdir_req (XDR *xdrs, gfs3_rmdir_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_rmdir_rsp (XDR *xdrs, gfs3_rmdir_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_symlink_req (XDR *xdrs, gfs3_symlink_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->linkname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_symlink_rsp (XDR *xdrs, gfs3_symlink_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_rename_req (XDR *xdrs, gfs3_rename_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->oldpar))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->oldgen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->newpar))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->newgen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->oldpath, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->oldbname, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->newpath, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->newbname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_rename_rsp (XDR *xdrs, gfs3_rename_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preoldparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postoldparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->prenewparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postnewparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_link_req (XDR *xdrs, gfs3_link_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->oldino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->oldgen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->newpar))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->newgen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->oldpath, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->newpath, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->newbname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_link_rsp (XDR *xdrs, gfs3_link_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_truncate_req (XDR *xdrs, gfs3_truncate_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_truncate_rsp (XDR *xdrs, gfs3_truncate_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->prestat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->poststat))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_open_req (XDR *xdrs, gfs3_open_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->wbflags))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_open_rsp (XDR *xdrs, gfs3_open_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_read_req (XDR *xdrs, gfs3_read_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->size))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_read_rsp (XDR *xdrs, gfs3_read_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->size))
+ return FALSE;
+
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_lookup_req (XDR *xdrs, gfs3_lookup_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val,
+ (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_lookup_rsp (XDR *xdrs, gfs3_lookup_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val,
+ (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_write_req (XDR *xdrs, gfs3_write_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->size))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_write_rsp (XDR *xdrs, gfs3_write_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->prestat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->poststat))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_statfs_req (XDR *xdrs, gfs3_statfs_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_statfs_rsp (XDR *xdrs, gfs3_statfs_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_statfs (xdrs, &objp->statfs))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_lk_req (XDR *xdrs, gfs3_lk_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->cmd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_gf_flock (xdrs, &objp->flock))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_lk_rsp (XDR *xdrs, gfs3_lk_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_flock (xdrs, &objp->flock))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_inodelk_req (XDR *xdrs, gfs3_inodelk_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->cmd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_gf_flock (xdrs, &objp->flock))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volume, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_finodelk_req (XDR *xdrs, gfs3_finodelk_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->cmd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_gf_flock (xdrs, &objp->flock))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volume, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_flush_req (XDR *xdrs, gfs3_flush_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fsync_req (XDR *xdrs, gfs3_fsync_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->data))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fsync_rsp (XDR *xdrs, gfs3_fsync_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->prestat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->poststat))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_setxattr_req (XDR *xdrs, gfs3_setxattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fsetxattr_req (XDR *xdrs, gfs3_fsetxattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_xattrop_req (XDR *xdrs, gfs3_xattrop_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_xattrop_rsp (XDR *xdrs, gfs3_xattrop_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fxattrop_req (XDR *xdrs, gfs3_fxattrop_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fxattrop_rsp (XDR *xdrs, gfs3_fxattrop_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_getxattr_req (XDR *xdrs, gfs3_getxattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->namelen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->name, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_getxattr_rsp (XDR *xdrs, gfs3_getxattr_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fgetxattr_req (XDR *xdrs, gfs3_fgetxattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->namelen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->name, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fgetxattr_rsp (XDR *xdrs, gfs3_fgetxattr_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_removexattr_req (XDR *xdrs, gfs3_removexattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->name, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_opendir_req (XDR *xdrs, gfs3_opendir_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_opendir_rsp (XDR *xdrs, gfs3_opendir_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fsyncdir_req (XDR *xdrs, gfs3_fsyncdir_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->data))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_readdir_req (XDR *xdrs, gfs3_readdir_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->size))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_readdir_rsp (XDR *xdrs, gfs3_readdir_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->buf.buf_val, (u_int *) &objp->buf.buf_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_readdirp_req (XDR *xdrs, gfs3_readdirp_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->size))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_readdirp_rsp (XDR *xdrs, gfs3_readdirp_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->buf.buf_val, (u_int *) &objp->buf.buf_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_setvolume_req (XDR *xdrs, gf_setvolume_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_setvolume_rsp (XDR *xdrs, gf_setvolume_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_access_req (XDR *xdrs, gfs3_access_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->mask))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_create_req (XDR *xdrs, gfs3_create_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->par))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->mode))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->bname, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_create_rsp (XDR *xdrs, gfs3_create_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->preparent))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->postparent))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_ftruncate_req (XDR *xdrs, gfs3_ftruncate_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_ftruncate_rsp (XDR *xdrs, gfs3_ftruncate_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->prestat))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->poststat))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fstat_req (XDR *xdrs, gfs3_fstat_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fstat_rsp (XDR *xdrs, gfs3_fstat_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stat))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_entrylk_req (XDR *xdrs, gfs3_entrylk_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->cmd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->namelen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->name, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volume, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fentrylk_req (XDR *xdrs, gfs3_fentrylk_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->cmd))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->type))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->namelen))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->name, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->volume, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_checksum_req (XDR *xdrs, gfs3_checksum_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flag))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_checksum_rsp (XDR *xdrs, gfs3_checksum_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->fchecksum.fchecksum_val, (u_int *) &objp->fchecksum.fchecksum_len, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dchecksum.dchecksum_val, (u_int *) &objp->dchecksum.dchecksum_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_setattr_req (XDR *xdrs, gfs3_setattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stbuf))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->valid))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->path, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_setattr_rsp (XDR *xdrs, gfs3_setattr_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpre))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpost))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fsetattr_req (XDR *xdrs, gfs3_fsetattr_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->stbuf))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->valid))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_fsetattr_rsp (XDR *xdrs, gfs3_fsetattr_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpre))
+ return FALSE;
+ if (!xdr_gf_iatt (xdrs, &objp->statpost))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_rchecksum_req (XDR *xdrs, gfs3_rchecksum_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->offset))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->len))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_rchecksum_rsp (XDR *xdrs, gfs3_rchecksum_rsp *objp)
+{
+ register int32_t *buf;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->weak_checksum))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ IXDR_PUT_U_LONG(buf, objp->weak_checksum);
+ }
+ if (!xdr_bytes (xdrs, (char **)&objp->strong_checksum.strong_checksum_val, (u_int *) &objp->strong_checksum.strong_checksum_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->weak_checksum))
+ return FALSE;
+
+ } else {
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ objp->weak_checksum = IXDR_GET_U_LONG(buf);
+ }
+ if (!xdr_bytes (xdrs, (char **)&objp->strong_checksum.strong_checksum_val, (u_int *) &objp->strong_checksum.strong_checksum_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->weak_checksum))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->strong_checksum.strong_checksum_val, (u_int *) &objp->strong_checksum.strong_checksum_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_getspec_req (XDR *xdrs, gf_getspec_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->key, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_getspec_rsp (XDR *xdrs, gf_getspec_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->spec, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_log_req (XDR *xdrs, gf_log_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->msg.msg_val, (u_int *) &objp->msg.msg_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_notify_req (XDR *xdrs, gf_notify_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->buf, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_notify_rsp (XDR *xdrs, gf_notify_rsp *objp)
+{
+ register int32_t *buf;
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ IXDR_PUT_U_LONG(buf, objp->flags);
+ }
+ if (!xdr_string (xdrs, &objp->buf, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+
+ } else {
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ objp->flags = IXDR_GET_U_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->buf, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->buf, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_releasedir_req (XDR *xdrs, gfs3_releasedir_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gfs3_release_req (XDR *xdrs, gfs3_release_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->ino))
+ return FALSE;
+ if (!xdr_u_quad_t (xdrs, &objp->gen))
+ return FALSE;
+ if (!xdr_quad_t (xdrs, &objp->fd))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_common_rsp (XDR *xdrs, gf_common_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+
+bool_t
+xdr_gf_dump_version_req (XDR *xdrs, gf_dump_version_req *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->key, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gf_dump_version_rsp (XDR *xdrs, gf_dump_version_rsp *objp)
+{
+
+ if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->flags))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->msg.msg_val, (u_int *) &objp->msg.msg_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp)
+{
+ register int32_t *buf;
+
+ int i;
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_u_int (xdrs, &objp->pid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->uid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->gid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ngrps))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->groups, 16,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return FALSE;
+ } else {
+ IXDR_PUT_U_LONG(buf, objp->pid);
+ IXDR_PUT_U_LONG(buf, objp->uid);
+ IXDR_PUT_U_LONG(buf, objp->gid);
+ IXDR_PUT_U_LONG(buf, objp->ngrps);
+ {
+ register u_int *genp;
+
+ for (i = 0, genp = objp->groups;
+ i < 16; ++i) {
+ IXDR_PUT_U_LONG(buf, *genp++);
+ }
+ }
+ }
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_u_int (xdrs, &objp->pid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->uid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->gid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ngrps))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->groups, 16,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return FALSE;
+ } else {
+ objp->pid = IXDR_GET_U_LONG(buf);
+ objp->uid = IXDR_GET_U_LONG(buf);
+ objp->gid = IXDR_GET_U_LONG(buf);
+ objp->ngrps = IXDR_GET_U_LONG(buf);
+ {
+ register u_int *genp;
+
+ for (i = 0, genp = objp->groups;
+ i < 16; ++i) {
+ *genp++ = IXDR_GET_U_LONG(buf);
+ }
+ }
+ }
+ return TRUE;
+ }
+
+ if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->pid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->uid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->gid))
+ return FALSE;
+ if (!xdr_u_int (xdrs, &objp->ngrps))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->groups, 16,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return FALSE;
+ return TRUE;
+}
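
Every routine in this generated file follows the same shape: serialize or deserialize the fields in declaration order, return FALSE on the first failing xdr_* call, and, for the larger structs, use XDR_INLINE as a fast path when several 32-bit fields can be packed or unpacked at once. A small round trip through a routine of that shape — with a hypothetical demo_stat_req standing in for the real gfs3_* types, and assuming the same ONC RPC <rpc/rpc.h> interface this file is built against — can be exercised like this:

/* Illustrative sketch only; not part of the patch. */
#include <rpc/rpc.h>
#include <stdio.h>

typedef struct {
        u_quad_t  gfs_id;
        u_quad_t  ino;
        char     *path;
} demo_stat_req;

static bool_t
xdr_demo_stat_req (XDR *xdrs, demo_stat_req *objp)
{
        /* same field-by-field, fail-fast shape as the generated routines */
        if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
                return FALSE;
        if (!xdr_u_quad_t (xdrs, &objp->ino))
                return FALSE;
        if (!xdr_string (xdrs, &objp->path, ~0))
                return FALSE;
        return TRUE;
}

int
main (void)
{
        char          buf[256];
        XDR           xdr;
        demo_stat_req req = { 42, 7, "/exports/demo" };
        demo_stat_req out = { 0, 0, NULL };   /* NULL path => xdr_string allocates */
        u_int         len = 0;

        xdrmem_create (&xdr, buf, sizeof (buf), XDR_ENCODE);
        if (!xdr_demo_stat_req (&xdr, &req))
                return 1;
        len = xdr_getpos (&xdr);              /* bytes actually consumed */
        xdr_destroy (&xdr);

        xdrmem_create (&xdr, buf, len, XDR_DECODE);
        if (!xdr_demo_stat_req (&xdr, &out))
                return 1;
        printf ("ino=%llu path=%s\n", (unsigned long long) out.ino, out.path);

        xdr_free ((xdrproc_t) xdr_demo_stat_req, (char *) &out);
        xdr_destroy (&xdr);
        return 0;
}

The same xdr_demo_stat_req routine serves both directions because the XDR handle carries the operation (XDR_ENCODE or XDR_DECODE); on decode, a NULL string pointer lets xdr_string() allocate the buffer, which is why the sketch releases the decoded object with xdr_free() afterwards.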
diff --git a/xlators/protocol/lib/src/glusterfs-xdr.h b/xlators/protocol/lib/src/glusterfs-xdr.h
new file mode 100644
index 00000000000..3fb6d0bd355
--- /dev/null
+++ b/xlators/protocol/lib/src/glusterfs-xdr.h
@@ -0,0 +1,1337 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _GLUSTERFS3_H_RPCGEN
+#define _GLUSTERFS3_H_RPCGEN
+
+#include <rpc/rpc.h>
+#include "iatt.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define GF_O_ACCMODE 003
+#define GF_O_RDONLY 00
+#define GF_O_WRONLY 01
+#define GF_O_RDWR 02
+#define GF_O_CREAT 0100
+#define GF_O_EXCL 0200
+#define GF_O_NOCTTY 0400
+#define GF_O_TRUNC 01000
+#define GF_O_APPEND 02000
+#define GF_O_NONBLOCK 04000
+#define GF_O_SYNC 010000
+#define GF_O_ASYNC 020000
+
+#define GF_O_DIRECT 040000
+#define GF_O_DIRECTORY 0200000
+#define GF_O_NOFOLLOW 0400000
+#define GF_O_NOATIME 01000000
+#define GF_O_CLOEXEC 02000000
+
+#define GF_O_LARGEFILE 0100000
+
+#define XLATE_BIT(from, to, bit) do { \
+ if (from & bit) \
+ to = to | GF_##bit; \
+ } while (0)
+
+#define UNXLATE_BIT(from, to, bit) do { \
+ if (from & GF_##bit) \
+ to = to | bit; \
+ } while (0)
+
+#define XLATE_ACCESSMODE(from, to) do { \
+ switch (from & O_ACCMODE) { \
+ case O_RDONLY: to |= GF_O_RDONLY; \
+ break; \
+ case O_WRONLY: to |= GF_O_WRONLY; \
+ break; \
+ case O_RDWR: to |= GF_O_RDWR; \
+ break; \
+ } \
+ } while (0)
+
+#define UNXLATE_ACCESSMODE(from, to) do { \
+ switch (from & GF_O_ACCMODE) { \
+ case GF_O_RDONLY: to |= O_RDONLY; \
+ break; \
+ case GF_O_WRONLY: to |= O_WRONLY; \
+ break; \
+ case GF_O_RDWR: to |= O_RDWR; \
+ break; \
+ } \
+ } while (0)
+
+static inline uint32_t
+gf_flags_from_flags (uint32_t flags)
+{
+ uint32_t gf_flags = 0;
+
+ XLATE_ACCESSMODE (flags, gf_flags);
+
+ XLATE_BIT (flags, gf_flags, O_CREAT);
+ XLATE_BIT (flags, gf_flags, O_EXCL);
+ XLATE_BIT (flags, gf_flags, O_NOCTTY);
+ XLATE_BIT (flags, gf_flags, O_TRUNC);
+ XLATE_BIT (flags, gf_flags, O_APPEND);
+ XLATE_BIT (flags, gf_flags, O_NONBLOCK);
+ XLATE_BIT (flags, gf_flags, O_SYNC);
+ XLATE_BIT (flags, gf_flags, O_ASYNC);
+
+ XLATE_BIT (flags, gf_flags, O_DIRECT);
+ XLATE_BIT (flags, gf_flags, O_DIRECTORY);
+ XLATE_BIT (flags, gf_flags, O_NOFOLLOW);
+#ifdef O_NOATIME
+ XLATE_BIT (flags, gf_flags, O_NOATIME);
+#endif
+#ifdef O_CLOEXEC
+ XLATE_BIT (flags, gf_flags, O_CLOEXEC);
+#endif
+ XLATE_BIT (flags, gf_flags, O_LARGEFILE);
+
+ return gf_flags;
+}
+
+static inline uint32_t
+gf_flags_to_flags (uint32_t gf_flags)
+{
+ uint32_t flags = 0;
+
+ UNXLATE_ACCESSMODE (gf_flags, flags);
+
+ UNXLATE_BIT (gf_flags, flags, O_CREAT);
+ UNXLATE_BIT (gf_flags, flags, O_EXCL);
+ UNXLATE_BIT (gf_flags, flags, O_NOCTTY);
+ UNXLATE_BIT (gf_flags, flags, O_TRUNC);
+ UNXLATE_BIT (gf_flags, flags, O_APPEND);
+ UNXLATE_BIT (gf_flags, flags, O_NONBLOCK);
+ UNXLATE_BIT (gf_flags, flags, O_SYNC);
+ UNXLATE_BIT (gf_flags, flags, O_ASYNC);
+
+ UNXLATE_BIT (gf_flags, flags, O_DIRECT);
+ UNXLATE_BIT (gf_flags, flags, O_DIRECTORY);
+ UNXLATE_BIT (gf_flags, flags, O_NOFOLLOW);
+#ifdef O_NOATIME
+ UNXLATE_BIT (gf_flags, flags, O_NOATIME);
+#endif
+#ifdef O_CLOEXEC
+ UNXLATE_BIT (gf_flags, flags, O_CLOEXEC);
+#endif
+ UNXLATE_BIT (gf_flags, flags, O_LARGEFILE);
+
+ return flags;
+}
+
+
+struct gf_statfs {
+ u_quad_t bsize;
+ u_quad_t frsize;
+ u_quad_t blocks;
+ u_quad_t bfree;
+ u_quad_t bavail;
+ u_quad_t files;
+ u_quad_t ffree;
+ u_quad_t favail;
+ u_quad_t fsid;
+ u_quad_t flag;
+ u_quad_t namemax;
+};
+typedef struct gf_statfs gf_statfs;
+
+static inline void
+gf_statfs_to_statfs (struct gf_statfs *gf_stat, struct statvfs *stat)
+{
+ if (!stat || !gf_stat)
+ return;
+
+ stat->f_bsize = (gf_stat->bsize);
+ stat->f_frsize = (gf_stat->frsize);
+ stat->f_blocks = (gf_stat->blocks);
+ stat->f_bfree = (gf_stat->bfree);
+ stat->f_bavail = (gf_stat->bavail);
+ stat->f_files = (gf_stat->files);
+ stat->f_ffree = (gf_stat->ffree);
+ stat->f_favail = (gf_stat->favail);
+ stat->f_fsid = (gf_stat->fsid);
+ stat->f_flag = (gf_stat->flag);
+ stat->f_namemax = (gf_stat->namemax);
+}
+
+
+static inline void
+gf_statfs_from_statfs (struct gf_statfs *gf_stat, struct statvfs *stat)
+{
+ if (!stat || !gf_stat)
+ return;
+
+ gf_stat->bsize = stat->f_bsize;
+ gf_stat->frsize = stat->f_frsize;
+ gf_stat->blocks = stat->f_blocks;
+ gf_stat->bfree = stat->f_bfree;
+ gf_stat->bavail = stat->f_bavail;
+ gf_stat->files = stat->f_files;
+ gf_stat->ffree = stat->f_ffree;
+ gf_stat->favail = stat->f_favail;
+ gf_stat->fsid = stat->f_fsid;
+ gf_stat->flag = stat->f_flag;
+ gf_stat->namemax = stat->f_namemax;
+}
+
+struct gf_flock {
+ u_int type;
+ u_int whence;
+ u_quad_t start;
+ u_quad_t len;
+ u_int pid;
+};
+typedef struct gf_flock gf_flock;
+
+
+static inline void
+gf_flock_to_flock (struct gf_flock *gf_flock, struct flock *flock)
+{
+ if (!flock || !gf_flock)
+ return;
+
+ flock->l_type = gf_flock->type;
+ flock->l_whence = gf_flock->whence;
+ flock->l_start = gf_flock->start;
+ flock->l_len = gf_flock->len;
+ flock->l_pid = gf_flock->pid;
+}
+
+
+static inline void
+gf_flock_from_flock (struct gf_flock *gf_flock, struct flock *flock)
+{
+ if (!flock || !gf_flock)
+ return;
+
+ gf_flock->type = (flock->l_type);
+ gf_flock->whence = (flock->l_whence);
+ gf_flock->start = (flock->l_start);
+ gf_flock->len = (flock->l_len);
+ gf_flock->pid = (flock->l_pid);
+}
+
+struct gf_iatt {
+ u_quad_t ia_ino;
+ u_quad_t ia_gen;
+ u_quad_t ia_dev;
+ u_int mode;
+ u_int ia_nlink;
+ u_int ia_uid;
+ u_int ia_gid;
+ u_quad_t ia_rdev;
+ u_quad_t ia_size;
+ u_int ia_blksize;
+ u_quad_t ia_blocks;
+ u_int ia_atime;
+ u_int ia_atime_nsec;
+ u_int ia_mtime;
+ u_int ia_mtime_nsec;
+ u_int ia_ctime;
+ u_int ia_ctime_nsec;
+} __attribute__((packed));
+typedef struct gf_iatt gf_iatt;
+
+
+static inline void
+gf_stat_to_iatt (struct gf_iatt *gf_stat, struct iatt *iatt)
+{
+ if (!iatt || !gf_stat)
+ return;
+
+ iatt->ia_ino = gf_stat->ia_ino ;
+ iatt->ia_gen = gf_stat->ia_gen ;
+ iatt->ia_dev = gf_stat->ia_dev ;
+ iatt->ia_type = ia_type_from_st_mode (gf_stat->mode) ;
+ iatt->ia_prot = ia_prot_from_st_mode (gf_stat->mode) ;
+ iatt->ia_nlink = gf_stat->ia_nlink ;
+ iatt->ia_uid = gf_stat->ia_uid ;
+ iatt->ia_gid = gf_stat->ia_gid ;
+ iatt->ia_rdev = gf_stat->ia_rdev ;
+ iatt->ia_size = gf_stat->ia_size ;
+ iatt->ia_blksize = gf_stat->ia_blksize ;
+ iatt->ia_blocks = gf_stat->ia_blocks ;
+ iatt->ia_atime = gf_stat->ia_atime ;
+ iatt->ia_atime_nsec = gf_stat->ia_atime_nsec ;
+ iatt->ia_mtime = gf_stat->ia_mtime ;
+ iatt->ia_mtime_nsec = gf_stat->ia_mtime_nsec ;
+ iatt->ia_ctime = gf_stat->ia_ctime ;
+ iatt->ia_ctime_nsec = gf_stat->ia_ctime_nsec ;
+}
+
+
+static inline void
+gf_stat_from_iatt (struct gf_iatt *gf_stat, struct iatt *iatt)
+{
+ if (!iatt || !gf_stat)
+ return;
+
+ gf_stat->ia_ino = iatt->ia_ino;
+ gf_stat->ia_gen = iatt->ia_gen;
+ gf_stat->ia_dev = iatt->ia_dev;
+ gf_stat->mode = st_mode_from_ia (iatt->ia_prot, iatt->ia_type);
+ gf_stat->ia_nlink = iatt->ia_nlink;
+ gf_stat->ia_uid = iatt->ia_uid;
+ gf_stat->ia_gid = iatt->ia_gid;
+ gf_stat->ia_rdev = iatt->ia_rdev;
+ gf_stat->ia_size = iatt->ia_size;
+ gf_stat->ia_blksize = iatt->ia_blksize;
+ gf_stat->ia_blocks = iatt->ia_blocks;
+ gf_stat->ia_atime = iatt->ia_atime;
+ gf_stat->ia_atime_nsec = iatt->ia_atime_nsec;
+ gf_stat->ia_mtime = iatt->ia_mtime;
+ gf_stat->ia_mtime_nsec = iatt->ia_mtime_nsec;
+ gf_stat->ia_ctime = iatt->ia_ctime;
+ gf_stat->ia_ctime_nsec = iatt->ia_ctime_nsec;
+}
+
+
+struct gf_dirent_nb {
+ uint64_t d_ino;
+ uint64_t d_off;
+ uint32_t d_len;
+ uint32_t d_type;
+ struct gf_iatt d_stat;
+ char d_name[0];
+} __attribute__((packed));
+
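+/* Usage sketch (illustrative only): readdirp replies carry a buffer of
+ * gf_dirent_nb entries packed back to back.  Assuming d_len counts the bytes
+ * of d_name stored after each fixed-size header (an assumption, not spelled
+ * out here), such a buffer could be walked as:
+ *
+ *     char *p = buf;
+ *     while (p + sizeof (struct gf_dirent_nb) <= buf + buf_len) {
+ *             struct gf_dirent_nb *entry = (struct gf_dirent_nb *)p;
+ *             // use entry->d_ino, entry->d_stat, entry->d_name ...
+ *             p += sizeof (struct gf_dirent_nb) + entry->d_len;
+ *     }
+ */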
+
+/* Gluster FS Payload structures */
+
+struct gfs3_stat_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ char *path;
+};
+typedef struct gfs3_stat_req gfs3_stat_req;
+
+struct gfs3_stat_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+};
+typedef struct gfs3_stat_rsp gfs3_stat_rsp;
+
+struct gfs3_readlink_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int size;
+ char *path;
+};
+typedef struct gfs3_readlink_req gfs3_readlink_req;
+
+struct gfs3_readlink_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt buf;
+ char *path;
+};
+typedef struct gfs3_readlink_rsp gfs3_readlink_rsp;
+
+struct gfs3_mknod_req {
+ u_quad_t gfs_id;
+ u_quad_t par;
+ u_quad_t gen;
+ u_quad_t dev;
+ u_int mode;
+ char *path;
+ char *bname;
+};
+typedef struct gfs3_mknod_req gfs3_mknod_req;
+
+struct gfs3_mknod_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_mknod_rsp gfs3_mknod_rsp;
+
+struct gfs3_mkdir_req {
+ u_quad_t gfs_id;
+ u_quad_t par;
+ u_quad_t gen;
+ u_int mode;
+ char *path;
+ char *bname;
+};
+typedef struct gfs3_mkdir_req gfs3_mkdir_req;
+
+struct gfs3_mkdir_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_mkdir_rsp gfs3_mkdir_rsp;
+
+struct gfs3_unlink_req {
+ u_quad_t gfs_id;
+ u_quad_t par;
+ u_quad_t gen;
+ char *path;
+ char *bname;
+};
+typedef struct gfs3_unlink_req gfs3_unlink_req;
+
+struct gfs3_unlink_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_unlink_rsp gfs3_unlink_rsp;
+
+struct gfs3_rmdir_req {
+ u_quad_t gfs_id;
+ u_quad_t par;
+ u_quad_t gen;
+ char *path;
+ char *bname;
+};
+typedef struct gfs3_rmdir_req gfs3_rmdir_req;
+
+struct gfs3_rmdir_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_rmdir_rsp gfs3_rmdir_rsp;
+
+struct gfs3_symlink_req {
+ u_quad_t gfs_id;
+ u_quad_t par;
+ u_quad_t gen;
+ char *path;
+ char *bname;
+ char *linkname;
+};
+typedef struct gfs3_symlink_req gfs3_symlink_req;
+
+struct gfs3_symlink_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_symlink_rsp gfs3_symlink_rsp;
+
+struct gfs3_rename_req {
+ u_quad_t gfs_id;
+ u_quad_t oldpar;
+ u_quad_t oldgen;
+ u_quad_t newpar;
+ u_quad_t newgen;
+ char *oldpath;
+ char *oldbname;
+ char *newpath;
+ char *newbname;
+};
+typedef struct gfs3_rename_req gfs3_rename_req;
+
+struct gfs3_rename_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preoldparent;
+ struct gf_iatt postoldparent;
+ struct gf_iatt prenewparent;
+ struct gf_iatt postnewparent;
+};
+typedef struct gfs3_rename_rsp gfs3_rename_rsp;
+
+struct gfs3_link_req {
+ u_quad_t gfs_id;
+ u_quad_t oldino;
+ u_quad_t oldgen;
+ u_quad_t newpar;
+ u_quad_t newgen;
+ char *oldpath;
+ char *newpath;
+ char *newbname;
+};
+typedef struct gfs3_link_req gfs3_link_req;
+
+struct gfs3_link_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_link_rsp gfs3_link_rsp;
+
+struct gfs3_truncate_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_quad_t offset;
+ char *path;
+};
+typedef struct gfs3_truncate_req gfs3_truncate_req;
+
+struct gfs3_truncate_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+};
+typedef struct gfs3_truncate_rsp gfs3_truncate_rsp;
+
+struct gfs3_open_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int flags;
+ u_int wbflags;
+ char *path;
+};
+typedef struct gfs3_open_req gfs3_open_req;
+
+struct gfs3_open_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ quad_t fd;
+};
+typedef struct gfs3_open_rsp gfs3_open_rsp;
+
+struct gfs3_read_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_quad_t offset;
+ u_int size;
+};
+typedef struct gfs3_read_req gfs3_read_req;
+
+struct gfs3_read_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ u_int size;
+} __attribute__((packed));
+typedef struct gfs3_read_rsp gfs3_read_rsp;
+
+struct gfs3_lookup_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t par;
+ u_quad_t gen;
+ u_int flags;
+ char *path;
+ char *bname;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_lookup_req gfs3_lookup_req;
+
+struct gfs3_lookup_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt postparent;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_lookup_rsp gfs3_lookup_rsp;
+
+struct gfs3_write_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_quad_t offset;
+ u_int size;
+} __attribute__((packed));
+typedef struct gfs3_write_req gfs3_write_req;
+
+struct gfs3_write_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+};
+typedef struct gfs3_write_rsp gfs3_write_rsp;
+
+struct gfs3_statfs_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ char *path;
+};
+typedef struct gfs3_statfs_req gfs3_statfs_req;
+
+struct gfs3_statfs_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_statfs statfs;
+};
+typedef struct gfs3_statfs_rsp gfs3_statfs_rsp;
+
+struct gfs3_lk_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int cmd;
+ u_int type;
+ struct gf_flock flock;
+};
+typedef struct gfs3_lk_req gfs3_lk_req;
+
+struct gfs3_lk_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_flock flock;
+};
+typedef struct gfs3_lk_rsp gfs3_lk_rsp;
+
+struct gfs3_inodelk_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int cmd;
+ u_int type;
+ struct gf_flock flock;
+ char *path;
+ char *volume;
+};
+typedef struct gfs3_inodelk_req gfs3_inodelk_req;
+
+struct gfs3_finodelk_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int cmd;
+ u_int type;
+ struct gf_flock flock;
+ char *volume;
+};
+typedef struct gfs3_finodelk_req gfs3_finodelk_req;
+
+struct gfs3_flush_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+};
+typedef struct gfs3_flush_req gfs3_flush_req;
+
+struct gfs3_fsync_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int data;
+};
+typedef struct gfs3_fsync_req gfs3_fsync_req;
+
+struct gfs3_fsync_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+};
+typedef struct gfs3_fsync_rsp gfs3_fsync_rsp;
+
+struct gfs3_setxattr_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int flags;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ char *path;
+};
+typedef struct gfs3_setxattr_req gfs3_setxattr_req;
+
+struct gfs3_fsetxattr_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int flags;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_fsetxattr_req gfs3_fsetxattr_req;
+
+struct gfs3_xattrop_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int flags;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ char *path;
+};
+typedef struct gfs3_xattrop_req gfs3_xattrop_req;
+
+struct gfs3_xattrop_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_xattrop_rsp gfs3_xattrop_rsp;
+
+struct gfs3_fxattrop_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int flags;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_fxattrop_req gfs3_fxattrop_req;
+
+struct gfs3_fxattrop_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_fxattrop_rsp gfs3_fxattrop_rsp;
+
+struct gfs3_getxattr_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int namelen;
+ char *path;
+ char *name;
+};
+typedef struct gfs3_getxattr_req gfs3_getxattr_req;
+
+struct gfs3_getxattr_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_getxattr_rsp gfs3_getxattr_rsp;
+
+struct gfs3_fgetxattr_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int namelen;
+ char *name;
+};
+typedef struct gfs3_fgetxattr_req gfs3_fgetxattr_req;
+
+struct gfs3_fgetxattr_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gfs3_fgetxattr_rsp gfs3_fgetxattr_rsp;
+
+struct gfs3_removexattr_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ char *path;
+ char *name;
+};
+typedef struct gfs3_removexattr_req gfs3_removexattr_req;
+
+struct gfs3_opendir_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ char *path;
+};
+typedef struct gfs3_opendir_req gfs3_opendir_req;
+
+struct gfs3_opendir_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ quad_t fd;
+};
+typedef struct gfs3_opendir_rsp gfs3_opendir_rsp;
+
+struct gfs3_fsyncdir_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ int data;
+};
+typedef struct gfs3_fsyncdir_req gfs3_fsyncdir_req;
+
+struct gfs3_readdir_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_quad_t offset;
+ u_int size;
+};
+typedef struct gfs3_readdir_req gfs3_readdir_req;
+
+struct gfs3_readdir_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int buf_len;
+ char *buf_val;
+ } buf;
+};
+typedef struct gfs3_readdir_rsp gfs3_readdir_rsp;
+
+struct gfs3_readdirp_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_quad_t offset;
+ u_int size;
+};
+typedef struct gfs3_readdirp_req gfs3_readdirp_req;
+
+struct gfs3_readdirp_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int buf_len;
+ char *buf_val;
+ } buf;
+};
+typedef struct gfs3_readdirp_rsp gfs3_readdirp_rsp;
+
+struct gf_setvolume_req {
+ u_quad_t gfs_id;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gf_setvolume_req gf_setvolume_req;
+
+struct gf_setvolume_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gf_setvolume_rsp gf_setvolume_rsp;
+
+struct gfs3_access_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int mask;
+ char *path;
+};
+typedef struct gfs3_access_req gfs3_access_req;
+
+struct gfs3_create_req {
+ u_quad_t gfs_id;
+ u_quad_t par;
+ u_quad_t gen;
+ u_int flags;
+ u_int mode;
+ char *path;
+ char *bname;
+};
+typedef struct gfs3_create_req gfs3_create_req;
+
+struct gfs3_create_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ u_quad_t fd;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+typedef struct gfs3_create_rsp gfs3_create_rsp;
+
+struct gfs3_ftruncate_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_quad_t offset;
+};
+typedef struct gfs3_ftruncate_req gfs3_ftruncate_req;
+
+struct gfs3_ftruncate_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+};
+typedef struct gfs3_ftruncate_rsp gfs3_ftruncate_rsp;
+
+struct gfs3_fstat_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+};
+typedef struct gfs3_fstat_req gfs3_fstat_req;
+
+struct gfs3_fstat_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+};
+typedef struct gfs3_fstat_rsp gfs3_fstat_rsp;
+
+struct gfs3_entrylk_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int cmd;
+ u_int type;
+ u_quad_t namelen;
+ char *path;
+ char *name;
+ char *volume;
+};
+typedef struct gfs3_entrylk_req gfs3_entrylk_req;
+
+struct gfs3_fentrylk_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+ u_int cmd;
+ u_int type;
+ u_quad_t namelen;
+ char *name;
+ char *volume;
+};
+typedef struct gfs3_fentrylk_req gfs3_fentrylk_req;
+
+struct gfs3_checksum_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ u_int flag;
+ char *path;
+};
+typedef struct gfs3_checksum_req gfs3_checksum_req;
+
+struct gfs3_checksum_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int fchecksum_len;
+ char *fchecksum_val;
+ } fchecksum;
+ struct {
+ u_int dchecksum_len;
+ char *dchecksum_val;
+ } dchecksum;
+};
+typedef struct gfs3_checksum_rsp gfs3_checksum_rsp;
+
+struct gfs3_setattr_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ struct gf_iatt stbuf;
+ int valid;
+ char *path;
+};
+typedef struct gfs3_setattr_req gfs3_setattr_req;
+
+struct gfs3_setattr_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+};
+typedef struct gfs3_setattr_rsp gfs3_setattr_rsp;
+
+struct gfs3_fsetattr_req {
+ u_quad_t gfs_id;
+ quad_t fd;
+ struct gf_iatt stbuf;
+ int valid;
+};
+typedef struct gfs3_fsetattr_req gfs3_fsetattr_req;
+
+struct gfs3_fsetattr_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+};
+typedef struct gfs3_fsetattr_rsp gfs3_fsetattr_rsp;
+
+struct gfs3_rchecksum_req {
+ u_quad_t gfs_id;
+ quad_t fd;
+ u_quad_t offset;
+ u_int len;
+};
+typedef struct gfs3_rchecksum_req gfs3_rchecksum_req;
+
+struct gfs3_rchecksum_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ u_int weak_checksum;
+ struct {
+ u_int strong_checksum_len;
+ char *strong_checksum_val;
+ } strong_checksum;
+};
+typedef struct gfs3_rchecksum_rsp gfs3_rchecksum_rsp;
+
+struct gf_getspec_req {
+ u_quad_t gfs_id;
+ u_int flags;
+ char *key;
+};
+typedef struct gf_getspec_req gf_getspec_req;
+
+struct gf_getspec_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ char *spec;
+};
+typedef struct gf_getspec_rsp gf_getspec_rsp;
+
+struct gf_log_req {
+ u_quad_t gfs_id;
+ struct {
+ u_int msg_len;
+ char *msg_val;
+ } msg;
+};
+typedef struct gf_log_req gf_log_req;
+
+struct gf_notify_req {
+ u_quad_t gfs_id;
+ u_int flags;
+ char *buf;
+};
+typedef struct gf_notify_req gf_notify_req;
+
+struct gf_notify_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ u_int flags;
+ char *buf;
+};
+typedef struct gf_notify_rsp gf_notify_rsp;
+
+struct gfs3_releasedir_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+};
+typedef struct gfs3_releasedir_req gfs3_releasedir_req;
+
+struct gfs3_release_req {
+ u_quad_t gfs_id;
+ u_quad_t ino;
+ u_quad_t gen;
+ quad_t fd;
+};
+typedef struct gfs3_release_req gfs3_release_req;
+
+struct gf_common_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gf_common_rsp gf_common_rsp;
+
+struct gf_dump_version_req {
+ u_quad_t gfs_id;
+ u_int flags;
+ char *key;
+};
+typedef struct gf_dump_version_req gf_dump_version_req;
+
+struct gf_dump_version_rsp {
+ u_quad_t gfs_id;
+ int op_ret;
+ int op_errno;
+ u_int flags;
+ struct {
+ u_int msg_len;
+ char *msg_val;
+ } msg;
+};
+typedef struct gf_dump_version_rsp gf_dump_version_rsp;
+
+struct auth_glusterfs_parms {
+ u_quad_t lk_owner;
+ u_int pid;
+ u_int uid;
+ u_int gid;
+ u_int ngrps;
+ u_int groups[16];
+};
+typedef struct auth_glusterfs_parms auth_glusterfs_parms;
+
+/* the xdr functions */
+
+#if defined(__STDC__) || defined(__cplusplus)
+extern bool_t xdr_gf_statfs (XDR *, gf_statfs*);
+extern bool_t xdr_gf_flock (XDR *, gf_flock*);
+extern bool_t xdr_gf_iatt (XDR *, gf_iatt*);
+extern bool_t xdr_gfs3_stat_req (XDR *, gfs3_stat_req*);
+extern bool_t xdr_gfs3_stat_rsp (XDR *, gfs3_stat_rsp*);
+extern bool_t xdr_gfs3_readlink_req (XDR *, gfs3_readlink_req*);
+extern bool_t xdr_gfs3_readlink_rsp (XDR *, gfs3_readlink_rsp*);
+extern bool_t xdr_gfs3_mknod_req (XDR *, gfs3_mknod_req*);
+extern bool_t xdr_gfs3_mknod_rsp (XDR *, gfs3_mknod_rsp*);
+extern bool_t xdr_gfs3_mkdir_req (XDR *, gfs3_mkdir_req*);
+extern bool_t xdr_gfs3_mkdir_rsp (XDR *, gfs3_mkdir_rsp*);
+extern bool_t xdr_gfs3_unlink_req (XDR *, gfs3_unlink_req*);
+extern bool_t xdr_gfs3_unlink_rsp (XDR *, gfs3_unlink_rsp*);
+extern bool_t xdr_gfs3_rmdir_req (XDR *, gfs3_rmdir_req*);
+extern bool_t xdr_gfs3_rmdir_rsp (XDR *, gfs3_rmdir_rsp*);
+extern bool_t xdr_gfs3_symlink_req (XDR *, gfs3_symlink_req*);
+extern bool_t xdr_gfs3_symlink_rsp (XDR *, gfs3_symlink_rsp*);
+extern bool_t xdr_gfs3_rename_req (XDR *, gfs3_rename_req*);
+extern bool_t xdr_gfs3_rename_rsp (XDR *, gfs3_rename_rsp*);
+extern bool_t xdr_gfs3_link_req (XDR *, gfs3_link_req*);
+extern bool_t xdr_gfs3_link_rsp (XDR *, gfs3_link_rsp*);
+extern bool_t xdr_gfs3_truncate_req (XDR *, gfs3_truncate_req*);
+extern bool_t xdr_gfs3_truncate_rsp (XDR *, gfs3_truncate_rsp*);
+extern bool_t xdr_gfs3_open_req (XDR *, gfs3_open_req*);
+extern bool_t xdr_gfs3_open_rsp (XDR *, gfs3_open_rsp*);
+extern bool_t xdr_gfs3_read_req (XDR *, gfs3_read_req*);
+extern bool_t xdr_gfs3_read_rsp (XDR *, gfs3_read_rsp*);
+extern bool_t xdr_gfs3_lookup_req (XDR *, gfs3_lookup_req*);
+extern bool_t xdr_gfs3_lookup_rsp (XDR *, gfs3_lookup_rsp*);
+extern bool_t xdr_gfs3_write_req (XDR *, gfs3_write_req*);
+extern bool_t xdr_gfs3_write_rsp (XDR *, gfs3_write_rsp*);
+extern bool_t xdr_gfs3_statfs_req (XDR *, gfs3_statfs_req*);
+extern bool_t xdr_gfs3_statfs_rsp (XDR *, gfs3_statfs_rsp*);
+extern bool_t xdr_gfs3_lk_req (XDR *, gfs3_lk_req*);
+extern bool_t xdr_gfs3_lk_rsp (XDR *, gfs3_lk_rsp*);
+extern bool_t xdr_gfs3_inodelk_req (XDR *, gfs3_inodelk_req*);
+extern bool_t xdr_gfs3_finodelk_req (XDR *, gfs3_finodelk_req*);
+extern bool_t xdr_gfs3_flush_req (XDR *, gfs3_flush_req*);
+extern bool_t xdr_gfs3_fsync_req (XDR *, gfs3_fsync_req*);
+extern bool_t xdr_gfs3_fsync_rsp (XDR *, gfs3_fsync_rsp*);
+extern bool_t xdr_gfs3_setxattr_req (XDR *, gfs3_setxattr_req*);
+extern bool_t xdr_gfs3_fsetxattr_req (XDR *, gfs3_fsetxattr_req*);
+extern bool_t xdr_gfs3_xattrop_req (XDR *, gfs3_xattrop_req*);
+extern bool_t xdr_gfs3_xattrop_rsp (XDR *, gfs3_xattrop_rsp*);
+extern bool_t xdr_gfs3_fxattrop_req (XDR *, gfs3_fxattrop_req*);
+extern bool_t xdr_gfs3_fxattrop_rsp (XDR *, gfs3_fxattrop_rsp*);
+extern bool_t xdr_gfs3_getxattr_req (XDR *, gfs3_getxattr_req*);
+extern bool_t xdr_gfs3_getxattr_rsp (XDR *, gfs3_getxattr_rsp*);
+extern bool_t xdr_gfs3_fgetxattr_req (XDR *, gfs3_fgetxattr_req*);
+extern bool_t xdr_gfs3_fgetxattr_rsp (XDR *, gfs3_fgetxattr_rsp*);
+extern bool_t xdr_gfs3_removexattr_req (XDR *, gfs3_removexattr_req*);
+extern bool_t xdr_gfs3_opendir_req (XDR *, gfs3_opendir_req*);
+extern bool_t xdr_gfs3_opendir_rsp (XDR *, gfs3_opendir_rsp*);
+extern bool_t xdr_gfs3_fsyncdir_req (XDR *, gfs3_fsyncdir_req*);
+extern bool_t xdr_gfs3_readdir_req (XDR *, gfs3_readdir_req*);
+extern bool_t xdr_gfs3_readdir_rsp (XDR *, gfs3_readdir_rsp*);
+extern bool_t xdr_gfs3_readdirp_req (XDR *, gfs3_readdirp_req*);
+extern bool_t xdr_gfs3_readdirp_rsp (XDR *, gfs3_readdirp_rsp*);
+extern bool_t xdr_gf_setvolume_req (XDR *, gf_setvolume_req*);
+extern bool_t xdr_gf_setvolume_rsp (XDR *, gf_setvolume_rsp*);
+extern bool_t xdr_gfs3_access_req (XDR *, gfs3_access_req*);
+extern bool_t xdr_gfs3_create_req (XDR *, gfs3_create_req*);
+extern bool_t xdr_gfs3_create_rsp (XDR *, gfs3_create_rsp*);
+extern bool_t xdr_gfs3_ftruncate_req (XDR *, gfs3_ftruncate_req*);
+extern bool_t xdr_gfs3_ftruncate_rsp (XDR *, gfs3_ftruncate_rsp*);
+extern bool_t xdr_gfs3_fstat_req (XDR *, gfs3_fstat_req*);
+extern bool_t xdr_gfs3_fstat_rsp (XDR *, gfs3_fstat_rsp*);
+extern bool_t xdr_gfs3_entrylk_req (XDR *, gfs3_entrylk_req*);
+extern bool_t xdr_gfs3_fentrylk_req (XDR *, gfs3_fentrylk_req*);
+extern bool_t xdr_gfs3_checksum_req (XDR *, gfs3_checksum_req*);
+extern bool_t xdr_gfs3_checksum_rsp (XDR *, gfs3_checksum_rsp*);
+extern bool_t xdr_gfs3_setattr_req (XDR *, gfs3_setattr_req*);
+extern bool_t xdr_gfs3_setattr_rsp (XDR *, gfs3_setattr_rsp*);
+extern bool_t xdr_gfs3_fsetattr_req (XDR *, gfs3_fsetattr_req*);
+extern bool_t xdr_gfs3_fsetattr_rsp (XDR *, gfs3_fsetattr_rsp*);
+extern bool_t xdr_gfs3_rchecksum_req (XDR *, gfs3_rchecksum_req*);
+extern bool_t xdr_gfs3_rchecksum_rsp (XDR *, gfs3_rchecksum_rsp*);
+extern bool_t xdr_gf_getspec_req (XDR *, gf_getspec_req*);
+extern bool_t xdr_gf_getspec_rsp (XDR *, gf_getspec_rsp*);
+extern bool_t xdr_gf_log_req (XDR *, gf_log_req*);
+extern bool_t xdr_gf_notify_req (XDR *, gf_notify_req*);
+extern bool_t xdr_gf_notify_rsp (XDR *, gf_notify_rsp*);
+extern bool_t xdr_gfs3_releasedir_req (XDR *, gfs3_releasedir_req*);
+extern bool_t xdr_gfs3_release_req (XDR *, gfs3_release_req*);
+extern bool_t xdr_gf_common_rsp (XDR *, gf_common_rsp*);
+extern bool_t xdr_gf_dump_version_req (XDR *, gf_dump_version_req *);
+extern bool_t xdr_gf_dump_version_rsp (XDR *, gf_dump_version_rsp *);
+extern bool_t xdr_auth_glusterfs_parms (XDR *, auth_glusterfs_parms*);
+
+#else /* K&R C */
+extern bool_t xdr_gf_statfs ();
+extern bool_t xdr_gf_flock ();
+extern bool_t xdr_gf_iatt ();
+extern bool_t xdr_gfs3_stat_req ();
+extern bool_t xdr_gfs3_stat_rsp ();
+extern bool_t xdr_gfs3_readlink_req ();
+extern bool_t xdr_gfs3_readlink_rsp ();
+extern bool_t xdr_gfs3_mknod_req ();
+extern bool_t xdr_gfs3_mknod_rsp ();
+extern bool_t xdr_gfs3_mkdir_req ();
+extern bool_t xdr_gfs3_mkdir_rsp ();
+extern bool_t xdr_gfs3_unlink_req ();
+extern bool_t xdr_gfs3_unlink_rsp ();
+extern bool_t xdr_gfs3_rmdir_req ();
+extern bool_t xdr_gfs3_rmdir_rsp ();
+extern bool_t xdr_gfs3_symlink_req ();
+extern bool_t xdr_gfs3_symlink_rsp ();
+extern bool_t xdr_gfs3_rename_req ();
+extern bool_t xdr_gfs3_rename_rsp ();
+extern bool_t xdr_gfs3_link_req ();
+extern bool_t xdr_gfs3_link_rsp ();
+extern bool_t xdr_gfs3_truncate_req ();
+extern bool_t xdr_gfs3_truncate_rsp ();
+extern bool_t xdr_gfs3_open_req ();
+extern bool_t xdr_gfs3_open_rsp ();
+extern bool_t xdr_gfs3_read_req ();
+extern bool_t xdr_gfs3_read_rsp ();
+extern bool_t xdr_gfs3_lookup_req ();
+extern bool_t xdr_gfs3_lookup_rsp ();
+extern bool_t xdr_gfs3_write_req ();
+extern bool_t xdr_gfs3_write_rsp ();
+extern bool_t xdr_gfs3_statfs_req ();
+extern bool_t xdr_gfs3_statfs_rsp ();
+extern bool_t xdr_gfs3_lk_req ();
+extern bool_t xdr_gfs3_lk_rsp ();
+extern bool_t xdr_gfs3_inodelk_req ();
+extern bool_t xdr_gfs3_finodelk_req ();
+extern bool_t xdr_gfs3_flush_req ();
+extern bool_t xdr_gfs3_fsync_req ();
+extern bool_t xdr_gfs3_fsync_rsp ();
+extern bool_t xdr_gfs3_setxattr_req ();
+extern bool_t xdr_gfs3_fsetxattr_req ();
+extern bool_t xdr_gfs3_xattrop_req ();
+extern bool_t xdr_gfs3_xattrop_rsp ();
+extern bool_t xdr_gfs3_fxattrop_req ();
+extern bool_t xdr_gfs3_fxattrop_rsp ();
+extern bool_t xdr_gfs3_getxattr_req ();
+extern bool_t xdr_gfs3_getxattr_rsp ();
+extern bool_t xdr_gfs3_fgetxattr_req ();
+extern bool_t xdr_gfs3_fgetxattr_rsp ();
+extern bool_t xdr_gfs3_removexattr_req ();
+extern bool_t xdr_gfs3_opendir_req ();
+extern bool_t xdr_gfs3_opendir_rsp ();
+extern bool_t xdr_gfs3_fsyncdir_req ();
+extern bool_t xdr_gfs3_readdir_req ();
+extern bool_t xdr_gfs3_readdir_rsp ();
+extern bool_t xdr_gfs3_readdirp_req ();
+extern bool_t xdr_gfs3_readdirp_rsp ();
+extern bool_t xdr_gf_setvolume_req ();
+extern bool_t xdr_gf_setvolume_rsp ();
+extern bool_t xdr_gfs3_access_req ();
+extern bool_t xdr_gfs3_create_req ();
+extern bool_t xdr_gfs3_create_rsp ();
+extern bool_t xdr_gfs3_ftruncate_req ();
+extern bool_t xdr_gfs3_ftruncate_rsp ();
+extern bool_t xdr_gfs3_fstat_req ();
+extern bool_t xdr_gfs3_fstat_rsp ();
+extern bool_t xdr_gfs3_entrylk_req ();
+extern bool_t xdr_gfs3_fentrylk_req ();
+extern bool_t xdr_gfs3_checksum_req ();
+extern bool_t xdr_gfs3_checksum_rsp ();
+extern bool_t xdr_gfs3_setattr_req ();
+extern bool_t xdr_gfs3_setattr_rsp ();
+extern bool_t xdr_gfs3_fsetattr_req ();
+extern bool_t xdr_gfs3_fsetattr_rsp ();
+extern bool_t xdr_gfs3_rchecksum_req ();
+extern bool_t xdr_gfs3_rchecksum_rsp ();
+extern bool_t xdr_gfs3_releasedir_req ();
+extern bool_t xdr_gfs3_release_req ();
+extern bool_t xdr_gf_getspec_req ();
+extern bool_t xdr_gf_getspec_rsp ();
+extern bool_t xdr_gf_log_req ();
+extern bool_t xdr_gf_notify_req ();
+extern bool_t xdr_gf_notify_rsp ();
+extern bool_t xdr_gf_common_rsp ();
+extern bool_t xdr_gf_dump_version_req ();
+extern bool_t xdr_gf_dump_version_rsp ();
+extern bool_t xdr_auth_glusterfs_parms ();
+
+#endif /* K&R C */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_GLUSTERFS3_H_RPCGEN */
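The header above follows one pattern throughout: each on-wire type (gf_iatt, gf_statfs, gf_flock) is paired with inline helpers that convert to and from the corresponding in-memory type, and each fop gets a _req/_rsp pair carrying gfs_id plus op_ret/op_errno on responses. A minimal sketch of how a stat response might be filled before encoding, assuming struct iatt comes from libglusterfs' iatt.h and that this generated header is included as glusterfs-xdr.h (both include names are assumptions here):

#include "iatt.h"            /* struct iatt, libglusterfs */
#include "glusterfs-xdr.h"   /* gfs3_stat_rsp, gf_stat_from_iatt() */

static void
stat_rsp_fill (gfs3_stat_rsp *rsp, struct iatt *stbuf, int op_ret, int op_errno)
{
        rsp->op_ret   = op_ret;
        rsp->op_errno = op_errno;
        if (op_ret == 0)
                /* translate the in-memory iatt into the on-wire gf_iatt */
                gf_stat_from_iatt (&rsp->stat, stbuf);
}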
diff --git a/xlators/protocol/lib/src/glusterfs3.x b/xlators/protocol/lib/src/glusterfs3.x
new file mode 100644
index 00000000000..b6cc6e6abad
--- /dev/null
+++ b/xlators/protocol/lib/src/glusterfs3.x
@@ -0,0 +1,755 @@
+#define GF_REQUEST_MAXGROUPS 16
+struct gf_statfs {
+ unsigned hyper bsize;
+ unsigned hyper frsize;
+ unsigned hyper blocks;
+ unsigned hyper bfree;
+ unsigned hyper bavail;
+ unsigned hyper files;
+ unsigned hyper ffree;
+ unsigned hyper favail;
+ unsigned hyper fsid;
+ unsigned hyper flag;
+ unsigned hyper namemax;
+};
+
+struct gf_flock {
+ unsigned int type;
+ unsigned int whence;
+ unsigned hyper start;
+ unsigned hyper len;
+ unsigned int pid;
+} ;
+
+
+struct gf_iatt {
+ unsigned hyper ia_ino; /* inode number */
+ unsigned hyper ia_gen; /* generation number */
+ unsigned hyper ia_dev; /* backing device ID */
+ unsigned int mode; /* mode (type + protection )*/
+ unsigned int ia_nlink; /* Link count */
+ unsigned int ia_uid; /* user ID of owner */
+ unsigned int ia_gid; /* group ID of owner */
+ unsigned hyper ia_rdev; /* device ID (if special file) */
+ unsigned hyper ia_size; /* file size in bytes */
+ unsigned int ia_blksize; /* blocksize for filesystem I/O */
+ unsigned hyper ia_blocks; /* number of 512B blocks allocated */
+ unsigned int ia_atime; /* last access time */
+ unsigned int ia_atime_nsec;
+ unsigned int ia_mtime; /* last modification time */
+ unsigned int ia_mtime_nsec;
+ unsigned int ia_ctime; /* last status change time */
+ unsigned int ia_ctime_nsec;
+};
+
+struct gfs3_stat_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ string path<>; /* NULL terminated */
+};
+struct gfs3_stat_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+} ;
+
+
+struct gfs3_readlink_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int size;
+ string path<>; /* NULL terminated */
+} ;
+ struct gfs3_readlink_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt buf;
+ string path<>; /* NULL terminated */
+} ;
+
+
+ struct gfs3_mknod_req {
+ unsigned hyper gfs_id;
+ unsigned hyper par;
+ unsigned hyper gen;
+ unsigned hyper dev;
+ unsigned int mode;
+ string path<>; /* NULL terminated */
+ string bname<>; /* NULL terminated */
+} ;
+ struct gfs3_mknod_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+
+
+ struct gfs3_mkdir_req {
+ unsigned hyper gfs_id;
+ unsigned hyper par;
+ unsigned hyper gen;
+ unsigned int mode;
+ string path<>; /* NULL terminated */
+ string bname<>; /* NULL terminated */
+} ;
+ struct gfs3_mkdir_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+} ;
+
+
+ struct gfs3_unlink_req {
+ unsigned hyper gfs_id;
+ unsigned hyper par;
+ unsigned hyper gen;
+ string path<>; /* NULL terminated */
+ string bname<>; /* NULL terminated */
+};
+ struct gfs3_unlink_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+
+
+ struct gfs3_rmdir_req {
+ unsigned hyper gfs_id;
+ unsigned hyper par;
+ unsigned hyper gen;
+ string path<>;
+ string bname<>; /* NULL terminated */
+};
+ struct gfs3_rmdir_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+
+
+ struct gfs3_symlink_req {
+ unsigned hyper gfs_id;
+ unsigned hyper par;
+ unsigned hyper gen;
+ string path<>;
+ string bname<>;
+ string linkname<>;
+};
+ struct gfs3_symlink_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+
+
+ struct gfs3_rename_req {
+ unsigned hyper gfs_id;
+ unsigned hyper oldpar;
+ unsigned hyper oldgen;
+ unsigned hyper newpar;
+ unsigned hyper newgen;
+ string oldpath<>;
+ string oldbname<>; /* NULL terminated */
+ string newpath<>;
+ string newbname<>; /* NULL terminated */
+};
+ struct gfs3_rename_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preoldparent;
+ struct gf_iatt postoldparent;
+ struct gf_iatt prenewparent;
+ struct gf_iatt postnewparent;
+};
+
+
+ struct gfs3_link_req {
+ unsigned hyper gfs_id;
+ unsigned hyper oldino;
+ unsigned hyper oldgen;
+ unsigned hyper newpar;
+ unsigned hyper newgen;
+ string oldpath<>;
+ string newpath<>;
+ string newbname<>;
+};
+ struct gfs3_link_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+};
+
+ struct gfs3_truncate_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned hyper offset;
+ string path<>;
+};
+ struct gfs3_truncate_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+};
+
+
+ struct gfs3_open_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int flags;
+ unsigned int wbflags;
+ string path<>;
+};
+ struct gfs3_open_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ hyper fd;
+};
+
+
+ struct gfs3_read_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned hyper offset;
+ unsigned int size;
+};
+ struct gfs3_read_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ string buf<>;
+} ;
+
+struct gfs3_lookup_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino; /* NOTE: used only in case of 'root' lookup */
+ unsigned hyper par;
+ unsigned hyper gen;
+ unsigned int flags;
+ string path<>;
+ string bname<>;
+ opaque dict<>;
+};
+ struct gfs3_lookup_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ struct gf_iatt postparent;
+ opaque dict<>;
+} ;
+
+
+
+ struct gfs3_write_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned hyper offset;
+ unsigned int size;
+};
+ struct gfs3_write_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+} ;
+
+
+ struct gfs3_statfs_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ string path<>;
+} ;
+ struct gfs3_statfs_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_statfs statfs;
+} ;
+
+ struct gfs3_lk_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int cmd;
+ unsigned int type;
+ struct gf_flock flock;
+} ;
+ struct gfs3_lk_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_flock flock;
+} ;
+
+ struct gfs3_inodelk_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int cmd;
+ unsigned int type;
+ struct gf_flock flock;
+ string path<>;
+ string volume<>;
+} ;
+
+struct gfs3_finodelk_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int cmd;
+ unsigned int type;
+ struct gf_flock flock;
+ string volume<>;
+} ;
+
+
+ struct gfs3_flush_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+} ;
+
+
+ struct gfs3_fsync_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int data;
+} ;
+ struct gfs3_fsync_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+} ;
+
+
+ struct gfs3_setxattr_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int flags;
+ opaque dict<>;
+ string path<>;
+} ;
+
+
+
+ struct gfs3_fsetxattr_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int flags;
+ opaque dict<>;
+} ;
+
+
+
+ struct gfs3_xattrop_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int flags;
+ opaque dict<>;
+ string path<>;
+} ;
+
+ struct gfs3_xattrop_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+} ;
+
+
+ struct gfs3_fxattrop_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int flags;
+ opaque dict<>;
+} ;
+
+ struct gfs3_fxattrop_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+} ;
+
+
+ struct gfs3_getxattr_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int namelen;
+ string path<>;
+ string name<>;
+} ;
+ struct gfs3_getxattr_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+} ;
+
+
+ struct gfs3_fgetxattr_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int namelen;
+ string name<>;
+} ;
+ struct gfs3_fgetxattr_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+} ;
+
+
+ struct gfs3_removexattr_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ string path<>;
+ string name<>;
+} ;
+
+
+
+ struct gfs3_opendir_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ string path<>;
+} ;
+ struct gfs3_opendir_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ hyper fd;
+} ;
+
+
+ struct gfs3_fsyncdir_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ int data;
+} ;
+
+ struct gfs3_readdir_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned hyper offset;
+ unsigned int size;
+};
+struct gfs3_readdir_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque buf<>;
+} ;
+
+
+ struct gfs3_readdirp_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned hyper offset;
+ unsigned int size;
+} ;
+ struct gfs3_readdirp_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque buf<>;
+} ;
+
+
+ struct gf_setvolume_req {
+ unsigned hyper gfs_id;
+ opaque dict<>;
+} ;
+ struct gf_setvolume_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+} ;
+
+struct gfs3_access_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int mask;
+ string path<>;
+} ;
+
+
+struct gfs3_create_req {
+ unsigned hyper gfs_id;
+ unsigned hyper par;
+ unsigned hyper gen;
+ unsigned int flags;
+ unsigned int mode;
+ string path<>;
+ string bname<>;
+} ;
+struct gfs3_create_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+ unsigned hyper fd;
+ struct gf_iatt preparent;
+ struct gf_iatt postparent;
+} ;
+
+
+
+struct gfs3_ftruncate_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned hyper offset;
+} ;
+struct gfs3_ftruncate_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt prestat;
+ struct gf_iatt poststat;
+} ;
+
+
+struct gfs3_fstat_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+} ;
+ struct gfs3_fstat_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt stat;
+} ;
+
+
+
+ struct gfs3_entrylk_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int cmd;
+ unsigned int type;
+ unsigned hyper namelen;
+ string path<>;
+ string name<>;
+ string volume<>;
+};
+
+ struct gfs3_fentrylk_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+ unsigned int cmd;
+ unsigned int type;
+ unsigned hyper namelen;
+ string name<>;
+ string volume<>;
+};
+
+
+struct gfs3_checksum_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ unsigned int flag;
+ string path<>;
+};
+ struct gfs3_checksum_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ opaque fchecksum<>;
+ opaque dchecksum<>;
+} ;
+
+ struct gfs3_setattr_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ struct gf_iatt stbuf;
+ int valid;
+ string path<>;
+} ;
+ struct gfs3_setattr_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+} ;
+
+ struct gfs3_fsetattr_req {
+ unsigned hyper gfs_id;
+ hyper fd;
+ struct gf_iatt stbuf;
+ int valid;
+} ;
+ struct gfs3_fsetattr_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ struct gf_iatt statpre;
+ struct gf_iatt statpost;
+} ;
+
+ struct gfs3_rchecksum_req {
+ unsigned hyper gfs_id;
+ hyper fd;
+ unsigned hyper offset;
+ unsigned int len;
+} ;
+ struct gfs3_rchecksum_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ unsigned int weak_checksum;
+ opaque strong_checksum<>;
+} ;
+ struct gfs3_releasedir_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+} ;
+
+struct gfs3_release_req {
+ unsigned hyper gfs_id;
+ unsigned hyper ino;
+ unsigned hyper gen;
+ hyper fd;
+} ;
+
+
+ struct gf_getspec_req {
+ unsigned hyper gfs_id;
+ unsigned int flags;
+ string key<>;
+} ;
+ struct gf_getspec_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ string spec<>;
+} ;
+
+
+ struct gf_log_req {
+ unsigned hyper gfs_id;
+ opaque msg<>;
+};
+
+ struct gf_notify_req {
+ unsigned hyper gfs_id;
+ unsigned int flags;
+ string buf<>;
+} ;
+ struct gf_notify_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ unsigned int flags;
+ string buf<>;
+} ;
+
+
+
+struct gf_common_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+} ;
+
+
+struct gf_dump_version_req {
+ unsigned hyper gfs_id;
+ unsigned int flags;
+ string key<>;
+};
+
+
+struct gf_dump_version_rsp {
+ unsigned hyper gfs_id;
+ int op_ret;
+ int op_errno;
+ unsigned int flags;
+ opaque msg<>;
+};
+
+struct auth_glusterfs_parms {
+ unsigned int pid;
+ unsigned int uid;
+ unsigned int gid;
+
+ /* Number of groups being sent through the groups array below. */
+ unsigned int ngrps;
+
+ /* Array of groups to which the uid belongs apart from the primary group
+ * in gid.
+ */
+ unsigned int groups[GF_REQUEST_MAXGROUPS];
+
+ unsigned hyper lk_owner;
+};
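The interface definition above is the rpcgen-style source for the generated declarations earlier in this patch: unsigned hyper maps to u_quad_t, hyper to quad_t, string foo<> to a char *, and opaque bar<> to a length/value pair. A small illustration of that mapping (the struct name here is made up; it does not exist in the patch):

/* hypothetical, for illustrating the XDR-to-C mapping only */
struct xdr_mapping_example {
        char *path;               /* from: string path<>; */
        struct {
                u_int dict_len;   /* from: opaque dict<>; */
                char *dict_val;
        } dict;
};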
diff --git a/xlators/protocol/lib/src/msg-xdr.c b/xlators/protocol/lib/src/msg-xdr.c
new file mode 100644
index 00000000000..8cab3726c3d
--- /dev/null
+++ b/xlators/protocol/lib/src/msg-xdr.c
@@ -0,0 +1,1264 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "msg-xdr.h"
+
+
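+/* Encode 'res' into outmsg.iov_base using the given xdrproc_t routine and
+ * return the number of bytes encoded, or -1 on failure. */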
+ssize_t
+xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc)
+{
+ ssize_t ret = -1;
+ XDR xdr;
+
+ if ((!outmsg.iov_base) || (!res) || (!proc))
+ return -1;
+
+ xdrmem_create (&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len,
+ XDR_ENCODE);
+
+ if (!proc (&xdr, res)) {
+ ret = -1;
+ goto ret;
+ }
+
+ ret = xdr_encoded_length (xdr);
+
+ret:
+ return ret;
+}
+
+
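+/* Decode the buffer in 'inmsg' into 'args' using the given xdrproc_t routine
+ * and return the number of bytes consumed, or -1 on failure. */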
+ssize_t
+xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc)
+{
+ XDR xdr;
+ ssize_t ret = -1;
+
+ if ((!inmsg.iov_base) || (!args) || (!proc))
+ return -1;
+
+ xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len,
+ XDR_DECODE);
+
+ if (!proc (&xdr, args)) {
+ ret = -1;
+ goto ret;
+ }
+
+ ret = xdr_decoded_length (xdr);
+ret:
+ return ret;
+}
+
+
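+/* Same as xdr_to_generic(), but additionally return, in '*pendingpayload',
+ * the undecoded remainder of 'inmsg' (for example, the raw data that follows
+ * a write request header). */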
+ssize_t
+xdr_to_generic_payload (struct iovec inmsg, void *args, xdrproc_t proc,
+ struct iovec *pendingpayload)
+{
+ XDR xdr;
+ ssize_t ret = -1;
+
+ if ((!inmsg.iov_base) || (!args) || (!proc))
+ return -1;
+
+ xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len,
+ XDR_DECODE);
+
+ if (!proc (&xdr, args)) {
+ ret = -1;
+ goto ret;
+ }
+
+ ret = xdr_decoded_length (xdr);
+
+ if (pendingpayload) {
+ pendingpayload->iov_base = xdr_decoded_remaining_addr (xdr);
+ pendingpayload->iov_len = xdr_decoded_remaining_len (xdr);
+ }
+
+ret:
+ return ret;
+}
+
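+/* Usage sketch (illustrative only, not called anywhere in this file): encode
+ * a gf_common_rsp into a caller-supplied buffer with the generic helper and
+ * decode it back.  Only xdr_gf_common_rsp from the generated XDR code is
+ * assumed.
+ *
+ *     gf_common_rsp rsp = {0,}, out = {0,};
+ *     char          buf[128];
+ *     struct iovec  iov = { .iov_base = buf, .iov_len = sizeof (buf) };
+ *     ssize_t       len;
+ *
+ *     len = xdr_serialize_generic (iov, &rsp,
+ *                                  (xdrproc_t)xdr_gf_common_rsp);
+ *     if (len > 0) {
+ *             iov.iov_len = len;
+ *             xdr_to_generic (iov, &out, (xdrproc_t)xdr_gf_common_rsp);
+ *     }
+ */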
+/* Encode */
+
+ssize_t
+xdr_serialize_getspec_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_getspec_rsp);
+
+}
+
+ssize_t
+xdr_serialize_lookup_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_lookup_rsp);
+
+}
+
+ssize_t
+xdr_serialize_common_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_common_rsp);
+
+}
+
+ssize_t
+xdr_serialize_setvolume_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_setvolume_rsp);
+
+}
+ssize_t
+xdr_serialize_statfs_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_statfs_rsp);
+
+}
+ssize_t
+xdr_serialize_stat_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_stat_rsp);
+
+}
+ssize_t
+xdr_serialize_fstat_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fstat_rsp);
+
+}
+ssize_t
+xdr_serialize_open_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_open_rsp);
+
+}
+ssize_t
+xdr_serialize_read_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_read_rsp);
+
+}
+ssize_t
+xdr_serialize_write_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_write_rsp);
+
+}
+ssize_t
+xdr_serialize_rename_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_rename_rsp);
+
+}
+ssize_t
+xdr_serialize_fsync_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fsync_rsp);
+
+}
+ssize_t
+xdr_serialize_rmdir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_rmdir_rsp);
+}
+ssize_t
+xdr_serialize_unlink_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_unlink_rsp);
+}
+ssize_t
+xdr_serialize_writev_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_write_rsp);
+}
+ssize_t
+xdr_serialize_readv_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_read_rsp);
+}
+ssize_t
+xdr_serialize_readdir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_readdir_rsp);
+}
+ssize_t
+xdr_serialize_readdirp_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_readdirp_rsp);
+}
+ssize_t
+xdr_serialize_rchecksum_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_rchecksum_rsp);
+}
+ssize_t
+xdr_serialize_setattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_setattr_rsp);
+}
+ssize_t
+xdr_serialize_fsetattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fsetattr_rsp);
+}
+
+ssize_t
+xdr_serialize_readlink_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_readlink_rsp);
+
+}
+ssize_t
+xdr_serialize_symlink_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_symlink_rsp);
+
+}
+ssize_t
+xdr_serialize_create_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_create_rsp);
+
+}
+ssize_t
+xdr_serialize_link_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_link_rsp);
+
+}
+ssize_t
+xdr_serialize_mkdir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_mkdir_rsp);
+
+}
+ssize_t
+xdr_serialize_mknod_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_mknod_rsp);
+
+}
+ssize_t
+xdr_serialize_getxattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_getxattr_rsp);
+
+}
+ssize_t
+xdr_serialize_fgetxattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fgetxattr_rsp);
+
+}
+ssize_t
+xdr_serialize_xattrop_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_xattrop_rsp);
+
+}
+ssize_t
+xdr_serialize_fxattrop_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fxattrop_rsp);
+}
+
+ssize_t
+xdr_serialize_truncate_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_truncate_rsp);
+}
+
+ssize_t
+xdr_serialize_lk_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_lk_rsp);
+}
+
+ssize_t
+xdr_serialize_opendir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_opendir_rsp);
+}
+
+ssize_t
+xdr_serialize_checksum_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_checksum_rsp);
+}
+
+ssize_t
+xdr_serialize_ftruncate_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_ftruncate_rsp);
+}
+
+
+ssize_t
+xdr_serialize_dump_version_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_dump_version_rsp);
+}
+
+
+/* Decode */
+
+
+ssize_t
+xdr_to_dump_version_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf_dump_version_req);
+}
+
+ssize_t
+xdr_to_lookup_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_lookup_req);
+}
+
+ssize_t
+xdr_to_getspec_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf_getspec_req);
+
+}
+
+ssize_t
+xdr_to_setvolume_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gf_setvolume_req);
+
+}
+
+ssize_t
+xdr_to_statfs_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_statfs_req);
+
+}
+
+ssize_t
+xdr_to_fsync_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fsync_req);
+
+}
+
+ssize_t
+xdr_to_flush_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_flush_req);
+
+}
+
+ssize_t
+xdr_to_xattrop_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_xattrop_req);
+
+}
+
+ssize_t
+xdr_to_fxattrop_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fxattrop_req);
+
+}
+
+ssize_t
+xdr_to_getxattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_getxattr_req);
+
+}
+ssize_t
+xdr_to_fgetxattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fgetxattr_req);
+
+}
+ssize_t
+xdr_to_open_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_open_req);
+
+}
+ssize_t
+xdr_to_create_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_create_req);
+
+}
+ssize_t
+xdr_to_symlink_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_symlink_req);
+}
+ssize_t
+xdr_to_link_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_link_req);
+}
+ssize_t
+xdr_to_readlink_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_readlink_req);
+}
+ssize_t
+xdr_to_rename_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_rename_req);
+}
+ssize_t
+xdr_to_mkdir_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_mkdir_req);
+}
+ssize_t
+xdr_to_mknod_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_mknod_req);
+}
+ssize_t
+xdr_to_readv_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_read_req);
+}
+ssize_t
+xdr_to_writev_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_write_req);
+}
+
+ssize_t
+xdr_to_readdir_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_readdir_req);
+}
+
+ssize_t
+xdr_to_opendir_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_opendir_req);
+}
+
+ssize_t
+xdr_to_rmdir_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_rmdir_req);
+}
+
+ssize_t
+xdr_to_fsetxattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fsetxattr_req);
+}
+ssize_t
+xdr_to_setattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_setattr_req);
+}
+ssize_t
+xdr_to_fsetattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fsetattr_req);
+}
+
+ssize_t
+xdr_to_finodelk_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_finodelk_req);
+}
+
+ssize_t
+xdr_to_inodelk_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_inodelk_req);
+}
+
+ssize_t
+xdr_to_ftruncate_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_ftruncate_req);
+}
+
+ssize_t
+xdr_to_fsyncdir_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fsyncdir_req);
+}
+
+ssize_t
+xdr_to_fstat_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fstat_req);
+}
+
+ssize_t
+xdr_to_checksum_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_checksum_req);
+}
+ssize_t
+xdr_to_rchecksum_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_rchecksum_req);
+}
+ssize_t
+xdr_to_removexattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_removexattr_req);
+}
+ssize_t
+xdr_to_setxattr_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_setxattr_req);
+}
+
+ssize_t
+xdr_to_fentrylk_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_fentrylk_req);
+}
+
+ssize_t
+xdr_to_entrylk_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_entrylk_req);
+}
+
+ssize_t
+xdr_to_lk_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_lk_req);
+}
+
+ssize_t
+xdr_to_stat_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_stat_req);
+}
+
+ssize_t
+xdr_to_release_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_release_req);
+}
+
+ssize_t
+xdr_to_readdirp_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_readdirp_req);
+}
+ssize_t
+xdr_to_truncate_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_truncate_req);
+}
+ssize_t
+xdr_to_access_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_access_req);
+}
+ssize_t
+xdr_to_unlink_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gfs3_unlink_req);
+}
+
+ssize_t
+xdr_from_lookup_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_lookup_req);
+
+}
+
+ssize_t
+xdr_from_stat_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_stat_req);
+
+}
+
+ssize_t
+xdr_from_fstat_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fstat_req);
+
+}
+
+ssize_t
+xdr_from_mkdir_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_mkdir_req);
+
+}
+
+ssize_t
+xdr_from_mknod_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_mknod_req);
+
+}
+
+ssize_t
+xdr_from_symlink_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_symlink_req);
+
+}
+
+ssize_t
+xdr_from_readlink_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_readlink_req);
+
+}
+
+ssize_t
+xdr_from_rename_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_rename_req);
+
+}
+
+ssize_t
+xdr_from_link_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_link_req);
+
+}
+
+ssize_t
+xdr_from_create_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_create_req);
+
+}
+
+ssize_t
+xdr_from_open_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_open_req);
+
+}
+
+ssize_t
+xdr_from_opendir_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_opendir_req);
+
+}
+
+ssize_t
+xdr_from_readdir_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_readdir_req);
+
+}
+
+ssize_t
+xdr_from_readdirp_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_readdirp_req);
+
+}
+
+ssize_t
+xdr_from_fsyncdir_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fsyncdir_req);
+
+}
+ssize_t
+xdr_from_releasedir_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_releasedir_req);
+
+}
+ssize_t
+xdr_from_release_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_release_req);
+
+}
+ssize_t
+xdr_from_lk_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_lk_req);
+
+}
+ssize_t
+xdr_from_entrylk_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_entrylk_req);
+
+}
+ssize_t
+xdr_from_fentrylk_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fentrylk_req);
+
+}
+ssize_t
+xdr_from_inodelk_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_inodelk_req);
+
+}
+ssize_t
+xdr_from_finodelk_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_finodelk_req);
+
+}
+ssize_t
+xdr_from_setxattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_setxattr_req);
+
+}
+ssize_t
+xdr_from_fsetxattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fsetxattr_req);
+
+}
+ssize_t
+xdr_from_getxattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_getxattr_req);
+
+}
+ssize_t
+xdr_from_fgetxattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fgetxattr_req);
+
+}
+ssize_t
+xdr_from_removexattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_removexattr_req);
+
+}
+ssize_t
+xdr_from_xattrop_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_xattrop_req);
+
+}
+ssize_t
+xdr_from_fxattrop_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fxattrop_req);
+
+}
+ssize_t
+xdr_from_access_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_access_req);
+
+}
+ssize_t
+xdr_from_setattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_setattr_req);
+
+}
+ssize_t
+xdr_from_truncate_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_truncate_req);
+
+}
+ssize_t
+xdr_from_ftruncate_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_ftruncate_req);
+
+}
+ssize_t
+xdr_from_fsetattr_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fsetattr_req);
+
+}
+ssize_t
+xdr_from_readv_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_read_req);
+
+}
+ssize_t
+xdr_from_writev_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_write_req);
+
+}
+ssize_t
+xdr_from_fsync_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_fsync_req);
+
+}
+ssize_t
+xdr_from_flush_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_flush_req);
+
+}
+ssize_t
+xdr_from_statfs_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_statfs_req);
+
+}
+ssize_t
+xdr_from_checksum_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_checksum_req);
+
+}
+ssize_t
+xdr_from_rchecksum_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_rchecksum_req);
+
+}
+ssize_t
+xdr_from_getspec_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf_getspec_req);
+
+}
+ssize_t
+xdr_from_setvolume_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf_setvolume_req);
+
+}
+ssize_t
+xdr_from_dump_version_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gf_dump_version_req);
+
+}
+ssize_t
+xdr_from_rmdir_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_rmdir_req);
+
+}
+ssize_t
+xdr_from_unlink_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gfs3_unlink_req);
+
+}
+
+/* Client decode */
+
+ssize_t
+xdr_to_lookup_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_lookup_rsp);
+
+}
+
+ssize_t
+xdr_to_stat_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_stat_rsp);
+
+}
+
+ssize_t
+xdr_to_fstat_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fstat_rsp);
+
+}
+
+ssize_t
+xdr_to_mkdir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_mkdir_rsp);
+
+}
+
+ssize_t
+xdr_to_mknod_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_mknod_rsp);
+
+}
+
+ssize_t
+xdr_to_symlink_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_symlink_rsp);
+
+}
+
+ssize_t
+xdr_to_readlink_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_readlink_rsp);
+
+}
+
+ssize_t
+xdr_to_rename_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_rename_rsp);
+
+}
+
+ssize_t
+xdr_to_link_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_link_rsp);
+
+}
+
+ssize_t
+xdr_to_create_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_create_rsp);
+
+}
+
+ssize_t
+xdr_to_open_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_open_rsp);
+
+}
+
+ssize_t
+xdr_to_opendir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_opendir_rsp);
+
+}
+
+ssize_t
+xdr_to_readdir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_readdir_rsp);
+
+}
+
+ssize_t
+xdr_to_readdirp_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_readdirp_rsp);
+
+}
+ssize_t
+xdr_to_lk_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_lk_rsp);
+
+}
+ssize_t
+xdr_to_getxattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_getxattr_rsp);
+
+}
+ssize_t
+xdr_to_fgetxattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fgetxattr_rsp);
+
+}
+ssize_t
+xdr_to_xattrop_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_xattrop_rsp);
+
+}
+ssize_t
+xdr_to_fxattrop_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fxattrop_rsp);
+
+}
+ssize_t
+xdr_to_setattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_setattr_rsp);
+
+}
+ssize_t
+xdr_to_truncate_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_truncate_rsp);
+
+}
+ssize_t
+xdr_to_ftruncate_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_ftruncate_rsp);
+
+}
+ssize_t
+xdr_to_fsetattr_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fsetattr_rsp);
+
+}
+ssize_t
+xdr_to_readv_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_read_rsp);
+
+}
+ssize_t
+xdr_to_writev_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_write_rsp);
+
+}
+ssize_t
+xdr_to_fsync_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_fsync_rsp);
+
+}
+ssize_t
+xdr_to_statfs_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_statfs_rsp);
+
+}
+ssize_t
+xdr_to_checksum_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_checksum_rsp);
+
+}
+ssize_t
+xdr_to_rchecksum_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_rchecksum_rsp);
+
+}
+ssize_t
+xdr_to_getspec_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_getspec_rsp);
+
+}
+ssize_t
+xdr_to_setvolume_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_setvolume_rsp);
+
+}
+ssize_t
+xdr_to_dump_version_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_dump_version_rsp);
+
+}
+ssize_t
+xdr_to_rmdir_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_rmdir_rsp);
+
+}
+ssize_t
+xdr_to_unlink_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gfs3_unlink_rsp);
+
+}
+ssize_t
+xdr_to_common_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_to_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gf_common_rsp);
+
+}
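+
+/* Usage sketch (illustrative only): xdr_from_* helpers serialize a request
+ * into a caller-provided iovec handed to the transport, while xdr_to_*
+ * helpers decode a received iovec into the matching XDR structure; outvec
+ * and invec below are placeholder iovecs, e.g.:
+ *
+ *   gfs3_stat_req req = {0, };
+ *   gfs3_stat_rsp rsp = {0, };
+ *
+ *   if (xdr_from_stat_req (outvec, &req) == -1)
+ *           return -1;                      // encoding failed
+ *   ...
+ *   xdr_to_stat_rsp (invec, &rsp);          // decode the reply payload
+ */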
diff --git a/xlators/protocol/lib/src/msg-xdr.h b/xlators/protocol/lib/src/msg-xdr.h
new file mode 100644
index 00000000000..4c383e5a52a
--- /dev/null
+++ b/xlators/protocol/lib/src/msg-xdr.h
@@ -0,0 +1,536 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _MSG_XDR_H
+#define _MSG_XDR_H
+
+#include <sys/uio.h>
+
+#include "glusterfs-xdr.h"
+
+#define xdr_decoded_remaining_addr(xdr) ((&xdr)->x_private)
+#define xdr_decoded_remaining_len(xdr) ((&xdr)->x_handy)
+#define xdr_encoded_length(xdr) (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base))
+#define xdr_decoded_length(xdr) (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base))
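+
+/* These macros take an XDR object (not a pointer). An illustrative use,
+ * assuming buf/buflen describe a scratch buffer:
+ *
+ *   XDR xdr;
+ *   xdrmem_create (&xdr, buf, buflen, XDR_ENCODE);
+ *   if (xdr_gfs3_stat_req (&xdr, &req))
+ *           encoded = xdr_encoded_length (xdr);
+ */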
+
+
+/* FOPS */
+ssize_t
+xdr_serialize_lookup_rsp (struct iovec outmsg, void *resp);
+
+ssize_t
+xdr_serialize_getspec_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_common_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_setvolume_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_open_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_create_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_mknod_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_mkdir_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_symlink_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_link_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_rename_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_writev_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_readv_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_readdir_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_readdirp_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_opendir_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_setattr_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_fsetattr_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_truncate_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_ftruncate_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_checksum_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_statfs_rsp (struct iovec outmsg, void *rsp);
+
+
+ssize_t
+xdr_serialize_lk_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_xattrop_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_fxattrop_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_getxattr_rsp (struct iovec outmsg, void *rsp);
+
+
+ssize_t
+xdr_serialize_fgetxattr_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_unlink_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_rmdir_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_rchecksum_rsp (struct iovec outmsg, void *rsp);
+
+
+ssize_t
+xdr_serialize_fstat_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_fsync_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_readlink_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+xdr_serialize_stat_rsp (struct iovec outmsg, void *rsp);
+
+
+ssize_t
+xdr_to_lookup_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_getspec_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_setvolume_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_statfs_req (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_stat_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_getattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fstat_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_setattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fsetattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_readv_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_writev_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fsetattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_readlink_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_create_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_open_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_release_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_xattrop_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fxattrop_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_setxattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fsetxattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_flush_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_unlink_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fsync_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_ftruncate_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_truncate_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_getxattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fgetxattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_removexattr_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_entrylk_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fentrylk_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_inodelk_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_finodelk_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_lk_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_access_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_opendir_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_readdirp_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_readdir_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fsyncdir_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_mknod_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_mkdir_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_symlink_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_rmdir_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_checksum_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_rchecksum_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_rename_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_link_req (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_from_lookup_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_getspec_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_stat_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_access_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_truncate_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_ftruncate_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_readlink_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_writev_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_readv_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_flush_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fstat_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fsync_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_open_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_unlink_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_rmdir_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fsyncdir_req (struct iovec outmsg, void *args);
+
+
+ssize_t
+xdr_from_fsetxattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_setxattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_getxattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fgetxattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_statfs_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_opendir_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_lk_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_inodelk_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_finodelk_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_entrylk_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fentrylk_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_removexattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_xattrop_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fxattrop_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_checksum_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_rchecksum_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_readdir_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_readdirp_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_setattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_fsetattr_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_symlink_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_rename_req (struct iovec outmsg, void *args);
+
+
+ssize_t
+xdr_from_link_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_rename_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_create_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_mkdir_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_mknod_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_releasedir_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_release_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_dump_version_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_from_setvolume_req (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_to_setvolume_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_dump_version_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_serialize_dump_version_rsp (struct iovec outmsg, void *args);
+
+ssize_t
+xdr_to_dump_version_req (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_statfs_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_stat_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fstat_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_rename_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_readlink_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_link_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_access_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_truncate_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_ftruncate_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_unlink_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_rmdir_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_open_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_create_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_mkdir_rsp (struct iovec inmsg, void *args);
+
+
+ssize_t
+xdr_to_mknod_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_setattr_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fsetattr_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_common_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_getxattr_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fxattrop_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_xattrop_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_symlink_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_fgetxattr_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_checksum_rsp (struct iovec inmsg, void *args);
+ssize_t
+xdr_to_rchecksum_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_lk_rsp (struct iovec inmsg, void *args);
+ssize_t
+xdr_to_readdirp_rsp (struct iovec inmsg, void *args);
+
+ssize_t
+xdr_to_readdir_rsp (struct iovec inmsg, void *args);
+ssize_t
+xdr_to_opendir_rsp (struct iovec inmsg, void *args);
+ssize_t
+xdr_to_lookup_rsp (struct iovec inmsg, void *args);
+ssize_t
+xdr_to_readv_rsp (struct iovec inmsg, void *args);
+
+#endif /* !_MSG_XDR_H */
diff --git a/xlators/protocol/lib/src/protocol-common.c b/xlators/protocol/lib/src/protocol-common.c
new file mode 100644
index 00000000000..4a9845e082d
--- /dev/null
+++ b/xlators/protocol/lib/src/protocol-common.c
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "globals.h"
+#include "compat.h"
+#include "protocol-common.h"
+#include "glusterfs-xdr.h"
+
+
+static int
+gf_dirent_nb_size (gf_dirent_t *entries)
+{
+ return (sizeof (struct gf_dirent_nb) + strlen (entries->d_name) + 1);
+}
+
+int
+gf_dirent_serialize (gf_dirent_t *entries, char *buf, size_t buf_size)
+{
+ struct gf_dirent_nb *entry_nb = NULL;
+ gf_dirent_t *entry = NULL;
+ int size = 0;
+ int entry_size = 0;
+
+
+ list_for_each_entry (entry, &entries->list, list) {
+ entry_size = gf_dirent_nb_size (entry);
+
+ if (buf && (size + entry_size <= buf_size)) {
+ entry_nb = (void *) (buf + size);
+
+ entry_nb->d_ino = entry->d_ino;
+ entry_nb->d_off = entry->d_off;
+ entry_nb->d_len = entry->d_len;
+ entry_nb->d_type = entry->d_type;
+
+ gf_stat_from_iatt (&entry_nb->d_stat, &entry->d_stat);
+
+ strcpy (entry_nb->d_name, entry->d_name);
+ }
+ size += entry_size;
+ }
+
+ return size;
+}
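+
+/* A minimal usage sketch (illustrative only): a first call with buf == NULL
+ * computes the required buffer size, a second call packs the entries:
+ *
+ *   int size = gf_dirent_serialize (&entries, NULL, 0);
+ *   char *buf = GF_CALLOC (1, size, gf_common_mt_char);
+ *   if (buf)
+ *           gf_dirent_serialize (&entries, buf, size);
+ */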
+
+
+int
+gf_dirent_unserialize (gf_dirent_t *entries, const char *buf, size_t buf_size)
+{
+ struct gf_dirent_nb *entry_nb = NULL;
+ int remaining_size = 0;
+ int least_dirent_size = 0;
+ int count = 0;
+ gf_dirent_t *entry = NULL;
+ int entry_strlen = 0;
+ int entry_len = 0;
+
+
+ remaining_size = buf_size;
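+ /* the smallest possible serialized entry: the fixed header followed
+ * by a one-character name and its NUL terminator */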
+ least_dirent_size = (sizeof (struct gf_dirent_nb) + 2);
+
+ while (remaining_size >= least_dirent_size) {
+ entry_nb = (void *)(buf + (buf_size - remaining_size));
+
+ entry_strlen = strnlen (entry_nb->d_name, remaining_size);
+ if (entry_strlen == remaining_size) {
+ break;
+ }
+
+ entry_len = sizeof (gf_dirent_t) + entry_strlen + 1;
+ entry = GF_CALLOC (1, entry_len, gf_common_mt_gf_dirent_t);
+ if (!entry) {
+ break;
+ }
+
+ entry->d_ino = entry_nb->d_ino;
+ entry->d_off = entry_nb->d_off;
+ entry->d_len = entry_nb->d_len;
+ entry->d_type = entry_nb->d_type;
+
+ gf_stat_to_iatt (&entry_nb->d_stat, &entry->d_stat);
+
+ strcpy (entry->d_name, entry_nb->d_name);
+
+ list_add_tail (&entry->list, &entries->list);
+
+ remaining_size -= (sizeof (*entry_nb) + entry_strlen + 1);
+ count++;
+ }
+
+ return count;
+}
diff --git a/xlators/protocol/lib/src/protocol-common.h b/xlators/protocol/lib/src/protocol-common.h
new file mode 100644
index 00000000000..98a79d0fa17
--- /dev/null
+++ b/xlators/protocol/lib/src/protocol-common.h
@@ -0,0 +1,104 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _PROTOCOL_COMMON_H
+#define _PROTOCOL_COMMON_H
+
+#define GF_PROTOCOL_DEFAULT_PORT 6971
+
+#define GF_LOCAL_IOV 1 /* All headers are here */
+#define GF_EXTRA_IOV 2 /* needed for write/read etc */
+
+enum gf_fop_procnum {
+ GFS3_OP_NULL, /* 0 */
+ GFS3_OP_STAT,
+ GFS3_OP_READLINK,
+ GFS3_OP_MKNOD,
+ GFS3_OP_MKDIR,
+ GFS3_OP_UNLINK,
+ GFS3_OP_RMDIR,
+ GFS3_OP_SYMLINK,
+ GFS3_OP_RENAME,
+ GFS3_OP_LINK,
+ GFS3_OP_TRUNCATE,
+ GFS3_OP_OPEN,
+ GFS3_OP_READ,
+ GFS3_OP_WRITE,
+ GFS3_OP_STATFS,
+ GFS3_OP_FLUSH,
+ GFS3_OP_FSYNC,
+ GFS3_OP_SETXATTR,
+ GFS3_OP_GETXATTR,
+ GFS3_OP_REMOVEXATTR,
+ GFS3_OP_OPENDIR,
+ GFS3_OP_FSYNCDIR,
+ GFS3_OP_ACCESS,
+ GFS3_OP_CREATE,
+ GFS3_OP_FTRUNCATE,
+ GFS3_OP_FSTAT,
+ GFS3_OP_LK,
+ GFS3_OP_LOOKUP,
+ GFS3_OP_READDIR,
+ GFS3_OP_INODELK,
+ GFS3_OP_FINODELK,
+ GFS3_OP_ENTRYLK,
+ GFS3_OP_FENTRYLK,
+ GFS3_OP_CHECKSUM,
+ GFS3_OP_XATTROP,
+ GFS3_OP_FXATTROP,
+ GFS3_OP_FGETXATTR,
+ GFS3_OP_FSETXATTR,
+ GFS3_OP_RCHECKSUM,
+ GFS3_OP_SETATTR,
+ GFS3_OP_FSETATTR,
+ GFS3_OP_READDIRP,
+ GFS3_OP_RELEASE,
+ GFS3_OP_RELEASEDIR,
+ GFS3_OP_MAXVALUE,
+};
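+
+/* These procedure numbers go on the wire, so new FOPs should only be added
+ * just before GFS3_OP_MAXVALUE; reordering existing entries would break
+ * compatibility between 3.1 clients and servers. */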
+
+enum gf_handshake_procnum {
+ GF_HNDSK_NULL,
+ GF_HNDSK_DUMP_VERSION,
+ GF_HNDSK_SETVOLUME,
+ GF_HNDSK_GETSPEC,
+ GF_HNDSK_PING,
+};
+
+enum gf_mgmt_procnum {
+ GF1_MGMT_NULL, /* 0 */
+};
+
+
+#define GLUSTER3_1_FOP_PROGRAM 1298437 /* Completely random */
+#define GLUSTER3_1_FOP_VERSION 310 /* 3.1.0 */
+#define GLUSTER3_1_FOP_PROCCNT GFS3_OP_MAXVALUE
+
+#define GLUSTER1_MGMT_PROGRAM 1298433 /* Completely random */
+#define GLUSTER1_MGMT_VERSION 1 /* 0.0.1 */
+
+#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
+#define GLUSTER_HNDSK_VERSION 1 /* 0.0.1 */
+
+int
+gf_dirent_unserialize (gf_dirent_t *entries, const char *buf, size_t buf_size);
+int
+gf_dirent_serialize (gf_dirent_t *entries, char *buf, size_t buf_size);
+
+#endif /* !_PROTOCOL_COMMON_H */
diff --git a/xlators/protocol/rpc/Makefile.am b/xlators/protocol/rpc/Makefile.am
new file mode 100644
index 00000000000..bd435fa6d38
--- /dev/null
+++ b/xlators/protocol/rpc/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = rpc-lib rpc-transport
diff --git a/xlators/protocol/rpc/rpc-lib/Makefile.am b/xlators/protocol/rpc/rpc-lib/Makefile.am
new file mode 100644
index 00000000000..af437a64d6d
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = src
diff --git a/xlators/protocol/rpc/rpc-lib/src/Makefile.am b/xlators/protocol/rpc/rpc-lib/src/Makefile.am
new file mode 100644
index 00000000000..4df8888a08d
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/Makefile.am
@@ -0,0 +1,15 @@
+lib_LTLIBRARIES = libgfrpc.la
+libgfrpc_la_LDFLAGS = -module -avoidversion
+
+libgfrpc_la_SOURCES = auth-unix.c rpcsvc-auth.c rpcsvc.c auth-null.c \
+ rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c
+libgfrpc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = rpcsvc.h rpc-transport.h xdr-common.h xdr-rpc.h xdr-rpcclnt.h \
+ rpc-clnt.h rpcsvc-common.h
+AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
+ -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS) \
+ -I$(top_srcdir)/xlators/protocol/lib/src \
+ -DRPC_TRANSPORTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport\"
+
+CLEANFILES = *~
diff --git a/xlators/protocol/rpc/rpc-lib/src/auth-glusterfs.c b/xlators/protocol/rpc/rpc-lib/src/auth-glusterfs.c
new file mode 100644
index 00000000000..165e52a176b
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/auth-glusterfs.c
@@ -0,0 +1,112 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc.h"
+#include "list.h"
+#include "dict.h"
+#include "xdr-rpc.h"
+#include "glusterfs-xdr.h"
+
+ssize_t
+xdr_to_glusterfs_auth (char *buf, struct auth_glusterfs_parms *req)
+{
+ XDR xdr;
+ ssize_t ret = -1;
+
+ if ((!buf) || (!req))
+ return -1;
+
+ xdrmem_create (&xdr, buf, sizeof (struct auth_glusterfs_parms),
+ XDR_DECODE);
+ if (!xdr_auth_glusterfs_parms (&xdr, req)) {
+ ret = -1;
+ goto ret;
+ }
+
+ ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));
+ret:
+ return ret;
+
+}
+int
+auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv)
+{
+ if (!req)
+ return -1;
+ memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ req->verf.datalen = 0;
+ req->verf.flavour = AUTH_NULL;
+
+ return 0;
+}
+
+int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
+{
+ int ret = RPCSVC_AUTH_REJECT;
+ struct auth_glusterfs_parms au = {0,};
+
+ if (!req)
+ return ret;
+
+ ret = xdr_to_glusterfs_auth (req->cred.authdata, &au);
+ if (ret == -1) {
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ req->pid = au.pid;
+ req->uid = au.uid;
+ req->gid = au.gid;
+ req->lk_owner = au.lk_owner;
+ req->auxgidcount = au.ngrps;
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
+ ", gid: %d, owner: %"PRId64,
+ req->pid, req->uid, req->gid, req->lk_owner);
+ ret = RPCSVC_AUTH_ACCEPT;
+err:
+ return ret;
+}
+
+rpcsvc_auth_ops_t auth_glusterfs_ops = {
+ .conn_init = NULL,
+ .request_init = auth_glusterfs_request_init,
+ .authenticate = auth_glusterfs_authenticate
+};
+
+rpcsvc_auth_t rpcsvc_auth_glusterfs = {
+ .authname = "AUTH_GLUSTERFS",
+ .authnum = AUTH_GLUSTERFS,
+ .authops = &auth_glusterfs_ops,
+ .authprivate = NULL
+};
+
+
+rpcsvc_auth_t *
+rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options)
+{
+ return &rpcsvc_auth_glusterfs;
+}
diff --git a/xlators/protocol/rpc/rpc-lib/src/auth-null.c b/xlators/protocol/rpc/rpc-lib/src/auth-null.c
new file mode 100644
index 00000000000..a2581a1718d
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/auth-null.c
@@ -0,0 +1,70 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc.h"
+#include "list.h"
+#include "dict.h"
+
+
+int
+auth_null_request_init (rpcsvc_request_t *req, void *priv)
+{
+ if (!req)
+ return -1;
+
+ memset (req->cred.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ req->cred.datalen = 0;
+
+ memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ req->verf.datalen = 0;
+
+ return 0;
+}
+
+int auth_null_authenticate (rpcsvc_request_t *req, void *priv)
+{
+ /* Always succeed. */
+ return RPCSVC_AUTH_ACCEPT;
+}
+
+rpcsvc_auth_ops_t auth_null_ops = {
+ .conn_init = NULL,
+ .request_init = auth_null_request_init,
+ .authenticate = auth_null_authenticate
+};
+
+rpcsvc_auth_t rpcsvc_auth_null = {
+ .authname = "AUTH_NULL",
+ .authnum = AUTH_NULL,
+ .authops = &auth_null_ops,
+ .authprivate = NULL
+};
+
+
+rpcsvc_auth_t *
+rpcsvc_auth_null_init (rpcsvc_t *svc, dict_t *options)
+{
+ return &rpcsvc_auth_null;
+}
diff --git a/xlators/protocol/rpc/rpc-lib/src/auth-unix.c b/xlators/protocol/rpc/rpc-lib/src/auth-unix.c
new file mode 100644
index 00000000000..aed3c1f9d46
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/auth-unix.c
@@ -0,0 +1,90 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc.h"
+#include "list.h"
+#include "dict.h"
+#include "xdr-rpc.h"
+
+
+int
+auth_unix_request_init (rpcsvc_request_t *req, void *priv)
+{
+ if (!req)
+ return -1;
+ memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
+ req->verf.datalen = 0;
+ req->verf.flavour = AUTH_NULL;
+
+ return 0;
+}
+
+int auth_unix_authenticate (rpcsvc_request_t *req, void *priv)
+{
+ int ret = RPCSVC_AUTH_REJECT;
+ struct authunix_parms aup;
+ char machname[MAX_MACHINE_NAME];
+
+ if (!req)
+ return ret;
+
+ ret = xdr_to_auth_unix_cred (req->cred.authdata, req->cred.datalen,
+ &aup, machname, req->auxgids);
+ if (ret == -1) {
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ req->uid = aup.aup_uid;
+ req->gid = aup.aup_gid;
+ req->auxgidcount = aup.aup_len;
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: machine name: %s, uid: %d"
+ ", gid: %d", machname, req->uid, req->gid);
+ ret = RPCSVC_AUTH_ACCEPT;
+err:
+ return ret;
+}
+
+rpcsvc_auth_ops_t auth_unix_ops = {
+ .conn_init = NULL,
+ .request_init = auth_unix_request_init,
+ .authenticate = auth_unix_authenticate
+};
+
+rpcsvc_auth_t rpcsvc_auth_unix = {
+ .authname = "AUTH_UNIX",
+ .authnum = AUTH_UNIX,
+ .authops = &auth_unix_ops,
+ .authprivate = NULL
+};
+
+
+rpcsvc_auth_t *
+rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options)
+{
+ return &rpcsvc_auth_unix;
+}
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpc-clnt.c b/xlators/protocol/rpc/rpc-lib/src/rpc-clnt.c
new file mode 100644
index 00000000000..92f57e5213f
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpc-clnt.c
@@ -0,0 +1,1281 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpc-clnt.h"
+#include "xdr-rpcclnt.h"
+#include "rpc-transport.h"
+#include "protocol-common.h"
+
+uint64_t
+rpc_clnt_new_callid (struct rpc_clnt *clnt)
+{
+ uint64_t callid = 0;
+
+ pthread_mutex_lock (&clnt->lock);
+ {
+ callid = ++clnt->xid;
+ }
+ pthread_mutex_unlock (&clnt->lock);
+
+ return callid;
+}
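+
+/* the callid doubles as the RPC xid of the request, which is what
+ * lookup_frame() later uses to match a reply to its saved frame */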
+
+
+struct saved_frame *
+__saved_frames_get_timedout (struct saved_frames *frames, uint32_t timeout,
+ struct timeval *current)
+{
+ struct saved_frame *bailout_frame = NULL, *tmp = NULL;
+
+ if (!list_empty(&frames->sf.list)) {
+ tmp = list_entry (frames->sf.list.next, typeof (*tmp), list);
+ if ((tmp->saved_at.tv_sec + timeout) < current->tv_sec) {
+ bailout_frame = tmp;
+ list_del_init (&bailout_frame->list);
+ frames->count--;
+ }
+ }
+
+ return bailout_frame;
+}
+
+
+struct saved_frame *
+__saved_frames_put (struct saved_frames *frames, void *frame,
+ int32_t procnum, rpc_clnt_prog_t *prog, int64_t callid)
+{
+ struct saved_frame *saved_frame = NULL;
+
+ saved_frame = GF_CALLOC (sizeof (*saved_frame), 1, 0);
+ if (!saved_frame) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+ /* THIS should be saved and set back */
+
+ INIT_LIST_HEAD (&saved_frame->list);
+
+ saved_frame->capital_this = THIS;
+ saved_frame->frame = frame;
+ saved_frame->procnum = procnum;
+ saved_frame->callid = callid;
+ saved_frame->prog = prog;
+
+ gettimeofday (&saved_frame->saved_at, NULL);
+
+ list_add_tail (&saved_frame->list, &frames->sf.list);
+ frames->count++;
+
+out:
+ return saved_frame;
+}
+
+
+void
+saved_frames_delete (struct saved_frame *saved_frame,
+ rpc_clnt_connection_t *conn)
+{
+ if (!saved_frame || !conn) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ list_del_init (&saved_frame->list);
+ conn->saved_frames->count--;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ GF_FREE (saved_frame);
+out:
+ return;
+}
+
+
+static void
+call_bail (void *data)
+{
+ struct rpc_clnt *clnt = NULL;
+ rpc_clnt_connection_t *conn = NULL;
+ struct timeval current;
+ struct list_head list;
+ struct saved_frame *saved_frame = NULL;
+ struct saved_frame *trav = NULL;
+ struct saved_frame *tmp = NULL;
+ struct tm frame_sent_tm;
+ char frame_sent[32] = {0,};
+ struct timeval timeout = {0,};
+ gf_timer_cbk_t timer_cbk = NULL;
+ struct rpc_req req;
+ struct iovec iov = {0,};
+
+ GF_VALIDATE_OR_GOTO ("client", data, out);
+
+ clnt = data;
+
+ conn = &clnt->conn;
+
+ gettimeofday (&current, NULL);
+ INIT_LIST_HEAD (&list);
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ /* Chaining to get call-always functionality from
+ call-once timer */
+ if (conn->timer) {
+ timer_cbk = conn->timer->callbk;
+
+ timeout.tv_sec = 10;
+ timeout.tv_usec = 0;
+
+ gf_timer_call_cancel (clnt->ctx, conn->timer);
+ conn->timer = gf_timer_call_after (clnt->ctx,
+ timeout,
+ call_bail,
+ (void *) clnt);
+
+ if (conn->timer == NULL) {
+ gf_log (conn->trans->name, GF_LOG_DEBUG,
+ "Cannot create bailout timer");
+ }
+ }
+
+ do {
+ saved_frame =
+ __saved_frames_get_timedout (conn->saved_frames,
+ conn->frame_timeout,
+ &current);
+ if (saved_frame)
+ list_add (&saved_frame->list, &list);
+
+ } while (saved_frame);
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ /* unwind the bailed-out frames with an empty, failed reply */
+ memset (&req, 0, sizeof (req));
+ req.rpc_status = -1;
+
+ list_for_each_entry_safe (trav, tmp, &list, list) {
+ localtime_r (&trav->saved_at.tv_sec, &frame_sent_tm);
+ strftime (frame_sent, 32, "%Y-%m-%d %H:%M:%S", &frame_sent_tm);
+
+ gf_log (conn->trans->name, GF_LOG_ERROR,
+ "bailing out frame type(%s) op(%s) frame sent = %s. "
+ "frame-timeout = %d",
+ trav->prog->progname,
+ trav->prog->actor[trav->procnum].procname, frame_sent,
+ conn->frame_timeout);
+
+ trav->prog->actor [trav->procnum].cbkfn (&req, &iov, 1,
+ trav->frame);
+
+ list_del_init (&trav->list);
+ GF_FREE (trav);
+ }
+out:
+ return;
+}
+
+
+/* to be called with conn->lock held */
+struct saved_frame *
+__save_frame (struct rpc_clnt *rpc_clnt, call_frame_t *frame, int procnum,
+ rpc_clnt_prog_t *prog, uint64_t callid)
+{
+ rpc_clnt_connection_t *conn = NULL;
+ struct timeval timeout = {0, };
+ struct saved_frame *saved_frame = NULL;
+
+ conn = &rpc_clnt->conn;
+
+ saved_frame = __saved_frames_put (conn->saved_frames, frame,
+ procnum, prog, callid);
+ if (saved_frame == NULL) {
+ goto out;
+ }
+
+ /* TODO: make timeout configurable */
+ if (conn->timer == NULL) {
+ timeout.tv_sec = 10;
+ timeout.tv_usec = 0;
+ conn->timer = gf_timer_call_after (rpc_clnt->ctx,
+ timeout,
+ call_bail,
+ (void *) rpc_clnt);
+ }
+
+out:
+ return saved_frame;
+}
+
+
+struct saved_frames *
+saved_frames_new (void)
+{
+ struct saved_frames *saved_frames = NULL;
+
+ saved_frames = GF_CALLOC (sizeof (*saved_frames), 1, 0);
+ if (!saved_frames) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "out of memory");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD (&saved_frames->sf.list);
+
+ return saved_frames;
+}
+
+
+int
+__saved_frame_copy (struct saved_frames *frames, int64_t callid,
+ struct saved_frame *saved_frame)
+{
+ struct saved_frame *tmp = NULL;
+ int ret = -1;
+
+ if (!saved_frame) {
+ ret = 0;
+ goto out;
+ }
+
+ list_for_each_entry (tmp, &frames->sf.list, list) {
+ if (tmp->callid == callid) {
+ *saved_frame = *tmp;
+ ret = 0;
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+
+struct saved_frame *
+__saved_frame_get (struct saved_frames *frames, int64_t callid)
+{
+ struct saved_frame *saved_frame = NULL;
+ struct saved_frame *tmp = NULL;
+
+ list_for_each_entry (tmp, &frames->sf.list, list) {
+ if (tmp->callid == callid) {
+ list_del_init (&tmp->list);
+ frames->count--;
+ saved_frame = tmp;
+ break;
+ }
+ }
+
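+ /* restore the xlator context ("THIS") that was active when the
+ * frame was saved, so the callback runs in that context */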
+ if (saved_frame) {
+ THIS = saved_frame->capital_this;
+ }
+
+ return saved_frame;
+}
+
+void
+saved_frames_unwind (struct saved_frames *saved_frames)
+{
+ struct saved_frame *trav = NULL;
+ struct saved_frame *tmp = NULL;
+
+ struct rpc_req req;
+ struct iovec iov = {0,};
+
+ memset (&req, 0, sizeof (req));
+
+ req.rpc_status = -1;
+
+ list_for_each_entry_safe (trav, tmp, &saved_frames->sf.list, list) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR,
+ "forced unwinding frame type(%s) op(%s)",
+ trav->prog->progname,
+ trav->prog->actor [trav->procnum].procname);
+
+ saved_frames->count--;
+
+ trav->prog->actor [trav->procnum].cbkfn (&req, &iov, 1,
+ trav->frame);
+
+ list_del_init (&trav->list);
+ GF_FREE (trav);
+ }
+}
+
+
+void
+saved_frames_destroy (struct saved_frames *frames)
+{
+ saved_frames_unwind (frames);
+
+ GF_FREE (frames);
+}
+
+
+void
+rpc_clnt_reconnect (void *trans_ptr)
+{
+ rpc_transport_t *trans = NULL;
+ rpc_clnt_connection_t *conn = NULL;
+ struct timeval tv = {0, 0};
+ int32_t ret = 0;
+ struct rpc_clnt *clnt = NULL;
+
+ trans = trans_ptr;
+ if (!trans || !trans->mydata)
+ return;
+
+ conn = trans->mydata;
+ clnt = conn->rpc_clnt;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ if (conn->reconnect)
+ gf_timer_call_cancel (clnt->ctx,
+ conn->reconnect);
+ conn->reconnect = 0;
+
+ if (conn->connected == 0) {
+ tv.tv_sec = 3;
+
+ gf_log (trans->name, GF_LOG_TRACE,
+ "attempting reconnect");
+ ret = rpc_transport_connect (trans);
+
+ conn->reconnect =
+ gf_timer_call_after (clnt->ctx, tv,
+ rpc_clnt_reconnect,
+ trans);
+ } else {
+ gf_log (trans->name, GF_LOG_TRACE,
+ "breaking reconnect chain");
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ if ((ret == -1) && (errno != EINPROGRESS) && (clnt->notifyfn)) {
+ clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_DISCONNECT, NULL);
+ }
+
+ return;
+}
+
+
+int
+rpc_clnt_fill_request_info (struct rpc_clnt *clnt, rpc_request_info_t *info)
+{
+ struct saved_frame saved_frame = {{}, 0};
+ int ret = -1;
+
+ pthread_mutex_lock (&clnt->conn.lock);
+ {
+ ret = __saved_frame_copy (clnt->conn.saved_frames, info->xid,
+ &saved_frame);
+ }
+ pthread_mutex_unlock (&clnt->conn.lock);
+
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_CRITICAL, "cannot lookup the saved "
+ "frame corresponding to xid (%d)", info->xid);
+ goto out;
+ }
+
+ info->prognum = saved_frame.prog->prognum;
+ info->procnum = saved_frame.procnum;
+ info->progver = saved_frame.prog->progver;
+ info->rsp = saved_frame.rsp;
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+/*
+ * rpc_clnt_connection_cleanup - cleanup function
+ * @conn: rpc-client connection object
+ *
+ */
+int
+rpc_clnt_connection_cleanup (rpc_clnt_connection_t *conn)
+{
+ struct saved_frames *saved_frames = NULL;
+ struct rpc_clnt *clnt = NULL;
+
+ if (!conn) {
+ goto out;
+ }
+
+ clnt = conn->rpc_clnt;
+
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "cleaning up state in transport object %p", conn->trans);
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ saved_frames = conn->saved_frames;
+ conn->saved_frames = saved_frames_new ();
+
+ /* bailout logic cleanup */
+ if (conn->timer) {
+ gf_timer_call_cancel (clnt->ctx, conn->timer);
+ conn->timer = NULL;
+ }
+
+ if (conn->reconnect == NULL) {
+ /* :O This part is empty.. anything missing? */
+ }
+
+ conn->connected = 0;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ saved_frames_destroy (saved_frames);
+
+out:
+ return 0;
+}
+
+/*
+ * lookup_frame - lookup call frame corresponding to a given callid
+ * @conn: rpc-client connection object
+ * @callid: call id of the frame
+ *
+ * not for external reference
+ */
+
+static struct saved_frame *
+lookup_frame (rpc_clnt_connection_t *conn, int64_t callid)
+{
+ struct saved_frame *frame = NULL;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ frame = __saved_frame_get (conn->saved_frames, callid);
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ return frame;
+}
+
+
+int
+rpc_clnt_reply_fill (rpc_transport_pollin_t *msg,
+ rpc_clnt_connection_t *conn,
+ struct rpc_msg *replymsg, struct iovec progmsg,
+ struct rpc_req *req, struct saved_frame *saved_frame)
+{
+ int ret = -1;
+
+ if ((!conn) || (!replymsg)|| (!req) || (!saved_frame) || (!msg)) {
+ goto out;
+ }
+
+ req->rpc_status = 0;
+ if ((rpc_reply_status (replymsg) == MSG_DENIED)
+ || (rpc_accepted_reply_status (replymsg) != SUCCESS)) {
+ req->rpc_status = -1;
+ }
+
+ req->xid = rpc_reply_xid (replymsg);
+ req->prog = saved_frame->prog;
+ req->procnum = saved_frame->procnum;
+ req->conn = conn;
+
+ req->rsp[0] = progmsg;
+
+ if (msg->vectored) {
+ req->rsp[1].iov_base = iobuf_ptr (msg->data.vector.iobuf2);
+ req->rsp[1].iov_len = msg->data.vector.size2;
+
+ req->rspcnt = 2;
+
+ req->rsp_prochdr = iobuf_ref (msg->data.vector.iobuf1);
+ req->rsp_procpayload = iobuf_ref (msg->data.vector.iobuf2);
+ } else {
+ req->rspcnt = 1;
+
+ req->rsp_prochdr = iobuf_ref (msg->data.simple.iobuf);
+ }
+
+ /* By this time, the data bytes for the auth scheme would have already
+ * been copied into the required sections of the req structure;
+ * we just need to fill in the meta-data about it now.
+ */
+ if (req->rpc_status == 0) {
+ /*
+ * req->verf.flavour = rpc_reply_verf_flavour (replymsg);
+ * req->verf.datalen = rpc_reply_verf_len (replymsg);
+ */
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+void
+rpc_clnt_reply_deinit (struct rpc_req *req)
+{
+ if (!req) {
+ goto out;
+ }
+
+ if (req->rsp_prochdr) {
+ iobuf_unref (req->rsp_prochdr);
+ }
+
+ if (req->rsp_procpayload) {
+ iobuf_unref (req->rsp_procpayload);
+ }
+
+out:
+ return;
+}
+
+
+/* TODO: use mem-pool for allocating requests */
+int
+rpc_clnt_reply_init (rpc_clnt_connection_t *conn, rpc_transport_pollin_t *msg,
+ struct rpc_req *req, struct saved_frame *saved_frame)
+{
+ char *msgbuf = NULL;
+ struct rpc_msg rpcmsg;
+ struct iovec progmsg; /* RPC Program payload */
+ size_t msglen = 0;
+ int ret = -1;
+
+ if (msg->vectored) {
+ msgbuf = iobuf_ptr (msg->data.vector.iobuf1);
+ msglen = msg->data.vector.size1;
+ } else {
+ msgbuf = iobuf_ptr (msg->data.simple.iobuf);
+ msglen = msg->data.simple.size;
+ }
+
+ ret = xdr_to_rpc_reply (msgbuf, msglen, &rpcmsg, &progmsg,
+ req->verf.authdata);
+ if (ret != 0) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "RPC reply decoding failed");
+ goto out;
+ }
+
+ ret = rpc_clnt_reply_fill (msg, conn, &rpcmsg, progmsg, req,
+ saved_frame);
+ if (ret != 0) {
+ goto out;
+ }
+
+ gf_log ("rpc-clnt", GF_LOG_TRACE, "RPC XID: %"PRIx64", Program: %s,"
+ " ProgVers: %d, Proc: %d", saved_frame->callid,
+ saved_frame->prog->progname, saved_frame->prog->progver,
+ saved_frame->procnum);
+/* TODO: */
+ /* TODO: AUTH */
+ /* The verifier that is sent in a reply is a string that can be used as
+ * a shorthand in credentials for future transactions. We can opt not to
+ * use this shorthand, preferring to use the original AUTH_UNIX method
+ * for authentication (containing all the details for authentication in
+ * credential itself). Hence it is not mandatory for us to be checking
+ * the verifier. See Appendix A of rfc-5531 for more details.
+ */
+
+ /*
+ * ret = rpc_authenticate (req);
+ * if (ret == RPC_AUTH_REJECT) {
+ * gf_log ("rpc-clnt", GF_LOG_ERROR, "Failed authentication");
+ * ret = -1;
+ * goto out;
+ * }
+ */
+
+ /* If the error is not RPC_MISMATCH, we consider the call as accepted
+ * since we are not handling authentication failures for now.
+ */
+ req->rpc_status = 0;
+
+out:
+ if (ret != 0) {
+ req->rpc_status = -1;
+ }
+
+ return ret;
+}
+
+
+int
+rpc_clnt_handle_reply (struct rpc_clnt *clnt, rpc_transport_pollin_t *pollin)
+{
+ rpc_clnt_connection_t *conn = NULL;
+ struct saved_frame *saved_frame = NULL;
+ rpc_request_info_t *request_info = NULL;
+ int ret = -1;
+ struct rpc_req req = {0, };
+
+ conn = &clnt->conn;
+
+ request_info = pollin->private;
+
+ saved_frame = lookup_frame (conn, (int64_t)request_info->xid);
+ if (saved_frame == NULL) {
+ gf_log ("rpc-clnt", GF_LOG_CRITICAL, "cannot lookup the "
+ "saved frame for reply with xid (%d), "
+ "prog-version (%d), prog-num (%d),"
+ "procnum (%d)", request_info->xid,
+ request_info->progver, request_info->prognum,
+ request_info->procnum);
+ goto out;
+ }
+
+ ret = rpc_clnt_reply_init (conn, pollin, &req, saved_frame);
+ if (ret != 0) {
+ req.rpc_status = -1;
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "initialising rpc reply "
+ "failed");
+ }
+
+ saved_frame->prog->actor [request_info->procnum].cbkfn (&req, req.rsp,
+ req.rspcnt,
+ saved_frame->frame);
+
+ if (ret == 0) {
+ rpc_clnt_reply_deinit (&req);
+ }
+
+ ret = 0;
+out:
+
+ if (saved_frame) {
+ GF_FREE (saved_frame);
+ }
+
+ return ret;
+}
+
+
+inline void
+rpc_clnt_set_connected (rpc_clnt_connection_t *conn)
+{
+ if (!conn) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ conn->connected = 1;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+out:
+ return;
+}
+
+
+void
+rpc_clnt_unset_connected (rpc_clnt_connection_t *conn)
+{
+ if (!conn) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ conn->connected = 0;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+out:
+ return;
+}
+
+
+int
+rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
+ rpc_transport_event_t event, void *data, ...)
+{
+ rpc_clnt_connection_t *conn = NULL;
+ struct rpc_clnt *clnt = NULL;
+ int ret = -1;
+ rpc_request_info_t *req_info = NULL;
+ rpc_transport_pollin_t *pollin = NULL;
+ struct timeval tv = {0, };
+
+ conn = mydata;
+ if (conn == NULL) {
+ goto out;
+ }
+ clnt = conn->rpc_clnt;
+
+ switch (event) {
+ case RPC_TRANSPORT_DISCONNECT:
+ {
+ rpc_clnt_connection_cleanup (&clnt->conn);
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ if (conn->reconnect == NULL) {
+ tv.tv_sec = 10;
+
+ conn->reconnect =
+ gf_timer_call_after (clnt->ctx, tv,
+ rpc_clnt_reconnect,
+ conn->trans);
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_DISCONNECT,
+ NULL);
+ break;
+ }
+
+ case RPC_TRANSPORT_CLEANUP:
+ /* this event should not be received on a client, for a
+ * transport is only disconnected, never destroyed.
+ */
+ ret = 0;
+ break;
+
+ case RPC_TRANSPORT_MAP_XID_REQUEST:
+ {
+ req_info = data;
+ ret = rpc_clnt_fill_request_info (clnt, req_info);
+ break;
+ }
+
+ case RPC_TRANSPORT_MSG_RECEIVED:
+ {
+ pollin = data;
+ ret = rpc_clnt_handle_reply (clnt, pollin);
+ /* ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_MSG,
+ * data);
+ */
+ break;
+ }
+
+ case RPC_TRANSPORT_MSG_SENT:
+ {
+ pthread_mutex_lock (&conn->lock);
+ {
+ gettimeofday (&conn->last_sent, NULL);
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ ret = 0;
+ break;
+ }
+
+ case RPC_TRANSPORT_CONNECT:
+ {
+ ret = clnt->notifyfn (clnt, clnt->mydata, RPC_CLNT_CONNECT, NULL);
+ break;
+ }
+
+ case RPC_TRANSPORT_ACCEPT:
+ /* only meaningful on a server, no need to handle this event
+ * in a client.
+ */
+ ret = 0;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+
+void
+rpc_clnt_connection_deinit (rpc_clnt_connection_t *conn)
+{
+ return;
+}
+
+
+inline int
+rpc_clnt_connection_init (struct rpc_clnt *clnt, glusterfs_ctx_t *ctx,
+ dict_t *options, char *name)
+{
+ int ret = -1;
+ rpc_clnt_connection_t *conn = NULL;
+
+ conn = &clnt->conn;
+ pthread_mutex_init (&clnt->conn.lock, NULL);
+
+ ret = dict_get_int32 (options, "frame-timeout",
+ &conn->frame_timeout);
+ if (ret >= 0) {
+ gf_log (name, GF_LOG_DEBUG,
+ "setting frame-timeout to %d", conn->frame_timeout);
+ } else {
+ gf_log (name, GF_LOG_DEBUG,
+ "defaulting frame-timeout to 30mins");
+ conn->frame_timeout = 1800;
+ }
+
+ conn->trans = rpc_transport_load (ctx, options, name);
+ if (!conn->trans) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "loading of new rpc-transport"
+ " failed");
+ goto out;
+ }
+
+ rpc_transport_ref (conn->trans);
+
+ conn->rpc_clnt = clnt;
+
+ ret = rpc_transport_register_notify (conn->trans, rpc_clnt_notify,
+ conn);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "registering notify failed");
+ rpc_clnt_connection_cleanup (conn);
+ conn = NULL;
+ goto out;
+ }
+
+ conn->saved_frames = saved_frames_new ();
+ if (!conn->saved_frames) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "creation of saved_frames "
+ "failed");
+ rpc_clnt_connection_cleanup (conn);
+ goto out;
+ }
+
+ rpc_clnt_reconnect (conn->trans);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+struct rpc_clnt *
+rpc_clnt_init (struct rpc_clnt_config *config, dict_t *options,
+ glusterfs_ctx_t *ctx, char *name)
+{
+ int ret = -1;
+ struct rpc_clnt *rpc = NULL;
+
+ rpc = GF_CALLOC (1, sizeof (*rpc), 0);
+ if (!rpc) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ pthread_mutex_init (&rpc->lock, NULL);
+
+ ret = rpc_clnt_connection_init (rpc, ctx, options, name);
+ if (ret == -1) {
+ pthread_mutex_destroy (&rpc->lock);
+ GF_FREE (rpc);
+ rpc = NULL;
+ goto out;
+ }
+ rpc->ctx = ctx;
+out:
+ return rpc;
+}
+
+
+int
+rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
+ void *mydata)
+{
+ rpc->mydata = mydata;
+ rpc->notifyfn = fn;
+
+ return 0;
+}
+
+ssize_t
+xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms *au)
+{
+ ssize_t ret = -1;
+ XDR xdr;
+
+ if ((!dest) || (!au))
+ return -1;
+
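+ /* NOTE: the destination buffer is assumed to be at least 1024 bytes;
+ * rpc_clnt_fill_request () below passes an on-stack buffer of that size */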
+ xdrmem_create (&xdr, dest, 1024,
+ XDR_ENCODE);
+
+ if (!xdr_auth_glusterfs_parms (&xdr, au)) {
+ ret = -1;
+ goto ret;
+ }
+
+ ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));
+
+ret:
+ return ret;
+}
+
+
+int
+rpc_clnt_fill_request (int prognum, int progver, int procnum, int payload,
+ uint64_t xid, struct auth_glusterfs_parms *au,
+ struct rpc_msg *request)
+{
+ int ret = -1;
+ char dest[1024] = {0,};
+
+ if (!request) {
+ goto out;
+ }
+
+ memset (request, 0, sizeof (*request));
+
+ request->rm_xid = xid;
+ request->rm_direction = CALL;
+
+ request->rm_call.cb_rpcvers = 2;
+ request->rm_call.cb_prog = prognum;
+ request->rm_call.cb_vers = progver;
+ request->rm_call.cb_proc = procnum;
+
+        /* TODO: Using AUTH_GLUSTERFS for the time being. Make it modular in
+         * the future so it is easy to plug in new authentication schemes.
+         */
+ ret = xdr_serialize_glusterfs_auth (dest, au);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "cannot encode credentials");
+ goto out;
+ }
+
+ request->rm_call.cb_cred.oa_flavor = AUTH_GLUSTERFS;
+ request->rm_call.cb_cred.oa_base = dest;
+ request->rm_call.cb_cred.oa_length = ret;
+
+ request->rm_call.cb_verf.oa_flavor = AUTH_NONE;
+ request->rm_call.cb_verf.oa_base = NULL;
+ request->rm_call.cb_verf.oa_length = 0;
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+void
+rpc_clnt_set_lastfrag (uint32_t *fragsize) {
+ (*fragsize) |= 0x80000000U;
+}
+
+
+void
+rpc_clnt_set_frag_header_size (uint32_t size, char *haddr)
+{
+ size = htonl (size);
+ memcpy (haddr, &size, sizeof (size));
+}
+
+
+void
+rpc_clnt_set_last_frag_header_size (uint32_t size, char *haddr)
+{
+ rpc_clnt_set_lastfrag (&size);
+ rpc_clnt_set_frag_header_size (size, haddr);
+}
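+
+/* Illustrative sketch only (not called from this file): for a record whose
+ * RPC header plus payload add up to, say, 100 bytes, the helpers above write
+ * a 4-byte record-marking header equal to htonl (0x80000000U | 100); the MSB
+ * marks this as the last (and only) fragment and the low 31 bits carry the
+ * fragment length:
+ *
+ *     char hdr[RPC_FRAGHDR_SIZE];
+ *     rpc_clnt_set_last_frag_header_size (100, hdr);
+ */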
+
+
+struct iovec
+rpc_clnt_record_build_header (char *recordstart, size_t rlen,
+ struct rpc_msg *request, size_t payload)
+{
+ struct iovec requesthdr = {0, };
+ struct iovec txrecord = {0, 0};
+ size_t fraglen = 0;
+ int ret = -1;
+
+        /* After leaving aside the 4 bytes for the fragment header, let's
+         * encode the RPC request header into the buffer given to us.
+         */
+ ret = rpc_request_to_xdr (request, (recordstart + RPC_FRAGHDR_SIZE),
+ rlen, &requesthdr);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "Failed to create RPC request");
+ goto out;
+ }
+
+ fraglen = payload + requesthdr.iov_len;
+ gf_log ("rpc-clnt", GF_LOG_TRACE, "Request fraglen %zu, payload: %zu, "
+ "rpc hdr: %zu", fraglen, payload, requesthdr.iov_len);
+
+        /* Since we're not spreading RPC records over multiple fragments,
+         * we just set this fragment as the first and last fragment for this
+         * record.
+         */
+ rpc_clnt_set_last_frag_header_size (fraglen, recordstart);
+
+        /* Even though the RPC record starts at recordstart+RPC_FRAGHDR_SIZE,
+         * we need to transmit the record with the fragment header, which
+         * starts at recordstart.
+         */
+ txrecord.iov_base = recordstart;
+
+ /* Remember, this is only the vec for the RPC header and does not
+ * include the payload above. We needed the payload only to calculate
+ * the size of the full fragment. This size is sent in the fragment
+ * header.
+ */
+ txrecord.iov_len = RPC_FRAGHDR_SIZE + requesthdr.iov_len;
+
+out:
+ return txrecord;
+}
+
+
+struct iobuf *
+rpc_clnt_record_build_record (struct rpc_clnt *clnt, int prognum, int progver,
+ int procnum, size_t payload, uint64_t xid,
+ struct auth_glusterfs_parms *au, struct iovec *recbuf)
+{
+ struct rpc_msg request = {0, };
+ struct iobuf *request_iob = NULL;
+ char *record = NULL;
+ struct iovec recordhdr = {0, };
+ size_t pagesize = 0;
+ int ret = -1;
+
+ if ((!clnt) || (!recbuf) || (!au)) {
+ goto out;
+ }
+
+ /* First, try to get a pointer into the buffer which the RPC
+ * layer can use.
+ */
+ request_iob = iobuf_get (clnt->ctx->iobuf_pool);
+ if (!request_iob) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "Failed to get iobuf");
+ goto out;
+ }
+
+ pagesize = ((struct iobuf_pool *)clnt->ctx->iobuf_pool)->page_size;
+
+ record = iobuf_ptr (request_iob); /* Now we have it. */
+
+ /* Fill the rpc structure and XDR it into the buffer got above. */
+ ret = rpc_clnt_fill_request (prognum, progver, procnum, payload, xid,
+ au, &request);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "cannot build a rpc-request "
+ "xid (%"PRIu64")", xid);
+ goto out;
+ }
+
+ recordhdr = rpc_clnt_record_build_header (record, pagesize, &request,
+ payload);
+
+ //GF_FREE (request.rm_call.cb_cred.oa_base);
+
+ if (!recordhdr.iov_base) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "Failed to build record "
+ " header");
+ iobuf_unref (request_iob);
+ request_iob = NULL;
+ recbuf->iov_base = NULL;
+ goto out;
+ }
+
+ recbuf->iov_base = recordhdr.iov_base;
+ recbuf->iov_len = recordhdr.iov_len;
+
+out:
+ return request_iob;
+}
+
+
+struct iobuf *
+rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame,
+                 rpc_clnt_prog_t *prog, int procnum, size_t payload_len,
+ struct iovec *rpchdr, uint64_t callid)
+{
+ struct auth_glusterfs_parms au = {0, };
+ struct iobuf *request_iob = NULL;
+
+ if (!prog || !rpchdr || !call_frame) {
+ goto out;
+ }
+
+ au.pid = call_frame->root->pid;
+ au.uid = call_frame->root->uid;
+ au.gid = call_frame->root->gid;
+ au.ngrps = call_frame->root->ngrps;
+ au.lk_owner = call_frame->root->lk_owner;
+ if (!au.lk_owner)
+ au.lk_owner = au.pid;
+
+        gf_log ("rpc-clnt", GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
+ ", gid: %d, owner: %"PRId64,
+ au.pid, au.uid, au.gid, au.lk_owner);
+
+ memcpy (au.groups, call_frame->root->groups, 16);
+
+ //rpc_transport_get_myname (clnt->conn.trans, myname, UNIX_PATH_MAX);
+ //au.aup_machname = myname;
+
+ /* Assuming the client program would like to speak to the same versioned
+ * program on server.
+ */
+ request_iob = rpc_clnt_record_build_record (clnt, prog->prognum,
+ prog->progver,
+ procnum, payload_len,
+ callid, &au,
+ rpchdr);
+ if (!request_iob) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG, "cannot build rpc-record");
+ goto out;
+ }
+
+out:
+ return request_iob;
+}
+
+
+int
+rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog, int procnum,
+ struct iovec *proghdr, int proghdrcount,
+ struct iovec *progpayload, int progpayloadcount,
+ struct iobref *iobref, void *frame)
+{
+ rpc_clnt_connection_t *conn = NULL;
+ struct iobuf *request_iob = NULL;
+ struct iovec rpchdr = {0,};
+ struct rpc_req rpcreq = {0,};
+ rpc_transport_req_t req;
+ int ret = -1;
+ int proglen = 0;
+ char new_iobref = 0;
+ uint64_t callid = 0;
+
+ if (!rpc || !prog || !frame) {
+ goto out;
+ }
+
+ memset (&req, 0, sizeof (req));
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ gf_log ("rpc-clnt", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ callid = rpc_clnt_new_callid (rpc);
+
+ conn = &rpc->conn;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ if (conn->connected == 0) {
+ rpc_transport_connect (conn->trans);
+ }
+
+ ret = -1;
+
+ if (conn->connected ||
+ /* FIXME: hack!! hack!! find a neater way to do this */
+ ((prog->prognum == GLUSTER_HNDSK_PROGRAM) &&
+ ((procnum == GF_HNDSK_SETVOLUME) ||
+ (procnum == GF_HNDSK_DUMP_VERSION)))) {
+ if (proghdr) {
+ proglen += iov_length (proghdr, proghdrcount);
+ }
+
+ if (progpayload) {
+ proglen += iov_length (progpayload,
+ progpayloadcount);
+ }
+
+ request_iob = rpc_clnt_record (rpc, frame, prog,
+ procnum, proglen,
+ &rpchdr, callid);
+ if (!request_iob) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "cannot build rpc-record");
+ goto unlock;
+ }
+
+ iobref_add (iobref, request_iob);
+
+ req.msg.rpchdr = &rpchdr;
+ req.msg.rpchdrcount = 1;
+ req.msg.proghdr = proghdr;
+ req.msg.proghdrcount = proghdrcount;
+ req.msg.progpayload = progpayload;
+ req.msg.progpayloadcount = progpayloadcount;
+ req.msg.iobref = iobref;
+
+ ret = rpc_transport_submit_request (rpc->conn.trans,
+ &req);
+ if (ret == -1) {
+ gf_log ("rpc-clnt", GF_LOG_DEBUG,
+ "transmission of rpc-request failed");
+ }
+ }
+
+ if ((ret >= 0) && frame) {
+ gettimeofday (&conn->last_sent, NULL);
+ /* Save the frame in queue */
+ __save_frame (rpc, frame, procnum, prog, callid);
+ }
+
+ }
+unlock:
+ pthread_mutex_unlock (&conn->lock);
+
+ if (ret == -1) {
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ iobuf_unref (request_iob);
+
+ if (new_iobref && iobref) {
+ iobref_unref (iobref);
+ }
+
+        if (frame && prog && (ret == -1)) {
+ rpcreq.rpc_status = -1;
+ prog->actor [procnum].cbkfn (&rpcreq, NULL, 0, frame);
+ }
+ return ret;
+}
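+
+/* A minimal usage sketch, with hypothetical names (my_notify_fn, my_prog,
+ * MY_PROCNUM, hdrbuf); the real callers live in the protocol/client xlator.
+ * It only illustrates the intended call sequence:
+ *
+ *     struct rpc_clnt *rpc = rpc_clnt_init (NULL, options, ctx, "my-client");
+ *     rpc_clnt_register_notify (rpc, my_notify_fn, this);
+ *
+ *     struct iovec proghdr = { .iov_base = hdrbuf, .iov_len = hdrlen };
+ *     rpc_clnt_submit (rpc, &my_prog, MY_PROCNUM, &proghdr, 1,
+ *                      NULL, 0, NULL, frame);
+ *
+ * The reply is handed to my_prog.actor[MY_PROCNUM].cbkfn once
+ * rpc_clnt_handle_reply() matches the response xid against the saved frame.
+ */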
+
+
+void
+rpc_clnt_destroy (struct rpc_clnt *rpc)
+{
+ rpc_clnt_connection_cleanup (&rpc->conn);
+ pthread_mutex_destroy (&rpc->lock);
+ pthread_mutex_destroy (&rpc->conn.lock);
+ GF_FREE (rpc);
+ return;
+}
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpc-clnt.h b/xlators/protocol/rpc/rpc-lib/src/rpc-clnt.h
new file mode 100644
index 00000000000..d962d021307
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpc-clnt.h
@@ -0,0 +1,174 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _RPC_CLNT_H
+#define _RPC_CLNT_H
+
+#include "stack.h"
+#include "rpc-transport.h"
+#include "timer.h"
+#include "glusterfs-xdr.h"
+
+typedef enum {
+ RPC_CLNT_CONNECT,
+ RPC_CLNT_DISCONNECT,
+ RPC_CLNT_MSG
+} rpc_clnt_event_t;
+
+#define AUTH_GLUSTERFS 5
+
+struct xptr_clnt;
+struct rpc_req;
+struct rpc_clnt;
+struct rpc_clnt_config;
+struct rpc_clnt_program;
+
+typedef int (*rpc_clnt_notify_t) (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t fn, void *data);
+
+typedef int (*fop_cbk_fn_t) (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe);
+
+typedef int (*clnt_fn_t) (call_frame_t *, xlator_t *,
+ struct rpc_clnt_program *, void *args);
+
+struct saved_frame {
+ union {
+ struct list_head list;
+ struct {
+ struct saved_frame *frame_next;
+ struct saved_frame *frame_prev;
+ };
+ };
+ void *capital_this;
+ void *frame;
+ struct timeval saved_at;
+ int32_t procnum;
+ struct rpc_clnt_program *prog;
+ uint64_t callid;
+ rpc_transport_rsp_t rsp;
+};
+
+
+struct saved_frames {
+ int64_t count;
+ struct saved_frame sf;
+};
+
+/* TODO: */
+struct xptr_clnt {
+ int remote_port;
+ char * remote_host;
+
+ /* xptr specific */
+ peer_info_t peerinfo;
+};
+
+/* Initialized by procnum */
+typedef struct rpc_clnt_procedure {
+ char *procname;
+ clnt_fn_t fn;
+ fop_cbk_fn_t cbkfn;
+} rpc_clnt_procedure_t;
+
+typedef struct rpc_clnt_program {
+ char *progname;
+ int prognum;
+ int progver;
+ rpc_clnt_procedure_t *actor;
+ int numproc;
+} rpc_clnt_prog_t;
+
+#define RPC_MAX_AUTH_BYTES 400
+typedef struct rpc_auth_data {
+ int flavour;
+ int datalen;
+ char authdata[RPC_MAX_AUTH_BYTES];
+} rpc_auth_data_t;
+
+#define rpc_auth_flavour(au) ((au).flavour)
+
+struct rpc_clnt_connection {
+ pthread_mutex_t lock;
+ rpc_transport_t *trans;
+ gf_timer_t *reconnect;
+ gf_timer_t *timer;
+ gf_timer_t *ping_timer;
+ struct rpc_clnt *rpc_clnt;
+ char connected;
+ struct saved_frames *saved_frames;
+ int32_t frame_timeout;
+ struct timeval last_sent;
+ struct timeval last_received;
+ int32_t ping_started;
+};
+typedef struct rpc_clnt_connection rpc_clnt_connection_t;
+
+struct rpc_req {
+ rpc_clnt_connection_t *conn;
+ uint32_t xid;
+ struct iovec req[2];
+ int reqcnt;
+ struct iovec rsp[2];
+ int rspcnt;
+ struct iobuf *rsp_prochdr;
+ struct iobuf *rsp_procpayload;
+ int rpc_status;
+ rpc_auth_data_t verf;
+ rpc_clnt_prog_t *prog;
+ int procnum;
+};
+
+struct rpc_clnt {
+ pthread_mutex_t lock;
+ rpc_clnt_notify_t notifyfn;
+ rpc_clnt_connection_t conn;
+ void *mydata;
+ uint64_t xid;
+ glusterfs_ctx_t *ctx;
+};
+
+struct rpc_clnt_config {
+ int rpc_timeout;
+ int remote_port;
+ char * remote_host;
+};
+
+
+struct rpc_clnt * rpc_clnt_init (struct rpc_clnt_config *config,
+ dict_t *options, glusterfs_ctx_t *ctx,
+ char *name);
+
+int rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
+ void *mydata);
+
+int rpc_clnt_submit (struct rpc_clnt *rpc, rpc_clnt_prog_t *prog, int procnum,
+ struct iovec *proghdr, int proghdrcount,
+ struct iovec *progpayload, int progpayloadcount,
+ struct iobref *iobref, void *frame);
+
+void rpc_clnt_destroy (struct rpc_clnt *rpc);
+
+void rpc_clnt_set_connected (rpc_clnt_connection_t *conn);
+
+void rpc_clnt_unset_connected (rpc_clnt_connection_t *conn);
+
+void rpc_clnt_reconnect (void *trans_ptr);
+
+#endif /* !_RPC_CLNT_H */
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpc-transport.c b/xlators/protocol/rpc/rpc-lib/src/rpc-transport.c
new file mode 100644
index 00000000000..da3ba3521c7
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpc-transport.c
@@ -0,0 +1,1300 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/poll.h>
+#include <fnmatch.h>
+#include <stdint.h>
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "logging.h"
+#include "rpc-transport.h"
+#include "glusterfs.h"
+/* FIXME: xlator.h is needed for volume_option_t, need to define the datatype
+ * in some other header
+ */
+#include "xlator.h"
+#include "list.h"
+
+#ifndef GF_OPTION_LIST_EMPTY
+#define GF_OPTION_LIST_EMPTY(_opt) ((_opt)->value[0] == NULL)
+#endif
+
+/* RFC 1123 & 952 */
+static char
+valid_host_name (char *address, int length)
+{
+ int i = 0;
+ char ret = 1;
+
+ if ((length > 75) || (length == 1)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!isalnum (address[length - 1])) {
+ ret = 0;
+ goto out;
+ }
+
+ for (i = 0; i < length; i++) {
+ if (!isalnum (address[i]) && (address[i] != '.')
+ && (address[i] != '-')) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static char
+valid_ipv4_address (char *address, int length)
+{
+ int octets = 0;
+ int value = 0;
+ char *tmp = NULL, *ptr = NULL, *prev = NULL, *endptr = NULL;
+ char ret = 1;
+
+ prev = tmp = gf_strdup (address);
+ prev = strtok_r (tmp, ".", &ptr);
+
+ while (prev != NULL)
+ {
+ octets++;
+ value = strtol (prev, &endptr, 10);
+                if ((value > 255) || (value < 0)
+                    || (endptr != NULL && *endptr != '\0')) {
+ ret = 0;
+ goto out;
+ }
+
+ prev = strtok_r (NULL, ".", &ptr);
+ }
+
+ if (octets != 4) {
+ ret = 0;
+ }
+
+out:
+ GF_FREE (tmp);
+ return ret;
+}
+
+
+static char
+valid_ipv6_address (char *address, int length)
+{
+ int hex_numbers = 0;
+ int value = 0;
+ char *tmp = NULL, *ptr = NULL, *prev = NULL, *endptr = NULL;
+ char ret = 1;
+
+ tmp = gf_strdup (address);
+ prev = strtok_r (tmp, ":", &ptr);
+
+ while (prev != NULL)
+ {
+ hex_numbers++;
+ value = strtol (prev, &endptr, 16);
+ if ((value > 0xffff) || (value < 0)
+ || (endptr != NULL && *endptr != '\0')) {
+ ret = 0;
+ goto out;
+ }
+
+ prev = strtok_r (NULL, ":", &ptr);
+ }
+
+ if (hex_numbers > 8) {
+ ret = 0;
+ }
+
+out:
+ GF_FREE (tmp);
+ return ret;
+}
+
+
+static char
+valid_internet_address (char *address)
+{
+ char ret = 0;
+ int length = 0;
+
+ if (address == NULL) {
+ goto out;
+ }
+
+ length = strlen (address);
+ if (length == 0) {
+ goto out;
+ }
+
+ if (valid_ipv4_address (address, length)
+ || valid_ipv6_address (address, length)
+ || valid_host_name (address, length)) {
+ ret = 1;
+ }
+
+out:
+ return ret;
+}
+
+
+int
+__volume_option_value_validate (char *name,
+ data_pair_t *pair,
+ volume_option_t *opt)
+{
+ int i = 0;
+ int ret = -1;
+ uint64_t input_size = 0;
+ long long inputll = 0;
+
+ /* Key is valid, validate the option */
+ switch (opt->type) {
+ case GF_OPTION_TYPE_XLATOR:
+ break;
+
+ case GF_OPTION_TYPE_PATH:
+ {
+ if (strstr (pair->value->data, "../")) {
+ gf_log (name, GF_LOG_ERROR,
+ "invalid path given '%s'",
+ pair->value->data);
+ ret = -1;
+ goto out;
+ }
+
+ /* Make sure the given path is valid */
+ if (pair->value->data[0] != '/') {
+ gf_log (name, GF_LOG_WARNING,
+ "option %s %s: '%s' is not an "
+ "absolute path name",
+ pair->key, pair->value->data,
+ pair->value->data);
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_INT:
+ {
+ /* Check the range */
+ if (gf_string2longlong (pair->value->data,
+ &inputll) != 0) {
+ gf_log (name, GF_LOG_ERROR,
+ "invalid number format \"%s\" in "
+ "\"option %s\"",
+ pair->value->data, pair->key);
+ goto out;
+ }
+
+ if ((opt->min == 0) && (opt->max == 0)) {
+ gf_log (name, GF_LOG_DEBUG,
+ "no range check required for "
+ "'option %s %s'",
+ pair->key, pair->value->data);
+ ret = 0;
+ break;
+ }
+ if ((inputll < opt->min) ||
+ (inputll > opt->max)) {
+ gf_log (name, GF_LOG_WARNING,
+ "'%lld' in 'option %s %s' is out of "
+ "range [%"PRId64" - %"PRId64"]",
+ inputll, pair->key,
+ pair->value->data,
+ opt->min, opt->max);
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_SIZET:
+ {
+ /* Check the range */
+ if (gf_string2bytesize (pair->value->data,
+ &input_size) != 0) {
+ gf_log (name, GF_LOG_ERROR,
+ "invalid size format \"%s\" in "
+ "\"option %s\"",
+ pair->value->data, pair->key);
+ goto out;
+ }
+
+ if ((opt->min == 0) && (opt->max == 0)) {
+ gf_log (name, GF_LOG_DEBUG,
+ "no range check required for "
+ "'option %s %s'",
+ pair->key, pair->value->data);
+ ret = 0;
+ break;
+ }
+ if ((input_size < opt->min) ||
+ (input_size > opt->max)) {
+ gf_log (name, GF_LOG_ERROR,
+ "'%"PRId64"' in 'option %s %s' is "
+ "out of range [%"PRId64" - %"PRId64"]",
+ input_size, pair->key,
+ pair->value->data,
+ opt->min, opt->max);
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_BOOL:
+ {
+ /* Check if the value is one of
+ '0|1|on|off|no|yes|true|false|enable|disable' */
+ gf_boolean_t bool_value;
+ if (gf_string2boolean (pair->value->data,
+ &bool_value) != 0) {
+ gf_log (name, GF_LOG_ERROR,
+ "option %s %s: '%s' is not a valid "
+ "boolean value",
+ pair->key, pair->value->data,
+ pair->value->data);
+ goto out;
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_STR:
+ {
+ /* Check if the '*str' is valid */
+ if (GF_OPTION_LIST_EMPTY(opt)) {
+ ret = 0;
+ goto out;
+ }
+
+ for (i = 0; (i < ZR_OPTION_MAX_ARRAY_SIZE) &&
+ opt->value[i]; i++) {
+ if (strcasecmp (opt->value[i],
+ pair->value->data) == 0) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if ((i == ZR_OPTION_MAX_ARRAY_SIZE)
+ || ((i < ZR_OPTION_MAX_ARRAY_SIZE)
+ && (!opt->value[i]))) {
+ /* enter here only if
+ * 1. reached end of opt->value array and haven't
+ * validated input
+ * OR
+ * 2. valid input list is less than
+ * ZR_OPTION_MAX_ARRAY_SIZE and input has not
+ * matched all possible input values.
+ */
+ char given_array[4096] = {0,};
+ for (i = 0; (i < ZR_OPTION_MAX_ARRAY_SIZE) &&
+ opt->value[i]; i++) {
+ strcat (given_array, opt->value[i]);
+ strcat (given_array, ", ");
+ }
+
+ gf_log (name, GF_LOG_ERROR,
+ "option %s %s: '%s' is not valid "
+ "(possible options are %s)",
+ pair->key, pair->value->data,
+ pair->value->data, given_array);
+
+ goto out;
+ }
+ }
+ break;
+ case GF_OPTION_TYPE_PERCENT:
+ {
+ uint32_t percent = 0;
+
+
+ /* Check if the value is valid percentage */
+ if (gf_string2percent (pair->value->data,
+ &percent) != 0) {
+ gf_log (name, GF_LOG_ERROR,
+ "invalid percent format \"%s\" "
+ "in \"option %s\"",
+ pair->value->data, pair->key);
+ goto out;
+ }
+
+                if (percent > 100) {
+ gf_log (name, GF_LOG_ERROR,
+ "'%d' in 'option %s %s' is out of "
+ "range [0 - 100]",
+ percent, pair->key,
+ pair->value->data);
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_PERCENT_OR_SIZET:
+ {
+ uint32_t percent = 0;
+ uint64_t input_size = 0;
+
+ /* Check if the value is valid percentage */
+ if (gf_string2percent (pair->value->data,
+ &percent) == 0) {
+ if (percent > 100) {
+ gf_log (name, GF_LOG_DEBUG,
+ "value given was greater than 100, "
+ "assuming this is actually a size");
+ if (gf_string2bytesize (pair->value->data,
+ &input_size) == 0) {
+ /* Check the range */
+ if ((opt->min == 0) &&
+ (opt->max == 0)) {
+ gf_log (name, GF_LOG_DEBUG,
+ "no range check "
+ "required for "
+ "'option %s %s'",
+ pair->key,
+ pair->value->data);
+ // It is a size
+ ret = 0;
+ goto out;
+ }
+ if ((input_size < opt->min) ||
+ (input_size > opt->max)) {
+ gf_log (name, GF_LOG_ERROR,
+ "'%"PRId64"' in "
+ "'option %s %s' is out"
+ " of range [%"PRId64""
+ "- %"PRId64"]",
+ input_size, pair->key,
+ pair->value->data,
+ opt->min, opt->max);
+ }
+ // It is a size
+ ret = 0;
+ goto out;
+ } else {
+ // It's not a percent or size
+ gf_log (name, GF_LOG_ERROR,
+ "invalid number format \"%s\" "
+ "in \"option %s\"",
+ pair->value->data, pair->key);
+ }
+
+ }
+ // It is a percent
+ ret = 0;
+ goto out;
+ } else {
+ if (gf_string2bytesize (pair->value->data,
+ &input_size) == 0) {
+ /* Check the range */
+ if ((opt->min == 0) && (opt->max == 0)) {
+ gf_log (name, GF_LOG_DEBUG,
+ "no range check required for "
+ "'option %s %s'",
+ pair->key, pair->value->data);
+ // It is a size
+ ret = 0;
+ goto out;
+ }
+ if ((input_size < opt->min) ||
+ (input_size > opt->max)) {
+ gf_log (name, GF_LOG_ERROR,
+ "'%"PRId64"' in 'option %s %s'"
+ " is out of range [%"PRId64" -"
+ " %"PRId64"]",
+ input_size, pair->key,
+ pair->value->data,
+ opt->min, opt->max);
+ }
+ } else {
+ // It's not a percent or size
+ gf_log (name, GF_LOG_ERROR,
+ "invalid number format \"%s\" "
+ "in \"option %s\"",
+ pair->value->data, pair->key);
+ }
+ //It is a size
+ ret = 0;
+ goto out;
+ }
+
+ }
+ break;
+ case GF_OPTION_TYPE_TIME:
+ {
+ uint32_t input_time = 0;
+
+                /* Check if the value is a valid time */
+ if (gf_string2time (pair->value->data,
+ &input_time) != 0) {
+ gf_log (name,
+ GF_LOG_ERROR,
+ "invalid time format \"%s\" in "
+ "\"option %s\"",
+ pair->value->data, pair->key);
+ goto out;
+ }
+
+ if ((opt->min == 0) && (opt->max == 0)) {
+ gf_log (name, GF_LOG_DEBUG,
+ "no range check required for "
+ "'option %s %s'",
+ pair->key, pair->value->data);
+ ret = 0;
+ goto out;
+ }
+ if ((input_time < opt->min) ||
+ (input_time > opt->max)) {
+ gf_log (name, GF_LOG_ERROR,
+ "'%"PRIu32"' in 'option %s %s' is "
+ "out of range [%"PRId64" - %"PRId64"]",
+ input_time, pair->key,
+ pair->value->data,
+ opt->min, opt->max);
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_DOUBLE:
+ {
+ double input_time = 0.0;
+
+ /* Check if the value is valid double */
+ if (gf_string2double (pair->value->data,
+ &input_time) != 0) {
+ gf_log (name,
+ GF_LOG_ERROR,
+ "invalid time format \"%s\" in \"option %s\"",
+ pair->value->data, pair->key);
+ goto out;
+ }
+
+ if (input_time < 0.0) {
+ gf_log (name,
+ GF_LOG_ERROR,
+ "invalid time format \"%s\" in \"option %s\"",
+ pair->value->data, pair->key);
+ goto out;
+ }
+
+ if ((opt->min == 0) && (opt->max == 0)) {
+ gf_log (name, GF_LOG_DEBUG,
+ "no range check required for 'option %s %s'",
+ pair->key, pair->value->data);
+ ret = 0;
+ goto out;
+ }
+ ret = 0;
+ }
+ break;
+ case GF_OPTION_TYPE_INTERNET_ADDRESS:
+ {
+ if (valid_internet_address (pair->value->data)) {
+ ret = 0;
+ }
+ }
+ break;
+ case GF_OPTION_TYPE_ANY:
+ /* NO CHECK */
+ ret = 0;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+/* FIXME: this procedure should be removed from transport */
+int
+validate_volume_options (char *name, dict_t *options, volume_option_t *opt)
+{
+ int i = 0;
+ int ret = -1;
+ int index = 0;
+ volume_option_t *trav = NULL;
+ data_pair_t *pairs = NULL;
+
+ if (!opt) {
+ ret = 0;
+ goto out;
+ }
+
+ /* First search for not supported options, if any report error */
+ pairs = options->members_list;
+ while (pairs) {
+ ret = -1;
+ for (index = 0;
+ opt[index].key && opt[index].key[0] ; index++) {
+ trav = &(opt[index]);
+ for (i = 0 ;
+ (i < ZR_VOLUME_MAX_NUM_KEY) &&
+ trav->key[i]; i++) {
+ /* Check if the key is valid */
+ if (fnmatch (trav->key[i],
+ pairs->key, FNM_NOESCAPE) == 0) {
+ ret = 0;
+ break;
+ }
+ }
+ if (!ret) {
+ if (i) {
+ gf_log (name, GF_LOG_WARNING,
+ "option '%s' is deprecated, "
+ "preferred is '%s', continuing"
+ " with correction",
+ trav->key[i], trav->key[0]);
+ /* TODO: some bytes lost */
+ pairs->key = gf_strdup (trav->key[0]);
+ }
+ break;
+ }
+ }
+ if (!ret) {
+ ret = __volume_option_value_validate (name, pairs, trav);
+ if (-1 == ret) {
+ goto out;
+ }
+ }
+
+ pairs = pairs->next;
+ }
+
+ ret = 0;
+ out:
+ return ret;
+}
+
+int32_t
+rpc_transport_get_myaddr (rpc_transport_t *this, char *peeraddr, int addrlen,
+ struct sockaddr *sa, size_t salen)
+{
+ if (!this)
+ return -1;
+
+ return this->ops->get_myaddr (this, peeraddr, addrlen, sa, salen);
+}
+
+int32_t
+rpc_transport_get_myname (rpc_transport_t *this, char *hostname, int hostlen)
+{
+ if (!this)
+ return -1;
+
+ return this->ops->get_myname (this, hostname, hostlen);
+}
+
+int32_t
+rpc_transport_get_peername (rpc_transport_t *this, char *hostname, int hostlen)
+{
+ if (!this)
+ return -1;
+ return this->ops->get_peername (this, hostname, hostlen);
+}
+
+int32_t
+rpc_transport_get_peeraddr (rpc_transport_t *this, char *peeraddr, int addrlen,
+ struct sockaddr *sa, size_t salen)
+{
+ if (!this)
+ return -1;
+ return this->ops->get_peeraddr (this, peeraddr, addrlen, sa, salen);
+}
+
+void
+rpc_transport_pollin_destroy (rpc_transport_pollin_t *pollin)
+{
+ if (!pollin) {
+ goto out;
+ }
+
+ if (pollin->vectored) {
+ if (pollin->data.vector.iobuf1) {
+ iobuf_unref (pollin->data.vector.iobuf1);
+ }
+
+ if (pollin->data.vector.iobuf2) {
+ iobuf_unref (pollin->data.vector.iobuf2);
+ }
+ } else {
+ if (pollin->data.simple.iobuf) {
+ iobuf_unref (pollin->data.simple.iobuf);
+ }
+ }
+
+ if (pollin->private) {
+ GF_FREE (pollin->private);
+ }
+
+ GF_FREE (pollin);
+out:
+ return;
+}
+
+
+rpc_transport_pollin_t *
+rpc_transport_pollin_alloc (rpc_transport_t *this, struct iobuf *iobuf,
+ size_t size, struct iobuf *vectored_buf,
+ size_t vectored_size, void *private)
+{
+ rpc_transport_pollin_t *msg = NULL;
+ msg = GF_CALLOC (1, sizeof (*msg), 0);
+ if (!msg) {
+ gf_log ("rpc-transport", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ if (vectored_buf) {
+ msg->vectored = 1;
+ msg->data.vector.iobuf1 = iobuf_ref (iobuf);
+ msg->data.vector.size1 = size;
+
+ msg->data.vector.iobuf2 = iobuf_ref (vectored_buf);
+ msg->data.vector.size2 = vectored_size;
+ } else {
+ msg->data.simple.iobuf = iobuf_ref (iobuf);
+ msg->data.simple.size = size;
+ }
+
+ msg->private = private;
+out:
+ return msg;
+}
+
+
+rpc_transport_pollin_t *
+rpc_transport_same_process_pollin_alloc (rpc_transport_t *this,
+ struct iovec *rpchdr, int rpchdrcount,
+ struct iovec *proghdr,
+ int proghdrcount,
+ struct iovec *progpayload,
+ int progpayloadcount,
+ rpc_transport_rsp_t *rsp,
+ char is_request)
+{
+ rpc_transport_pollin_t *msg = NULL;
+ int rpchdrlen = 0, proghdrlen = 0;
+ int progpayloadlen = 0;
+ char vectored = 0;
+ char *hdr = NULL, *progpayloadbuf = NULL;
+
+ if (!rpchdr || !proghdr) {
+ goto err;
+ }
+
+ msg = GF_CALLOC (1, sizeof (*msg), 0);
+ if (!msg) {
+ gf_log ("rpc-transport", GF_LOG_ERROR, "out of memory");
+ goto err;
+ }
+
+ rpchdrlen = iov_length (rpchdr, rpchdrcount);
+ proghdrlen = iov_length (proghdr, proghdrcount);
+
+ if (progpayload) {
+ vectored = 1;
+ progpayloadlen = iov_length (progpayload, progpayloadcount);
+ }
+
+ /* FIXME: we are assuming rpchdr and proghdr will fit into
+ * an iobuf (128KB)
+ */
+ if ((rpchdrlen + proghdrlen) > this->ctx->page_size) {
+                gf_log ("rpc_transport", GF_LOG_DEBUG, "rpc hdr and program "
+                        "hdr combined (%d) is bigger than "
+                        "iobuf size (%zu)", (rpchdrlen + proghdrlen),
+                        this->ctx->page_size);
+ goto err;
+ }
+
+ if (vectored) {
+ msg->data.vector.iobuf1 = iobuf_get (this->ctx->iobuf_pool);
+ if (!msg->data.vector.iobuf1) {
+ gf_log ("rpc_transport", GF_LOG_ERROR,
+ "out of memory");
+ goto err;
+ }
+
+ msg->data.vector.size1 = rpchdrlen + proghdrlen;
+ hdr = iobuf_ptr (msg->data.vector.iobuf1);
+
+ if (!is_request && rsp) {
+ msg->data.vector.iobuf2 = rsp->rspbuf;
+ progpayloadbuf = rsp->rspvec->iov_base;
+ } else {
+ msg->data.vector.iobuf2 = iobuf_get (this->ctx->iobuf_pool);
+ if (!msg->data.vector.iobuf2) {
+ gf_log ("rpc_transport", GF_LOG_ERROR,
+ "out of memory");
+ goto err;
+ }
+
+ progpayloadbuf = iobuf_ptr (msg->data.vector.iobuf2);
+ }
+ msg->data.vector.size2 = progpayloadlen;
+ } else {
+ if (!is_request && rsp) {
+ /* FIXME: Assuming rspvec contains only one vector */
+ hdr = rsp->rspvec->iov_base;
+ msg->data.simple.iobuf = rsp->rspbuf;
+ } else {
+ msg->data.simple.iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!msg->data.simple.iobuf) {
+ gf_log ("rpc_transport", GF_LOG_ERROR,
+ "out of memory");
+ goto err;
+ }
+
+ hdr = iobuf_ptr (msg->data.simple.iobuf);
+ }
+
+ msg->data.simple.size = rpchdrlen + proghdrlen;
+ }
+
+ iov_unload (hdr, rpchdr, rpchdrcount);
+ hdr += rpchdrlen;
+ iov_unload (hdr, proghdr, proghdrcount);
+
+ if (progpayload) {
+ iov_unload (progpayloadbuf, progpayload,
+ progpayloadcount);
+ }
+
+ if (is_request) {
+ msg->private = rsp;
+ }
+ return msg;
+err:
+ if (msg) {
+ rpc_transport_pollin_destroy (msg);
+ }
+
+ return NULL;
+}
+
+
+rpc_transport_handover_t *
+rpc_transport_handover_alloc (rpc_transport_pollin_t *pollin)
+{
+ rpc_transport_handover_t *msg = NULL;
+
+ msg = GF_CALLOC (1, sizeof (*msg), 0);
+ if (!msg) {
+ gf_log ("rpc_transport", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ msg->pollin = pollin;
+ INIT_LIST_HEAD (&msg->list);
+out:
+ return msg;
+}
+
+
+void
+rpc_transport_handover_destroy (rpc_transport_handover_t *msg)
+{
+ if (!msg) {
+ goto out;
+ }
+
+ if (msg->pollin) {
+ rpc_transport_pollin_destroy (msg->pollin);
+ }
+
+ GF_FREE (msg);
+
+out:
+ return;
+}
+
+
+rpc_transport_t *
+rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
+{
+ struct rpc_transport *trans = NULL, *return_trans = NULL;
+ char *name = NULL;
+ void *handle = NULL;
+ char *type = NULL;
+ char str[] = "ERROR";
+ int32_t ret = -1;
+ int8_t is_tcp = 0, is_unix = 0, is_ibsdp = 0;
+ volume_opt_list_t *vol_opt = NULL;
+
+ GF_VALIDATE_OR_GOTO("rpc-transport", options, fail);
+ GF_VALIDATE_OR_GOTO("rpc-transport", ctx, fail);
+ GF_VALIDATE_OR_GOTO("rpc-transport", trans_name, fail);
+
+ trans = GF_CALLOC (1, sizeof (struct rpc_transport), 0);
+ GF_VALIDATE_OR_GOTO("rpc-transport", trans, fail);
+
+ trans->name = gf_strdup (trans_name);
+ GF_VALIDATE_OR_GOTO ("rpc-transport", trans->name, fail);
+
+ trans->ctx = ctx;
+ type = str;
+
+ /* Backward compatibility */
+ ret = dict_get_str (options, "rpc-transport-type", &type);
+ if (ret < 0) {
+ ret = dict_set_str (options, "rpc-transport-type", "socket");
+ if (ret < 0)
+ gf_log ("dict", GF_LOG_DEBUG,
+ "setting rpc-transport-type failed");
+ gf_log ("rpc-transport", GF_LOG_WARNING,
+ "missing 'option rpc-transport-type'. defaulting to "
+ "\"socket\"");
+ } else {
+ {
+                        /* Backward compatibility to handle * /client,
+ * * /server.
+ */
+ char *tmp = strchr (type, '/');
+ if (tmp)
+ *tmp = '\0';
+ }
+
+ is_tcp = strcmp (type, "tcp");
+ is_unix = strcmp (type, "unix");
+ is_ibsdp = strcmp (type, "ib-sdp");
+ if ((is_tcp == 0) ||
+ (is_unix == 0) ||
+ (is_ibsdp == 0)) {
+ if (is_unix == 0)
+ ret = dict_set_str (options,
+ "rpc-transport.address-family",
+ "unix");
+ if (is_ibsdp == 0)
+ ret = dict_set_str (options,
+ "rpc-transport.address-family",
+ "inet-sdp");
+
+ if (ret < 0)
+ gf_log ("dict", GF_LOG_DEBUG,
+ "setting address-family failed");
+
+ ret = dict_set_str (options,
+ "rpc-transport-type", "socket");
+ if (ret < 0)
+ gf_log ("dict", GF_LOG_DEBUG,
+ "setting rpc-transport-type failed");
+ }
+ }
+
+ ret = dict_get_str (options, "rpc-transport-type", &type);
+ if (ret < 0) {
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "'option rpc-transport-type <xx>' missing in volume '%s'",
+ trans_name);
+ goto fail;
+ }
+
+ ret = gf_asprintf (&name, "%s/%s.so", RPC_TRANSPORTDIR, type);
+ if (-1 == ret) {
+ gf_log ("rpc-transport", GF_LOG_ERROR, "asprintf failed");
+ goto fail;
+ }
+ gf_log ("rpc-transport", GF_LOG_DEBUG,
+ "attempt to load file %s", name);
+
+ handle = dlopen (name, RTLD_NOW|RTLD_GLOBAL);
+ if (handle == NULL) {
+ gf_log ("rpc-transport", GF_LOG_ERROR, "%s", dlerror ());
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "volume '%s': rpc-transport-type '%s' is not valid or "
+ "not found on this machine",
+ trans_name, type);
+ goto fail;
+ }
+
+ trans->ops = dlsym (handle, "tops");
+ if (trans->ops == NULL) {
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "dlsym (rpc_transport_ops) on %s", dlerror ());
+ goto fail;
+ }
+
+ trans->init = dlsym (handle, "init");
+ if (trans->init == NULL) {
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "dlsym (gf_rpc_transport_init) on %s", dlerror ());
+ goto fail;
+ }
+
+ trans->fini = dlsym (handle, "fini");
+ if (trans->fini == NULL) {
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "dlsym (gf_rpc_transport_fini) on %s", dlerror ());
+ goto fail;
+ }
+
+ vol_opt = GF_CALLOC (1, sizeof (volume_opt_list_t), 0);
+ if (!vol_opt) {
+ gf_log (trans_name, GF_LOG_ERROR, "out of memory");
+ goto fail;
+ }
+
+ vol_opt->given_opt = dlsym (handle, "options");
+ if (vol_opt->given_opt == NULL) {
+ gf_log ("rpc-transport", GF_LOG_DEBUG,
+ "volume option validation not specified");
+ } else {
+ /* FIXME: is adding really needed? */
+ /* list_add_tail (&vol_opt->list, &xl->volume_options); */
+ if (-1 ==
+ validate_volume_options (trans_name, options,
+ vol_opt->given_opt)) {
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "volume option validation failed");
+ goto fail;
+ }
+ }
+
+ ret = trans->init (trans);
+ if (ret != 0) {
+ gf_log ("rpc-transport", GF_LOG_ERROR,
+ "'%s' initialization failed", type);
+ goto fail;
+ }
+
+ trans->options = options;
+
+ pthread_mutex_init (&trans->lock, NULL);
+ return_trans = trans;
+ return return_trans;
+
+fail:
+ if (trans) {
+ if (trans->name) {
+ GF_FREE (trans->name);
+ }
+
+ GF_FREE (trans);
+ }
+
+ if (name) {
+ GF_FREE (name);
+ }
+
+ if (vol_opt) {
+ GF_FREE (vol_opt);
+ }
+
+ return NULL;
+}
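+
+/* For reference, rpc_transport_load() above expects the transport shared
+ * object (for example socket.so under RPC_TRANSPORTDIR) to export the
+ * following symbols, resolved via dlsym(); this is only a sketch of the
+ * expected shape, not code from any particular transport:
+ *
+ *     struct rpc_transport_ops tops = { .submit_request = ..., };
+ *     int32_t init (rpc_transport_t *this);
+ *     void    fini (rpc_transport_t *this);
+ *     volume_option_t options[];     // optional; validated when present
+ */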
+
+
+int32_t
+rpc_transport_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
+{
+ int32_t ret = -1;
+ rpc_transport_t *peer_trans = NULL;
+ rpc_transport_pollin_t *pollin = NULL;
+ rpc_transport_handover_t *handover_msg = NULL;
+ rpc_transport_rsp_t *rsp = NULL;
+
+        GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+        if (this->peer_trans) {
+ peer_trans = this->peer_trans;
+
+ rsp = GF_CALLOC (1, sizeof (*rsp), 0);
+ if (!rsp) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ *rsp = req->rsp;
+
+ pollin = rpc_transport_same_process_pollin_alloc (this, req->msg.rpchdr,
+ req->msg.rpchdrcount,
+ req->msg.proghdr,
+ req->msg.proghdrcount,
+ req->msg.progpayload,
+ req->msg.progpayloadcount,
+ rsp, 1);
+ if (!pollin) {
+ GF_FREE (rsp);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ handover_msg = rpc_transport_handover_alloc (pollin);
+ if (!handover_msg) {
+ rpc_transport_pollin_destroy (pollin);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ pthread_mutex_lock (&peer_trans->handover.mutex);
+ {
+ list_add_tail (&handover_msg->list,
+ &peer_trans->handover.msgs);
+ pthread_cond_broadcast (&peer_trans->handover.cond);
+ }
+ pthread_mutex_unlock (&peer_trans->handover.mutex);
+
+ return 0;
+ }
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+ GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail);
+
+ ret = this->ops->submit_request (this, req);
+fail:
+ return ret;
+}
+
+
+int32_t
+rpc_transport_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
+{
+ int32_t ret = -1;
+ rpc_transport_t *peer_trans = NULL;
+ rpc_transport_pollin_t *pollin = NULL;
+ rpc_transport_handover_t *handover_msg = NULL;
+
+        GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+        if (this->peer_trans) {
+ peer_trans = this->peer_trans;
+
+ pollin = rpc_transport_same_process_pollin_alloc (this, reply->msg.rpchdr,
+ reply->msg.rpchdrcount,
+ reply->msg.proghdr,
+ reply->msg.proghdrcount,
+ reply->msg.progpayload,
+ reply->msg.progpayloadcount,
+ reply->private, 0);
+ if (!pollin) {
+ return -ENOMEM;
+ }
+
+ handover_msg = rpc_transport_handover_alloc (pollin);
+ if (!handover_msg) {
+ rpc_transport_pollin_destroy (pollin);
+ return -ENOMEM;
+ }
+
+ pthread_mutex_lock (&peer_trans->handover.mutex);
+ {
+ list_add_tail (&handover_msg->list,
+ &peer_trans->handover.msgs);
+ pthread_cond_broadcast (&peer_trans->handover.cond);
+ }
+ pthread_mutex_unlock (&peer_trans->handover.mutex);
+
+ return 0;
+ }
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+ GF_VALIDATE_OR_GOTO("rpc_transport", this->ops, fail);
+
+ ret = this->ops->submit_reply (this, reply);
+fail:
+ return ret;
+}
+
+
+int32_t
+rpc_transport_connect (rpc_transport_t *this)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+ ret = this->ops->connect (this);
+fail:
+ return ret;
+}
+
+
+int32_t
+rpc_transport_listen (rpc_transport_t *this)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+ ret = this->ops->listen (this);
+fail:
+ return ret;
+}
+
+
+int32_t
+rpc_transport_disconnect (rpc_transport_t *this)
+{
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+ ret = this->ops->disconnect (this);
+fail:
+ return ret;
+}
+
+
+int32_t
+rpc_transport_destroy (rpc_transport_t *this)
+{
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+ if (this->fini)
+ this->fini (this);
+ pthread_mutex_destroy (&this->lock);
+ GF_FREE (this);
+fail:
+ return ret;
+}
+
+
+rpc_transport_t *
+rpc_transport_ref (rpc_transport_t *this)
+{
+ rpc_transport_t *return_this = NULL;
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+ pthread_mutex_lock (&this->lock);
+ {
+ this->refcount ++;
+ }
+ pthread_mutex_unlock (&this->lock);
+
+ return_this = this;
+fail:
+ return return_this;
+}
+
+
+int32_t
+rpc_transport_unref (rpc_transport_t *this)
+{
+ int32_t refcount = 0;
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO("rpc_transport", this, fail);
+
+ pthread_mutex_lock (&this->lock);
+ {
+ refcount = --this->refcount;
+ }
+ pthread_mutex_unlock (&this->lock);
+
+ if (refcount == 0) {
+ /* xlator_notify (this->xl, GF_EVENT_RPC_TRANSPORT_CLEANUP,
+ this); */
+ rpc_transport_destroy (this);
+ }
+
+ ret = 0;
+fail:
+ return ret;
+}
+
+
+int32_t
+rpc_transport_notify (rpc_transport_t *this, rpc_transport_event_t event,
+ void *data, ...)
+{
+ int32_t ret = -1;
+
+ if (this == NULL) {
+ goto out;
+ }
+
+ //ret = this->notify (this, this->notify_data, event, data);
+ ret = this->notify (this, this->mydata, event, data);
+out:
+ return ret;
+}
+
+
+void *
+rpc_transport_peerproc (void *trans_data)
+{
+ rpc_transport_t *trans = NULL;
+ rpc_transport_handover_t *msg = NULL;
+
+ trans = trans_data;
+
+ while (1) {
+ pthread_mutex_lock (&trans->handover.mutex);
+ {
+ while (list_empty (&trans->handover.msgs))
+ pthread_cond_wait (&trans->handover.cond,
+ &trans->handover.mutex);
+
+ msg = list_entry (trans->handover.msgs.next,
+ rpc_transport_handover_t, list);
+
+ list_del_init (&msg->list);
+ }
+ pthread_mutex_unlock (&trans->handover.mutex);
+
+ rpc_transport_notify (trans, RPC_TRANSPORT_MSG_RECEIVED, msg->pollin);
+ rpc_transport_handover_destroy (msg);
+ }
+}
+
+
+int
+rpc_transport_setpeer (rpc_transport_t *trans, rpc_transport_t *peer_trans)
+{
+ trans->peer_trans = rpc_transport_ref (peer_trans);
+
+ INIT_LIST_HEAD (&trans->handover.msgs);
+ pthread_cond_init (&trans->handover.cond, NULL);
+ pthread_mutex_init (&trans->handover.mutex, NULL);
+ pthread_create (&trans->handover.thread, NULL,
+ rpc_transport_peerproc, trans);
+
+ peer_trans->peer_trans = rpc_transport_ref (trans);
+
+ INIT_LIST_HEAD (&peer_trans->handover.msgs);
+ pthread_cond_init (&peer_trans->handover.cond, NULL);
+ pthread_mutex_init (&peer_trans->handover.mutex, NULL);
+ pthread_create (&peer_trans->handover.thread, NULL,
+ rpc_transport_peerproc, peer_trans);
+
+ return 0;
+}
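+
+/* Illustrative sketch of the same-process ("loopback") wiring set up above,
+ * using hypothetical option dicts; once two transports are made peers, a
+ * submit on one side is queued as a handover message and delivered to the
+ * other side by its peerproc thread:
+ *
+ *     rpc_transport_t *clnt = rpc_transport_load (ctx, clnt_opts, "client");
+ *     rpc_transport_t *srv  = rpc_transport_load (ctx, srv_opts, "server");
+ *     rpc_transport_setpeer (clnt, srv);
+ *
+ * After this, rpc_transport_submit_request (clnt, &req) surfaces on 'srv'
+ * as an RPC_TRANSPORT_MSG_RECEIVED notification (and vice versa for
+ * rpc_transport_submit_reply).
+ */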
+
+
+inline int
+rpc_transport_register_notify (rpc_transport_t *trans,
+ rpc_transport_notify_t notify, void *mydata)
+{
+ int ret = -1;
+
+ if (trans == NULL) {
+ goto out;
+ }
+
+ trans->notify = notify;
+ trans->mydata = mydata;
+
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpc-transport.h b/xlators/protocol/rpc/rpc-lib/src/rpc-transport.h
new file mode 100644
index 00000000000..06ac34a49ed
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpc-transport.h
@@ -0,0 +1,287 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __RPC_TRANSPORT_H__
+#define __RPC_TRANSPORT_H__
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <inttypes.h>
+#include <rpc/auth.h>
+#include <rpc/rpc_msg.h>
+
+/* Given the 4-byte fragment header, returns non-zero if this fragment
+ * is the last fragment for the RPC record being assembled.
+ * The RPC record marking standard defines a 32-bit value as the fragment
+ * header, with the MSB signifying whether the fragment is the last
+ * fragment for the record being assembled.
+ */
+#define RPC_LASTFRAG(fraghdr) ((uint32_t)(fraghdr & 0x80000000U))
+
+/* Given the 4-byte fragment header, extracts the bits that contain
+ * the fragment size.
+ */
+#define RPC_FRAGSIZE(fraghdr) ((uint32_t)(fraghdr & 0x7fffffffU))
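+
+/* For example, a fragment header that reads 0x80000064 after ntohl() decodes
+ * as RPC_LASTFRAG (hdr) != 0 (last fragment of the record) and
+ * RPC_FRAGSIZE (hdr) == 100 bytes of fragment data.
+ */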
+
+#define RPC_FRAGHDR_SIZE 4
+#define RPC_MSGTYPE_SIZE 8
+
+/* size of the msg from the start of call-body till and including credlen */
+#define RPC_CALL_BODY_SIZE 24
+
+#define RPC_REPLY_STATUS_SIZE 4
+
+#define RPC_AUTH_FLAVOUR_N_LENGTH_SIZE 8
+
+#define RPC_ACCEPT_STATUS_LEN 4
+
+struct rpc_transport_ops;
+typedef struct rpc_transport rpc_transport_t;
+
+#include "dict.h"
+#include "compat.h"
+#include "rpcsvc-common.h"
+
+struct peer_info {
+ struct sockaddr_storage sockaddr;
+ socklen_t sockaddr_len;
+ char identifier[UNIX_PATH_MAX];
+};
+typedef struct peer_info peer_info_t;
+
+typedef enum msg_type msg_type_t;
+
+typedef enum {
+ RPC_TRANSPORT_ACCEPT, /* New client has been accepted */
+ RPC_TRANSPORT_DISCONNECT, /* Connection is disconnected */
+ RPC_TRANSPORT_CLEANUP, /* connection is about to be freed */
+ /*RPC_TRANSPORT_READ,*/ /* An event used to enable rpcsvc to instruct
+ * transport the number of bytes to read.
+ * This helps in reading large msgs, wherein
+ * the rpc actors might decide to place the
+ * actor's payload in new iobufs separate
+ * from the rpc header, proghdr and
+ * authentication information. glusterfs/nfs
+                                    * read and write actors are a few examples
+                                    * that might benefit from this. While
+ * reading a single msg, this event may be
+ * delivered more than once.
+ */
+        RPC_TRANSPORT_MAP_XID_REQUEST, /* receiver of this event should send
+ * the prognum and procnum corresponding
+ * to xid.
+ */
+ RPC_TRANSPORT_MSG_RECEIVED, /* Complete rpc msg has been read */
+ RPC_TRANSPORT_CONNECT, /* client is connected to server */
+ RPC_TRANSPORT_MSG_SENT,
+} rpc_transport_event_t;
+
+struct rpc_transport_msg {
+ struct iovec *rpchdr;
+ int rpchdrcount;
+ struct iovec *proghdr;
+ int proghdrcount;
+ struct iovec *progpayload;
+ int progpayloadcount;
+ struct iobref *iobref;
+};
+typedef struct rpc_transport_msg rpc_transport_msg_t;
+
+struct rpc_transport_rsp {
+ /* as of now, the entire rsp payload is read into rspbuf and hence
+ * rspcount is always set to one.
+ */
+ struct iovec *rspvec;
+ int rspcount;
+ struct iobuf *rspbuf;
+};
+typedef struct rpc_transport_rsp rpc_transport_rsp_t;
+
+struct rpc_transport_req {
+ rpc_transport_msg_t msg;
+ rpc_transport_rsp_t rsp;
+};
+typedef struct rpc_transport_req rpc_transport_req_t;
+
+struct rpc_transport_reply {
+ rpc_transport_msg_t msg;
+ void *private;
+};
+typedef struct rpc_transport_reply rpc_transport_reply_t;
+
+struct rpc_request_info {
+ uint32_t xid;
+ int prognum;
+ int progver;
+ int procnum;
+ rpc_transport_rsp_t rsp;
+};
+typedef struct rpc_request_info rpc_request_info_t;
+
+
+struct rpc_transport_pollin {
+ union {
+ struct vectored {
+ struct iobuf *iobuf1;
+ size_t size1;
+ struct iobuf *iobuf2;
+ size_t size2;
+ } vector;
+ struct simple {
+ struct iobuf *iobuf;
+ size_t size;
+ } simple;
+ } data;
+ char vectored;
+ void *private;
+};
+typedef struct rpc_transport_pollin rpc_transport_pollin_t;
+
+typedef int (*rpc_transport_notify_t) (rpc_transport_t *, void *mydata,
+ rpc_transport_event_t, void *data, ...);
+struct rpc_transport {
+ struct rpc_transport_ops *ops;
+ void *private;
+ void *xl_private;
+ void *mydata;
+ pthread_mutex_t lock;
+ int32_t refcount;
+
+ glusterfs_ctx_t *ctx;
+ dict_t *options;
+ char *name;
+ void *dnscache;
+ data_t *buf;
+ int32_t (*init) (rpc_transport_t *this);
+ void (*fini) (rpc_transport_t *this);
+ rpc_transport_notify_t notify;
+ void *notify_data;
+ peer_info_t peerinfo;
+ peer_info_t myinfo;
+
+ rpc_transport_t *peer_trans;
+ struct {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_t thread;
+ struct list_head msgs;
+                /* any request/reply will be transformed into pollin data on
+                 * the peer, hence we build up the pollin data even before
+                 * handing it over to the peer rpc_transport. In order to
+                 * decide whether the pollin data is vectored or simple, we
+                 * follow a simple rule: if there is a progpayload in the
+                 * request/reply, it is considered vectored; otherwise it is
+                 * simple pollin data.
+                 */
+ rpc_transport_pollin_t *msg;
+ } handover;
+};
+
+typedef struct {
+ rpc_transport_pollin_t *pollin;
+ struct list_head list;
+} rpc_transport_handover_t;
+
+struct rpc_transport_ops {
+ /* no need of receive op, msg will be delivered through an event
+ * notification
+ */
+ int32_t (*submit_request) (rpc_transport_t *this,
+ rpc_transport_req_t *req);
+ int32_t (*submit_reply) (rpc_transport_t *this,
+ rpc_transport_reply_t *reply);
+ int32_t (*connect) (rpc_transport_t *this);
+ int32_t (*listen) (rpc_transport_t *this);
+ int32_t (*disconnect) (rpc_transport_t *this);
+ int32_t (*get_peername) (rpc_transport_t *this, char *hostname,
+ int hostlen);
+ int32_t (*get_peeraddr) (rpc_transport_t *this, char *peeraddr,
+ int addrlen, struct sockaddr *sa,
+ socklen_t sasize);
+ int32_t (*get_myname) (rpc_transport_t *this, char *hostname,
+ int hostlen);
+ int32_t (*get_myaddr) (rpc_transport_t *this, char *peeraddr,
+ int addrlen, struct sockaddr *sa,
+ socklen_t sasize);
+};
+
+
+int32_t
+rpc_transport_listen (rpc_transport_t *this);
+
+int32_t
+rpc_transport_connect (rpc_transport_t *this);
+
+int32_t
+rpc_transport_disconnect (rpc_transport_t *this);
+
+int32_t
+rpc_transport_notify (rpc_transport_t *this, rpc_transport_event_t event,
+ void *data, ...);
+
+int32_t
+rpc_transport_submit_request (rpc_transport_t *this, rpc_transport_req_t *req);
+
+int32_t
+rpc_transport_submit_reply (rpc_transport_t *this,
+ rpc_transport_reply_t *reply);
+
+int32_t
+rpc_transport_destroy (rpc_transport_t *this);
+
+rpc_transport_t *
+rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *name);
+
+rpc_transport_t *
+rpc_transport_ref (rpc_transport_t *trans);
+
+int32_t
+rpc_transport_unref (rpc_transport_t *trans);
+
+int
+rpc_transport_setpeer (rpc_transport_t *trans, rpc_transport_t *trans_peer);
+
+int
+rpc_transport_register_notify (rpc_transport_t *trans, rpc_transport_notify_t,
+ void *mydata);
+
+int32_t
+rpc_transport_get_peername (rpc_transport_t *this, char *hostname, int hostlen);
+
+int32_t
+rpc_transport_get_peeraddr (rpc_transport_t *this, char *peeraddr, int addrlen,
+ struct sockaddr *sa, size_t salen);
+
+int32_t
+rpc_transport_get_myname (rpc_transport_t *this, char *hostname, int hostlen);
+
+int32_t
+rpc_transport_get_myaddr (rpc_transport_t *this, char *peeraddr, int addrlen,
+ struct sockaddr *sa, size_t salen);
+
+rpc_transport_pollin_t *
+rpc_transport_pollin_alloc (rpc_transport_t *this, struct iobuf *iobuf,
+ size_t iobuf_size, struct iobuf *vectoriob,
+ size_t vectoriob_size, void *private);
+void
+rpc_transport_pollin_destroy (rpc_transport_pollin_t *pollin);
+
+#endif /* __RPC_TRANSPORT_H__ */
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpcsvc-auth.c b/xlators/protocol/rpc/rpc-lib/src/rpcsvc-auth.c
new file mode 100644
index 00000000000..50a55f5db83
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpcsvc-auth.c
@@ -0,0 +1,409 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include "rpcsvc.h"
+#include "logging.h"
+#include "dict.h"
+
+extern rpcsvc_auth_t *
+rpcsvc_auth_null_init (rpcsvc_t *svc, dict_t *options);
+
+extern rpcsvc_auth_t *
+rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options);
+
+extern rpcsvc_auth_t *
+rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options);
+
+int
+rpcsvc_auth_add_initer (struct list_head *list, char *idfier,
+ rpcsvc_auth_initer_t init)
+{
+ struct rpcsvc_auth_list *new = NULL;
+
+ if ((!list) || (!init) || (!idfier))
+ return -1;
+
+ new = GF_CALLOC (1, sizeof (*new), 0);
+ if (!new) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Memory allocation failed");
+ return -1;
+ }
+
+ new->init = init;
+ strcpy (new->name, idfier);
+ INIT_LIST_HEAD (&new->authlist);
+ list_add_tail (&new->authlist, list);
+ return 0;
+}
+
+
+
+int
+rpcsvc_auth_add_initers (rpcsvc_t *svc)
+{
+ int ret = -1;
+
+ ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs",
+ (rpcsvc_auth_initer_t)
+ rpcsvc_auth_glusterfs_init);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_GLUSTERFS");
+ goto err;
+ }
+
+ ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-unix",
+ (rpcsvc_auth_initer_t)
+ rpcsvc_auth_unix_init);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_UNIX");
+ goto err;
+ }
+
+ ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-null",
+ (rpcsvc_auth_initer_t)
+ rpcsvc_auth_null_init);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add AUTH_NULL");
+ goto err;
+ }
+
+ ret = 0;
+err:
+        return ret;
+}
+
+
+int
+rpcsvc_auth_init_auth (rpcsvc_t *svc, dict_t *options,
+ struct rpcsvc_auth_list *authitem)
+{
+ int ret = -1;
+
+ if ((!svc) || (!options) || (!authitem))
+ return -1;
+
+ if (!authitem->init) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "No init function defined");
+ ret = -1;
+ goto err;
+ }
+
+ authitem->auth = authitem->init (svc, options);
+ if (!authitem->auth) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Registration of auth failed:"
+ " %s", authitem->name);
+ ret = -1;
+ goto err;
+ }
+
+ authitem->enable = 1;
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Authentication enabled: %s",
+ authitem->auth->authname);
+
+ ret = 0;
+err:
+ return ret;
+}
+
+
+int
+rpcsvc_auth_init_auths (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1;
+ struct rpcsvc_auth_list *auth = NULL;
+ struct rpcsvc_auth_list *tmp = NULL;
+
+ if (!svc)
+ return -1;
+
+ if (list_empty (&svc->authschemes)) {
+ gf_log (GF_RPCSVC, GF_LOG_WARNING, "No authentication!");
+ ret = 0;
+ goto err;
+ }
+
+        /* If auth-null, auth-unix and auth-glusterfs are not explicitly
+         * disabled by the user, enable them by default. This is the global
+         * default rule; the user is still allowed to disable them for
+         * particular subvolumes.
+         */
+ if (!dict_get (options, "rpc-auth.auth-null"))
+ ret = dict_set_str (options, "rpc-auth.auth-null", "on");
+
+ if (!dict_get (options, "rpc-auth.auth-unix"))
+ ret = dict_set_str (options, "rpc-auth.auth-unix", "on");
+
+ if (!dict_get (options, "rpc-auth.auth-glusterfs"))
+ ret = dict_set_str (options, "rpc-auth.auth-glusterfs", "on");
+
+ list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) {
+ ret = rpcsvc_auth_init_auth (svc, options, auth);
+ if (ret == -1)
+ goto err;
+ }
+
+ ret = 0;
+err:
+ return ret;
+
+}
+
+int
+rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options)
+{
+ int ret = -1;
+
+ if ((!svc) || (!options))
+ return -1;
+
+ ret = rpcsvc_auth_add_initers (svc);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to add initers");
+ goto out;
+ }
+
+ ret = rpcsvc_auth_init_auths (svc, options);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init auth schemes");
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+
+rpcsvc_auth_t *
+__rpcsvc_auth_get_handler (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ struct rpcsvc_auth_list *auth = NULL;
+ struct rpcsvc_auth_list *tmp = NULL;
+ rpcsvc_t *svc = NULL;
+
+ if (!req)
+ return NULL;
+
+        svc = rpcsvc_request_service (req);
+        if (!svc) {
+                gf_log (GF_RPCSVC, GF_LOG_ERROR,
+                        "request does not have an rpc service attached");
+                return NULL;
+        }
+
+ if (list_empty (&svc->authschemes)) {
+ gf_log (GF_RPCSVC, GF_LOG_WARNING, "No authentication!");
+ ret = 0;
+ goto err;
+ }
+
+ list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) {
+ if (!auth->enable)
+ continue;
+ if (auth->auth->authnum == req->cred.flavour)
+ goto err;
+
+ }
+
+ auth = NULL;
+err:
+ if (auth)
+ return auth->auth;
+ else
+ return NULL;
+}
+
+rpcsvc_auth_t *
+rpcsvc_auth_get_handler (rpcsvc_request_t *req)
+{
+ rpcsvc_auth_t *auth = NULL;
+
+ auth = __rpcsvc_auth_get_handler (req);
+ if (auth)
+ goto ret;
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "No auth handler: %d",
+ req->cred.flavour);
+
+        /* The requested scheme was not available so fall back to the one
+ * scheme that will always be present.
+ */
+ req->cred.flavour = AUTH_NULL;
+ req->verf.flavour = AUTH_NULL;
+ auth = __rpcsvc_auth_get_handler (req);
+ret:
+ return auth;
+}
+
+
+int
+rpcsvc_auth_request_init (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ rpcsvc_auth_t *auth = NULL;
+
+ if (!req)
+ return -1;
+
+ auth = rpcsvc_auth_get_handler (req);
+ if (!auth)
+ goto err;
+ ret = 0;
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth handler: %s", auth->authname);
+        if (auth->authops->request_init)
+ ret = auth->authops->request_init (req, auth->authprivate);
+
+err:
+ return ret;
+}
+
+
+int
+rpcsvc_authenticate (rpcsvc_request_t *req)
+{
+ int ret = RPCSVC_AUTH_REJECT;
+ rpcsvc_auth_t *auth = NULL;
+ int minauth = 0;
+
+ if (!req)
+ return ret;
+
+ //minauth = rpcsvc_request_prog_minauth (req);
+ minauth = 1;
+ if (minauth > rpcsvc_request_cred_flavour (req)) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Auth too weak");
+ rpcsvc_request_set_autherr (req, AUTH_TOOWEAK);
+ goto err;
+ }
+
+ auth = rpcsvc_auth_get_handler (req);
+ if (!auth) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "No auth handler found");
+ goto err;
+ }
+
+ if (auth->authops->authenticate)
+ ret = auth->authops->authenticate (req, auth->authprivate);
+
+err:
+ return ret;
+}
+
+
+int
+rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen)
+{
+ int count = 0;
+ int gen = RPCSVC_AUTH_REJECT;
+ int spec = RPCSVC_AUTH_REJECT;
+ int final = RPCSVC_AUTH_REJECT;
+ char *srchstr = NULL;
+ char *valstr = NULL;
+ gf_boolean_t boolval = _gf_false;
+ int ret = 0;
+
+ struct rpcsvc_auth_list *auth = NULL;
+ struct rpcsvc_auth_list *tmp = NULL;
+
+ if ((!svc) || (!autharr) || (!volname))
+ return -1;
+
+ memset (autharr, 0, arrlen * sizeof(int));
+ if (list_empty (&svc->authschemes)) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "No authentication!");
+ goto err;
+ }
+
+ list_for_each_entry_safe (auth, tmp, &svc->authschemes, authlist) {
+ if (count >= arrlen)
+ break;
+
+ gen = gf_asprintf (&srchstr, "rpc-auth.%s", auth->name);
+ if (gen == -1) {
+ count = -1;
+ goto err;
+ }
+
+ gen = RPCSVC_AUTH_REJECT;
+ if (dict_get (svc->options, srchstr)) {
+ ret = dict_get_str (svc->options, srchstr, &valstr);
+ if (ret == 0) {
+ ret = gf_string2boolean (valstr, &boolval);
+ if (ret == 0) {
+ if (boolval == _gf_true)
+ gen = RPCSVC_AUTH_ACCEPT;
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
+ "d to read auth val");
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
+ "d to read auth val");
+ }
+
+ GF_FREE (srchstr);
+ spec = gf_asprintf (&srchstr, "rpc-auth.%s.%s", auth->name,
+ volname);
+ if (spec == -1) {
+ count = -1;
+ goto err;
+ }
+
+ spec = RPCSVC_AUTH_DONTCARE;
+ if (dict_get (svc->options, srchstr)) {
+ ret = dict_get_str (svc->options, srchstr, &valstr);
+ if (ret == 0) {
+ ret = gf_string2boolean (valstr, &boolval);
+ if (ret == 0) {
+ if (boolval == _gf_true)
+ spec = RPCSVC_AUTH_ACCEPT;
+ else
+ spec = RPCSVC_AUTH_REJECT;
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
+ "d to read auth val");
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Faile"
+ "d to read auth val");
+ }
+
+ GF_FREE (srchstr);
+ final = rpcsvc_combine_gen_spec_volume_checks (gen, spec);
+ if (final == RPCSVC_AUTH_ACCEPT) {
+ autharr[count] = auth->auth->authnum;
+ ++count;
+ }
+ }
+
+err:
+ return count;
+}
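+
+/* Illustrative note (not part of the original change): the dict keys
+ * consulted above, shown for a hypothetical scheme named "auth-unix" and a
+ * hypothetical volume named "testvol". The general key enables a scheme for
+ * all volumes; the volume-specific key overrides it through
+ * rpcsvc_combine_gen_spec_volume_checks():
+ *
+ *     rpc-auth.auth-unix           on
+ *     rpc-auth.auth-unix.testvol   off
+ */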
+
+
+gid_t *
+rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen)
+{
+ if ((!req) || (!arrlen))
+ return NULL;
+
+ if ((req->cred.flavour != AUTH_UNIX) &&
+ (req->cred.flavour != AUTH_GLUSTERFS))
+ return NULL;
+
+ *arrlen = req->auxgidcount;
+ if (*arrlen == 0)
+ return NULL;
+
+ return &req->auxgids[0];
+}
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpcsvc-common.h b/xlators/protocol/rpc/rpc-lib/src/rpcsvc-common.h
new file mode 100644
index 00000000000..0b9d84cfdf0
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpcsvc-common.h
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _RPCSVC_COMMON_H
+#define _RPCSVC_COMMON_H
+
+#include <pthread.h>
+#include "list.h"
+#include "compat.h"
+#include "glusterfs.h"
+#include "dict.h"
+
+typedef enum {
+ RPCSVC_EVENT_ACCEPT,
+ RPCSVC_EVENT_DISCONNECT,
+ RPCSVC_EVENT_LISTENER_DEAD,
+} rpcsvc_event_t;
+
+
+struct rpcsvc_state;
+
+typedef int (*rpcsvc_notify_t) (struct rpcsvc_state *, void *mydata,
+ rpcsvc_event_t, void *data);
+
+
+/* Contains global state required for all the RPC services.
+ */
+typedef struct rpcsvc_state {
+
+ /* Contains list of (program, version) handlers.
+ * other options.
+ */
+
+ pthread_mutex_t rpclock;
+
+ unsigned int memfactor;
+
+ /* List of the authentication schemes available. */
+ struct list_head authschemes;
+
+ /* Reference to the options */
+ dict_t *options;
+
+ /* Allow insecure ports. */
+ int allow_insecure;
+
+ glusterfs_ctx_t *ctx;
+
+ void *listener;
+
+ /* list of connections which will listen for incoming connections */
+ struct list_head listeners;
+
+ /* list of programs registered with rpcsvc */
+ struct list_head programs;
+
+ /* list of notification callbacks */
+ struct list_head notify;
+ int notify_count;
+
+ void *mydata; /* This is xlator */
+ rpcsvc_notify_t notifyfn;
+
+} rpcsvc_t;
+
+
+#endif /* #ifndef _RPCSVC_COMMON_H */
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpcsvc.c b/xlators/protocol/rpc/rpc-lib/src/rpcsvc.c
new file mode 100644
index 00000000000..10d74759cf0
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpcsvc.c
@@ -0,0 +1,2010 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpcsvc.h"
+#include "rpc-transport.h"
+#include "dict.h"
+#include "logging.h"
+#include "byte-order.h"
+#include "common-utils.h"
+#include "compat-errno.h"
+#include "list.h"
+#include "xdr-rpc.h"
+#include "iobuf.h"
+#include "globals.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <arpa/inet.h>
+#include <rpc/xdr.h>
+#include <fnmatch.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+
+#define rpcsvc_alloc_request(con, request) \
+ do { \
+ request = (rpcsvc_request_t *) mem_get ((con)->rxpool); \
+ memset (request, 0, sizeof (rpcsvc_request_t)); \
+ } while (0)
+
+
+int
+rpcsvc_conn_peer_check_search (dict_t *options, char *pattern, char *clstr)
+{
+ int ret = -1;
+ char *addrtok = NULL;
+ char *addrstr = NULL;
+ char *svptr = NULL;
+
+ if ((!options) || (!clstr))
+ return -1;
+
+ if (!dict_get (options, pattern))
+ return -1;
+
+ ret = dict_get_str (options, pattern, &addrstr);
+ if (ret < 0) {
+ ret = -1;
+ goto err;
+ }
+
+ if (!addrstr) {
+ ret = -1;
+ goto err;
+ }
+
+ addrtok = strtok_r (addrstr, ",", &svptr);
+ while (addrtok) {
+
+ ret = fnmatch (addrtok, clstr, FNM_CASEFOLD);
+ if (ret == 0)
+ goto err;
+
+ addrtok = strtok_r (NULL, ",", &svptr);
+ }
+
+ ret = -1;
+err:
+
+ return ret;
+}
+
+
+int
+rpcsvc_conn_peer_check_allow (dict_t *options, char *volname, char *clstr)
+{
+ int ret = RPCSVC_AUTH_DONTCARE;
+ char *srchstr = NULL;
+ char globalrule[] = "rpc-auth.addr.allow";
+
+ if ((!options) || (!clstr))
+ return ret;
+
+ /* If volname is NULL, then we're searching for the general rule to
+ * determine whether the address in clstr is allowed for all
+ * subvolumes.
+ */
+ if (volname) {
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ ret = RPCSVC_AUTH_DONTCARE;
+ goto out;
+ }
+ } else
+ srchstr = globalrule;
+
+ ret = rpcsvc_conn_peer_check_search (options, srchstr, clstr);
+ if (volname)
+ GF_FREE (srchstr);
+
+ if (ret == 0)
+ ret = RPCSVC_AUTH_ACCEPT;
+ else
+ ret = RPCSVC_AUTH_DONTCARE;
+out:
+ return ret;
+}
+
+int
+rpcsvc_conn_peer_check_reject (dict_t *options, char *volname, char *clstr)
+{
+ int ret = RPCSVC_AUTH_DONTCARE;
+ char *srchstr = NULL;
+ char generalrule[] = "rpc-auth.addr.reject";
+
+ if ((!options) || (!clstr))
+ return ret;
+
+ if (volname) {
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.reject", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ ret = RPCSVC_AUTH_REJECT;
+ goto out;
+ }
+ } else
+ srchstr = generalrule;
+
+ ret = rpcsvc_conn_peer_check_search (options, srchstr, clstr);
+ if (volname)
+ GF_FREE (srchstr);
+
+ if (ret == 0)
+ ret = RPCSVC_AUTH_REJECT;
+ else
+ ret = RPCSVC_AUTH_DONTCARE;
+out:
+ return ret;
+}
+
+
+/* This function combines the results of the allow rule and the reject rule
+ * into a single result that determines whether the connection should be
+ * allowed to proceed.
+ * Here is the test matrix this function follows.
+ *
+ * A - Allow, the result of the allow test. Never returns R.
+ * R - Reject, the result of the reject test. Never returns A.
+ * Either can return D, or don't care, if no rule was given.
+ *
+ * | @allow | @reject | Result |
+ * | A | R | R |
+ * | D | D | D |
+ * | A | D | A |
+ * | D | R | R |
+ */
+int
+rpcsvc_combine_allow_reject_volume_check (int allow, int reject)
+{
+ int final = RPCSVC_AUTH_REJECT;
+
+ /* If the allow rule allows but the reject rule rejects, we stay
+ * cautious and reject. */
+ if ((allow == RPCSVC_AUTH_ACCEPT) && (reject == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+ /* If both are don't care, i.e. the user specified neither an allow
+ * nor a reject rule, we leave it to the general rule, in the hope
+ * that there is one.
+ */
+ else if ((allow == RPCSVC_AUTH_DONTCARE) &&
+ (reject == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_DONTCARE;
+ /* If one is don't care, the other one applies. */
+ else if ((allow == RPCSVC_AUTH_ACCEPT) &&
+ (reject == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((allow == RPCSVC_AUTH_DONTCARE) &&
+ (reject == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+
+ return final;
+}
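+
+/* Illustrative example (not part of the original change): with an allow rule
+ * of "*" and a reject rule listing 192.168.1.5 for the same volume, a
+ * connection from 192.168.1.5 yields allow = ACCEPT and reject = REJECT, so
+ * the combined result is REJECT, i.e. reject rules win over allow rules.
+ */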
+
+
+/* Combines the result of the general rule test with the specific rule
+ * to determine the final permission for the client's address.
+ *
+ * | @gen | @spec | Result |
+ * | A | A | A |
+ * | A | R | R |
+ * | A | D | A |
+ * | D | A | A |
+ * | D | R | R |
+ * | D | D | D |
+ * | R | A | A |
+ * | R | D | R |
+ * | R | R | R |
+ */
+int
+rpcsvc_combine_gen_spec_addr_checks (int gen, int spec)
+{
+ int final = RPCSVC_AUTH_REJECT;
+
+ if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_ACCEPT))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+ else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_ACCEPT))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+ else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_DONTCARE;
+ else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_ACCEPT))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_REJECT;
+ else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+
+ return final;
+}
+
+
+
+/* Combines the result of the general rule test with the specific rule
+ * to determine the final verdict for a connection coming in for a given
+ * volume.
+ *
+ * | @gen | @spec | Result |
+ * | A | A | A |
+ * | A | R | R |
+ * | A | D | A |
+ * | D | A | A |
+ * | D | R | R |
+ * | D | D | R |, special case, we intentionally disallow this.
+ * | R | A | A |
+ * | R | D | R |
+ * | R | R | R |
+ */
+int
+rpcsvc_combine_gen_spec_volume_checks (int gen, int spec)
+{
+ int final = RPCSVC_AUTH_REJECT;
+
+ if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_ACCEPT))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+ else if ((gen == RPCSVC_AUTH_ACCEPT) && (spec == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_ACCEPT))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+ /* On no rule, we reject. */
+ else if ((gen == RPCSVC_AUTH_DONTCARE) && (spec == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_REJECT;
+ else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_ACCEPT))
+ final = RPCSVC_AUTH_ACCEPT;
+ else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_DONTCARE))
+ final = RPCSVC_AUTH_REJECT;
+ else if ((gen == RPCSVC_AUTH_REJECT) && (spec == RPCSVC_AUTH_REJECT))
+ final = RPCSVC_AUTH_REJECT;
+
+ return final;
+}
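+
+/* Note (added for clarity, not part of the original change): this table
+ * differs from rpcsvc_combine_gen_spec_addr_checks() only in the D/D row.
+ * When neither a general nor a volume-specific rule applies, the address
+ * variant stays at don't-care, while the volume variant rejects, so a volume
+ * is never exported by accident when no rule was configured at all.
+ */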
+
+
+int
+rpcsvc_conn_peer_check_name (dict_t *options, char *volname,
+ rpcsvc_conn_t *conn)
+{
+ int ret = RPCSVC_AUTH_REJECT;
+ int aret = RPCSVC_AUTH_REJECT;
+ int rjret = RPCSVC_AUTH_REJECT;
+ char clstr[RPCSVC_PEER_STRLEN];
+
+ if (!conn)
+ return ret;
+
+ ret = rpcsvc_conn_peername (conn, clstr, RPCSVC_PEER_STRLEN);
+ if (ret != 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get remote addr: "
+ "%s", gai_strerror (ret));
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ aret = rpcsvc_conn_peer_check_allow (options, volname, clstr);
+ rjret = rpcsvc_conn_peer_check_reject (options, volname, clstr);
+
+ ret = rpcsvc_combine_allow_reject_volume_check (aret, rjret);
+
+err:
+ return ret;
+}
+
+
+int
+rpcsvc_conn_peer_check_addr (dict_t *options, char *volname, rpcsvc_conn_t *conn)
+{
+ int ret = RPCSVC_AUTH_REJECT;
+ int aret = RPCSVC_AUTH_DONTCARE;
+ int rjret = RPCSVC_AUTH_REJECT;
+ char clstr[RPCSVC_PEER_STRLEN];
+
+ if (!conn)
+ return ret;
+
+ ret = rpcsvc_conn_peeraddr (conn, clstr, RPCSVC_PEER_STRLEN, NULL, 0);
+ if (ret != 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get remote addr: "
+ "%s", gai_strerror (ret));
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ aret = rpcsvc_conn_peer_check_allow (options, volname, clstr);
+ rjret = rpcsvc_conn_peer_check_reject (options, volname, clstr);
+
+ ret = rpcsvc_combine_allow_reject_volume_check (aret, rjret);
+err:
+ return ret;
+}
+
+
+int
+rpcsvc_conn_check_volume_specific (dict_t *options, char *volname,
+ rpcsvc_conn_t *conn)
+{
+ int namechk = RPCSVC_AUTH_REJECT;
+ int addrchk = RPCSVC_AUTH_REJECT;
+ gf_boolean_t namelookup = _gf_true;
+ char *namestr = NULL;
+ int ret = 0;
+
+ if ((!options) || (!volname) || (!conn))
+ return RPCSVC_AUTH_REJECT;
+
+ /* Enabled by default */
+ if ((dict_get (options, "rpc-auth.addr.namelookup"))) {
+ ret = dict_get_str (options, "rpc-auth.addr.namelookup"
+ , &namestr);
+ if (ret == 0)
+ ret = gf_string2boolean (namestr, &namelookup);
+ }
+
+ /* We need two separate checks because address rules can be network
+ * addresses, which are general, while hostname rules are specific and
+ * override the network-address rules.
+ */
+ if (namelookup)
+ namechk = rpcsvc_conn_peer_check_name (options, volname, conn);
+ addrchk = rpcsvc_conn_peer_check_addr (options, volname, conn);
+
+ if (namelookup)
+ ret = rpcsvc_combine_gen_spec_addr_checks (addrchk, namechk);
+ else
+ ret = addrchk;
+
+ return ret;
+}
+
+
+int
+rpcsvc_conn_check_volume_general (dict_t *options, rpcsvc_conn_t *conn)
+{
+ int addrchk = RPCSVC_AUTH_REJECT;
+ int namechk = RPCSVC_AUTH_REJECT;
+ gf_boolean_t namelookup = _gf_true;
+ char *namestr = NULL;
+ int ret = 0;
+
+ if ((!options) || (!conn))
+ return RPCSVC_AUTH_REJECT;
+
+ /* Enabled by default */
+ if ((dict_get (options, "rpc-auth.addr.namelookup"))) {
+ ret = dict_get_str (options, "rpc-auth.addr.namelookup"
+ , &namestr);
+ if (ret == 0)
+ ret = gf_string2boolean (namestr, &namelookup);
+ }
+
+ /* We need two separate checks because address rules can be network
+ * addresses, which are general, while hostname rules are specific and
+ * override the network-address rules.
+ */
+ if (namelookup)
+ namechk = rpcsvc_conn_peer_check_name (options, NULL, conn);
+ addrchk = rpcsvc_conn_peer_check_addr (options, NULL, conn);
+
+ if (namelookup)
+ ret = rpcsvc_combine_gen_spec_addr_checks (addrchk, namechk);
+ else
+ ret = addrchk;
+
+ return ret;
+}
+
+int
+rpcsvc_conn_peer_check (dict_t *options, char *volname, rpcsvc_conn_t *conn)
+{
+ int general_chk = RPCSVC_AUTH_REJECT;
+ int specific_chk = RPCSVC_AUTH_REJECT;
+
+ if ((!options) || (!volname) || (!conn))
+ return RPCSVC_AUTH_REJECT;
+
+ general_chk = rpcsvc_conn_check_volume_general (options, conn);
+ specific_chk = rpcsvc_conn_check_volume_specific (options, volname,
+ conn);
+
+ return rpcsvc_combine_gen_spec_volume_checks (general_chk, specific_chk);
+}
+
+
+char *
+rpcsvc_volume_allowed (dict_t *options, char *volname)
+{
+ char globalrule[] = "rpc-auth.addr.allow";
+ char *srchstr = NULL;
+ char *addrstr = NULL;
+ int ret = -1;
+
+ if ((!options) || (!volname))
+ return NULL;
+
+ ret = gf_asprintf (&srchstr, "rpc-auth.addr.%s.allow", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ goto out;
+ }
+
+ if (!dict_get (options, srchstr)) {
+ GF_FREE (srchstr);
+ srchstr = globalrule;
+ ret = dict_get_str (options, srchstr, &addrstr);
+ } else
+ ret = dict_get_str (options, srchstr, &addrstr);
+
+out:
+ return addrstr;
+}
+
+
+
+/* Initialize the core of a connection */
+rpcsvc_conn_t *
+rpcsvc_conn_alloc (rpcsvc_t *svc, rpc_transport_t *trans)
+{
+ rpcsvc_conn_t *conn = NULL;
+ int ret = -1;
+ unsigned int poolcount = 0;
+
+ conn = GF_CALLOC (1, sizeof(*conn), 0);
+ if (!conn) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "memory allocation failed");
+ return NULL;
+ }
+
+ conn->trans = trans;
+ conn->svc = svc;
+ poolcount = RPCSVC_POOLCOUNT_MULT * svc->memfactor;
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "rx pool: %d", poolcount);
+ conn->rxpool = mem_pool_new (rpcsvc_request_t, poolcount);
+ if (!conn->rxpool) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "mem pool allocation failed");
+ goto free_conn;
+ }
+
+ /* Cannot consider a connection connected unless the user of this
+ * connection decides it is ready to use. It is possible that we have
+ * to free this connection soon after. That free will not happen
+ * unless the state is disconnected.
+ */
+ conn->connstate = RPCSVC_CONNSTATE_DISCONNECTED;
+ pthread_mutex_init (&conn->connlock, NULL);
+ conn->connref = 0;
+
+ ret = 0;
+
+free_conn:
+ if (ret == -1) {
+ GF_FREE (conn);
+ conn = NULL;
+ }
+
+ return conn;
+}
+
+int
+rpcsvc_notify (rpc_transport_t *trans, void *mydata,
+ rpc_transport_event_t event, void *data, ...);
+
+void
+rpcsvc_conn_state_init (rpcsvc_conn_t *conn)
+{
+ if (!conn)
+ return;
+
+ ++conn->connref;
+ conn->connstate = RPCSVC_CONNSTATE_CONNECTED;
+}
+
+
+rpcsvc_notify_wrapper_t *
+rpcsvc_notify_wrapper_alloc (void)
+{
+ rpcsvc_notify_wrapper_t *wrapper = NULL;
+
+ wrapper = GF_CALLOC (1, sizeof (*wrapper), 0);
+ if (!wrapper) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "memory allocation failed");
+ goto out;
+ }
+
+ INIT_LIST_HEAD (&wrapper->list);
+out:
+ return wrapper;
+}
+
+
+void
+rpcsvc_listener_destroy (rpcsvc_listener_t *listener)
+{
+ rpcsvc_t *svc = NULL;
+
+ if (!listener) {
+ goto out;
+ }
+
+ if (!listener->conn) {
+ goto listener_free;
+ }
+
+ svc = listener->conn->svc;
+ if (!svc) {
+ goto listener_free;
+ }
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_del_init (&listener->list);
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+listener_free:
+ GF_FREE (listener);
+out:
+ return;
+}
+
+
+void
+rpcsvc_conn_destroy (rpcsvc_conn_t *conn)
+{
+ rpcsvc_notify_wrapper_t *wrapper = NULL;
+ rpcsvc_event_t event = 0;
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_t *svc = NULL;
+ rpcsvc_notify_wrapper_t *wrappers = NULL;
+ int i = 0, wrapper_count = 0;
+
+ if (!conn)
+ goto out;
+
+ mem_pool_destroy (conn->rxpool);
+
+ listener = conn->listener;
+ if (!listener)
+ goto out;
+
+ event = (listener->conn == conn) ? RPCSVC_EVENT_LISTENER_DEAD
+ : RPCSVC_EVENT_DISCONNECT;
+
+ svc = conn->svc;
+ if (!svc)
+ goto out;
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ wrappers = GF_CALLOC (svc->notify_count, sizeof (*wrapper), 0);
+ if (!wrappers) {
+ goto unlock;
+ }
+
+ list_for_each_entry (wrapper, &conn->listener->list,
+ list) {
+ if (wrapper->notify) {
+ wrappers[i++] = *wrapper;
+ }
+ }
+ wrapper_count = i;
+ }
+unlock:
+ pthread_mutex_unlock (&svc->rpclock);
+
+ if (wrappers) {
+ for (i = 0; i < wrapper_count; i++) {
+ wrappers[i].notify (conn->svc, wrappers[i].data,
+ event, conn);
+ }
+
+ GF_FREE (wrappers);
+ }
+
+ if (listener->conn == conn) {
+ rpcsvc_listener_destroy (listener);
+ }
+
+ /* Need to destroy record state, txlists etc. */
+ GF_FREE (conn);
+out:
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Connection destroyed");
+}
+
+
+rpcsvc_conn_t *
+rpcsvc_conn_init (rpcsvc_t *svc, rpc_transport_t *trans)
+{
+ int ret = -1;
+ rpcsvc_conn_t *conn = NULL;
+
+ conn = rpcsvc_conn_alloc (svc, trans);
+ if (!conn) {
+ ret = -1;
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "cannot init a connection");
+ goto out;
+ }
+
+ ret = rpc_transport_register_notify (trans, rpcsvc_notify, conn);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "registering notify failed");
+ rpcsvc_conn_destroy (conn);
+ conn = NULL;
+ goto out;
+ }
+
+ rpcsvc_conn_state_init (conn);
+
+out:
+ return conn;
+}
+
+
+int
+__rpcsvc_conn_unref (rpcsvc_conn_t *conn)
+{
+ --conn->connref;
+ return conn->connref;
+}
+
+
+void
+__rpcsvc_conn_deinit (rpcsvc_conn_t *conn)
+{
+ if (!conn)
+ return;
+
+ if (rpcsvc_conn_check_active (conn)) {
+ conn->connstate = RPCSVC_CONNSTATE_DISCONNECTED;
+ }
+
+ if (conn->trans) {
+ rpc_transport_disconnect (conn->trans);
+ conn->trans = NULL;
+ }
+}
+
+
+void
+rpcsvc_conn_deinit (rpcsvc_conn_t *conn)
+{
+ int ref = 0;
+
+ if (!conn)
+ return;
+
+ pthread_mutex_lock (&conn->connlock);
+ {
+ __rpcsvc_conn_deinit (conn);
+ ref = __rpcsvc_conn_unref (conn);
+ }
+ pthread_mutex_unlock (&conn->connlock);
+
+ if (ref == 0)
+ rpcsvc_conn_destroy (conn);
+
+ return;
+}
+
+
+void
+rpcsvc_conn_unref (rpcsvc_conn_t *conn)
+{
+ int ref = 0;
+ if (!conn)
+ return;
+
+ pthread_mutex_lock (&conn->connlock);
+ {
+ ref = __rpcsvc_conn_unref (conn);
+ }
+ pthread_mutex_unlock (&conn->connlock);
+
+ if (ref == 0) {
+ rpcsvc_conn_destroy (conn);
+ }
+}
+
+
+int
+rpcsvc_conn_active (rpcsvc_conn_t *conn)
+{
+ int status = 0;
+
+ if (!conn)
+ return 0;
+
+ pthread_mutex_lock (&conn->connlock);
+ {
+ status = rpcsvc_conn_check_active (conn);
+ }
+ pthread_mutex_unlock (&conn->connlock);
+
+ return status;
+}
+
+
+void
+rpcsvc_conn_ref (rpcsvc_conn_t *conn)
+{
+ if (!conn)
+ return;
+
+ pthread_mutex_lock (&conn->connlock);
+ {
+ ++conn->connref;
+ }
+ pthread_mutex_unlock (&conn->connlock);
+
+ return;
+}
+
+
+int
+rpcsvc_conn_privport_check (rpcsvc_t *svc, char *volname, rpcsvc_conn_t *conn)
+{
+ struct sockaddr_in sa;
+ int ret = RPCSVC_AUTH_REJECT;
+ socklen_t sasize = sizeof (sa);
+ char *srchstr = NULL;
+ char *valstr = NULL;
+ int globalinsecure = RPCSVC_AUTH_REJECT;
+ int exportinsecure = RPCSVC_AUTH_DONTCARE;
+ uint16_t port = 0;
+ gf_boolean_t insecure = _gf_false;
+
+ if ((!svc) || (!volname) || (!conn))
+ return ret;
+
+ ret = rpcsvc_conn_peeraddr (conn, NULL, 0, (struct sockaddr *)&sa,
+ sasize);
+ if (ret != 0) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get peer addr: %s",
+ gai_strerror (ret));
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ port = ntohs (sa.sin_port);
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port);
+ /* If the port is already a privileged one, don't bother checking
+ * options.
+ */
+ if (port <= 1024) {
+ ret = RPCSVC_AUTH_ACCEPT;
+ goto err;
+ }
+
+ /* Disabled by default */
+ if ((dict_get (svc->options, "rpc-auth.ports.insecure"))) {
+ ret = dict_get_str (svc->options, "rpc-auth.ports.insecure"
+ , &srchstr);
+ if (ret == 0) {
+ ret = gf_string2boolean (srchstr, &insecure);
+ if (ret == 0) {
+ if (insecure == _gf_true)
+ globalinsecure = RPCSVC_AUTH_ACCEPT;
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
+ " read rpc-auth.ports.insecure value");
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
+ " read rpc-auth.ports.insecure value");
+ }
+
+ /* Disabled by default */
+ ret = gf_asprintf (&srchstr, "rpc-auth.ports.%s.insecure", volname);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "asprintf failed");
+ ret = RPCSVC_AUTH_REJECT;
+ goto err;
+ }
+
+ if (dict_get (svc->options, srchstr)) {
+ ret = dict_get_str (svc->options, srchstr, &valstr);
+ if (ret == 0) {
+ ret = gf_string2boolean (valstr, &insecure);
+ if (ret == 0) {
+ if (insecure == _gf_true)
+ exportinsecure = RPCSVC_AUTH_ACCEPT;
+ else
+ exportinsecure = RPCSVC_AUTH_REJECT;
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
+ " read rpc-auth.ports.insecure value");
+ } else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to"
+ " read rpc-auth.ports.insecure value");
+ }
+
+ ret = rpcsvc_combine_gen_spec_volume_checks (globalinsecure,
+ exportinsecure);
+ if (ret == RPCSVC_AUTH_ACCEPT)
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port allowed");
+ else
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Unprivileged port not"
+ " allowed");
+
+err:
+ return ret;
+}
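+
+/* Illustrative note (not part of the original change): the dict keys
+ * consulted above, shown for a hypothetical volume named "testvol". Ports up
+ * to 1024 are accepted unconditionally; otherwise the general and per-volume
+ * settings are combined with rpcsvc_combine_gen_spec_volume_checks():
+ *
+ *     rpc-auth.ports.insecure            on
+ *     rpc-auth.ports.testvol.insecure    off
+ */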
+
+
+/* This needs to change to returning errors, since
+ * we need to return RPC specific error messages when some
+ * of the pointers below are NULL.
+ */
+rpcsvc_actor_t *
+rpcsvc_program_actor (rpcsvc_conn_t *conn, rpcsvc_request_t *req)
+{
+ rpcsvc_program_t *program = NULL;
+ int err = SYSTEM_ERR;
+ rpcsvc_actor_t *actor = NULL;
+ rpcsvc_t *svc = NULL;
+ char found = 0;
+
+ if ((!conn) || (!req))
+ goto err;
+
+ svc = conn->svc;
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_for_each_entry (program, &svc->programs, program) {
+ if (program->prognum == req->prognum) {
+ err = PROG_MISMATCH;
+ }
+
+ if ((program->prognum == req->prognum)
+ && (program->progver == req->progver)) {
+ found = 1;
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+ if (!found) {
+ if (err != PROG_MISMATCH) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "RPC program not available");
+ err = PROG_UNAVAIL;
+ goto err;
+ }
+
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC program version not"
+ " available");
+ goto err;
+ }
+ req->prog = program;
+ if (!program->actors) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC System error");
+ err = SYSTEM_ERR;
+ goto err;
+ }
+
+ if ((req->procnum < 0) || (req->procnum >= program->numactors)) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC Program procedure not"
+ " available");
+ err = PROC_UNAVAIL;
+ goto err;
+ }
+
+ actor = &program->actors[req->procnum];
+ if (!actor->actor) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC Program procedure not"
+ " available");
+ err = PROC_UNAVAIL;
+ actor = NULL;
+ goto err;
+ }
+
+ err = SUCCESS;
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Actor found: %s - %s",
+ program->progname, actor->procname);
+err:
+ if (req)
+ req->rpc_err = err;
+
+ return actor;
+}
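+
+/* Note (added for clarity, not part of the original change): the lookup
+ * above maps (prognum, progver, procnum) from the call header to an actor
+ * and records the matching RPC accept-stat in req->rpc_err: PROG_UNAVAIL if
+ * no program matched, PROG_MISMATCH if only the version differed, SYSTEM_ERR
+ * if the program has no actor table, PROC_UNAVAIL if the procedure index is
+ * out of range or has no handler, and SUCCESS once an actor is found.
+ */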
+
+
+/* This procedure can only pass 4 arguments to the registered notifyfn. To
+ * send more arguments, call wrapper->notify directly.
+ */
+inline void
+rpcsvc_program_notify (rpcsvc_listener_t *listener, rpcsvc_event_t event,
+ void *data)
+{
+ rpcsvc_notify_wrapper_t *wrapper = NULL;
+
+ if (!listener) {
+ goto out;
+ }
+
+ list_for_each_entry (wrapper, &listener->list, list) {
+ if (wrapper->notify) {
+ wrapper->notify (listener->conn->svc,
+ wrapper->data,
+ event, data);
+ }
+ }
+
+out:
+ return;
+}
+
+
+int
+rpcsvc_accept (rpcsvc_conn_t *listen_conn, rpc_transport_t *new_trans)
+{
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_conn_t *conn = NULL;
+ char clstr[RPCSVC_PEER_STRLEN];
+
+ listener = listen_conn->listener;
+ conn = rpcsvc_conn_init (listen_conn->svc, new_trans);
+ if (!conn) {
+ rpc_transport_disconnect (new_trans);
+ memset (clstr, 0, RPCSVC_PEER_STRLEN);
+ rpc_transport_get_peername (new_trans, clstr,
+ RPCSVC_PEER_STRLEN);
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "allocating connection for "
+ "new transport (%s) failed", clstr);
+ goto out;
+ }
+
+ conn->listener = listener;
+
+ //rpcsvc_program_notify (listener, RPCSVC_EVENT_ACCEPT, conn);
+out:
+ return 0;
+}
+
+
+void
+rpcsvc_request_destroy (rpcsvc_conn_t *conn, rpcsvc_request_t *req)
+{
+ if (!conn || !req) {
+ goto out;
+ }
+
+ if (req->recordiob) {
+ iobuf_unref (req->recordiob);
+ }
+
+ if (req->vectorediob) {
+ iobuf_unref (req->vectorediob);
+ }
+
+ mem_put (conn->rxpool, req);
+out:
+ return;
+}
+
+
+rpcsvc_request_t *
+rpcsvc_request_init (rpcsvc_conn_t *conn, struct rpc_msg *callmsg,
+ struct iovec progmsg, rpc_transport_pollin_t *msg,
+ rpcsvc_request_t *req)
+{
+ if ((!conn) || (!callmsg)|| (!req) || (!msg))
+ return NULL;
+
+ /* We start a RPC request as always denied. */
+ req->rpc_status = MSG_DENIED;
+ req->xid = rpc_call_xid (callmsg);
+ req->prognum = rpc_call_program (callmsg);
+ req->progver = rpc_call_progver (callmsg);
+ req->procnum = rpc_call_progproc (callmsg);
+ req->conn = conn;
+ req->msg[0] = progmsg;
+ if (msg->vectored) {
+ req->msg[1].iov_base = iobuf_ptr (msg->data.vector.iobuf2);
+ req->msg[1].iov_len = msg->data.vector.size2;
+
+ req->recordiob = iobuf_ref (msg->data.vector.iobuf1);
+ req->vectorediob = iobuf_ref (msg->data.vector.iobuf2);
+ } else {
+ req->recordiob = iobuf_ref (msg->data.simple.iobuf);
+ }
+
+ req->trans_private = msg->private;
+
+ INIT_LIST_HEAD (&req->txlist);
+ req->payloadsize = 0;
+
+ /* By this time, the data bytes for the auth scheme would have already
+ * been copied into the required sections of the req structure,
+ * we just need to fill in the meta-data about it now.
+ */
+ req->cred.flavour = rpc_call_cred_flavour (callmsg);
+ req->cred.datalen = rpc_call_cred_len (callmsg);
+ req->verf.flavour = rpc_call_verf_flavour (callmsg);
+ req->verf.datalen = rpc_call_verf_len (callmsg);
+
+ /* AUTH */
+ rpcsvc_auth_request_init (req);
+ return req;
+}
+
+
+rpcsvc_request_t *
+rpcsvc_request_create (rpcsvc_conn_t *conn, rpc_transport_pollin_t *msg)
+{
+ char *msgbuf = NULL;
+ struct rpc_msg rpcmsg;
+ struct iovec progmsg; /* RPC Program payload */
+ rpcsvc_request_t *req = NULL;
+ size_t msglen = 0;
+ int ret = -1;
+
+ if (!conn)
+ return NULL;
+
+ /* We need to allocate the request before actually calling
+ * rpcsvc_request_init on the request so that we can fill the auth
+ * data directly into the request structure from the message iobuf.
+ * This avoids the need for a temporary buffer into which the auth
+ * data would otherwise have been copied.
+ */
+ rpcsvc_alloc_request (conn, req);
+ if (!req) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to alloc request");
+ goto err;
+ }
+
+ if (msg->vectored) {
+ msgbuf = iobuf_ptr (msg->data.vector.iobuf1);
+ msglen = msg->data.vector.size1;
+ } else {
+ msgbuf = iobuf_ptr (msg->data.simple.iobuf);
+ msglen = msg->data.simple.size;
+ }
+
+ ret = xdr_to_rpc_call (msgbuf, msglen, &rpcmsg, &progmsg,
+ req->cred.authdata, req->verf.authdata);
+
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC call decoding failed");
+ rpcsvc_request_seterr (req, GARBAGE_ARGS);
+ goto err;
+ }
+
+ ret = -1;
+ rpcsvc_request_init (conn, &rpcmsg, progmsg, msg, req);
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "RPC XID: %lx, Ver: %ld, Program: %ld,"
+ " ProgVers: %ld, Proc: %ld", rpc_call_xid (&rpcmsg),
+ rpc_call_rpcvers (&rpcmsg), rpc_call_program (&rpcmsg),
+ rpc_call_progver (&rpcmsg), rpc_call_progproc (&rpcmsg));
+
+ if (rpc_call_rpcvers (&rpcmsg) != 2) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "RPC version not supported");
+ rpcsvc_request_seterr (req, RPC_MISMATCH);
+ goto err;
+ }
+
+ ret = rpcsvc_authenticate (req);
+ if (ret == RPCSVC_AUTH_REJECT) {
+ /* No need to set auth_err; that is the responsibility of
+ * the authentication handler since only it knows what exact
+ * error happened.
+ */
+ rpcsvc_request_seterr (req, AUTH_ERROR);
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed authentication");
+ ret = -1;
+ goto err;
+ }
+
+
+ /* If the error is not RPC_MISMATCH, we consider the call as accepted
+ * since we are not handling authentication failures for now.
+ */
+ req->rpc_status = MSG_ACCEPTED;
+ ret = 0;
+err:
+ if (ret == -1) {
+ ret = rpcsvc_error_reply (req);
+ req = NULL;
+ }
+
+ return req;
+}
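+
+/* Note (added for clarity, not part of the original change): request
+ * creation above proceeds in three steps: xdr_to_rpc_call() decodes the call
+ * header and the credential/verifier opaques straight into the preallocated
+ * request, rpcsvc_request_init() fills in the rpc, program and auth
+ * metadata, and rpcsvc_authenticate() runs the scheme-specific check. Any
+ * failure queues an error reply via rpcsvc_error_reply() and returns NULL.
+ */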
+
+
+int
+rpcsvc_handle_rpc_call (rpcsvc_conn_t *conn, rpc_transport_pollin_t *msg)
+{
+ rpcsvc_actor_t *actor = NULL;
+ rpcsvc_request_t *req = NULL;
+ int ret = -1;
+
+ if (!conn)
+ return -1;
+
+ req = rpcsvc_request_create (conn, msg);
+ if (!req)
+ goto err;
+
+ if (!rpcsvc_request_accepted (req))
+ goto err_reply;
+
+ actor = rpcsvc_program_actor (conn, req);
+ if (!actor)
+ goto err_reply;
+
+ if (actor) {
+ if (req->vectorediob) {
+ if (actor->vector_actor) {
+ rpcsvc_conn_ref (conn);
+ ret = actor->vector_actor (req,
+ req->vectorediob);
+ } else {
+ rpcsvc_request_seterr (req, PROC_UNAVAIL);
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,
+ "No vectored handler present");
+ ret = RPCSVC_ACTOR_ERROR;
+ }
+ } else if (actor->actor) {
+ rpcsvc_conn_ref (req->conn);
+ ret = actor->actor (req);
+ }
+ }
+
+err_reply:
+ if (ret == RPCSVC_ACTOR_ERROR)
+ ret = rpcsvc_error_reply (req);
+
+ /* No need to propagate error beyond this function since the reply
+ * has now been queued. */
+ ret = 0;
+err:
+ return ret;
+}
+
+
+int
+rpcsvc_notify (rpc_transport_t *trans, void *mydata,
+ rpc_transport_event_t event, void *data, ...)
+{
+ rpcsvc_conn_t *conn = NULL;
+ rpcsvc_t *svc = NULL;
+ int ret = -1;
+ rpc_transport_pollin_t *msg = NULL;
+ rpc_transport_t *new_trans = NULL;
+
+ conn = mydata;
+ if (conn == NULL) {
+ goto out;
+ }
+
+ svc = conn->svc;
+
+ switch (event) {
+ case RPC_TRANSPORT_ACCEPT:
+ new_trans = data;
+ ret = rpcsvc_accept (conn, new_trans);
+ break;
+
+ case RPC_TRANSPORT_DISCONNECT:
+ //rpcsvc_conn_deinit (conn);
+ ret = 0;
+ break;
+
+ case RPC_TRANSPORT_MSG_RECEIVED:
+ msg = data;
+ ret = rpcsvc_handle_rpc_call (conn, msg);
+ break;
+
+ case RPC_TRANSPORT_MSG_SENT:
+ ret = 0;
+ break;
+
+ case RPC_TRANSPORT_CONNECT:
+ /* do nothing, no need for rpcsvc to handle this, client should
+ * handle this event
+ */
+ gf_log ("rpcsvc", GF_LOG_CRITICAL,
+ "got CONNECT event, which should have not come");
+ ret = 0;
+ break;
+
+ case RPC_TRANSPORT_CLEANUP:
+ /* FIXME: think about this later */
+ ret = 0;
+ break;
+
+ case RPC_TRANSPORT_MAP_XID_REQUEST:
+ /* FIXME: think about this later */
+ gf_log ("rpcsvc", GF_LOG_CRITICAL,
+ "got MAP_XID event, which should have not come");
+ ret = 0;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+
+void
+rpcsvc_set_lastfrag (uint32_t *fragsize) {
+ (*fragsize) |= 0x80000000U;
+}
+
+void
+rpcsvc_set_frag_header_size (uint32_t size, char *haddr)
+{
+ size = htonl (size);
+ memcpy (haddr, &size, sizeof (size));
+}
+
+void
+rpcsvc_set_last_frag_header_size (uint32_t size, char *haddr)
+{
+ rpcsvc_set_lastfrag (&size);
+ rpcsvc_set_frag_header_size (size, haddr);
+}
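+
+/* Illustrative example (not part of the original change): the header written
+ * above is the standard 4-byte RPC record-marking header -- the fragment
+ * length in network byte order, with the most significant bit set when this
+ * is the last fragment of the record. A final fragment of 100 bytes is thus
+ * encoded as 0x80000064.
+ */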
+
+
+/* Given the RPC reply structure and the payload handed by the RPC program,
+ * encode the RPC record header into the buffer pointed by recordstart.
+ */
+struct iovec
+rpcsvc_record_build_header (char *recordstart, size_t rlen,
+ struct rpc_msg reply, size_t payload)
+{
+ struct iovec replyhdr;
+ struct iovec txrecord = {0, 0};
+ size_t fraglen = 0;
+ int ret = -1;
+
+ /* After leaving aside the 4 bytes for the fragment header, let's
+ * encode the RPC reply structure into the buffer given to us.
+ */
+ ret = rpc_reply_to_xdr (&reply,(recordstart + RPCSVC_FRAGHDR_SIZE),
+ rlen, &replyhdr);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to create RPC reply");
+ goto err;
+ }
+
+ fraglen = payload + replyhdr.iov_len;
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Reply fraglen %zu, payload: %zu, "
+ "rpc hdr: %zu", fraglen, payload, replyhdr.iov_len);
+
+ /* Since we're not spreading RPC records over multiple fragments
+ * we just set this fragment as the first and last fragment for this
+ * record.
+ */
+ rpcsvc_set_last_frag_header_size (fraglen, recordstart);
+
+ /* Even though the RPC record starts at recordstart+RPCSVC_FRAGHDR_SIZE
+ * we need to transmit the record with the fragment header, which starts
+ * at recordstart.
+ */
+ txrecord.iov_base = recordstart;
+
+ /* Remember, this is only the vec for the RPC header and does not
+ * include the payload above. We needed the payload only to calculate
+ * the size of the full fragment. This size is sent in the fragment
+ * header.
+ */
+ txrecord.iov_len = RPCSVC_FRAGHDR_SIZE + replyhdr.iov_len;
+err:
+ return txrecord;
+}
+
+
+int
+rpcsvc_conn_submit (rpcsvc_conn_t *conn, struct iovec *hdrvec,
+ int hdrcount, struct iovec *proghdr, int proghdrcount,
+ struct iovec *progpayload, int progpayloadcount,
+ struct iobref *iobref, void *priv)
+{
+ int ret = -1;
+ rpc_transport_reply_t reply = {{0, }};
+
+ if ((!conn) || (!hdrvec) || (!hdrvec->iov_base) || (!conn->trans)) {
+ goto out;
+ }
+
+ reply.msg.rpchdr = hdrvec;
+ reply.msg.rpchdrcount = hdrcount;
+ reply.msg.proghdr = proghdr;
+ reply.msg.proghdrcount = proghdrcount;
+ reply.msg.progpayload = progpayload;
+ reply.msg.progpayloadcount = progpayloadcount;
+ reply.msg.iobref = iobref;
+ reply.private = priv;
+
+ /* Now that we have both the RPC and program buffers in XDR format,
+ * let's hand them to the transmission layer.
+ */
+ if (!rpcsvc_conn_check_active (conn)) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Connection inactive");
+ goto out;
+ }
+
+ ret = rpc_transport_submit_reply (conn->trans, &reply);
+
+out:
+ return ret;
+}
+
+
+int
+rpcsvc_fill_reply (rpcsvc_request_t *req, struct rpc_msg *reply)
+{
+ rpcsvc_program_t *prog = NULL;
+ if ((!req) || (!reply))
+ return -1;
+
+ prog = rpcsvc_request_program (req);
+ rpc_fill_empty_reply (reply, req->xid);
+
+ if (req->rpc_status == MSG_DENIED)
+ rpc_fill_denied_reply (reply, req->rpc_err, req->auth_err);
+ else if (req->rpc_status == MSG_ACCEPTED)
+ rpc_fill_accepted_reply (reply, req->rpc_err, prog->proglowvers,
+ prog->proghighvers, req->verf.flavour,
+ req->verf.datalen,
+ req->verf.authdata);
+ else
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Invalid rpc_status value");
+
+ return 0;
+}
+
+
+/* Given a request and the reply payload size, builds a reply and encodes it
+ * into a record header. This record header is returned in the vector pointed
+ * to by recbuf.
+ * msgvec is the buffer that points to the payload of the RPC program.
+ * This buffer can be NULL, if an RPC error reply is being constructed.
+ * The only reason it is needed here is that in case the buffer is provided,
+ * we should account for the length of that buffer in the RPC fragment header.
+ */
+struct iobuf *
+rpcsvc_record_build_record (rpcsvc_request_t *req, size_t payload,
+ struct iovec *recbuf)
+{
+ struct rpc_msg reply;
+ struct iobuf *replyiob = NULL;
+ char *record = NULL;
+ struct iovec recordhdr = {0, };
+ size_t pagesize = 0;
+ rpcsvc_conn_t *conn = NULL;
+ rpcsvc_t *svc = NULL;
+
+ if ((!req) || (!req->conn) || (!recbuf))
+ return NULL;
+
+ /* First, try to get a pointer into the buffer which the RPC
+ * layer can use.
+ */
+ conn = req->conn;
+ svc = rpcsvc_conn_rpcsvc (conn);
+ replyiob = iobuf_get (svc->ctx->iobuf_pool);
+ pagesize = iobpool_pagesize ((struct iobuf_pool *)svc->ctx->iobuf_pool);
+ if (!replyiob) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to get iobuf");
+ goto err_exit;
+ }
+
+ record = iobuf_ptr (replyiob); /* Now we have it. */
+
+ /* Fill the rpc structure and XDR it into the buffer got above. */
+ rpcsvc_fill_reply (req, &reply);
+ recordhdr = rpcsvc_record_build_header (record, pagesize, reply,
+ payload);
+ if (!recordhdr.iov_base) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to build record "
+ " header");
+ iobuf_unref (replyiob);
+ replyiob = NULL;
+ recbuf->iov_base = NULL;
+ goto err_exit;
+ }
+
+ recbuf->iov_base = recordhdr.iov_base;
+ recbuf->iov_len = recordhdr.iov_len;
+err_exit:
+ return replyiob;
+}
+
+
+/*
+ * The function to submit a program message to the RPC service.
+ * This message is added to the transmission queue of the
+ * conn.
+ *
+ * Program callers are not expected to use the msgvec->iov_base
+ * address for anything else.
+ * Nor are they expected to free it once this function returns.
+ * Once the transmission of the buffer is completed by the RPC service,
+ * the memory area as referenced through @msg will be unrefed.
+ * If a higher layer does not want anything to do with this iobuf
+ * after this function returns, it should call unref on it. For keeping
+ * it around till the transmission is actually complete, rpcsvc also refs it.
+ *
+ * If this function returns an error by returning -1, the
+ * higher layer programs should assume that a disconnection happened
+ * and should know that the conn memory area as well as the req structure
+ * has been freed internally.
+ *
+ * For now, this function assumes that a submit is always called
+ * to send a new record. Later, if there is a situation where different
+ * buffers for the same record come from different sources, then we'll
+ * need to change this code to account for multiple submit calls adding
+ * the buffers into a single record.
+ */
+
+int
+rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
+ int hdrcount, struct iovec *payload, int payloadcount,
+ struct iobref *iobref)
+{
+ int ret = -1, i = 0;
+ struct iobuf *replyiob = NULL;
+ struct iovec recordhdr = {0, };
+ rpcsvc_conn_t *conn = NULL;
+ size_t msglen = 0;
+ char new_iobref = 0;
+
+ if ((!req) || (!req->conn))
+ return -1;
+
+ conn = req->conn;
+
+ for (i = 0; i < hdrcount; i++) {
+ msglen += proghdr[i].iov_len;
+ }
+
+ for (i = 0; i < payloadcount; i++) {
+ msglen += payload[i].iov_len;
+ }
+
+ gf_log (GF_RPCSVC, GF_LOG_TRACE, "Tx message: %zu", msglen);
+
+ /* Build the buffer containing the encoded RPC reply. */
+ replyiob = rpcsvc_record_build_record (req, msglen, &recordhdr);
+ if (!replyiob) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR,"Reply record creation failed");
+ goto disconnect_exit;
+ }
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "memory allocation "
+ "failed");
+ goto disconnect_exit;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, replyiob);
+
+ ret = rpcsvc_conn_submit (conn, &recordhdr, 1, proghdr, hdrcount,
+ payload, payloadcount, iobref,
+ req->trans_private);
+
+ rpcsvc_request_destroy (conn, req);
+
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to submit message");
+ }
+
+disconnect_exit:
+ if (replyiob) {
+ iobuf_unref (replyiob);
+ }
+
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ /* Note that a unref is called every time a reply is sent. This is in
+ * response to the ref that is performed on the conn when a request is
+ * handed to the RPC program.
+ *
+ * The catch, however, is that if the reply is an rpc error, we must
+ * not unref. This is because the ref only contains
+ * references for the actors to which the request was handed plus one
+ * reference maintained by the RPC layer. By unrefing for a case where
+ * no actor was called, we will be losing the ref held for the RPC
+ * layer.
+ */
+ if ((rpcsvc_request_accepted (req)) &&
+ (rpcsvc_request_accepted_success (req)))
+ rpcsvc_conn_unref (conn);
+
+ return ret;
+}
+
+
+int
+rpcsvc_error_reply (rpcsvc_request_t *req)
+{
+ struct iovec dummyvec = {0, };
+
+ if (!req)
+ return -1;
+
+ /* At this point the req should already have been filled with the
+ * appropriate RPC error numbers.
+ */
+ return rpcsvc_submit_generic (req, &dummyvec, 0, NULL, 0, NULL);
+}
+
+
+/* Register the program with the local portmapper service. */
+int
+rpcsvc_program_register_portmap (rpcsvc_program_t *newprog, rpcsvc_conn_t *conn)
+{
+ int ret = 0;
+ struct sockaddr_in sa = {0, };
+
+ if (!newprog || !conn || !conn->trans) {
+ goto out;
+ }
+
+ if (!(pmap_set (newprog->prognum, newprog->progver, IPPROTO_TCP,
+ sa.sin_port))) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not register with"
+ " portmap");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int
+rpcsvc_program_unregister_portmap (rpcsvc_program_t *prog)
+{
+ if (!prog)
+ return -1;
+
+ if (!(pmap_unset(prog->prognum, prog->progver))) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Could not unregister with"
+ " portmap");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+rpcsvc_listener_t *
+rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port)
+{
+ rpcsvc_listener_t *listener = NULL;
+ char found = 0;
+
+ if (!svc) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_for_each_entry (listener, &svc->listeners, list) {
+ if (((struct sockaddr_in *)&listener->sa)->sin_port
+ == port) {
+ found = 1;
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+ if (!found) {
+ listener = NULL;
+ }
+
+out:
+ return listener;
+}
+
+
+/* The only difference between the generic submit and this one is that the
+ * generic submit is also used for submitting RPC error replies, where there
+ * are no payloads, so the program header and payload vectors can be NULL.
+ * Since RPC programs should be using this function along with their payloads
+ * we must perform NULL checks before calling the generic submit.
+ */
+int
+rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr,
+ int hdrcount, struct iovec *payload, int payloadcount,
+ struct iobref *iobref)
+{
+ if ((!req) || (!req->conn) || (!proghdr) || (!proghdr->iov_base))
+ return -1;
+
+ return rpcsvc_submit_generic (req, proghdr, hdrcount, payload,
+ payloadcount, iobref);
+}
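+
+/* Illustrative sketch (not part of the original change): how a program actor
+ * might reply. "myhdr" is a hypothetical XDR-encoded program header; an
+ * iobref holding refs on the actor's own buffers can be passed as the last
+ * argument so that they stay alive until transmission completes:
+ *
+ *     struct iovec myhdr = { .iov_base = buf, .iov_len = len };
+ *     ret = rpcsvc_submit_message (req, &myhdr, 1, NULL, 0, NULL);
+ */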
+
+
+int
+rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t prog)
+{
+ int ret = -1;
+
+ if (!svc)
+ return -1;
+
+ /* TODO: De-init the listening connection for this program. */
+ ret = rpcsvc_program_unregister_portmap (&prog);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "portmap unregistration of"
+ " program failed");
+ goto err;
+ }
+
+ ret = 0;
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "Program unregistered: %s, Num: %d,"
+ " Ver: %d, Port: %d", prog.progname, prog.prognum,
+ prog.progver, prog.progport);
+
+err:
+ if (ret == -1)
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program unregistration failed"
+ ": %s, Num: %d, Ver: %d, Port: %d", prog.progname,
+ prog.prognum, prog.progver, prog.progport);
+
+ return ret;
+}
+
+
+int
+rpcsvc_conn_peername (rpcsvc_conn_t *conn, char *hostname, int hostlen)
+{
+ if (!conn || !conn->trans)
+ return -1;
+
+ return rpc_transport_get_peername (conn->trans, hostname, hostlen);
+}
+
+
+int
+rpcsvc_conn_peeraddr (rpcsvc_conn_t *conn, char *addrstr, int addrlen,
+ struct sockaddr *sa, socklen_t sasize)
+{
+ if (!conn || !conn->trans)
+ return -1;
+
+ return rpc_transport_get_peeraddr(conn->trans, addrstr, addrlen, sa,
+ sasize);
+}
+
+
+rpcsvc_conn_t *
+rpcsvc_conn_create (rpcsvc_t *svc, dict_t *options, char *name)
+{
+ int ret = -1;
+ rpc_transport_t *trans = NULL;
+ rpcsvc_conn_t *conn = NULL;
+
+ trans = rpc_transport_load (svc->ctx, options, name);
+ if (!trans) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "cannot create listener, "
+ "initing the transport failed");
+ goto out;
+ }
+
+ ret = rpc_transport_listen (trans);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG,
+ "listening on transport failed");
+ goto out;
+ }
+
+ conn = rpcsvc_conn_init (svc, trans);
+ if (!conn) {
+ ret = -1;
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG,
+ "initializing connection for transport failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if ((ret == -1) && (trans)) {
+ rpc_transport_disconnect (trans);
+ }
+
+ return conn;
+}
+
+rpcsvc_listener_t *
+rpcsvc_listener_alloc (rpcsvc_t *svc, rpcsvc_conn_t *conn)
+{
+ rpcsvc_listener_t *listener = NULL;
+ int ret = -1;
+
+ listener = GF_CALLOC (1, sizeof (*listener), 0);
+ if (!listener) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "memory allocation failed");
+ goto out;
+ }
+
+ /* TODO: unresolved symbol */
+ ret = rpc_transport_get_myaddr (conn->trans, NULL, 0,
+ &listener->sa,
+ sizeof (listener->sa));
+ ret = 0;
+ if (ret == -1) {
+ GF_FREE (listener);
+ listener = NULL;
+ goto out;
+ }
+
+ listener->conn = conn;
+
+ INIT_LIST_HEAD (&listener->list);
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_add_tail (&listener->list, &svc->listeners);
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+out:
+ return listener;
+}
+
+
+rpcsvc_listener_t *
+rpcsvc_create_listener (rpcsvc_t *svc, dict_t *options, char *name)
+{
+ rpcsvc_conn_t *conn = NULL;
+ rpcsvc_listener_t *listener = NULL;
+
+ if (!svc || !options) {
+ goto out;
+ }
+
+ conn = rpcsvc_conn_create (svc, options, name);
+ if (!conn) {
+ goto out;
+ }
+
+ listener = rpcsvc_listener_alloc (svc, conn);
+ if (listener == NULL) {
+ goto out;
+ }
+
+ conn->listener = listener;
+out:
+ if (!listener && conn) {
+ rpcsvc_conn_deinit (conn);
+ }
+
+ return listener;
+}
+
+
+int
+rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)
+{
+ rpcsvc_notify_wrapper_t *wrapper = NULL, *tmp = NULL;
+ int ret = 0;
+
+ if (!svc || !notify) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_for_each_entry_safe (wrapper, tmp, &svc->notify, list) {
+ if ((wrapper->notify == notify)
+ && (mydata == wrapper->data)) {
+ list_del_init (&wrapper->list);
+ GF_FREE (wrapper);
+ ret++;
+ }
+ }
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+out:
+ return ret;
+}
+
+int
+rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)
+{
+ rpcsvc_notify_wrapper_t *wrapper = NULL;
+ int ret = -1;
+
+ wrapper = rpcsvc_notify_wrapper_alloc ();
+ if (!wrapper) {
+ goto out;
+ }
+ svc->mydata = mydata; /* this_xlator */
+ wrapper->data = mydata;
+ wrapper->notify = notify;
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_add_tail (&wrapper->list, &svc->notify);
+ svc->notify_count++;
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+
+int
+rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t program)
+{
+ rpcsvc_program_t *newprog = NULL;
+ int ret = -1;
+ rpcsvc_listener_t *listener = NULL;
+
+ if (!svc)
+ return -1;
+
+ newprog = GF_CALLOC (1, sizeof(*newprog), 0);
+ if (!newprog)
+ return -1;
+
+ if (!program.actors)
+ goto free_prog;
+
+ memcpy (newprog, &program, sizeof (program));
+
+ listener = svc->listener;
+
+ ret = rpcsvc_program_register_portmap (newprog, listener->conn);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "portmap registration of"
+ " program failed");
+ goto free_prog;
+ }
+
+ pthread_mutex_lock (&svc->rpclock);
+ {
+ list_add_tail (&newprog->program, &svc->programs);
+ }
+ pthread_mutex_unlock (&svc->rpclock);
+
+ ret = 0;
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "New program registered: %s, Num: %d,"
+ " Ver: %d, Port: %d", newprog->progname, newprog->prognum,
+ newprog->progver, newprog->progport);
+
+free_prog:
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Program registration failed:"
+ " %s, Num: %d, Ver: %d, Port: %d", newprog->progname,
+ newprog->prognum, newprog->progver, newprog->progport);
+ GF_FREE (newprog);
+ }
+
+ return ret;
+}
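+
+/* Illustrative sketch (not part of the original change): a hypothetical
+ * program registration. The numbers and the actor table are examples only;
+ * note that the program structure is passed by value.
+ *
+ *     rpcsvc_program_t my_prog = {
+ *             .progname  = "my-prog",
+ *             .prognum   = 123451,
+ *             .progver   = 1,
+ *             .numactors = 1,
+ *             .actors    = my_actors,   // hypothetical actor table
+ *     };
+ *     ret = rpcsvc_program_register (svc, my_prog);
+ */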
+
+
+int
+rpcsvc_init_options (rpcsvc_t *svc, dict_t *options)
+{
+ svc->memfactor = RPCSVC_DEFAULT_MEMFACTOR;
+ return 0;
+}
+
+
+/* The global RPC service initializer.
+ */
+rpcsvc_t *
+rpcsvc_init (glusterfs_ctx_t *ctx, dict_t *options)
+{
+ rpcsvc_t *svc = NULL;
+ int ret = -1;
+ rpcsvc_listener_t *listener = NULL;
+
+ if ((!ctx) || (!options))
+ return NULL;
+
+ svc = GF_CALLOC (1, sizeof (*svc), 0);
+ if (!svc)
+ return NULL;
+
+ pthread_mutex_init (&svc->rpclock, NULL);
+ INIT_LIST_HEAD (&svc->authschemes);
+ INIT_LIST_HEAD (&svc->notify);
+ INIT_LIST_HEAD (&svc->listeners);
+ INIT_LIST_HEAD (&svc->programs);
+
+ ret = rpcsvc_init_options (svc, options);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init options");
+ goto free_svc;
+ }
+
+ ret = rpcsvc_auth_init (svc, options);
+ if (ret == -1) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "Failed to init "
+ "authentication");
+ goto free_svc;
+ }
+
+ ret = -1;
+ svc->options = options;
+ svc->ctx = ctx;
+ gf_log (GF_RPCSVC, GF_LOG_DEBUG, "RPC service inited.");
+
+ /* One listen port per RPC */
+ listener = rpcsvc_get_listener (svc, 0);
+ if (!listener) {
+ /* FIXME: listener is given the name of first program that
+ * creates it. This is not always correct. For eg., multiple
+ * programs can be listening on the same listener
+ * (glusterfs 3.1.0, 3.1.2, 3.1.3 etc).
+ */
+ listener = rpcsvc_create_listener (svc, options, "RPC");
+ if (!listener) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "creation of listener"
+ " for program failed");
+ goto free_svc;
+ }
+ }
+
+ if (!listener->conn) {
+ gf_log (GF_RPCSVC, GF_LOG_ERROR, "listener with no connection "
+ "found");
+ goto free_svc;
+ }
+
+ svc->listener = listener;
+
+ ret = 0;
+free_svc:
+ if (ret == -1) {
+ GF_FREE (svc);
+ svc = NULL;
+ }
+
+ return svc;
+}
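+
+/* Illustrative sketch (not part of the original change): typical bring-up of
+ * the service from an owning translator, where "this" and "options" stand
+ * for the caller's xlator and its option dictionary:
+ *
+ *     rpcsvc_t *svc = rpcsvc_init (this->ctx, options);
+ *     if (svc) {
+ *             rpcsvc_register_notify (svc, my_notify_fn, this);
+ *             rpcsvc_program_register (svc, my_prog);
+ *     }
+ */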
diff --git a/xlators/protocol/rpc/rpc-lib/src/rpcsvc.h b/xlators/protocol/rpc/rpc-lib/src/rpcsvc.h
new file mode 100644
index 00000000000..c3b003f629f
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/rpcsvc.h
@@ -0,0 +1,584 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _RPCSVC_H
+#define _RPCSVC_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "event.h"
+#include "rpc-transport.h"
+#include "logging.h"
+#include "dict.h"
+#include "mem-pool.h"
+#include "list.h"
+#include "iobuf.h"
+#include "xdr-rpc.h"
+#include "glusterfs.h"
+#include "rpcsvc-common.h"
+
+#include <pthread.h>
+#include <sys/uio.h>
+
+#include <inttypes.h>
+#include "compat.h"
+
+#ifndef NGRPS
+#define NGRPS 16
+#endif /* !NGRPS */
+
+#define GF_RPCSVC "rpc-service"
+#define RPCSVC_THREAD_STACK_SIZE ((size_t)(1024 * GF_UNIT_KB))
+
+#define RPCSVC_FRAGHDR_SIZE 4 /* 4-byte RPC fragment header size */
+
+#define RPCSVC_DEFAULT_MEMFACTOR 15
+#define RPCSVC_EVENTPOOL_SIZE_MULT 1024
+#define RPCSVC_POOLCOUNT_MULT 35
+#define RPCSVC_CONN_READ (128 * GF_UNIT_KB)
+#define RPCSVC_PAGE_SIZE (128 * GF_UNIT_KB)
+
+/* RPC Record States */
+#define RPCSVC_READ_FRAGHDR 1
+#define RPCSVC_READ_FRAG 2
+/* The size in bytes, if crossed by a fragment will be handed over to the
+ * vectored actor so that it can allocate its buffers the way it wants.
+ * In our RPC layer, we assume that vectored RPC requests/records are never
+ * spread over multiple RPC fragments since that prevents us from determining
+ * whether the record should be handled in RPC layer completely or handed to
+ * the vectored handler.
+ */
+#define RPCSVC_VECTORED_FRAGSZ 4096
+#define RPCSVC_VECTOR_READCRED 1003
+#define RPCSVC_VECTOR_READVERFSZ 1004
+#define RPCSVC_VECTOR_READVERF 1005
+#define RPCSVC_VECTOR_IGNORE 1006
+#define RPCSVC_VECTOR_READVEC 1007
+#define RPCSVC_VECTOR_READPROCHDR 1008
+
+#define rpcsvc_record_vectored_baremsg(rs) (((rs)->state == RPCSVC_READ_FRAG) && (rs)->vecstate == 0)
+#define rpcsvc_record_vectored_cred(rs) ((rs)->vecstate == RPCSVC_VECTOR_READCRED)
+#define rpcsvc_record_vectored_verfsz(rs) ((rs)->vecstate == RPCSVC_VECTOR_READVERFSZ)
+#define rpcsvc_record_vectored_verfread(rs) ((rs)->vecstate == RPCSVC_VECTOR_READVERF)
+#define rpcsvc_record_vectored_ignore(rs) ((rs)->vecstate == RPCSVC_VECTOR_IGNORE)
+#define rpcsvc_record_vectored_readvec(rs) ((rs)->vecstate == RPCSVC_VECTOR_READVEC)
+#define rpcsvc_record_vectored_readprochdr(rs) ((rs)->vecstate == RPCSVC_VECTOR_READPROCHDR)
+#define rpcsvc_record_vectored(rs) ((rs)->fragsize > RPCSVC_VECTORED_FRAGSZ)
+/* Includes bytes up to and including the credential length field. The credlen
+ * will be followed by @credlen bytes of credential data which will have to be
+ * read separately by the vectored reader. After the credentials comes the
+ * verifier which will also have to be read separately including the 8 bytes of
+ * verf flavour and verflen.
+ */
+#define RPCSVC_BARERPC_MSGSZ 32
+#define rpcsvc_record_readfraghdr(rs) ((rs)->state == RPCSVC_READ_FRAGHDR)
+#define rpcsvc_record_readfrag(rs) ((rs)->state == RPCSVC_READ_FRAG)
+
+#define RPCSVC_LOWVERS 2
+#define RPCSVC_HIGHVERS 2
+
+
+#if 0
+#error "defined in /usr/include/rpc/auth.h"
+
+#define AUTH_NONE 0 /* no authentication */
+#define AUTH_NULL 0 /* backward compatibility */
+#define AUTH_SYS 1 /* unix style (uid, gids) */
+#define AUTH_UNIX AUTH_SYS
+#define AUTH_SHORT 2 /* short hand unix style */
+#define AUTH_DES 3 /* des style (encrypted timestamps) */
+#define AUTH_DH AUTH_DES /* Diffie-Hellman (this is DES) */
+#define AUTH_KERB 4 /* kerberos style */
+#endif /* */
+
+#define AUTH_GLUSTERFS 5
+
+typedef struct rpcsvc_program rpcsvc_program_t;
+
+struct rpcsvc_notify_wrapper {
+ struct list_head list;
+ void *data;
+ rpcsvc_notify_t notify;
+};
+typedef struct rpcsvc_notify_wrapper rpcsvc_notify_wrapper_t;
+
+#define RPCSVC_CONNSTATE_CONNECTED 1
+#define RPCSVC_CONNSTATE_DISCONNECTED 2
+
+#define rpcsvc_conn_check_active(conn) ((conn)->connstate==RPCSVC_CONNSTATE_CONNECTED)
+
+typedef struct rpcsvc_request rpcsvc_request_t;
+
+typedef struct rpc_conn_state rpcsvc_conn_t;
+typedef struct {
+ rpcsvc_conn_t *conn;
+ struct sockaddr sa;
+ struct list_head list;
+} rpcsvc_listener_t;
+
+struct rpcsvc_config {
+ int max_block_size;
+};
+
+/* Contains the state for each connection that is used for transmitting and
+ * receiving RPC messages.
+ *
+ * Anything that can be accessed by an RPC program must be synced through
+ * connlock.
+ */
+struct rpc_conn_state {
+
+ /* Transport or connection state */
+ rpc_transport_t *trans;
+
+ rpcsvc_t *svc;
+ /* RPC Records and Fragments assembly state.
+ * All incoming data is staged here before being
+ * called a full RPC message.
+ */
+ /* rpcsvc_record_state_t rstate; */
+
+ /* It is possible that a client disconnects while
+ * the higher layer RPC service is busy in a call.
+ * In this case, we cannot just free the conn
+ * structure, since the higher layer service could
+ * still have a reference to it.
+ * The refcount avoids freeing until all references
+ * have been given up, although the connection is close()d at the
+ * first call to unref.
+ */
+ int connref;
+ pthread_mutex_t connlock;
+ int connstate;
+
+ /* Memory pool for rpcsvc_request_t */
+ struct mem_pool *rxpool;
+
+ /* The request which hasn't yet been handed to the RPC program because
+ * this request is being treated as a vectored request and so needs
+ * some more data to be read from the network.
+ */
+ /* rpcsvc_request_t *vectoredreq; */
+ rpcsvc_listener_t *listener;
+};
+
+#define RPCSVC_MAX_AUTH_BYTES 400
+typedef struct rpcsvc_auth_data {
+ int flavour;
+ int datalen;
+ char authdata[RPCSVC_MAX_AUTH_BYTES];
+} rpcsvc_auth_data_t;
+
+#define rpcsvc_auth_flavour(au) ((au).flavour)
+
+/* The container for the RPC call handed up to an actor.
+ * Dynamically allocated. Lives till the call reply is completely
+ * transmitted.
+ * */
+struct rpcsvc_request {
+ /* connection over which this request came. */
+ rpcsvc_conn_t *conn;
+
+ rpcsvc_program_t *prog;
+
+ /* The identifier for the call from client.
+ * Needed to pair the reply with the call.
+ */
+ uint32_t xid;
+
+ int prognum;
+
+ int progver;
+
+ int procnum;
+
+ int type;
+
+ /* Uid and gid filled by the rpc-auth module during the authentication
+ * phase.
+ */
+ uid_t uid;
+ gid_t gid;
+ pid_t pid;
+
+ uint64_t lk_owner;
+ uint64_t gfs_id;
+
+ /* Might want to move this to AUTH_UNIX-specific state since this
+ * array is not available for every authentication scheme.
+ */
+ gid_t auxgids[NGRPS];
+ int auxgidcount;
+
+
+ /* The RPC message payload, contains the data required
+ * by the program actors. This is the buffer that will need to
+ * be de-xdred by the actor.
+ */
+ struct iovec msg[2];
+
+ /* The full message buffer allocated to store the RPC headers.
+ * This buffer is ref'd when allocated by the RPC svc and unref'd after
+ * the buffer is handed to the actor. That means if the actor or any
+ * higher layer wants to keep this buffer around, they too must ref it
+ * right after entering the program actor.
+ */
+ struct iobuf *recordiob;
+
+ /* iobuf to hold the payload of calls like write. Storing large
+ * payloads starting from page-aligned addresses improves performance
+ * when accessing the payload.
+ */
+ struct iobuf *vectorediob;
+
+
+ /* Status of the RPC call, whether it was accepted or denied. */
+ int rpc_status;
+
+ /* In case the call was denied, the RPC error is stored here
+ * till the reply is sent.
+ */
+ int rpc_err;
+
+ /* In case the failure happened because of an authentication problem,
+ * this value needs to be assigned the correct auth error number.
+ */
+ int auth_err;
+
+ /* There can be cases of RPC requests where the reply needs to
+ * be built from multiple sources. For example, even the NFS reply can
+ * contain a payload, as in the NFSv3 read reply. Here the RPC header,
+ * NFS header and the read data are brought together from
+ * different buffers, so we need to stage the buffers temporarily here
+ * before all of them get added to the connection's transmission list.
+ */
+ struct list_head txlist;
+
+ /* While the reply record is being built, this variable keeps track
+ * of how many bytes have been added to the record.
+ */
+ size_t payloadsize;
+
+ /* The credentials extracted from the rpc request */
+ rpcsvc_auth_data_t cred;
+
+ /* The verifier extracted from the rpc request. In request-side
+ * processing this contains the verifier sent by the client; in
+ * reply-side processing, it is filled with the verifier that will be
+ * sent to the client.
+ */
+ rpcsvc_auth_data_t verf;
+
+ /* Container for a RPC program wanting to store a temp
+ * request-specific item.
+ */
+ void *private;
+
+ /* Container for transport to store request-specific item */
+ void *trans_private;
+};
+
+#define rpcsvc_request_program(req) ((rpcsvc_program_t *)((req)->prog))
+#define rpcsvc_request_program_private(req) (((rpcsvc_program_t *)((req)->prog))->private)
+#define rpcsvc_request_conn(req) (req)->conn
+#define rpcsvc_request_accepted(req) ((req)->rpc_status == MSG_ACCEPTED)
+#define rpcsvc_request_accepted_success(req) ((req)->rpc_err == SUCCESS)
+#define rpcsvc_request_uid(req) ((req)->uid)
+#define rpcsvc_request_gid(req) ((req)->gid)
+#define rpcsvc_conn_rpcsvc(conn) ((conn)->svc)
+#define rpcsvc_request_service(req) (rpcsvc_conn_rpcsvc(rpcsvc_request_conn(req)))
+#define rpcsvc_request_prog_minauth(req) (rpcsvc_request_program(req)->min_auth)
+#define rpcsvc_request_cred_flavour(req) (rpcsvc_auth_flavour(req->cred))
+#define rpcsvc_request_verf_flavour(req) (rpcsvc_auth_flavour(req->verf))
+
+#define rpcsvc_request_private(req) ((req)->private)
+#define rpcsvc_request_xid(req) ((req)->xid)
+#define rpcsvc_request_set_private(req,prv) (req)->private = (void *)(prv)
+#define rpcsvc_request_record_iob(rq) ((rq)->recordiob)
+#define rpcsvc_request_record_ref(req) (iobuf_ref ((req)->recordiob))
+#define rpcsvc_request_record_unref(req) (iobuf_unref ((req)->recordiob))
+
+
+#define RPCSVC_ACTOR_SUCCESS 0
+#define RPCSVC_ACTOR_ERROR (-1)
+
+/* Functor for every type of protocol actor
+ * must be defined like this.
+ *
+ * See the request structure for info on how to handle the request
+ * in the program actor.
+ *
+ * On successful sanity checks inside the actor, it should return
+ * RPCSVC_ACTOR_SUCCESS.
+ * On an error for which the RPC layer is expected to return a reply,
+ * the actor should return RPCSVC_ACTOR_ERROR.
+ *
+ */
+typedef int (*rpcsvc_actor) (rpcsvc_request_t *req);
+typedef int (*rpcsvc_vector_actor) (rpcsvc_request_t *req, struct iobuf *iob);
+typedef int (*rpcsvc_vector_sizer) (rpcsvc_request_t *req, ssize_t *readsize,
+ int *newiob);
+
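A minimal sketch of a conforming actor, assuming a hypothetical NULL-style procedure that carries no arguments; it only illustrates the contract spelled out above together with the rpcsvc_submit_generic() call declared later in this header. Whether an empty program header is acceptable to rpcsvc_submit_generic() is an assumption here; the point is the success/error return convention.

static int
hypothetical_null_actor (rpcsvc_request_t *req)
{
        int ret = RPCSVC_ACTOR_ERROR;

        if (!req)
                goto out;

        /* req->msg[] holds the XDR-encoded arguments; a real actor would
         * de-xdr them here before acting on the request. A NULL procedure
         * has nothing to decode, so just send an empty reply.
         */
        if (rpcsvc_submit_generic (req, NULL, 0, NULL, 0, NULL) == 0)
                ret = RPCSVC_ACTOR_SUCCESS;

out:
        return ret;
}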
+/* Every protocol actor will also need to specify the function the RPC layer
+ * will use to serialize or encode the message into XDR format just before
+ * transmitting on the connection.
+ */
+typedef void *(*rpcsvc_encode_reply) (void *msg);
+
+/* Once the reply has been transmitted, the message will have to be
+ * de-allocated, so every actor will need to provide a function that
+ * deallocates the message it had allocated as a response.
+ */
+typedef void (*rpcsvc_deallocate_reply) (void *msg);
+
+
+#define RPCSVC_NAME_MAX 32
+/* The descriptor for each procedure/actor that runs
+ * over the RPC service.
+ */
+typedef struct rpcsvc_actor_desc {
+ char procname[RPCSVC_NAME_MAX];
+ int procnum;
+ rpcsvc_actor actor;
+
+ /* Handler for cases where the RPC request's fragments are large enough
+ * to benefit from being decoded into aligned memory addresses. While
+ * decoding the request in a non-vectored manner, due to the nature of
+ * the XDR scheme, RPC cannot guarantee memory-aligned addresses for
+ * the resulting message-specific structures. This specialized handler
+ * lets the RPC program read the data from the network directly into
+ * its aligned buffers.
+ */
+ rpcsvc_vector_actor vector_actor;
+ rpcsvc_vector_sizer vector_sizer;
+
+} rpcsvc_actor_t;
+
+/* Describes a program and its version along with the function pointers
+ * required to handle the procedures/actors of each program/version.
+ * Never changed ever by any thread so no need for a lock.
+ */
+struct rpcsvc_program {
+ char progname[RPCSVC_NAME_MAX];
+ int prognum;
+ int progver;
+ /* FIXME */
+ dict_t *options; /* An opaque dictionary
+ * populated by the program
+ * (probably from xl->options)
+ * which contains enough
+ * information for transport to
+ * initialize. As a part of
+ * cleanup, the members of
+ * options which are of interest
+ * to transport should be put
+ * into a structure for better
+ * readability and structure
+ * should replace options member
+ * here.
+ */
+ uint16_t progport; /* Registered with portmap */
+#if 0
+ int progaddrfamily; /* AF_INET or AF_INET6 */
+ char *proghost; /* Bind host, can be NULL */
+#endif
+ rpcsvc_actor_t *actors; /* All procedure handlers */
+ int numactors; /* Num actors in actor array */
+ int proghighvers; /* Highest ver for program
+ supported by the system. */
+ int proglowvers; /* Lowest ver */
+
+ /* Program specific state handed to actors */
+ void *private;
+
+
+ /* This upcall is provided by the program during registration.
+ * It is used to notify the program about events such as a connection
+ * being destroyed. The rpc program may take appropriate actions; for
+ * example, when a connection is destroyed, it should clean up the
+ * state it stored in that connection.
+ */
+ rpcsvc_notify_t notify;
+
+ /* An integer that identifies the minimum auth strength required
+ * by this protocol; for example, MOUNT3 needs at least AUTH_UNIX.
+ * See RFC 1813, Section 5.2.1.
+ */
+ int min_auth;
+
+ /* list member to link to list of registered services with rpcsvc */
+ struct list_head program;
+};
+
+
+/* All users of RPC services should use this API to register their
+ * procedure handlers.
+ */
+extern int
+rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t program);
+
+extern int
+rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t program);
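A hedged sketch of how a program might describe itself for registration; the program number, version and the actor table (reusing the hypothetical_null_actor sketched earlier) are illustrative placeholders, not part of this patch:

static rpcsvc_actor_t hypothetical_actors[] = {
        /* procname, procnum, actor, vector_actor, vector_sizer */
        {"NULL", 0, hypothetical_null_actor, NULL, NULL},
};

static rpcsvc_program_t hypothetical_prog = {
        .progname  = "hypothetical",
        .prognum   = 123451,            /* placeholder program number */
        .progver   = 1,
        .actors    = hypothetical_actors,
        .numactors = 1,
        .min_auth  = 0,                 /* AUTH_NULL is enough */
};

/* after svc = rpcsvc_init (ctx, options): */
/* ret = rpcsvc_program_register (svc, hypothetical_prog); */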
+
+/* This will create and add a listener to the listener pool. Programs can
+ * use any of the listeners in this pool. A single listener can be used by
+ * multiple programs and vice versa. There can also be a one to one mapping
+ * between a program and a listener. After registering a program with rpcsvc,
+ * the program has to be associated with a listener using
+ * rpcsvc_program_register_portmap.
+ */
+/* FIXME: can multiple programs be registered on the same port? */
+extern rpcsvc_listener_t *
+rpcsvc_create_listener (rpcsvc_t *svc, dict_t *options, char *name);
+
+extern int
+rpcsvc_program_register_portmap (rpcsvc_program_t *newprog,
+ rpcsvc_conn_t *conn);
+
+/* Inits the global RPC service data structures.
+ * Called in main.
+ */
+extern rpcsvc_t *
+rpcsvc_init (glusterfs_ctx_t *ctx, dict_t *options);
+
+int
+rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata);
+
+/* unregister a notification callback @notify with data @mydata from svc.
+ * returns the number of notification callbacks unregistered.
+ */
+int
+rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata);
+
+int
+rpcsvc_submit_message (rpcsvc_request_t *req, struct iovec *proghdr,
+ int hdrcount, struct iovec *payload, int payloadcount,
+ struct iobref *iobref);
+
+int
+rpcsvc_submit_generic (rpcsvc_request_t *req, struct iovec *proghdr,
+ int hdrcount, struct iovec *payload, int payloadcount,
+ struct iobref *iobref);
+
+extern int
+rpcsvc_error_reply (rpcsvc_request_t *req);
+
+#define RPCSVC_PEER_STRLEN 1024
+#define RPCSVC_AUTH_ACCEPT 1
+#define RPCSVC_AUTH_REJECT 2
+#define RPCSVC_AUTH_DONTCARE 3
+
+extern int
+rpcsvc_conn_peername (rpcsvc_conn_t *conn, char *hostname, int hostlen);
+
+extern int
+rpcsvc_conn_peeraddr (rpcsvc_conn_t *conn, char *addrstr, int addrlen,
+ struct sockaddr *returnsa, socklen_t sasize);
+
+extern int
+rpcsvc_conn_peer_check (dict_t *options, char *volname, rpcsvc_conn_t *conn);
+
+extern int
+rpcsvc_conn_privport_check (rpcsvc_t *svc, char *volname, rpcsvc_conn_t *conn);
+#define rpcsvc_request_seterr(req, err) (req)->rpc_err = err
+#define rpcsvc_request_set_autherr(req, err) (req)->auth_err = err
+
+extern int rpcsvc_submit_vectors (rpcsvc_request_t *req);
+
+extern int rpcsvc_request_attach_vector (rpcsvc_request_t *req,
+ struct iovec msgvec, struct iobuf *iob,
+ struct iobref *ioref, int finalvector);
+
+
+typedef int (*auth_init_conn) (rpcsvc_conn_t *conn, void *priv);
+typedef int (*auth_init_request) (rpcsvc_request_t *req, void *priv);
+typedef int (*auth_request_authenticate) (rpcsvc_request_t *req, void *priv);
+
+/* This structure needs to be registered by every authentication scheme.
+ * Our authentication schemes are stored per connection because
+ * each connection can end up using a different authentication scheme.
+ */
+typedef struct rpcsvc_auth_ops {
+ auth_init_conn conn_init;
+ auth_init_request request_init;
+ auth_request_authenticate authenticate;
+} rpcsvc_auth_ops_t;
+
+typedef struct rpcsvc_auth_flavour_desc {
+ char authname[RPCSVC_NAME_MAX];
+ int authnum;
+ rpcsvc_auth_ops_t *authops;
+ void *authprivate;
+} rpcsvc_auth_t;
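As a rough sketch, an authentication scheme fills these two structures from its init routine; the null-style handlers below are hypothetical and assume that authenticate() reports its verdict with the RPCSVC_AUTH_* codes defined further down in this header:

static int
hypothetical_null_request_init (rpcsvc_request_t *req, void *priv)
{
        return 0;                       /* nothing to extract for a null scheme */
}

static int
hypothetical_null_authenticate (rpcsvc_request_t *req, void *priv)
{
        return RPCSVC_AUTH_ACCEPT;      /* accept unconditionally */
}

static rpcsvc_auth_ops_t hypothetical_null_ops = {
        .conn_init    = NULL,
        .request_init = hypothetical_null_request_init,
        .authenticate = hypothetical_null_authenticate,
};

static rpcsvc_auth_t hypothetical_null_auth = {
        .authname = "AUTH_NULL",
        .authnum  = 0,
        .authops  = &hypothetical_null_ops,
};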
+
+typedef void * (*rpcsvc_auth_initer_t) (rpcsvc_t *svc, dict_t *options);
+
+struct rpcsvc_auth_list {
+ struct list_head authlist;
+ rpcsvc_auth_initer_t init;
+ /* Should be the name with which we identify the auth scheme given
+ * in the volfile options.
+ * This should be different from the authname in rpcsvc_auth_t,
+ * in a way that makes it easier to specify this scheme in the volfile.
+ * This is because the technical names of the schemes can be a bit
+ * arcane.
+ */
+ char name[RPCSVC_NAME_MAX];
+ rpcsvc_auth_t *auth;
+ int enable;
+};
+
+extern int
+rpcsvc_auth_request_init (rpcsvc_request_t *req);
+
+extern int
+rpcsvc_auth_init (rpcsvc_t *svc, dict_t *options);
+
+extern int
+rpcsvc_auth_conn_init (rpcsvc_conn_t *xprt);
+
+extern int
+rpcsvc_authenticate (rpcsvc_request_t *req);
+
+extern int
+rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
+
+/* If the request has been sent using AUTH_UNIX, this function returns the
+ * auxiliary gids as an array; otherwise, it returns NULL.
+ * Move to an auth-unix-specific source file when we need to modularize the
+ * authentication code even further to support more auth schemes.
+ */
+extern gid_t *
+rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen);
+
+extern int
+rpcsvc_combine_gen_spec_volume_checks (int gen, int spec);
+
+extern char *
+rpcsvc_volume_allowed (dict_t *options, char *volname);
+#endif
diff --git a/xlators/protocol/rpc/rpc-lib/src/xdr-common.h b/xlators/protocol/rpc/rpc-lib/src/xdr-common.h
new file mode 100644
index 00000000000..50a57ade932
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/xdr-common.h
@@ -0,0 +1,48 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _XDR_COMMON_H_
+#define _XDR_COMMON_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <rpc/rpc.h>
+#define XDR_BYTES_PER_UNIT 4
+
+/* Returns the address of the byte that follows the
+ * last byte used for decoding the previous xdr component.
+ * For example, once the RPC call for NFS has been decoded, this macro returns
+ * the address from which the NFS header starts.
+ */
+#define xdr_decoded_remaining_addr(xdr) ((&xdr)->x_private)
+
+/* Returns the length of the remaining record after the previous decode
+ * operation completed.
+ */
+#define xdr_decoded_remaining_len(xdr) ((&xdr)->x_handy)
+
+/* Returns the number of bytes used by the last encode operation. */
+#define xdr_encoded_length(xdr) (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base))
+
+#define xdr_decoded_length(xdr) (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base))
+
+#endif
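A brief, hedged illustration of how the two decode-side macros are meant to be used together; it condenses what xdr_to_rpc_call() in xdr-rpc.c below does, and assumes the caller has already pointed the cred/verf oa_base fields of the call at scratch buffers, as that function does:

static int
example_split_header_and_payload (char *msgbuf, size_t msglen,
                                  struct rpc_msg *call,
                                  struct iovec *payload)
{
        XDR xdr;

        xdrmem_create (&xdr, msgbuf, msglen, XDR_DECODE);
        if (!xdr_callmsg (&xdr, call))
                return -1;

        /* whatever the header decode did not consume is the
         * program-specific payload, e.g. the NFS header.
         */
        payload->iov_base = xdr_decoded_remaining_addr (xdr);
        payload->iov_len  = xdr_decoded_remaining_len (xdr);

        return 0;
}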
diff --git a/xlators/protocol/rpc/rpc-lib/src/xdr-rpc.c b/xlators/protocol/rpc/rpc-lib/src/xdr-rpc.c
new file mode 100644
index 00000000000..1bcd9bbaa4c
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/xdr-rpc.c
@@ -0,0 +1,189 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <arpa/inet.h>
+#include <rpc/xdr.h>
+#include <sys/uio.h>
+#include <rpc/auth_unix.h>
+
+#include "mem-pool.h"
+#include "xdr-rpc.h"
+#include "xdr-common.h"
+#include "logging.h"
+
+/* Decodes the XDR format in msgbuf into rpc_msg.
+ * The remaining payload is returned in payload.
+ */
+int
+xdr_to_rpc_call (char *msgbuf, size_t len, struct rpc_msg *call,
+ struct iovec *payload, char *credbytes, char *verfbytes)
+{
+ XDR xdr;
+ char opaquebytes[MAX_AUTH_BYTES];
+ struct opaque_auth *oa = NULL;
+
+ if ((!msgbuf) || (!call))
+ return -1;
+
+ memset (call, 0, sizeof (*call));
+
+ oa = &call->rm_call.cb_cred;
+ if (!credbytes)
+ oa->oa_base = opaquebytes;
+ else
+ oa->oa_base = credbytes;
+
+ oa = &call->rm_call.cb_verf;
+ if (!verfbytes)
+ oa->oa_base = opaquebytes;
+ else
+ oa->oa_base = verfbytes;
+
+ xdrmem_create (&xdr, msgbuf, len, XDR_DECODE);
+ if (!xdr_callmsg (&xdr, call))
+ return -1;
+
+ if (payload) {
+ payload->iov_base = xdr_decoded_remaining_addr (xdr);
+ payload->iov_len = xdr_decoded_remaining_len (xdr);
+ }
+
+ return 0;
+}
+
+
+bool_t
+true_func (XDR *s, caddr_t *a)
+{
+ return TRUE;
+}
+
+
+int
+rpc_fill_empty_reply (struct rpc_msg *reply, uint32_t xid)
+{
+ if (!reply)
+ return -1;
+
+ /* Setting to 0 also results in reply verifier flavor to be
+ * set to AUTH_NULL which is what we want right now.
+ */
+ memset (reply, 0, sizeof (*reply));
+ reply->rm_xid = xid;
+ reply->rm_direction = REPLY;
+
+ return 0;
+}
+
+int
+rpc_fill_denied_reply (struct rpc_msg *reply, int rjstat, int auth_err)
+{
+ if (!reply)
+ return -1;
+
+ reply->rm_reply.rp_stat = MSG_DENIED;
+ reply->rjcted_rply.rj_stat = rjstat;
+ if (rjstat == RPC_MISMATCH) {
+ /* No problem with hardcoding
+ * RPC version numbers. We only support
+ * v2 anyway.
+ */
+ reply->rjcted_rply.rj_vers.low = 2;
+ reply->rjcted_rply.rj_vers.high = 2;
+ } else if (rjstat == AUTH_ERROR)
+ reply->rjcted_rply.rj_why = auth_err;
+
+ return 0;
+}
+
+
+int
+rpc_fill_accepted_reply (struct rpc_msg *reply, int arstat, int proglow,
+ int proghigh, int verf, int len, char *vdata)
+{
+ if (!reply)
+ return -1;
+
+ reply->rm_reply.rp_stat = MSG_ACCEPTED;
+ reply->acpted_rply.ar_stat = arstat;
+
+ reply->acpted_rply.ar_verf.oa_flavor = verf;
+ reply->acpted_rply.ar_verf.oa_length = len;
+ reply->acpted_rply.ar_verf.oa_base = vdata;
+ if (arstat == PROG_MISMATCH) {
+ reply->acpted_rply.ar_vers.low = proglow;
+ reply->acpted_rply.ar_vers.high = proghigh;
+ } else if (arstat == SUCCESS) {
+
+ /* This is a hack. I'd really like to build a custom
+ * XDR library because the Sun RPC interface is not very flexible.
+ */
+ reply->acpted_rply.ar_results.proc = (xdrproc_t)true_func;
+ reply->acpted_rply.ar_results.where = NULL;
+ }
+
+ return 0;
+}
+
+int
+rpc_reply_to_xdr (struct rpc_msg *reply, char *dest, size_t len,
+ struct iovec *dst)
+{
+ XDR xdr;
+
+ if ((!dest) || (!reply) || (!dst))
+ return -1;
+
+ xdrmem_create (&xdr, dest, len, XDR_ENCODE);
+ if (!xdr_replymsg(&xdr, reply))
+ return -1;
+
+ dst->iov_base = dest;
+ dst->iov_len = xdr_encoded_length (xdr);
+
+ return 0;
+}
+
+
+int
+xdr_to_auth_unix_cred (char *msgbuf, int msglen, struct authunix_parms *au,
+ char *machname, gid_t *gids)
+{
+ XDR xdr;
+
+ if ((!msgbuf) || (!machname) || (!gids) || (!au))
+ return -1;
+
+ au->aup_machname = machname;
+ au->aup_gids = gids;
+
+ xdrmem_create (&xdr, msgbuf, msglen, XDR_DECODE);
+
+ if (!xdr_authunix_parms (&xdr, au))
+ return -1;
+
+ return 0;
+}
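A small, hedged example of how these helpers could be combined on the server side to push back an authentication failure; the caller-provided scratch buffer and the AUTH_REJECTEDCRED code are illustrative, and only the headers already included at the top of this file are assumed:

static int
example_send_auth_denied (uint32_t xid, char *scratch, size_t scratchlen,
                          struct iovec *out)
{
        struct rpc_msg reply;

        if (rpc_fill_empty_reply (&reply, xid) != 0)
                return -1;

        if (rpc_fill_denied_reply (&reply, AUTH_ERROR, AUTH_REJECTEDCRED) != 0)
                return -1;

        /* serialize into the scratch space; out then describes the
         * encoded reply, ready to be written to the transport.
         */
        return rpc_reply_to_xdr (&reply, scratch, scratchlen, out);
}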
diff --git a/xlators/protocol/rpc/rpc-lib/src/xdr-rpc.h b/xlators/protocol/rpc/rpc-lib/src/xdr-rpc.h
new file mode 100644
index 00000000000..4c0ee69b138
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/xdr-rpc.h
@@ -0,0 +1,74 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _XDR_RPC_H
+#define _XDR_RPC_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <arpa/inet.h>
+#include <rpc/xdr.h>
+#include <sys/uio.h>
+
+/* Converts a given network buffer from its XDR format to a structure
+ * that contains everything an RPC call needs to work.
+ */
+extern int
+xdr_to_rpc_call (char *msgbuf, size_t len, struct rpc_msg *call,
+ struct iovec *payload, char *credbytes, char *verfbytes);
+
+extern int
+rpc_fill_empty_reply (struct rpc_msg *reply, uint32_t xid);
+
+extern int
+rpc_fill_denied_reply (struct rpc_msg *reply, int rjstat, int auth_err);
+
+extern int
+rpc_fill_accepted_reply (struct rpc_msg *reply, int arstat, int proglow,
+ int proghigh, int verf, int len, char *vdata);
+extern int
+rpc_reply_to_xdr (struct rpc_msg *reply, char *dest, size_t len,
+ struct iovec *dst);
+
+extern int
+xdr_to_auth_unix_cred (char *msgbuf, int msglen, struct authunix_parms *au,
+ char *machname, gid_t *gids);
+/* Macros that simplify accessing the members of an RPC call structure. */
+#define rpc_call_xid(call) ((call)->rm_xid)
+#define rpc_call_direction(call) ((call)->rm_direction)
+#define rpc_call_rpcvers(call) ((call)->ru.RM_cmb.cb_rpcvers)
+#define rpc_call_program(call) ((call)->ru.RM_cmb.cb_prog)
+#define rpc_call_progver(call) ((call)->ru.RM_cmb.cb_vers)
+#define rpc_call_progproc(call) ((call)->ru.RM_cmb.cb_proc)
+#define rpc_opaque_auth_flavour(oa) ((oa)->oa_flavor)
+#define rpc_opaque_auth_len(oa) ((oa)->oa_length)
+
+#define rpc_call_cred_flavour(call) (rpc_opaque_auth_flavour ((&(call)->ru.RM_cmb.cb_cred)))
+#define rpc_call_cred_len(call) (rpc_opaque_auth_len ((&(call)->ru.RM_cmb.cb_cred)))
+
+
+#define rpc_call_verf_flavour(call) (rpc_opaque_auth_flavour ((&(call)->ru.RM_cmb.cb_verf)))
+#define rpc_call_verf_len(call) (rpc_opaque_auth_len ((&(call)->ru.RM_cmb.cb_verf)))
+
+#endif
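A short, hedged sketch of the kind of validation these accessors enable once xdr_to_rpc_call() has filled in the structure:

static int
example_validate_call (struct rpc_msg *call)
{
        if (rpc_call_direction (call) != CALL)
                return -1;              /* not a request */

        if (rpc_call_rpcvers (call) != 2)
                return -1;              /* only RPCv2 is handled */

        if (rpc_call_cred_len (call) > MAX_AUTH_BYTES)
                return -1;              /* oversized credential */

        return 0;
}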
diff --git a/xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.c b/xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.c
new file mode 100644
index 00000000000..98676ae61ab
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.c
@@ -0,0 +1,131 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <arpa/inet.h>
+#include <rpc/xdr.h>
+#include <sys/uio.h>
+#include <rpc/auth_unix.h>
+#include <errno.h>
+
+#include "mem-pool.h"
+#include "xdr-rpc.h"
+#include "xdr-common.h"
+#include "logging.h"
+
+/* Decodes the XDR format in msgbuf into rpc_msg.
+ * The remaining payload is returned in payload.
+ */
+int
+xdr_to_rpc_reply (char *msgbuf, size_t len, struct rpc_msg *reply,
+ struct iovec *payload, char *verfbytes)
+{
+ XDR xdr;
+ int ret = -1;
+
+ if ((!msgbuf) || (!reply)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memset (reply, 0, sizeof (struct rpc_msg));
+
+ reply->acpted_rply.ar_verf = _null_auth;
+ reply->acpted_rply.ar_results.where = NULL;
+ reply->acpted_rply.ar_results.proc = (xdrproc_t)(xdr_void);
+
+ xdrmem_create (&xdr, msgbuf, len, XDR_DECODE);
+ if (!xdr_replymsg (&xdr, reply)) {
+ ret = -errno;
+ goto out;
+ }
+ if (payload) {
+ payload->iov_base = xdr_decoded_remaining_addr (xdr);
+ payload->iov_len = xdr_decoded_remaining_len (xdr);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+#if 0
+bool_t
+true_func (XDR *s, caddr_t *a)
+{
+ return TRUE;
+}
+#endif
+
+int
+rpc_request_to_xdr (struct rpc_msg *request, char *dest, size_t len,
+ struct iovec *dst)
+{
+ XDR xdr;
+ int ret = -1;
+
+ if ((!dest) || (!request) || (!dst)) {
+ goto out;
+ }
+
+ xdrmem_create (&xdr, dest, len, XDR_ENCODE);
+ if (!xdr_callmsg (&xdr, request)) {
+ goto out;
+ }
+
+ dst->iov_base = dest;
+ dst->iov_len = xdr_encoded_length (xdr);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+int
+auth_unix_cred_to_xdr (struct authunix_parms *au, char *dest, size_t len,
+ struct iovec *iov)
+{
+ XDR xdr;
+ int ret = -1;
+
+ if (!au || !dest || !iov) {
+ goto out;
+ }
+
+ xdrmem_create (&xdr, dest, len, XDR_ENCODE);
+
+ if (!xdr_authunix_parms (&xdr, au)) {
+ goto out;
+ }
+
+ iov->iov_base = dest;
+ iov->iov_len = xdr_encoded_length (xdr);
+
+ ret = 0;
+out:
+ return ret;
+}
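For the client side, a hedged sketch of pairing rpc_request_to_xdr() with a caller-filled rpc_msg; the program and procedure numbers are placeholders, the null credentials come from _null_auth in the system RPC headers, and <string.h> is assumed to be available for memset():

static int
example_encode_call_header (uint32_t xid, char *dest, size_t len,
                            struct iovec *out)
{
        struct rpc_msg call;

        memset (&call, 0, sizeof (call));
        call.rm_xid             = xid;
        call.rm_direction       = CALL;
        call.rm_call.cb_rpcvers = 2;
        call.rm_call.cb_prog    = 123451;       /* placeholder */
        call.rm_call.cb_vers    = 1;
        call.rm_call.cb_proc    = 0;
        call.rm_call.cb_cred    = _null_auth;
        call.rm_call.cb_verf    = _null_auth;

        return rpc_request_to_xdr (&call, dest, len, out);
}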
diff --git a/xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.h b/xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.h
new file mode 100644
index 00000000000..37c3046d330
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-lib/src/xdr-rpcclnt.h
@@ -0,0 +1,51 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _XDR_RPCCLNT_H
+#define _XDR_RPCCLNT_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <arpa/inet.h>
+#include <rpc/xdr.h>
+#include <sys/uio.h>
+#include <rpc/rpc_msg.h>
+#include <rpc/auth_unix.h>
+
+/* Macros that simplify accessing the members of an RPC reply structure. */
+#define rpc_reply_xid(reply) ((reply)->rm_xid)
+#define rpc_reply_status(reply) ((reply)->ru.RM_rmb.rp_stat)
+#define rpc_accepted_reply_status(reply) ((reply)->acpted_rply.ar_stat)
+#define rpc_reply_verf_flavour(reply) ((reply)->acpted_rply.ar_verf.oa_flavor)
+
+int xdr_to_rpc_reply (char *msgbuf, size_t len, struct rpc_msg *reply,
+ struct iovec *payload, char *verfbytes);
+int
+rpc_request_to_xdr (struct rpc_msg *request, char *dest, size_t len,
+ struct iovec *dst);
+int
+auth_unix_cred_to_xdr (struct authunix_parms *au, char *dest, size_t len,
+ struct iovec *iov);
+
+#endif
diff --git a/xlators/protocol/rpc/rpc-transport/Makefile.am b/xlators/protocol/rpc/rpc-transport/Makefile.am
new file mode 100644
index 00000000000..7dd9f026cfc
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = socket
diff --git a/xlators/protocol/rpc/rpc-transport/socket/Makefile.am b/xlators/protocol/rpc/rpc-transport/socket/Makefile.am
new file mode 100644
index 00000000000..f963effea22
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/socket/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = src
\ No newline at end of file
diff --git a/xlators/protocol/rpc/rpc-transport/socket/src/Makefile.am b/xlators/protocol/rpc/rpc-transport/socket/src/Makefile.am
new file mode 100644
index 00000000000..325a58bb05d
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/socket/src/Makefile.am
@@ -0,0 +1,15 @@
+noinst_HEADERS = socket.h name.h
+
+rpctransport_LTLIBRARIES = socket.la
+rpctransportdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport
+
+socket_la_LDFLAGS = -module -avoidversion
+
+socket_la_SOURCES = socket.c name.c
+socket_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
+ -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/xlators/protocol/rpc/rpc-lib/src/ \
+ -I$(top_srcdir)/xlators/protocol/lib/src/ -shared -nostartfiles $(GF_CFLAGS)
+
+CLEANFILES = *~
diff --git a/xlators/protocol/rpc/rpc-transport/socket/src/name.c b/xlators/protocol/rpc/rpc-transport/socket/src/name.c
new file mode 100644
index 00000000000..d8fc7d42277
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/socket/src/name.c
@@ -0,0 +1,737 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <errno.h>
+#include <netdb.h>
+#include <string.h>
+
+#ifdef CLIENT_PORT_CEILING
+#undef CLIENT_PORT_CEILING
+#endif
+
+#define CLIENT_PORT_CEILING 1024
+
+#ifndef AF_INET_SDP
+#define AF_INET_SDP 27
+#endif
+
+#include "rpc-transport.h"
+#include "socket.h"
+
+int32_t
+gf_resolve_ip6 (const char *hostname,
+ uint16_t port,
+ int family,
+ void **dnscache,
+ struct addrinfo **addr_info);
+
+static int32_t
+af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr,
+ socklen_t sockaddr_len, int ceiling)
+{
+ int32_t ret = -1;
+ /* struct sockaddr_in sin = {0, }; */
+ uint16_t port = ceiling - 1;
+
+ while (port)
+ {
+ switch (sockaddr->sa_family)
+ {
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons (port);
+ break;
+
+ case AF_INET_SDP:
+ case AF_INET:
+ ((struct sockaddr_in *)sockaddr)->sin_port = htons (port);
+ break;
+ }
+
+ ret = bind (fd, sockaddr, sockaddr_len);
+
+ if (ret == 0)
+ break;
+
+ if (ret == -1 && errno == EACCES)
+ break;
+
+ port--;
+ }
+
+ return ret;
+}
+
+static int32_t
+af_unix_client_bind (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t sockaddr_len,
+ int sock)
+{
+ data_t *path_data = NULL;
+ struct sockaddr_un *addr = NULL;
+ int32_t ret = 0;
+
+ path_data = dict_get (this->options, "rpc-transport.socket.bind-path");
+ if (path_data) {
+ char *path = data_to_str (path_data);
+ if (!path || strlen (path) > UNIX_PATH_MAX) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "bind-path invalid for unix socket "
+ "(null or longer than UNIX_PATH_MAX), "
+ "letting connect to assign default value");
+ goto err;
+ }
+
+ addr = (struct sockaddr_un *) sockaddr;
+ strcpy (addr->sun_path, path);
+ ret = bind (sock, (struct sockaddr *)addr, sockaddr_len);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "cannot bind to unix-domain socket %d (%s)",
+ sock, strerror (errno));
+ goto err;
+ }
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "bind-path not specified for unix socket, "
+ "letting connect to assign default value");
+ }
+
+err:
+ return ret;
+}
+
+int32_t
+client_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family)
+{
+ data_t *address_family_data = NULL;
+ int32_t ret = -1;
+
+ if (sa_family == NULL) {
+ goto out;
+ }
+
+ address_family_data = dict_get (this->options,
+ "rpc-transport.address-family");
+ if (!address_family_data) {
+ data_t *remote_host_data = NULL, *connect_path_data = NULL;
+ remote_host_data = dict_get (this->options, "remote-host");
+ connect_path_data = dict_get (this->options,
+ "rpc-transport.socket.connect-path");
+
+ if (!(remote_host_data || connect_path_data) ||
+ (remote_host_data && connect_path_data)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "rpc-transport.address-family not specified and "
+ "not able to determine the "
+ "same from other options (remote-host:%s and "
+ "rpc-transport.unix.connect-path:%s)",
+ data_to_str (remote_host_data),
+ data_to_str (connect_path_data));
+ goto out;
+ }
+
+ if (remote_host_data) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "address-family not specified, guessing it "
+ "to be inet/inet6");
+ *sa_family = AF_UNSPEC;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "address-family not specified, guessing it "
+ "to be unix");
+ *sa_family = AF_UNIX;
+ }
+
+ } else {
+ char *address_family = data_to_str (address_family_data);
+ if (!strcasecmp (address_family, "unix")) {
+ *sa_family = AF_UNIX;
+ } else if (!strcasecmp (address_family, "inet")) {
+ *sa_family = AF_INET;
+ } else if (!strcasecmp (address_family, "inet6")) {
+ *sa_family = AF_INET6;
+ } else if (!strcasecmp (address_family, "inet-sdp")) {
+ *sa_family = AF_INET_SDP;
+ } else if (!strcasecmp (address_family, "inet/inet6")
+ || !strcasecmp (address_family, "inet6/inet")) {
+ *sa_family = AF_UNSPEC;
+ } else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "unknown address-family (%s) specified",
+ address_family);
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int32_t
+af_inet_client_get_remote_sockaddr (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len)
+{
+ dict_t *options = this->options;
+ data_t *remote_host_data = NULL;
+ data_t *remote_port_data = NULL;
+ char *remote_host = NULL;
+ uint16_t remote_port = 0;
+ struct addrinfo *addr_info = NULL;
+ int32_t ret = 0;
+
+ remote_host_data = dict_get (options, "remote-host");
+ if (remote_host_data == NULL)
+ {
+ gf_log (this->name, GF_LOG_ERROR,
+ "option remote-host missing in volume %s", this->name);
+ ret = -1;
+ goto err;
+ }
+
+ remote_host = data_to_str (remote_host_data);
+ if (remote_host == NULL)
+ {
+ gf_log (this->name, GF_LOG_ERROR,
+ "option remote-host has data NULL in volume %s", this->name);
+ ret = -1;
+ goto err;
+ }
+
+ remote_port_data = dict_get (options, "remote-port");
+ if (remote_port_data == NULL)
+ {
+ gf_log (this->name, GF_LOG_TRACE,
+ "option remote-port missing in volume %s. Defaulting to %d",
+ this->name, GF_DEFAULT_SOCKET_LISTEN_PORT);
+
+ remote_port = GF_DEFAULT_SOCKET_LISTEN_PORT;
+ }
+ else
+ {
+ remote_port = data_to_uint16 (remote_port_data);
+ }
+
+ if (remote_port == (uint16_t)-1)
+ {
+ gf_log (this->name, GF_LOG_ERROR,
+ "option remote-port has invalid port in volume %s",
+ this->name);
+ ret = -1;
+ goto err;
+ }
+
+ /* TODO: gf_resolve is a blocking call. kick in some
+ non blocking dns techniques */
+ ret = gf_resolve_ip6 (remote_host, remote_port,
+ sockaddr->sa_family, &this->dnscache, &addr_info);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "DNS resolution failed on host %s", remote_host);
+ goto err;
+ }
+
+ memcpy (sockaddr, addr_info->ai_addr, addr_info->ai_addrlen);
+ *sockaddr_len = addr_info->ai_addrlen;
+
+err:
+ return ret;
+}
+
+static int32_t
+af_unix_client_get_remote_sockaddr (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len)
+{
+ struct sockaddr_un *sockaddr_un = NULL;
+ char *connect_path = NULL;
+ data_t *connect_path_data = NULL;
+ int32_t ret = 0;
+
+ connect_path_data = dict_get (this->options,
+ "rpc-transport.socket.connect-path");
+ if (!connect_path_data) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "option rpc-transport.socket.connect-path not specified for "
+ "address-family unix");
+ ret = -1;
+ goto err;
+ }
+
+ connect_path = data_to_str (connect_path_data);
+ if (!connect_path) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "rpc-transport.socket.connect-path is a null string");
+ ret = -1;
+ goto err;
+ }
+
+ if (strlen (connect_path) > UNIX_PATH_MAX) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "connect-path value length %"GF_PRI_SIZET" > %d octets",
+ strlen (connect_path), UNIX_PATH_MAX);
+ ret = -1;
+ goto err;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "using connect-path %s", connect_path);
+ sockaddr_un = (struct sockaddr_un *)sockaddr;
+ strcpy (sockaddr_un->sun_path, connect_path);
+ *sockaddr_len = sizeof (struct sockaddr_un);
+
+err:
+ return ret;
+}
+
+static int32_t
+af_unix_server_get_local_sockaddr (rpc_transport_t *this,
+ struct sockaddr *addr,
+ socklen_t *addr_len)
+{
+ data_t *listen_path_data = NULL;
+ char *listen_path = NULL;
+ int32_t ret = 0;
+ struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
+
+
+ listen_path_data = dict_get (this->options,
+ "rpc-transport.socket.listen-path");
+ if (!listen_path_data) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "missing option rpc-transport.socket.listen-path");
+ ret = -1;
+ goto err;
+ }
+
+ listen_path = data_to_str (listen_path_data);
+
+#ifndef UNIX_PATH_MAX
+#define UNIX_PATH_MAX 108
+#endif
+
+ if (strlen (listen_path) > UNIX_PATH_MAX) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "option rpc-transport.socket.listen-path has value length "
+ "%"GF_PRI_SIZET" > %d",
+ strlen (listen_path), UNIX_PATH_MAX);
+ ret = -1;
+ goto err;
+ }
+
+ sunaddr->sun_family = AF_UNIX;
+ strcpy (sunaddr->sun_path, listen_path);
+ *addr_len = sizeof (struct sockaddr_un);
+
+err:
+ return ret;
+}
+
+static int32_t
+af_inet_server_get_local_sockaddr (rpc_transport_t *this,
+ struct sockaddr *addr,
+ socklen_t *addr_len)
+{
+ struct addrinfo hints, *res = 0;
+ data_t *listen_port_data = NULL, *listen_host_data = NULL;
+ uint16_t listen_port = -1;
+ char service[NI_MAXSERV], *listen_host = NULL;
+ dict_t *options = NULL;
+ int32_t ret = 0;
+
+ options = this->options;
+
+ listen_port_data = dict_get (options, "rpc-transport.socket.listen-port");
+ listen_host_data = dict_get (options, "rpc-transport.socket.bind-address");
+
+ if (listen_port_data)
+ {
+ listen_port = data_to_uint16 (listen_port_data);
+ }
+
+ if (listen_port == (uint16_t) -1)
+ listen_port = GF_DEFAULT_SOCKET_LISTEN_PORT;
+
+
+ if (listen_host_data)
+ {
+ listen_host = data_to_str (listen_host_data);
+ } else {
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *in = (struct sockaddr_in6 *) addr;
+ in->sin6_addr = in6addr_any;
+ in->sin6_port = htons(listen_port);
+ *addr_len = sizeof(struct sockaddr_in6);
+ goto out;
+ } else if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *in = (struct sockaddr_in *) addr;
+ in->sin_addr.s_addr = htonl(INADDR_ANY);
+ in->sin_port = htons(listen_port);
+ *addr_len = sizeof(struct sockaddr_in);
+ goto out;
+ }
+ }
+
+ memset (service, 0, sizeof (service));
+ sprintf (service, "%d", listen_port);
+
+ memset (&hints, 0, sizeof (hints));
+ hints.ai_family = addr->sa_family;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_ADDRCONFIG | AI_PASSIVE;
+
+ ret = getaddrinfo(listen_host, service, &hints, &res);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "getaddrinfo failed for host %s, service %s (%s)",
+ listen_host, service, gai_strerror (ret));
+ ret = -1;
+ goto out;
+ }
+
+ memcpy (addr, res->ai_addr, res->ai_addrlen);
+ *addr_len = res->ai_addrlen;
+
+ freeaddrinfo (res);
+
+out:
+ return ret;
+}
+
+int32_t
+client_bind (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len,
+ int sock)
+{
+ int ret = 0;
+
+ *sockaddr_len = sizeof (struct sockaddr_in6);
+ switch (sockaddr->sa_family)
+ {
+ case AF_INET_SDP:
+ case AF_INET:
+ *sockaddr_len = sizeof (struct sockaddr_in);
+ /* fall through: the actual bind is common to inet and inet6 */
+
+ case AF_INET6:
+ ret = af_inet_bind_to_port_lt_ceiling (sock, sockaddr,
+ *sockaddr_len, CLIENT_PORT_CEILING);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "cannot bind inet socket (%d) to port less than %d (%s)",
+ sock, CLIENT_PORT_CEILING, strerror (errno));
+ ret = 0;
+ }
+ break;
+
+ case AF_UNIX:
+ *sockaddr_len = sizeof (struct sockaddr_un);
+ ret = af_unix_client_bind (this, (struct sockaddr *)sockaddr,
+ *sockaddr_len, sock);
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_ERROR,
+ "unknown address family %d", sockaddr->sa_family);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+int32_t
+socket_client_get_remote_sockaddr (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len,
+ sa_family_t *sa_family)
+{
+ int32_t ret = 0;
+
+ if ((sockaddr == NULL) || (sockaddr_len == NULL)
+ || (sa_family == NULL)) {
+ ret = -1;
+ goto err;
+ }
+
+
+ ret = client_fill_address_family (this, &sockaddr->sa_family);
+ if (ret) {
+ ret = -1;
+ goto err;
+ }
+
+ *sa_family = sockaddr->sa_family;
+
+ switch (sockaddr->sa_family)
+ {
+ case AF_INET_SDP:
+ sockaddr->sa_family = AF_INET;
+ /* fall through: SDP shares the inet address resolution path */
+
+ case AF_INET:
+ case AF_INET6:
+ case AF_UNSPEC:
+ ret = af_inet_client_get_remote_sockaddr (this, sockaddr,
+ sockaddr_len);
+ break;
+
+ case AF_UNIX:
+ ret = af_unix_client_get_remote_sockaddr (this, sockaddr,
+ sockaddr_len);
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_ERROR,
+ "unknown address-family %d", sockaddr->sa_family);
+ ret = -1;
+ }
+
+ if (*sa_family == AF_UNSPEC) {
+ *sa_family = sockaddr->sa_family;
+ }
+
+err:
+ return ret;
+}
+
+
+int32_t
+server_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family)
+{
+ data_t *address_family_data = NULL;
+ int32_t ret = -1;
+
+ if (sa_family == NULL) {
+ goto out;
+ }
+
+ address_family_data = dict_get (this->options,
+ "rpc-transport.address-family");
+ if (address_family_data) {
+ char *address_family = NULL;
+ address_family = data_to_str (address_family_data);
+
+ if (!strcasecmp (address_family, "inet")) {
+ *sa_family = AF_INET;
+ } else if (!strcasecmp (address_family, "inet6")) {
+ *sa_family = AF_INET6;
+ } else if (!strcasecmp (address_family, "inet-sdp")) {
+ *sa_family = AF_INET_SDP;
+ } else if (!strcasecmp (address_family, "unix")) {
+ *sa_family = AF_UNIX;
+ } else if (!strcasecmp (address_family, "inet/inet6")
+ || !strcasecmp (address_family, "inet6/inet")) {
+ *sa_family = AF_UNSPEC;
+ } else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "unknown address family (%s) specified", address_family);
+ goto out;
+ }
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "option address-family not specified, defaulting to inet/inet6");
+ *sa_family = AF_UNSPEC;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int32_t
+socket_server_get_local_sockaddr (rpc_transport_t *this, struct sockaddr *addr,
+ socklen_t *addr_len, sa_family_t *sa_family)
+{
+ int32_t ret = -1;
+
+ if ((addr == NULL) || (addr_len == NULL) || (sa_family == NULL)) {
+ goto err;
+ }
+
+ ret = server_fill_address_family (this, &addr->sa_family);
+ if (ret == -1) {
+ goto err;
+ }
+
+ *sa_family = addr->sa_family;
+
+ switch (addr->sa_family)
+ {
+ case AF_INET_SDP:
+ addr->sa_family = AF_INET;
+ /* fall through: SDP listens on an inet-style address */
+
+ case AF_INET:
+ case AF_INET6:
+ case AF_UNSPEC:
+ ret = af_inet_server_get_local_sockaddr (this, addr, addr_len);
+ break;
+
+ case AF_UNIX:
+ ret = af_unix_server_get_local_sockaddr (this, addr, addr_len);
+ break;
+ }
+
+ if (*sa_family == AF_UNSPEC) {
+ *sa_family = addr->sa_family;
+ }
+
+err:
+ return ret;
+}
+
+int32_t
+fill_inet6_inet_identifiers (rpc_transport_t *this, struct sockaddr_storage *addr,
+ int32_t addr_len, char *identifier)
+{
+ int32_t ret = 0, tmpaddr_len = 0;
+ char service[NI_MAXSERV], host[NI_MAXHOST];
+ struct sockaddr_storage tmpaddr;
+
+ memset (&tmpaddr, 0, sizeof (tmpaddr));
+ tmpaddr = *addr;
+ tmpaddr_len = addr_len;
+
+ if (((struct sockaddr *) &tmpaddr)->sa_family == AF_INET6) {
+ int32_t one_to_four, four_to_eight, twelve_to_sixteen;
+ int16_t eight_to_ten, ten_to_twelve;
+
+ one_to_four = four_to_eight = twelve_to_sixteen = 0;
+ eight_to_ten = ten_to_twelve = 0;
+
+ one_to_four = ((struct sockaddr_in6 *) &tmpaddr)->sin6_addr.s6_addr32[0];
+ four_to_eight = ((struct sockaddr_in6 *) &tmpaddr)->sin6_addr.s6_addr32[1];
+#ifdef GF_SOLARIS_HOST_OS
+ eight_to_ten = S6_ADDR16(((struct sockaddr_in6 *) &tmpaddr)->sin6_addr)[4];
+#else
+ eight_to_ten = ((struct sockaddr_in6 *) &tmpaddr)->sin6_addr.s6_addr16[4];
+#endif
+
+#ifdef GF_SOLARIS_HOST_OS
+ ten_to_twelve = S6_ADDR16(((struct sockaddr_in6 *) &tmpaddr)->sin6_addr)[5];
+#else
+ ten_to_twelve = ((struct sockaddr_in6 *) &tmpaddr)->sin6_addr.s6_addr16[5];
+#endif
+
+ twelve_to_sixteen = ((struct sockaddr_in6 *) &tmpaddr)->sin6_addr.s6_addr32[3];
+
+ /* ipv4 mapped ipv6 address has
+ bits 0-80: 0
+ bits 80-96: 0xffff
+ bits 96-128: ipv4 address
+ */
+
+ if (one_to_four == 0 &&
+ four_to_eight == 0 &&
+ eight_to_ten == 0 &&
+ ten_to_twelve == -1) {
+ struct sockaddr_in *in_ptr = (struct sockaddr_in *)&tmpaddr;
+ memset (&tmpaddr, 0, sizeof (tmpaddr));
+
+ in_ptr->sin_family = AF_INET;
+ in_ptr->sin_port = ((struct sockaddr_in6 *)addr)->sin6_port;
+ in_ptr->sin_addr.s_addr = twelve_to_sixteen;
+ tmpaddr_len = sizeof (*in_ptr);
+ }
+ }
+
+ ret = getnameinfo ((struct sockaddr *) &tmpaddr,
+ tmpaddr_len,
+ host, sizeof (host),
+ service, sizeof (service),
+ NI_NUMERICHOST | NI_NUMERICSERV);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "getnameinfo failed (%s)", gai_strerror (ret));
+ /* host and service are not filled in on failure, so do not
+ * build an identifier from them.
+ */
+ return ret;
+ }
+
+ sprintf (identifier, "%s:%s", host, service);
+
+ return ret;
+}
+
+int32_t
+get_transport_identifiers (rpc_transport_t *this)
+{
+ int32_t ret = 0;
+ char is_inet_sdp = 0;
+
+ switch (((struct sockaddr *) &this->myinfo.sockaddr)->sa_family)
+ {
+ case AF_INET_SDP:
+ is_inet_sdp = 1;
+ ((struct sockaddr *) &this->peerinfo.sockaddr)->sa_family = ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = AF_INET;
+
+ case AF_INET:
+ case AF_INET6:
+ {
+ ret = fill_inet6_inet_identifiers (this,
+ &this->myinfo.sockaddr,
+ this->myinfo.sockaddr_len,
+ this->myinfo.identifier);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "cannot fill inet/inet6 identifier for server");
+ goto err;
+ }
+
+ ret = fill_inet6_inet_identifiers (this,
+ &this->peerinfo.sockaddr,
+ this->peerinfo.sockaddr_len,
+ this->peerinfo.identifier);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "cannot fill inet/inet6 identifier for client");
+ goto err;
+ }
+
+ if (is_inet_sdp) {
+ ((struct sockaddr *) &this->peerinfo.sockaddr)->sa_family = ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = AF_INET_SDP;
+ }
+ }
+ break;
+
+ case AF_UNIX:
+ {
+ struct sockaddr_un *sunaddr = NULL;
+
+ sunaddr = (struct sockaddr_un *) &this->myinfo.sockaddr;
+ strcpy (this->myinfo.identifier, sunaddr->sun_path);
+
+ sunaddr = (struct sockaddr_un *) &this->peerinfo.sockaddr;
+ strcpy (this->peerinfo.identifier, sunaddr->sun_path);
+ }
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_ERROR,
+ "unknown address family (%d)",
+ ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family);
+ ret = -1;
+ break;
+ }
+
+err:
+ return ret;
+}
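The mapped-address handling in fill_inet6_inet_identifiers() can be checked in isolation; below is a minimal standalone sketch that uses the standard IN6_IS_ADDR_V4MAPPED macro from <netinet/in.h> instead of the hand-rolled word comparison above:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int
main (void)
{
        struct in6_addr a6;
        struct in_addr  a4;
        char            buf[INET_ADDRSTRLEN];

        /* ::ffff:192.0.2.7 is the IPv4-mapped form of 192.0.2.7 */
        inet_pton (AF_INET6, "::ffff:192.0.2.7", &a6);

        if (IN6_IS_ADDR_V4MAPPED (&a6)) {
                /* the last four bytes carry the embedded IPv4 address */
                memcpy (&a4, &a6.s6_addr[12], sizeof (a4));
                printf ("%s\n",
                        inet_ntop (AF_INET, &a4, buf, sizeof (buf)));
        }

        return 0;
}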
diff --git a/xlators/protocol/rpc/rpc-transport/socket/src/name.h b/xlators/protocol/rpc/rpc-transport/socket/src/name.h
new file mode 100644
index 00000000000..6a89d383b65
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/socket/src/name.h
@@ -0,0 +1,44 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _SOCKET_NAME_H
+#define _SOCKET_NAME_H
+
+#include "compat.h"
+
+int32_t
+client_bind (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len,
+ int sock);
+
+int32_t
+socket_client_get_remote_sockaddr (rpc_transport_t *this,
+ struct sockaddr *sockaddr,
+ socklen_t *sockaddr_len,
+ sa_family_t *sa_family);
+
+int32_t
+socket_server_get_local_sockaddr (rpc_transport_t *this, struct sockaddr *addr,
+ socklen_t *addr_len, sa_family_t *sa_family);
+
+int32_t
+get_transport_identifiers (rpc_transport_t *this);
+
+#endif /* _SOCKET_NAME_H */
diff --git a/xlators/protocol/rpc/rpc-transport/socket/src/socket.c b/xlators/protocol/rpc/rpc-transport/socket/src/socket.c
new file mode 100644
index 00000000000..01bc84039dc
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/socket/src/socket.c
@@ -0,0 +1,2307 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "socket.h"
+#include "name.h"
+#include "dict.h"
+#include "rpc-transport.h"
+#include "logging.h"
+#include "xlator.h"
+#include "byte-order.h"
+#include "common-utils.h"
+#include "compat-errno.h"
+#include "protocol-common.h"
+
+#include "glusterfs-xdr.h"
+#include <fcntl.h>
+#include <errno.h>
+#include <netinet/tcp.h>
+
+#define GF_LOG_ERRNO(errno) ((errno == ENOTCONN) ? GF_LOG_DEBUG : GF_LOG_ERROR)
+#define SA(ptr) ((struct sockaddr *)ptr)
+
+#define __socket_proto_reset_pending(priv) do { \
+ memset (&priv->incoming.frag.vector, 0, \
+ sizeof (priv->incoming.frag.vector)); \
+ priv->incoming.frag.pending_vector = \
+ &priv->incoming.frag.vector; \
+ priv->incoming.frag.pending_vector->iov_base = \
+ priv->incoming.frag.fragcurrent; \
+ priv->incoming.pending_vector = \
+ priv->incoming.frag.pending_vector; \
+ } while (0);
+
+
+#define __socket_proto_update_pending(priv) \
+ do { \
+ uint32_t remaining_fragsize = 0; \
+ if (priv->incoming.frag.pending_vector->iov_len == 0) { \
+ remaining_fragsize = RPC_FRAGSIZE (priv->incoming.fraghdr) \
+ - priv->incoming.frag.bytes_read; \
+ \
+ priv->incoming.frag.pending_vector->iov_len = \
+ remaining_fragsize > priv->incoming.frag.remaining_size \
+ ? priv->incoming.frag.remaining_size : remaining_fragsize; \
+ \
+ priv->incoming.frag.remaining_size -= \
+ priv->incoming.frag.pending_vector->iov_len; \
+ } \
+ } while (0);
+
+#define __socket_proto_update_priv_after_read(priv, ret, bytes_read) \
+ { \
+ priv->incoming.frag.fragcurrent += bytes_read; \
+ priv->incoming.frag.bytes_read += bytes_read; \
+ \
+ if ((ret > 0) || (priv->incoming.frag.remaining_size != 0)) { \
+ if (priv->incoming.frag.remaining_size != 0) { \
+ __socket_proto_reset_pending (priv); \
+ } \
+ \
+ gf_log (this->name, GF_LOG_TRACE, "partial read on non-blocking socket"); \
+ \
+ break; \
+ } \
+ }
+
+#define __socket_proto_init_pending(priv, size) \
+ do { \
+ uint32_t remaining_fragsize = 0; \
+ remaining_fragsize = RPC_FRAGSIZE (priv->incoming.fraghdr) \
+ - priv->incoming.frag.bytes_read; \
+ \
+ __socket_proto_reset_pending (priv); \
+ \
+ priv->incoming.frag.pending_vector->iov_len = \
+ remaining_fragsize > size ? size : remaining_fragsize; \
+ \
+ priv->incoming.frag.remaining_size = \
+ size - priv->incoming.frag.pending_vector->iov_len; \
+ \
+} while (0);
+
+
+/* This is used inside a switch case and breaks out of it if all of the
+ * pending data has not been read yet.
+ */
+#define __socket_proto_read(priv, ret) \
+ { \
+ size_t bytes_read = 0; \
+ \
+ __socket_proto_update_pending (priv); \
+ \
+ ret = __socket_readv (this, \
+ priv->incoming.pending_vector, 1, \
+ &priv->incoming.pending_vector, \
+ &priv->incoming.pending_count, \
+ &bytes_read); \
+ if (ret == -1) { \
+ gf_log (this->name, GF_LOG_TRACE, \
+ "reading from socket failed. Error (%s), " \
+ "peer (%s)", strerror (errno), \
+ this->peerinfo.identifier); \
+ break; \
+ } \
+ __socket_proto_update_priv_after_read (priv, ret, bytes_read); \
+ }
+
+
+int socket_init (rpc_transport_t *this);
+
+/*
+ * return value:
+ * 0 = success (completed)
+ * -1 = error
+ * > 0 = incomplete
+ */
+
+int
+__socket_rwv (rpc_transport_t *this, struct iovec *vector, int count,
+ struct iovec **pending_vector, int *pending_count, size_t *bytes,
+ int write)
+{
+ socket_private_t *priv = NULL;
+ int sock = -1;
+ int ret = -1;
+ struct iovec *opvector = NULL;
+ int opcount = 0;
+ int moved = 0;
+
+ priv = this->private;
+ sock = priv->sock;
+
+ opvector = vector;
+ opcount = count;
+
+ if (bytes != NULL) {
+ *bytes = 0;
+ }
+
+ while (opcount) {
+ if (write) {
+ ret = writev (sock, opvector, opcount);
+
+ if (ret == 0 || (ret == -1 && errno == EAGAIN)) {
+ /* done for now */
+ break;
+ }
+ } else {
+ ret = readv (sock, opvector, opcount);
+ if (ret == -1 && errno == EAGAIN) {
+ /* done for now */
+ break;
+ }
+ }
+
+ if (ret == 0) {
+ /* Mostly due to 'umount' in client */
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "EOF from peer %s", this->peerinfo.identifier);
+ opcount = -1;
+ errno = ENOTCONN;
+ break;
+ }
+ if (ret == -1) {
+ if (errno == EINTR)
+ continue;
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "%s failed (%s)", write ? "writev" : "readv",
+ strerror (errno));
+ opcount = -1;
+ break;
+ }
+
+ if (bytes != NULL) {
+ *bytes += ret;
+ }
+
+ moved = 0;
+
+ while (moved < ret) {
+ if ((ret - moved) >= opvector[0].iov_len) {
+ moved += opvector[0].iov_len;
+ opvector++;
+ opcount--;
+ } else {
+ opvector[0].iov_len -= (ret - moved);
+ opvector[0].iov_base += (ret - moved);
+ moved += (ret - moved);
+ }
+ while (opcount && !opvector[0].iov_len) {
+ opvector++;
+ opcount--;
+ }
+ }
+ }
+
+ if (pending_vector)
+ *pending_vector = opvector;
+
+ if (pending_count)
+ *pending_count = opcount;
+
+ return opcount;
+}
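A hedged sketch of how a caller is expected to interpret those return values when flushing a queued write; it mirrors the pattern that __socket_ioq_churn_entry() uses further down in this file, with the branch bodies left as comments:

static void
example_flush_entry (rpc_transport_t *this, struct ioq *entry)
{
        int ret = 0;

        ret = __socket_writev (this, entry->pending_vector,
                               entry->pending_count,
                               &entry->pending_vector,
                               &entry->pending_count);
        if (ret == 0) {
                /* completely written: the ioq entry can be freed */
        } else if (ret > 0) {
                /* partial write on a non-blocking socket: wait for POLLOUT */
        } else {
                /* ret == -1: transport error, tear the connection down */
        }
}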
+
+
+int
+__socket_readv (rpc_transport_t *this, struct iovec *vector, int count,
+ struct iovec **pending_vector, int *pending_count,
+ size_t *bytes)
+{
+ int ret = -1;
+
+ ret = __socket_rwv (this, vector, count,
+ pending_vector, pending_count, bytes, 0);
+
+ return ret;
+}
+
+
+int
+__socket_writev (rpc_transport_t *this, struct iovec *vector, int count,
+ struct iovec **pending_vector, int *pending_count)
+{
+ int ret = -1;
+
+ ret = __socket_rwv (this, vector, count,
+ pending_vector, pending_count, NULL, 1);
+
+ return ret;
+}
+
+
+int
+__socket_disconnect (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+ if (priv->sock != -1) {
+ ret = shutdown (priv->sock, SHUT_RDWR);
+ priv->connected = -1;
+ gf_log (this->name, GF_LOG_TRACE,
+ "shutdown() returned %d. set connection state to -1",
+ ret);
+ }
+
+ return ret;
+}
+
+
+int
+__socket_server_bind (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+ int opt = 1;
+
+ priv = this->private;
+
+ ret = setsockopt (priv->sock, SOL_SOCKET, SO_REUSEADDR,
+ &opt, sizeof (opt));
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setsockopt() for SO_REUSEADDR failed (%s)",
+ strerror (errno));
+ }
+
+ ret = bind (priv->sock, (struct sockaddr *)&this->myinfo.sockaddr,
+ this->myinfo.sockaddr_len);
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "binding to %s failed: %s",
+ this->myinfo.identifier, strerror (errno));
+ if (errno == EADDRINUSE) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Port is already in use");
+ }
+ }
+
+ return ret;
+}
+
+
+int
+__socket_nonblock (int fd)
+{
+ int flags = 0;
+ int ret = -1;
+
+ flags = fcntl (fd, F_GETFL);
+
+ if (flags != -1)
+ ret = fcntl (fd, F_SETFL, flags | O_NONBLOCK);
+
+ return ret;
+}
+
+
+int
+__socket_nodelay (int fd)
+{
+ int on = 1;
+ int ret = -1;
+
+ ret = setsockopt (fd, IPPROTO_TCP, TCP_NODELAY,
+ &on, sizeof (on));
+ if (!ret)
+ gf_log ("", GF_LOG_TRACE,
+ "NODELAY enabled for socket %d", fd);
+
+ return ret;
+}
+
+int
+__socket_connect_finish (int fd)
+{
+ int ret = -1;
+ int optval = 0;
+ socklen_t optlen = sizeof (int);
+
+ ret = getsockopt (fd, SOL_SOCKET, SO_ERROR, (void *)&optval, &optlen);
+
+ if (ret == 0 && optval) {
+ errno = optval;
+ ret = -1;
+ }
+
+ return ret;
+}
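+
+/* Note: __socket_connect_finish() is the standard completion check for a
+ * non-blocking connect(): the initial connect() returns -1 with errno set
+ * to EINPROGRESS, and once the socket is reported writable the outcome is
+ * fetched with getsockopt(SO_ERROR).  A non-zero SO_ERROR value is copied
+ * into errno so that socket_connect_finish() below can log and handle it
+ * exactly like a synchronous connect() failure. */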
+
+
+void
+__socket_reset (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+
+ priv = this->private;
+
+ /* TODO: use mem-pool on incoming data */
+
+ if (priv->incoming.iobuf) {
+ iobuf_unref (priv->incoming.iobuf);
+ }
+
+ if (priv->incoming.vectoriob) {
+ iobuf_unref (priv->incoming.vectoriob);
+ }
+
+ memset (&priv->incoming, 0, sizeof (priv->incoming));
+
+ event_unregister (this->ctx->event_pool, priv->sock, priv->idx);
+
+ close (priv->sock);
+ priv->sock = -1;
+ priv->idx = -1;
+ priv->connected = -1;
+}
+
+
+struct ioq *
+__socket_ioq_new (rpc_transport_t *this, rpc_transport_msg_t *msg)
+{
+ socket_private_t *priv = NULL;
+ struct ioq *entry = NULL;
+ int count = 0;
+
+ priv = this->private;
+
+ /* TODO: use mem-pool */
+ entry = GF_CALLOC (1, sizeof (*entry), 0);
+ if (!entry)
+ return NULL;
+
+ count = msg->rpchdrcount + msg->proghdrcount + msg->progpayloadcount;
+
+ assert (count <= MAX_IOVEC);
+
+ if (msg->rpchdr != NULL) {
+ memcpy (&entry->vector[0], msg->rpchdr,
+ sizeof (struct iovec) * msg->rpchdrcount);
+ entry->count += msg->rpchdrcount;
+ }
+
+ if (msg->proghdr != NULL) {
+ memcpy (&entry->vector[entry->count], msg->proghdr,
+ sizeof (struct iovec) * msg->proghdrcount);
+ entry->count += msg->proghdrcount;
+ }
+
+ if (msg->progpayload != NULL) {
+ memcpy (&entry->vector[entry->count], msg->progpayload,
+ sizeof (struct iovec) * msg->progpayloadcount);
+ entry->count += msg->progpayloadcount;
+ }
+
+ entry->pending_vector = entry->vector;
+ entry->pending_count = entry->count;
+
+ if (msg->iobref != NULL)
+ entry->iobref = iobref_ref (msg->iobref);
+
+ INIT_LIST_HEAD (&entry->list);
+
+ return entry;
+}
+
+
+void
+__socket_ioq_entry_free (struct ioq *entry)
+{
+ list_del_init (&entry->list);
+ if (entry->iobref)
+ iobref_unref (entry->iobref);
+
+ /* TODO: use mem-pool */
+ GF_FREE (entry);
+}
+
+
+void
+__socket_ioq_flush (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ struct ioq *entry = NULL;
+
+ priv = this->private;
+
+ while (!list_empty (&priv->ioq)) {
+ entry = priv->ioq_next;
+ __socket_ioq_entry_free (entry);
+ }
+
+ return;
+}
+
+
+int
+__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry)
+{
+ int ret = -1;
+
+ ret = __socket_writev (this, entry->pending_vector,
+ entry->pending_count,
+ &entry->pending_vector,
+ &entry->pending_count);
+
+ if (ret == 0) {
+ /* current entry was completely written */
+ assert (entry->pending_count == 0);
+ __socket_ioq_entry_free (entry);
+ }
+
+ return ret;
+}
+
+
+int
+__socket_ioq_churn (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = 0;
+ struct ioq *entry = NULL;
+
+ priv = this->private;
+
+ while (!list_empty (&priv->ioq)) {
+ /* pick next entry */
+ entry = priv->ioq_next;
+
+ ret = __socket_ioq_churn_entry (this, entry);
+
+ if (ret != 0)
+ break;
+ }
+
+ if (list_empty (&priv->ioq)) {
+ /* all pending writes done, not interested in POLLOUT */
+ priv->idx = event_select_on (this->ctx->event_pool,
+ priv->sock, priv->idx, -1, 0);
+ }
+
+ return ret;
+}
+
+
+int
+socket_event_poll_err (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ __socket_ioq_flush (this);
+ __socket_reset (this);
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this);
+
+ return ret;
+}
+
+
+int
+socket_event_poll_out (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->connected == 1) {
+ ret = __socket_ioq_churn (this);
+
+ if (ret == -1) {
+ __socket_disconnect (this);
+ }
+ }
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_SENT, NULL);
+
+ return ret;
+}
+
+
+inline int
+__socket_read_simple_msg (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = 0;
+ uint32_t remaining_size = 0;
+ size_t bytes_read = 0;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.simple_state) {
+
+ case SP_STATE_SIMPLE_MSG_INIT:
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ __socket_proto_init_pending (priv, remaining_size);
+
+ priv->incoming.frag.simple_state =
+ SP_STATE_READING_SIMPLE_MSG;
+
+ /* fall through */
+
+ case SP_STATE_READING_SIMPLE_MSG:
+ ret = 0;
+
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ if (remaining_size > 0) {
+ ret = __socket_readv (this,
+ priv->incoming.pending_vector, 1,
+ &priv->incoming.pending_vector,
+ &priv->incoming.pending_count,
+ &bytes_read);
+ }
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "reading from socket failed. Error (%s), "
+ "peer (%s)", strerror (errno),
+ this->peerinfo.identifier);
+ break;
+ }
+
+ priv->incoming.frag.bytes_read += bytes_read;
+ priv->incoming.frag.fragcurrent += bytes_read;
+
+ if (ret > 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "partial read on non-blocking socket.");
+ break;
+ }
+
+ if (ret == 0) {
+ priv->incoming.frag.simple_state
+ = SP_STATE_SIMPLE_MSG_INIT;
+ }
+ }
+
+ return ret;
+}
+
+
+inline int
+__socket_read_simple_request (rpc_transport_t *this)
+{
+ return __socket_read_simple_msg (this);
+}
+
+
+#define rpc_cred_addr(buf) (buf + RPC_MSGTYPE_SIZE + RPC_CALL_BODY_SIZE - 4)
+
+#define rpc_verf_addr(fragcurrent) (fragcurrent - 4)
+
+
+inline int
+__socket_read_vectored_request (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = 0;
+ uint32_t credlen = 0, verflen = 0;
+ char *addr = NULL;
+ struct iobuf *iobuf = NULL;
+ uint32_t remaining_size = 0;
+ uint32_t gluster_write_proc_len = 0;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.call_body.request.vector_state) {
+ case SP_STATE_VECTORED_REQUEST_INIT:
+ addr = rpc_cred_addr (iobuf_ptr (priv->incoming.iobuf));
+
+ /* also read verf flavour and verflen */
+ credlen = ntoh32 (*((uint32_t *)addr))
+ + RPC_AUTH_FLAVOUR_N_LENGTH_SIZE;
+
+ __socket_proto_init_pending (priv, credlen);
+
+ priv->incoming.frag.call_body.request.vector_state =
+ SP_STATE_READING_CREDBYTES;
+
+ /* fall through */
+
+ case SP_STATE_READING_CREDBYTES:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.call_body.request.vector_state =
+ SP_STATE_READ_CREDBYTES;
+
+ /* fall through */
+
+ case SP_STATE_READ_CREDBYTES:
+ addr = rpc_verf_addr (priv->incoming.frag.fragcurrent);
+
+ /* FIXME: Also handle procedures other than glusterfs-write
+ * here
+ */
+ /* also read proc-header */
+ gluster_write_proc_len = sizeof (gfs3_write_req);
+
+ verflen = ntoh32 (*((uint32_t *)addr))
+ + gluster_write_proc_len;
+
+ __socket_proto_init_pending (priv, verflen);
+
+ priv->incoming.frag.call_body.request.vector_state
+ = SP_STATE_READING_VERFBYTES;
+
+ /* fall through */
+
+ case SP_STATE_READING_VERFBYTES:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.call_body.request.vector_state =
+ SP_STATE_READ_VERFBYTES;
+
+ /* fall through */
+
+ case SP_STATE_READ_VERFBYTES:
+ if (priv->incoming.vectoriob == NULL) {
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "unable to allocate IO buffer "
+ "for peer %s",
+ this->peerinfo.identifier);
+ ret = -1;
+ break;
+ }
+
+ priv->incoming.vectoriob = iobuf;
+ priv->incoming.frag.fragcurrent = iobuf_ptr (iobuf);
+ }
+
+ priv->incoming.frag.call_body.request.vector_state =
+ SP_STATE_READING_PROG;
+
+ /* fall through */
+
+ case SP_STATE_READING_PROG:
+ /* now read the remaining rpc msg into buffer pointed by
+ * fragcurrent
+ */
+
+ ret = __socket_read_simple_msg (this);
+
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ if ((ret == -1)
+ || ((ret == 0)
+ && (remaining_size == 0)
+ && RPC_LASTFRAG (priv->incoming.fraghdr))) {
+ priv->incoming.frag.call_body.request.vector_state
+ = SP_STATE_VECTORED_REQUEST_INIT;
+ priv->incoming.vectoriob_size
+ = (unsigned long)priv->incoming.frag.fragcurrent
+ - (unsigned long)iobuf_ptr (priv->incoming.vectoriob);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+
+#define rpc_msgtype_addr(buf) (buf + 4)
+
+#define rpc_prognum_addr(buf) (buf + RPC_MSGTYPE_SIZE + 4)
+
+#define rpc_procnum_addr(buf) (buf + RPC_MSGTYPE_SIZE + 12)
+
+
+inline int
+__socket_read_request (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ uint32_t prognum = 0, procnum = 0;
+ uint32_t remaining_size = 0;
+ int ret = -1;
+ char *buf = NULL;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.call_body.request.header_state) {
+
+ case SP_STATE_REQUEST_HEADER_INIT:
+
+ __socket_proto_init_pending (priv, RPC_CALL_BODY_SIZE);
+
+ priv->incoming.frag.call_body.request.header_state
+ = SP_STATE_READING_RPCHDR1;
+
+ /* fall through */
+
+ case SP_STATE_READING_RPCHDR1:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.call_body.request.header_state =
+ SP_STATE_READ_RPCHDR1;
+
+ /* fall through */
+
+ case SP_STATE_READ_RPCHDR1:
+ buf = rpc_prognum_addr (iobuf_ptr (priv->incoming.iobuf));
+ prognum = ntoh32 (*((uint32_t *)buf));
+
+ buf = rpc_procnum_addr (iobuf_ptr (priv->incoming.iobuf));
+ procnum = ntoh32 (*((uint32_t *)buf));
+
+ if ((prognum == GLUSTER3_1_FOP_PROGRAM)
+ && (procnum == GF_FOP_WRITE)) {
+ ret = __socket_read_vectored_request (this);
+ } else {
+ ret = __socket_read_simple_request (this);
+ }
+
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ if ((ret == -1)
+ || ((ret == 0)
+ && (remaining_size == 0)
+ && (RPC_LASTFRAG (priv->incoming.fraghdr)))) {
+ priv->incoming.frag.call_body.request.header_state =
+ SP_STATE_REQUEST_HEADER_INIT;
+ }
+
+ break;
+ }
+
+ return ret;
+}
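+
+/* To summarize the request path (illustrative):
+ *
+ *   REQUEST_HEADER_INIT --> READING_RPCHDR1 --> READ_RPCHDR1
+ *     (read the fixed call body, then look at prognum/procnum)
+ *
+ *   (GLUSTER3_1_FOP_PROGRAM, GF_FOP_WRITE) -> __socket_read_vectored_request
+ *                                             (payload lands in a separate
+ *                                              vectoriob)
+ *   everything else                        -> __socket_read_simple_request
+ *                                             (everything stays in
+ *                                              incoming.iobuf)
+ */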
+
+
+inline int
+__socket_read_accepted_successful_reply (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = 0;
+ struct iobuf *iobuf = NULL;
+ uint32_t gluster_read_rsp_hdr_len = 0;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.call_body.reply.accepted_success_state) {
+
+ case SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT:
+ gluster_read_rsp_hdr_len = sizeof (gfs3_read_rsp);
+
+ __socket_proto_init_pending (priv, gluster_read_rsp_hdr_len);
+
+ priv->incoming.frag.call_body.reply.accepted_success_state
+ = SP_STATE_READING_PROC_HEADER;
+
+ /* fall through */
+
+ case SP_STATE_READING_PROC_HEADER:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.call_body.reply.accepted_success_state
+ = SP_STATE_READ_PROC_HEADER;
+
+ /* fall through */
+
+ case SP_STATE_READ_PROC_HEADER:
+ if (priv->incoming.vectoriob == NULL) {
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (iobuf == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ priv->incoming.vectoriob = iobuf;
+ }
+
+ priv->incoming.frag.fragcurrent
+ = iobuf_ptr (priv->incoming.vectoriob);
+
+ /* now read the entire remaining msg into new iobuf */
+ ret = __socket_read_simple_msg (this);
+ if ((ret == -1)
+ || ((ret == 0)
+ && RPC_LASTFRAG (priv->incoming.fraghdr))) {
+ priv->incoming.frag.call_body.reply.accepted_success_state
+ = SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT;
+ }
+
+ break;
+ }
+
+out:
+ return ret;
+}
+
+#define rpc_reply_verflen_addr(fragcurrent) ((char *)fragcurrent - 4)
+#define rpc_reply_accept_status_addr(fragcurrent) ((char *)fragcurrent - 4)
+
+inline int
+__socket_read_accepted_reply (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+ char *buf = NULL;
+ uint32_t verflen = 0, len = 0;
+ uint32_t remaining_size = 0;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.call_body.reply.accepted_state) {
+
+ case SP_STATE_ACCEPTED_REPLY_INIT:
+ __socket_proto_init_pending (priv,
+ RPC_AUTH_FLAVOUR_N_LENGTH_SIZE);
+
+ priv->incoming.frag.call_body.reply.accepted_state
+ = SP_STATE_READING_REPLY_VERFLEN;
+
+ /* fall through */
+
+ case SP_STATE_READING_REPLY_VERFLEN:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.call_body.reply.accepted_state
+ = SP_STATE_READ_REPLY_VERFLEN;
+
+ /* fall through */
+
+ case SP_STATE_READ_REPLY_VERFLEN:
+ buf = rpc_reply_verflen_addr (priv->incoming.frag.fragcurrent);
+
+ verflen = ntoh32 (*((uint32_t *) buf));
+
+ /* also read accept status along with verf data */
+ len = verflen + RPC_ACCEPT_STATUS_LEN;
+
+ __socket_proto_init_pending (priv, len);
+
+ priv->incoming.frag.call_body.reply.accepted_state
+ = SP_STATE_READING_REPLY_VERFBYTES;
+
+ /* fall through */
+
+ case SP_STATE_READING_REPLY_VERFBYTES:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.call_body.reply.accepted_state
+ = SP_STATE_READ_REPLY_VERFBYTES;
+
+ buf = rpc_reply_accept_status_addr (priv->incoming.frag.fragcurrent);
+
+ priv->incoming.frag.call_body.reply.accept_status
+ = ntoh32 (*(uint32_t *) buf);
+
+ /* fall through */
+
+ case SP_STATE_READ_REPLY_VERFBYTES:
+
+ if (priv->incoming.frag.call_body.reply.accept_status
+ == SUCCESS) {
+ ret = __socket_read_accepted_successful_reply (this);
+ } else {
+ /* read entire remaining msg into buffer pointed to by
+ * fragcurrent
+ */
+ ret = __socket_read_simple_msg (this);
+ }
+
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ if ((ret == -1)
+ || ((ret == 0)
+ && (remaining_size == 0)
+ && (RPC_LASTFRAG (priv->incoming.fraghdr)))) {
+ priv->incoming.frag.call_body.reply.accepted_state
+ = SP_STATE_ACCEPTED_REPLY_INIT;
+ }
+
+ break;
+ }
+
+ return ret;
+}
+
+
+inline int
+__socket_read_denied_reply (rpc_transport_t *this)
+{
+ return __socket_read_simple_msg (this);
+}
+
+
+#define rpc_reply_status_addr(fragcurrent) ((char *)fragcurrent - 4)
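+
+/* Shape of an RPC reply body (RFC 5531) as it is consumed below:
+ *
+ *   xid | msg-type (REPLY) | reply-status
+ *     reply-status == MSG_ACCEPTED: verf flavour | verf length | verf data |
+ *                                   accept-status | procedure results
+ *     reply-status == MSG_DENIED  : rejection details (read here as a
+ *                                   plain simple message)
+ *
+ * which is why rpc_reply_status_addr() (and the verflen/accept-status
+ * macros above) simply back up 4 bytes from the current read position. */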
+
+
+inline int
+__socket_read_vectored_reply (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = 0;
+ char *buf = NULL;
+ uint32_t remaining_size = 0;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.call_body.reply.status_state) {
+
+ case SP_STATE_VECTORED_REPLY_STATUS_INIT:
+ __socket_proto_init_pending (priv, RPC_REPLY_STATUS_SIZE);
+
+ priv->incoming.frag.call_body.reply.status_state
+ = SP_STATE_READING_REPLY_STATUS;
+
+ /* fall through */
+
+ case SP_STATE_READING_REPLY_STATUS:
+ __socket_proto_read (priv, ret);
+
+ buf = rpc_reply_status_addr (priv->incoming.frag.fragcurrent);
+
+ priv->incoming.frag.call_body.reply.accept_status
+ = ntoh32 (*((uint32_t *) buf));
+
+ priv->incoming.frag.call_body.reply.status_state
+ = SP_STATE_READ_REPLY_STATUS;
+
+ /* fall through */
+
+ case SP_STATE_READ_REPLY_STATUS:
+ if (priv->incoming.frag.call_body.reply.accept_status
+ == MSG_ACCEPTED) {
+ ret = __socket_read_accepted_reply (this);
+ } else {
+ ret = __socket_read_denied_reply (this);
+ }
+
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ if ((ret == -1)
+ || ((ret == 0)
+ && (remaining_size == 0)
+ && (RPC_LASTFRAG (priv->incoming.fraghdr)))) {
+ priv->incoming.frag.call_body.reply.status_state
+ = SP_STATE_VECTORED_REPLY_STATUS_INIT;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+
+inline int
+__socket_read_simple_reply (rpc_transport_t *this)
+{
+ return __socket_read_simple_msg (this);
+}
+
+#define rpc_xid_addr(buf) (buf)
+
+inline int
+__socket_read_reply (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ char *buf = NULL;
+ int32_t ret = -1;
+ rpc_request_info_t *request_info = NULL;
+
+ priv = this->private;
+
+ buf = rpc_xid_addr (iobuf_ptr (priv->incoming.iobuf));
+
+ request_info = GF_CALLOC (1, sizeof (*request_info), 0);
+ if (request_info == NULL) {
+ gf_log (this->name, GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ priv->incoming.request_info = request_info;
+
+ request_info->xid = ntoh32 (*((uint32_t *) buf));
+
+ /* release priv->lock, so as to avoid a deadlock between conn->lock
+ * and priv->lock, since we are doing an upcall here.
+ */
+ pthread_mutex_unlock (&priv->lock);
+ {
+ ret = rpc_transport_notify (this, RPC_TRANSPORT_MAP_XID_REQUEST,
+ priv->incoming.request_info);
+ }
+ pthread_mutex_lock (&priv->lock);
+
+ if (ret == -1) {
+ goto out;
+ }
+
+ if ((request_info->prognum == GLUSTER3_1_FOP_PROGRAM)
+ && (request_info->procnum == GF_FOP_READ)) {
+ if (request_info->rsp.rspbuf != NULL) {
+ priv->incoming.vectoriob
+ = iobuf_ref (request_info->rsp.rspbuf);
+ }
+
+ ret = __socket_read_vectored_reply (this);
+ } else {
+ ret = __socket_read_simple_reply (this);
+ }
+out:
+ return ret;
+}
+
+
+/* returns 0 once the fragment has been read completely, -1 on error,
+ * and a positive value while the read is still incomplete */
+inline int
+__socket_read_frag (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int32_t ret = 0;
+ char *buf = NULL;
+ uint32_t remaining_size = 0;
+
+ priv = this->private;
+
+ switch (priv->incoming.frag.state) {
+ case SP_STATE_RPCFRAG_INIT:
+ __socket_proto_init_pending (priv, RPC_MSGTYPE_SIZE);
+
+ priv->incoming.frag.state = SP_STATE_READING_MSGTYPE;
+
+ /* fall through */
+
+ case SP_STATE_READING_MSGTYPE:
+ __socket_proto_read (priv, ret);
+
+ priv->incoming.frag.state = SP_STATE_READ_MSGTYPE;
+ /* fall through */
+
+ case SP_STATE_READ_MSGTYPE:
+ buf = rpc_msgtype_addr (iobuf_ptr (priv->incoming.iobuf));
+ priv->incoming.msg_type = ntoh32 (*((uint32_t *)buf));
+
+ if (priv->incoming.msg_type == CALL) {
+ ret = __socket_read_request (this);
+ } else if (priv->incoming.msg_type == REPLY) {
+ ret = __socket_read_reply (this);
+ } else {
+ gf_log ("rpc", GF_LOG_ERROR,
+ "wrong MSG-TYPE (%d) received",
+ priv->incoming.msg_type);
+ ret = -1;
+ }
+
+ remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr)
+ - priv->incoming.frag.bytes_read;
+
+ if ((ret == -1)
+ || ((ret == 0)
+ && (remaining_size == 0)
+ && (RPC_LASTFRAG (priv->incoming.fraghdr)))) {
+ priv->incoming.frag.state = SP_STATE_RPCFRAG_INIT;
+ }
+
+ break;
+ }
+
+ return ret;
+}
+
+
+inline void
+__socket_reset_priv (socket_private_t *priv)
+{
+ if (priv->incoming.iobuf) {
+ iobuf_unref (priv->incoming.iobuf);
+ priv->incoming.iobuf = NULL;
+ }
+
+ if (priv->incoming.vectoriob) {
+ iobuf_unref (priv->incoming.vectoriob);
+ priv->incoming.vectoriob = NULL;
+ }
+}
+
+
+int
+__socket_proto_state_machine (rpc_transport_t *this,
+ rpc_transport_pollin_t **pollin)
+{
+ int ret = -1;
+ socket_private_t *priv = NULL;
+ struct iobuf *iobuf = NULL;
+
+ priv = this->private;
+ while (priv->incoming.record_state != SP_STATE_COMPLETE) {
+ switch (priv->incoming.record_state) {
+
+ case SP_STATE_NADA:
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "unable to allocate IO buffer "
+ "for peer %s",
+ this->peerinfo.identifier);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ priv->incoming.iobuf = iobuf;
+ priv->incoming.iobuf_size = 0;
+ priv->incoming.vectoriob_size = 0;
+
+ priv->incoming.pending_vector = priv->incoming.vector;
+ priv->incoming.pending_vector->iov_base =
+ &priv->incoming.fraghdr;
+
+ priv->incoming.frag.fragcurrent = iobuf_ptr (iobuf);
+ priv->incoming.pending_vector->iov_len =
+ sizeof (priv->incoming.fraghdr);
+
+ priv->incoming.record_state = SP_STATE_READING_FRAGHDR;
+
+ /* fall through */
+
+ case SP_STATE_READING_FRAGHDR:
+ ret = __socket_readv (this,
+ priv->incoming.pending_vector, 1,
+ &priv->incoming.pending_vector,
+ &priv->incoming.pending_count,
+ NULL);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "reading from socket failed. Error (%s), "
+ "peer (%s)", strerror (errno),
+ this->peerinfo.identifier);
+ goto out;
+ }
+
+ if (ret > 0) {
+ gf_log (this->name, GF_LOG_TRACE, "partial "
+ "fragment header read");
+ goto out;
+ }
+
+ if (ret == 0) {
+ priv->incoming.record_state =
+ SP_STATE_READ_FRAGHDR;
+ }
+ /* fall through */
+
+ case SP_STATE_READ_FRAGHDR:
+
+ priv->incoming.fraghdr = ntoh32 (priv->incoming.fraghdr);
+ priv->incoming.record_state = SP_STATE_READING_FRAG;
+ priv->incoming.total_bytes_read
+ += RPC_FRAGSIZE(priv->incoming.fraghdr);
+ /* fall through */
+
+ case SP_STATE_READING_FRAG:
+ ret = __socket_read_frag (this);
+
+ if ((ret == -1)
+ || (priv->incoming.frag.bytes_read !=
+ RPC_FRAGSIZE (priv->incoming.fraghdr))) {
+ goto out;
+ }
+
+ priv->incoming.frag.bytes_read = 0;
+
+ if (!RPC_LASTFRAG (priv->incoming.fraghdr)) {
+ priv->incoming.record_state =
+ SP_STATE_READING_FRAGHDR;
+ break;
+ }
+
+ /* we've read the entire rpc record, notify the
+ * upper layers.
+ */
+ if (pollin != NULL) {
+ priv->incoming.iobuf_size
+ = priv->incoming.total_bytes_read
+ - priv->incoming.vectoriob_size;
+
+ *pollin = rpc_transport_pollin_alloc (this,
+ priv->incoming.iobuf,
+ priv->incoming.iobuf_size,
+ priv->incoming.vectoriob,
+ priv->incoming.vectoriob_size,
+ priv->incoming.request_info);
+ if (*pollin == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ priv->incoming.request_info = NULL;
+ }
+ priv->incoming.record_state = SP_STATE_COMPLETE;
+ break;
+
+ case SP_STATE_COMPLETE:
+ /* control should not reach here */
+ gf_log (this->name, GF_LOG_DEBUG, "control reached to "
+ "SP_STATE_COMPLETE, which should not have "
+ "happened");
+ break;
+ }
+ }
+
+ if (priv->incoming.record_state == SP_STATE_COMPLETE) {
+ priv->incoming.record_state = SP_STATE_NADA;
+ __socket_reset_priv (priv);
+ }
+
+out:
+ if ((ret == -1) && (errno == EAGAIN)) {
+ ret = 0;
+ }
+ return ret;
+}
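+
+/* The outer loop above follows the RPC record-marking standard (RFC 5531):
+ * every fragment is preceded by a 4-byte header whose most significant bit
+ * flags the last fragment of a record and whose lower 31 bits carry the
+ * fragment length.  RPC_LASTFRAG() and RPC_FRAGSIZE() extract exactly these
+ * two pieces from priv->incoming.fraghdr after it has been converted to
+ * host byte order. */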
+
+
+int
+socket_proto_state_machine (rpc_transport_t *this,
+ rpc_transport_pollin_t **pollin)
+{
+ socket_private_t *priv = NULL;
+ int ret = 0;
+
+ priv = this->private;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ ret = __socket_proto_state_machine (this, pollin);
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+
+
+int
+socket_event_poll_in (rpc_transport_t *this)
+{
+ int ret = -1;
+ rpc_transport_pollin_t *pollin = NULL;
+
+ ret = socket_proto_state_machine (this, &pollin);
+
+ if (pollin != NULL) {
+ ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_RECEIVED,
+ pollin);
+
+ rpc_transport_pollin_destroy (pollin);
+ }
+
+ return ret;
+}
+
+
+int
+socket_connect_finish (rpc_transport_t *this)
+{
+ int ret = -1;
+ socket_private_t *priv = NULL;
+ rpc_transport_event_t event = 0;
+ char notify_rpc = 0;
+
+ priv = this->private;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->connected)
+ goto unlock;
+
+ ret = __socket_connect_finish (priv->sock);
+
+ if (ret == -1 && errno == EINPROGRESS)
+ ret = 1;
+
+ if (ret == -1 && errno != EINPROGRESS) {
+ if (!priv->connect_finish_log) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "connection to %s failed (%s)",
+ this->peerinfo.identifier,
+ strerror (errno));
+ priv->connect_finish_log = 1;
+ }
+ __socket_disconnect (this);
+ notify_rpc = 1;
+ event = RPC_TRANSPORT_DISCONNECT;
+ goto unlock;
+ }
+
+ if (ret == 0) {
+ notify_rpc = 1;
+
+ this->myinfo.sockaddr_len =
+ sizeof (this->myinfo.sockaddr);
+
+ ret = getsockname (priv->sock,
+ SA (&this->myinfo.sockaddr),
+ &this->myinfo.sockaddr_len);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "getsockname on (%d) failed (%s)",
+ priv->sock, strerror (errno));
+ __socket_disconnect (this);
+ event = GF_EVENT_POLLERR;
+ goto unlock;
+ }
+
+ priv->connected = 1;
+ priv->connect_finish_log = 0;
+ event = RPC_TRANSPORT_CONNECT;
+ get_transport_identifiers (this);
+ }
+ }
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+ if (notify_rpc) {
+ rpc_transport_notify (this, event, this);
+ }
+
+ return 0;
+}
+
+
+/* reads rpc_requests during pollin */
+int
+socket_event_handler (int fd, int idx, void *data,
+ int poll_in, int poll_out, int poll_err)
+{
+ rpc_transport_t *this = NULL;
+ socket_private_t *priv = NULL;
+ int ret = 0;
+
+ this = data;
+ priv = this->private;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ priv->idx = idx;
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ if (!priv->connected) {
+ ret = socket_connect_finish (this);
+ }
+
+ if (!ret && poll_out) {
+ ret = socket_event_poll_out (this);
+ }
+
+ if (!ret && poll_in) {
+ ret = socket_event_poll_in (this);
+ }
+
+ if ((ret < 0) || poll_err) {
+ gf_log ("transport", GF_LOG_TRACE, "disconnecting now");
+ socket_event_poll_err (this);
+ rpc_transport_unref (this);
+ }
+
+ return 0;
+}
+
+
+int
+socket_server_event_handler (int fd, int idx, void *data,
+ int poll_in, int poll_out, int poll_err)
+{
+ rpc_transport_t *this = NULL;
+ socket_private_t *priv = NULL;
+ int ret = 0;
+ int new_sock = -1;
+ rpc_transport_t *new_trans = NULL;
+ struct sockaddr_storage new_sockaddr = {0, };
+ socklen_t addrlen = sizeof (new_sockaddr);
+ socket_private_t *new_priv = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ this = data;
+ priv = this->private;
+ ctx = this->ctx;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ priv->idx = idx;
+
+ if (poll_in) {
+ new_sock = accept (priv->sock, SA (&new_sockaddr),
+ &addrlen);
+
+ if (new_sock == -1)
+ goto unlock;
+
+ if (!priv->bio) {
+ ret = __socket_nonblock (new_sock);
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "NBIO on %d failed (%s)",
+ new_sock, strerror (errno));
+
+ close (new_sock);
+ goto unlock;
+ }
+ }
+
+ if (priv->nodelay) {
+ ret = __socket_nodelay (new_sock);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setsockopt() failed for "
+ "NODELAY (%s)",
+ strerror (errno));
+ }
+ }
+
+ new_trans = GF_CALLOC (1, sizeof (*new_trans), 0);
+ if (!new_trans) {
+ close (new_sock);
+ goto unlock;
+ }
+ new_trans->fini = this->fini;
+ new_trans->name = gf_strdup (this->name);
+
+ memcpy (&new_trans->peerinfo.sockaddr, &new_sockaddr,
+ addrlen);
+ new_trans->peerinfo.sockaddr_len = addrlen;
+
+ new_trans->myinfo.sockaddr_len =
+ sizeof (new_trans->myinfo.sockaddr);
+
+ ret = getsockname (new_sock,
+ SA (&new_trans->myinfo.sockaddr),
+ &new_trans->myinfo.sockaddr_len);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "getsockname on %d failed (%s)",
+ new_sock, strerror (errno));
+ close (new_sock);
+ goto unlock;
+ }
+
+ get_transport_identifiers (new_trans);
+ socket_init (new_trans);
+ new_trans->ops = this->ops;
+ new_trans->init = this->init;
+ new_trans->fini = this->fini;
+ new_trans->ctx = ctx;
+ new_trans->mydata = this->mydata;
+ new_trans->notify = this->notify;
+ new_priv = new_trans->private;
+
+ pthread_mutex_lock (&new_priv->lock);
+ {
+ new_priv->sock = new_sock;
+ new_priv->connected = 1;
+ rpc_transport_ref (new_trans);
+
+ new_priv->idx =
+ event_register (ctx->event_pool,
+ new_sock,
+ socket_event_handler,
+ new_trans, 1, 0);
+
+ if (new_priv->idx == -1)
+ ret = -1;
+ }
+ pthread_mutex_unlock (&new_priv->lock);
+ ret = rpc_transport_notify (this, RPC_TRANSPORT_ACCEPT, new_trans);
+ }
+ }
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+
+
+int
+socket_disconnect (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ ret = __socket_disconnect (this);
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+
+
+int
+socket_connect (rpc_transport_t *this)
+{
+ int ret = -1;
+ int sock = -1;
+ socket_private_t *priv = NULL;
+ struct sockaddr_storage sockaddr = {0, };
+ socklen_t sockaddr_len = 0;
+ glusterfs_ctx_t *ctx = NULL;
+ sa_family_t sa_family = {0, };
+
+ priv = this->private;
+ ctx = this->ctx;
+
+ if (!priv) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "connect() called on uninitialized transport");
+ goto err;
+ }
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ sock = priv->sock;
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ if (sock != -1) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "connect () called on transport already connected");
+ ret = 0;
+ goto err;
+ }
+
+ ret = socket_client_get_remote_sockaddr (this, SA (&sockaddr),
+ &sockaddr_len, &sa_family);
+ if (ret == -1) {
+ /* logged inside client_get_remote_sockaddr */
+ goto err;
+ }
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->sock != -1) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "connect() -- already connected");
+ goto unlock;
+ }
+
+ memcpy (&this->peerinfo.sockaddr, &sockaddr, sockaddr_len);
+ this->peerinfo.sockaddr_len = sockaddr_len;
+
+ priv->sock = socket (sa_family, SOCK_STREAM, 0);
+ if (priv->sock == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "socket creation failed (%s)",
+ strerror (errno));
+ goto unlock;
+ }
+
+ /* Can't help it if setting socket options fails; we can continue
+ * working nonetheless.
+ */
+ if (setsockopt (priv->sock, SOL_SOCKET, SO_RCVBUF,
+ &priv->windowsize,
+ sizeof (priv->windowsize)) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setting receive window size failed: %d: %d: "
+ "%s", priv->sock, priv->windowsize,
+ strerror (errno));
+ }
+
+ if (setsockopt (priv->sock, SOL_SOCKET, SO_SNDBUF,
+ &priv->windowsize,
+ sizeof (priv->windowsize)) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setting send window size failed: %d: %d: "
+ "%s", priv->sock, priv->windowsize,
+ strerror (errno));
+ }
+
+
+ if (priv->nodelay && priv->lowlat) {
+ ret = __socket_nodelay (priv->sock);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setsockopt() failed for NODELAY (%s)",
+ strerror (errno));
+ }
+ }
+
+ if (!priv->bio) {
+ ret = __socket_nonblock (priv->sock);
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "NBIO on %d failed (%s)",
+ priv->sock, strerror (errno));
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+ }
+
+ SA (&this->myinfo.sockaddr)->sa_family =
+ SA (&this->peerinfo.sockaddr)->sa_family;
+
+ ret = client_bind (this, SA (&this->myinfo.sockaddr),
+ &this->myinfo.sockaddr_len, priv->sock);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "client bind failed: %s", strerror (errno));
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+
+ ret = connect (priv->sock, SA (&this->peerinfo.sockaddr),
+ this->peerinfo.sockaddr_len);
+
+ if (ret == -1 && errno != EINPROGRESS) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "connection attempt failed (%s)",
+ strerror (errno));
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+
+ priv->connected = 0;
+
+ rpc_transport_ref (this);
+
+ priv->idx = event_register (ctx->event_pool, priv->sock,
+ socket_event_handler, this, 1, 1);
+ if (priv->idx == -1)
+ ret = -1;
+ }
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+err:
+ return ret;
+}
+
+
+int
+socket_listen (rpc_transport_t *this)
+{
+ socket_private_t * priv = NULL;
+ int ret = -1;
+ int sock = -1;
+ struct sockaddr_storage sockaddr;
+ socklen_t sockaddr_len;
+ peer_info_t *myinfo = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ sa_family_t sa_family = {0, };
+
+ priv = this->private;
+ myinfo = &this->myinfo;
+ ctx = this->ctx;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ sock = priv->sock;
+ }
+ pthread_mutex_unlock (&priv->lock);
+
+ if (sock != -1) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "alreading listening");
+ return ret;
+ }
+
+ ret = socket_server_get_local_sockaddr (this, SA (&sockaddr),
+ &sockaddr_len, &sa_family);
+ if (ret == -1) {
+ return ret;
+ }
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->sock != -1) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "already listening");
+ goto unlock;
+ }
+
+ memcpy (&myinfo->sockaddr, &sockaddr, sockaddr_len);
+ myinfo->sockaddr_len = sockaddr_len;
+
+ priv->sock = socket (sa_family, SOCK_STREAM, 0);
+
+ if (priv->sock == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "socket creation failed (%s)",
+ strerror (errno));
+ goto unlock;
+ }
+
+ /* Can't help it if setting socket options fails; we can continue
+ * working nonetheless.
+ */
+ if (setsockopt (priv->sock, SOL_SOCKET, SO_RCVBUF,
+ &priv->windowsize,
+ sizeof (priv->windowsize)) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setting receive window size failed: %d: %d: "
+ "%s", priv->sock, priv->windowsize,
+ strerror (errno));
+ }
+
+ if (setsockopt (priv->sock, SOL_SOCKET, SO_SNDBUF,
+ &priv->windowsize,
+ sizeof (priv->windowsize)) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setting send window size failed: %d: %d: "
+ "%s", priv->sock, priv->windowsize,
+ strerror (errno));
+ }
+
+ if (priv->nodelay) {
+ ret = __socket_nodelay (priv->sock);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "setsockopt() failed for NODELAY (%s)",
+ strerror (errno));
+ }
+ }
+
+ if (!priv->bio) {
+ ret = __socket_nonblock (priv->sock);
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "NBIO on %d failed (%s)",
+ priv->sock, strerror (errno));
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+ }
+
+ ret = __socket_server_bind (this);
+
+ if (ret == -1) {
+ /* logged inside __socket_server_bind() */
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+
+ ret = listen (priv->sock, 10);
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not set socket %d to listen mode (%s)",
+ priv->sock, strerror (errno));
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+
+ rpc_transport_ref (this);
+
+ priv->idx = event_register (ctx->event_pool, priv->sock,
+ socket_server_event_handler,
+ this, 1, 0);
+
+ if (priv->idx == -1) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "could not register socket %d with events",
+ priv->sock);
+ ret = -1;
+ close (priv->sock);
+ priv->sock = -1;
+ goto unlock;
+ }
+ }
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+
+
+/* TODO: implement per transfer limit */
+#if 0
+int
+socket_submit (rpc_transport_t *this, char *buf, int len,
+ struct iovec *vector, int count,
+ struct iobref *iobref)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+ char need_poll_out = 0;
+ char need_append = 1;
+ struct ioq *entry = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ priv = this->private;
+ ctx = this->ctx;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->connected != 1) {
+ if (!priv->submit_log && !priv->connect_finish_log) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "not connected (priv->connected = %d)",
+ priv->connected);
+ priv->submit_log = 1;
+ }
+ goto unlock;
+ }
+
+ priv->submit_log = 0;
+ entry = __socket_ioq_new (this, buf, len, vector, count, iobref);
+ if (!entry)
+ goto unlock;
+
+ if (list_empty (&priv->ioq)) {
+ ret = __socket_ioq_churn_entry (this, entry);
+
+ if (ret == 0)
+ need_append = 0;
+
+ if (ret > 0)
+ need_poll_out = 1;
+ }
+
+ if (need_append) {
+ list_add_tail (&entry->list, &priv->ioq);
+ ret = 0;
+ }
+
+ if (need_poll_out) {
+ /* first entry to wait. continue writing on POLLOUT */
+ priv->idx = event_select_on (ctx->event_pool,
+ priv->sock,
+ priv->idx, -1, 1);
+ }
+ }
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+#endif
+
+
+int32_t
+socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+ char need_poll_out = 0;
+ char need_append = 1;
+ struct ioq *entry = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ priv = this->private;
+ ctx = this->ctx;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->connected != 1) {
+ if (!priv->submit_log && !priv->connect_finish_log) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "not connected (priv->connected = %d)",
+ priv->connected);
+ priv->submit_log = 1;
+ }
+ goto unlock;
+ }
+
+ priv->submit_log = 0;
+ entry = __socket_ioq_new (this, &req->msg);
+ if (!entry)
+ goto unlock;
+
+ if (list_empty (&priv->ioq)) {
+ ret = __socket_ioq_churn_entry (this, entry);
+
+ if (ret == 0)
+ need_append = 0;
+
+ if (ret > 0)
+ need_poll_out = 1;
+ }
+
+ if (need_append) {
+ list_add_tail (&entry->list, &priv->ioq);
+ ret = 0;
+ }
+
+ if (need_poll_out) {
+ /* first entry to wait. continue writing on POLLOUT */
+ priv->idx = event_select_on (ctx->event_pool,
+ priv->sock,
+ priv->idx, -1, 1);
+ }
+ }
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+
+
+int32_t
+socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
+{
+ socket_private_t *priv = NULL;
+ int ret = -1;
+ char need_poll_out = 0;
+ char need_append = 1;
+ struct ioq *entry = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+
+ priv = this->private;
+ ctx = this->ctx;
+
+ pthread_mutex_lock (&priv->lock);
+ {
+ if (priv->connected != 1) {
+ if (!priv->submit_log && !priv->connect_finish_log) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "not connected (priv->connected = %d)",
+ priv->connected);
+ priv->submit_log = 1;
+ }
+ goto unlock;
+ }
+ priv->submit_log = 0;
+ entry = __socket_ioq_new (this, &reply->msg);
+ if (!entry)
+ goto unlock;
+ if (list_empty (&priv->ioq)) {
+ ret = __socket_ioq_churn_entry (this, entry);
+
+ if (ret == 0)
+ need_append = 0;
+
+ if (ret > 0)
+ need_poll_out = 1;
+ }
+
+ if (need_append) {
+ list_add_tail (&entry->list, &priv->ioq);
+ ret = 0;
+ }
+
+ if (need_poll_out) {
+ /* first entry to wait. continue writing on POLLOUT */
+ priv->idx = event_select_on (ctx->event_pool,
+ priv->sock,
+ priv->idx, -1, 1);
+ }
+ }
+
+unlock:
+ pthread_mutex_unlock (&priv->lock);
+
+ return ret;
+}
+
+
+int32_t
+socket_getpeername (rpc_transport_t *this, char *hostname, int hostlen)
+{
+ int32_t ret = -1;
+
+ if ((this == NULL) || (hostname == NULL)) {
+ goto out;
+ }
+
+ if (hostlen < (strlen (this->peerinfo.identifier) + 1)) {
+ goto out;
+ }
+
+ strcpy (hostname, this->peerinfo.identifier);
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int32_t
+socket_getpeeraddr (rpc_transport_t *this, char *peeraddr, int addrlen,
+ struct sockaddr *sa, socklen_t salen)
+{
+ int32_t ret = -1;
+
+ if ((this == NULL) || (sa == NULL)) {
+ goto out;
+ }
+
+ *sa = *((struct sockaddr *)&this->peerinfo.sockaddr);
+
+ if (peeraddr != NULL) {
+ ret = socket_getpeername (this, peeraddr, addrlen);
+ }
+
+out:
+ return ret;
+}
+
+
+int32_t
+socket_getmyname (rpc_transport_t *this, char *hostname, int hostlen)
+{
+ int32_t ret = -1;
+
+ if ((this == NULL) || (hostname == NULL)) {
+ goto out;
+ }
+
+ if (hostlen < (strlen (this->myinfo.identifier) + 1)) {
+ goto out;
+ }
+
+ strcpy (hostname, this->myinfo.identifier);
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int32_t
+socket_getmyaddr (rpc_transport_t *this, char *myaddr, int addrlen,
+ struct sockaddr *sa, socklen_t salen)
+{
+ int32_t ret = -1;
+
+ if ((this == NULL) || (sa == NULL)) {
+ goto out;
+ }
+
+ *sa = *((struct sockaddr *)&this->myinfo.sockaddr);
+
+ if (myaddr != NULL) {
+ ret = socket_getmyname (this, myaddr, addrlen);
+ }
+
+out:
+ return ret;
+}
+
+
+struct rpc_transport_ops tops = {
+ .listen = socket_listen,
+ .connect = socket_connect,
+ .disconnect = socket_disconnect,
+ .submit_request = socket_submit_request,
+ .submit_reply = socket_submit_reply,
+ .get_peername = socket_getpeername,
+ .get_peeraddr = socket_getpeeraddr,
+ .get_myname = socket_getmyname,
+ .get_myaddr = socket_getmyaddr
+};
+
+
+int
+socket_init (rpc_transport_t *this)
+{
+ socket_private_t *priv = NULL;
+ gf_boolean_t tmp_bool = 0;
+ uint64_t windowsize = GF_DEFAULT_SOCKET_WINDOW_SIZE;
+ char *optstr = NULL;
+
+ if (this->private) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "double init attempted");
+ return -1;
+ }
+
+ priv = GF_CALLOC (1, sizeof (*priv), 0);
+ if (!priv) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "calloc (1, %"GF_PRI_SIZET") returned NULL",
+ sizeof (*priv));
+ return -1;
+ }
+
+ pthread_mutex_init (&priv->lock, NULL);
+
+ priv->sock = -1;
+ priv->idx = -1;
+ priv->connected = -1;
+
+ INIT_LIST_HEAD (&priv->ioq);
+
+ if (dict_get (this->options, "non-blocking-io")) {
+ optstr = data_to_str (dict_get (this->options,
+ "non-blocking-io"));
+
+ if (gf_string2boolean (optstr, &tmp_bool) == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "'non-blocking-io' takes only boolean options,"
+ " not taking any action");
+ tmp_bool = 1;
+ }
+ priv->bio = 0;
+ if (!tmp_bool) {
+ priv->bio = 1;
+ gf_log (this->name, GF_LOG_WARNING,
+ "disabling non-blocking IO");
+ }
+ }
+
+ optstr = NULL;
+
+ // By default, we enable NODELAY
+ priv->nodelay = 1;
+ if (dict_get (this->options, "rpc-transport.socket.nodelay")) {
+ optstr = data_to_str (dict_get (this->options,
+ "rpc-transport.socket.nodelay"));
+
+ if (gf_string2boolean (optstr, &tmp_bool) == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "'rpc-transport.socket.nodelay' takes only "
+ "boolean options, not taking any action");
+ tmp_bool = 1;
+ }
+ if (!tmp_bool) {
+ priv->nodelay = 0;
+ gf_log (this->name, GF_LOG_DEBUG,
+ "disabling nodelay");
+ }
+ }
+
+
+ optstr = NULL;
+ if (dict_get_str (this->options, "rpc-transport.window-size",
+ &optstr) == 0) {
+ if (gf_string2bytesize (optstr, &windowsize) != 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "invalid number format: %s", optstr);
+ return -1;
+ }
+ }
+
+ optstr = NULL;
+
+ if (dict_get_str (this->options, "rpc-transport.socket.lowlat",
+ &optstr) == 0) {
+ priv->lowlat = 1;
+ }
+
+ priv->windowsize = (int)windowsize;
+ this->private = priv;
+
+ return 0;
+}
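+
+/* The options handled above map onto per-transport volume file options.
+ * A hypothetical client volume using them could look like this
+ * (illustrative names and values only):
+ *
+ *   volume remote1
+ *     type protocol/client
+ *     option transport-type socket
+ *     option non-blocking-io on
+ *     option rpc-transport.socket.nodelay on
+ *     option rpc-transport.window-size 1MB
+ *   end-volume
+ */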
+
+
+void
+fini (rpc_transport_t *this)
+{
+ socket_private_t *priv = this->private;
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "transport %p destroyed", this);
+
+ pthread_mutex_destroy (&priv->lock);
+
+ GF_FREE (this->name);
+ GF_FREE (priv);
+}
+
+
+int32_t
+init (rpc_transport_t *this)
+{
+ int ret = -1;
+
+ ret = socket_init (this);
+
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "socket_init() failed");
+ }
+
+ return ret;
+}
+
+struct volume_options options[] = {
+ { .key = {"remote-port",
+ "rpc-transport.remote-port",
+ "rpc-transport.socket.remote-port"},
+ .type = GF_OPTION_TYPE_INT
+ },
+ { .key = {"rpc-transport.socket.listen-port", "listen-port"},
+ .type = GF_OPTION_TYPE_INT
+ },
+ { .key = {"rpc-transport.socket.bind-address", "bind-address" },
+ .type = GF_OPTION_TYPE_INTERNET_ADDRESS
+ },
+ { .key = {"rpc-transport.socket.connect-path", "connect-path"},
+ .type = GF_OPTION_TYPE_ANY
+ },
+ { .key = {"rpc-transport.socket.bind-path", "bind-path"},
+ .type = GF_OPTION_TYPE_ANY
+ },
+ { .key = {"rpc-transport.socket.listen-path", "listen-path"},
+ .type = GF_OPTION_TYPE_ANY
+ },
+ { .key = { "rpc-transport.address-family",
+ "address-family" },
+ .value = {"inet", "inet6", "inet/inet6", "inet6/inet",
+ "unix", "inet-sdp" },
+ .type = GF_OPTION_TYPE_STR
+ },
+
+ { .key = {"non-blocking-io"},
+ .type = GF_OPTION_TYPE_BOOL
+ },
+ { .key = {"rpc-transport.window-size"},
+ .type = GF_OPTION_TYPE_SIZET,
+ .min = GF_MIN_SOCKET_WINDOW_SIZE,
+ .max = GF_MAX_SOCKET_WINDOW_SIZE,
+ },
+ { .key = {"rpc-transport.socket.nodelay"},
+ .type = GF_OPTION_TYPE_BOOL
+ },
+ { .key = {"rpc-transport.socket.lowlat"},
+ .type = GF_OPTION_TYPE_BOOL
+ },
+ { .key = {NULL} }
+};
diff --git a/xlators/protocol/rpc/rpc-transport/socket/src/socket.h b/xlators/protocol/rpc/rpc-transport/socket/src/socket.h
new file mode 100644
index 00000000000..aa31ee2a7ef
--- /dev/null
+++ b/xlators/protocol/rpc/rpc-transport/socket/src/socket.h
@@ -0,0 +1,190 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _SOCKET_H
+#define _SOCKET_H
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "event.h"
+#include "rpc-transport.h"
+#include "logging.h"
+#include "dict.h"
+#include "mem-pool.h"
+
+#ifndef MAX_IOVEC
+#define MAX_IOVEC 16
+#endif /* MAX_IOVEC */
+
+#define GF_DEFAULT_SOCKET_LISTEN_PORT 6969
+
+/* This is the size set through setsockopt for
+ * both the TCP receive window size and the
+ * send buffer size.
+ * Until the iobuf size becomes configurable, this size is set to include
+ * two iobufs plus the GlusterFS protocol headers.
+ * Linux allows us to override the system maximum values.
+ * Should we override them? If we set a value larger than the default,
+ * setsockopt() will fail. Having larger values might be beneficial
+ * for IB links.
+ */
+#define GF_DEFAULT_SOCKET_WINDOW_SIZE (512 * GF_UNIT_KB)
+#define GF_MAX_SOCKET_WINDOW_SIZE (1 * GF_UNIT_MB)
+#define GF_MIN_SOCKET_WINDOW_SIZE (128 * GF_UNIT_KB)
+
+typedef enum {
+ SP_STATE_NADA = 0,
+ SP_STATE_COMPLETE,
+ SP_STATE_READING_FRAGHDR,
+ SP_STATE_READ_FRAGHDR,
+ SP_STATE_READING_FRAG,
+} sp_rpcrecord_state_t;
+
+typedef enum {
+ SP_STATE_RPCFRAG_INIT,
+ SP_STATE_READING_MSGTYPE,
+ SP_STATE_READ_MSGTYPE,
+} sp_rpcfrag_state_t;
+
+typedef enum {
+ SP_STATE_SIMPLE_MSG_INIT,
+ SP_STATE_READING_SIMPLE_MSG,
+} sp_rpcfrag_simple_msg_state_t;
+
+typedef enum {
+ SP_STATE_VECTORED_REQUEST_INIT,
+ SP_STATE_READING_CREDBYTES,
+ SP_STATE_READ_CREDBYTES, /* read credential data. */
+ SP_STATE_READING_VERFBYTES,
+ SP_STATE_READ_VERFBYTES, /* read verifier data */
+ SP_STATE_READING_PROG,
+} sp_rpcfrag_vectored_request_state_t;
+
+typedef enum {
+ SP_STATE_REQUEST_HEADER_INIT,
+ SP_STATE_READING_RPCHDR1,
+ SP_STATE_READ_RPCHDR1, /* msg has been read from the beginning
+ * up to and including credlen
+ */
+} sp_rpcfrag_request_header_state_t;
+
+struct ioq {
+ union {
+ struct list_head list;
+ struct {
+ struct ioq *next;
+ struct ioq *prev;
+ };
+ };
+
+ struct iovec vector[MAX_IOVEC];
+ int count;
+ struct iovec *pending_vector;
+ int pending_count;
+ struct iobref *iobref;
+};
+
+typedef struct {
+ sp_rpcfrag_request_header_state_t header_state;
+ sp_rpcfrag_vectored_request_state_t vector_state;
+} sp_rpcfrag_request_state_t;
+
+typedef enum {
+ SP_STATE_VECTORED_REPLY_STATUS_INIT,
+ SP_STATE_READING_REPLY_STATUS,
+ SP_STATE_READ_REPLY_STATUS,
+} sp_rpcfrag_vectored_reply_status_state_t;
+
+typedef enum {
+ SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT,
+ SP_STATE_READING_PROC_HEADER,
+ SP_STATE_READ_PROC_HEADER,
+} sp_rpcfrag_vectored_reply_accepted_success_state_t;
+
+typedef enum {
+ SP_STATE_ACCEPTED_REPLY_INIT,
+ SP_STATE_READING_REPLY_VERFLEN,
+ SP_STATE_READ_REPLY_VERFLEN,
+ SP_STATE_READING_REPLY_VERFBYTES,
+ SP_STATE_READ_REPLY_VERFBYTES,
+} sp_rpcfrag_vectored_reply_accepted_state_t;
+
+typedef struct {
+ uint32_t accept_status;
+ sp_rpcfrag_vectored_reply_status_state_t status_state;
+ sp_rpcfrag_vectored_reply_accepted_state_t accepted_state;
+ sp_rpcfrag_vectored_reply_accepted_success_state_t accepted_success_state;
+} sp_rpcfrag_vectored_reply_state_t;
+
+typedef struct {
+ int32_t sock;
+ int32_t idx;
+ char connected; // -1 = not connected. 0 = in progress. 1 = connected
+ char bio;
+ char connect_finish_log;
+ char submit_log;
+ union {
+ struct list_head ioq;
+ struct {
+ struct ioq *ioq_next;
+ struct ioq *ioq_prev;
+ };
+ };
+ struct {
+ sp_rpcrecord_state_t record_state;
+ struct {
+ char *fragcurrent;
+ uint32_t bytes_read;
+ uint32_t remaining_size;
+ struct iovec vector;
+ struct iovec *pending_vector;
+ union {
+ sp_rpcfrag_request_state_t request;
+ sp_rpcfrag_vectored_reply_state_t reply;
+ } call_body;
+
+ sp_rpcfrag_simple_msg_state_t simple_state;
+ sp_rpcfrag_state_t state;
+ } frag;
+ struct iobuf *iobuf;
+ size_t iobuf_size;
+ struct iovec vector[2];
+ int count;
+ struct iobuf *vectoriob;
+ size_t vectoriob_size;
+ rpc_request_info_t *request_info;
+ struct iovec *pending_vector;
+ int pending_count;
+ uint32_t fraghdr;
+ char complete_record;
+ msg_type_t msg_type;
+ size_t total_bytes_read;
+ } incoming;
+ pthread_mutex_t lock;
+ int windowsize;
+ char lowlat;
+ char nodelay;
+} socket_private_t;
+
+
+#endif
diff --git a/xlators/protocol/server/Makefile.am b/xlators/protocol/server/Makefile.am
new file mode 100644
index 00000000000..af437a64d6d
--- /dev/null
+++ b/xlators/protocol/server/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = src
diff --git a/xlators/protocol/server/src/Makefile.am b/xlators/protocol/server/src/Makefile.am
new file mode 100644
index 00000000000..842ab5e50fc
--- /dev/null
+++ b/xlators/protocol/server/src/Makefile.am
@@ -0,0 +1,22 @@
+xlator_LTLIBRARIES = server.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/alpha/protocol
+
+server_la_LDFLAGS = -module -avoid-version
+
+server_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/xlators/protocol/rpc/rpc-lib/src/libgfrpc.la \
+ $(top_builddir)/xlators/protocol/lib/src/libgfproto1.la
+
+server_la_SOURCES = server.c server-resolve.c server-helpers.c \
+ server3_1-fops.c server-handshake.c
+
+noinst_HEADERS = server.h server-helpers.h server-mem-types.h
+
+AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall \
+ -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles \
+ -DCONFDIR=\"$(sysconfdir)/glusterfs\" -D$(GF_HOST_OS) \
+ $(GF_CFLAGS) -I$(top_srcdir)/xlators/protocol/lib/src \
+ -I$(top_srcdir)/xlators/protocol/rpc/rpc-lib/src/ \
+ -I$(top_srcdir)/contrib/md5/
+
+CLEANFILES = *~
diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c
new file mode 100644
index 00000000000..8ce9f6b3a86
--- /dev/null
+++ b/xlators/protocol/server/src/server-handshake.c
@@ -0,0 +1,689 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "server.h"
+#include "server-helpers.h"
+#include "glusterfs-xdr.h"
+#include "compat-errno.h"
+#include "msg-xdr.h"
+#include "authenticate.h"
+
+struct __get_xl_struct {
+ const char *name;
+ xlator_t *reply;
+};
+int
+gf_compare_client_version (rpcsvc_request_t *req, int fop_prognum,
+ int mgmt_prognum)
+{
+ int ret = -1;
+ /* TODO: think.. */
+ if (glusterfs3_1_fop_prog.prognum == fop_prognum)
+ ret = 0;
+
+ return ret;
+}
+
+void __check_and_set (xlator_t *each, void *data)
+{
+ if (!strcmp (each->name,
+ ((struct __get_xl_struct *) data)->name))
+ ((struct __get_xl_struct *) data)->reply = each;
+}
+
+static xlator_t *
+get_xlator_by_name (xlator_t *some_xl, const char *name)
+{
+ struct __get_xl_struct get = {
+ .name = name,
+ .reply = NULL
+ };
+
+ xlator_foreach (some_xl, __check_and_set, &get);
+
+ return get.reply;
+}
+
+
+int
+_volfile_update_checksum (xlator_t *this, char *key, uint32_t checksum)
+{
+ server_conf_t *conf = NULL;
+ struct _volfile_ctx *temp_volfile = NULL;
+
+ conf = this->private;
+ temp_volfile = conf->volfile;
+
+ while (temp_volfile) {
+ if ((NULL == key) && (NULL == temp_volfile->key))
+ break;
+ if ((NULL == key) || (NULL == temp_volfile->key)) {
+ temp_volfile = temp_volfile->next;
+ continue;
+ }
+ if (strcmp (temp_volfile->key, key) == 0)
+ break;
+ temp_volfile = temp_volfile->next;
+ }
+
+ if (!temp_volfile) {
+ temp_volfile = GF_CALLOC (1, sizeof (struct _volfile_ctx),
+ gf_server_mt_volfile_ctx_t);
+
+ temp_volfile->next = conf->volfile;
+ temp_volfile->key = (key)? gf_strdup (key): NULL;
+ temp_volfile->checksum = checksum;
+
+ conf->volfile = temp_volfile;
+ goto out;
+ }
+
+ if (temp_volfile->checksum != checksum) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "the volume file got modified between earlier access "
+ "and now, this may lead to inconsistency between "
+ "clients, advised to remount client");
+ temp_volfile->checksum = checksum;
+ }
+
+ out:
+ return 0;
+}
+
+
+size_t
+build_volfile_path (xlator_t *this, const char *key, char *path,
+ size_t path_len)
+{
+ int ret = -1;
+ int free_filename = 0;
+ char *filename = NULL;
+ server_conf_t *conf = NULL;
+ char data_key[256] = {0,};
+
+ conf = this->private;
+
+ /* Inform users that this option has changed */
+ ret = dict_get_str (this->options, "client-volume-filename",
+ &filename);
+ if (ret == 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "option 'client-volume-filename' is changed to "
+ "'volume-filename.<key>' which now takes 'key' as an "
+ "option to choose/fetch different files from server. "
+ "Refer documentation or contact developers for more "
+ "info. Currently defaulting to given file '%s'",
+ filename);
+ }
+
+ if (key && !filename) {
+ sprintf (data_key, "volume-filename.%s", key);
+ ret = dict_get_str (this->options, data_key, &filename);
+ if (ret < 0) {
+ /* Make sure that key doesn't contain "../" in path */
+ if ((gf_strstr (key, "/", "..")) == -1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s: invalid key", key);
+ goto out;
+ }
+ }
+
+ ret = gf_asprintf (&filename, "%s/%s.vol", conf->conf_dir, key);
+ if (-1 == ret)
+ goto out;
+
+ free_filename = 1;
+ }
+
+ if (!filename) {
+ ret = dict_get_str (this->options,
+ "volume-filename.default", &filename);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "no default volume filename given, "
+ "defaulting to %s", DEFAULT_VOLUME_FILE_PATH);
+ filename = DEFAULT_VOLUME_FILE_PATH;
+ }
+ }
+
+ ret = -1;
+
+ if ((filename) && (path_len > strlen (filename))) {
+ strcpy (path, filename);
+ ret = strlen (filename);
+ }
+
+out:
+ if (free_filename)
+ GF_FREE (filename);
+
+ return ret;
+}
+
+int
+_validate_volfile_checksum (xlator_t *this, char *key,
+ uint32_t checksum)
+{
+ char filename[ZR_PATH_MAX] = {0,};
+ server_conf_t *conf = NULL;
+ struct _volfile_ctx *temp_volfile = NULL;
+ int ret = 0;
+ int fd = 0;
+ uint32_t local_checksum = 0;
+
+ conf = this->private;
+ temp_volfile = conf->volfile;
+
+ if (!checksum)
+ goto out;
+
+ if (!temp_volfile) {
+ ret = build_volfile_path (this, key, filename,
+ sizeof (filename));
+ if (ret <= 0)
+ goto out;
+ fd = open (filename, O_RDONLY);
+ if (-1 == fd) {
+ ret = 0;
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to open volume file (%s) : %s",
+ filename, strerror (errno));
+ goto out;
+ }
+ get_checksum_for_file (fd, &local_checksum);
+ _volfile_update_checksum (this, key, local_checksum);
+ close (fd);
+ }
+
+ temp_volfile = conf->volfile;
+ while (temp_volfile) {
+ if ((NULL == key) && (NULL == temp_volfile->key))
+ break;
+ if ((NULL == key) || (NULL == temp_volfile->key)) {
+ temp_volfile = temp_volfile->next;
+ continue;
+ }
+ if (strcmp (temp_volfile->key, key) == 0)
+ break;
+ temp_volfile = temp_volfile->next;
+ }
+
+ if (!temp_volfile)
+ goto out;
+
+ if ((temp_volfile->checksum) &&
+ (checksum != temp_volfile->checksum))
+ ret = -1;
+
+out:
+ return ret;
+}
+
+int
+build_program_list (server_conf_t *conf, char *list)
+{
+ /* Reply in "Name:Program-Number:Program-Version,..." format */
+ sprintf (list, "%s:%d:%d",
+ glusterfs3_1_fop_prog.progname,
+ glusterfs3_1_fop_prog.prognum,
+ glusterfs3_1_fop_prog.progver);
+ /* TODO: keep adding new versions to the list here */
+ return 0;
+}
+
+int
+server_dump_version (rpcsvc_request_t *req)
+{
+ char list[8192] = {0,};
+ server_conf_t *conf = NULL;
+ int ret = -1;
+ int op_errno = EINVAL;
+ gf_dump_version_req args = {0,};
+ gf_dump_version_rsp rsp = {0,};
+
+ conf = ((xlator_t *)req->conn->svc->mydata)->private;
+
+ if (xdr_to_glusterfs_req (req, &args, xdr_to_dump_version_req)) {
+ /* failed to decode msg */
+ req->rpc_err = GARBAGE_ARGS;
+ goto fail;
+ }
+
+ build_program_list (conf, list);
+ rsp.msg.msg_val = list;
+ rsp.msg.msg_len = strlen (list) + 1;
+ ret = 0;
+fail:
+ rsp.op_errno = gf_errno_to_error (op_errno);
+ rsp.op_ret = ret;
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_dump_version_rsp);
+
+ if (args.key)
+ free (args.key);
+
+ return 0;
+}
+
+int
+server_getspec (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ int32_t op_errno = ENOENT;
+ int32_t spec_fd = -1;
+ size_t file_len = 0;
+ char filename[ZR_PATH_MAX] = {0,};
+ struct stat stbuf = {0,};
+ uint32_t checksum = 0;
+ char *key = NULL;
+ server_conf_t *conf = NULL;
+
+ gf_getspec_req args = {0,};
+ gf_getspec_rsp rsp = {0,};
+ server_connection_t *conn = NULL;
+
+ conn = req->conn->trans->private;
+ conf = conn->this->private;
+
+ if (xdr_to_glusterfs_req (req, &args, xdr_to_getspec_req)) {
+ /* failed to decode the request message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto fail;
+ }
+
+ ret = build_volfile_path (conn->this, args.key,
+ filename, sizeof (filename));
+ if (ret > 0) {
+ /* to allocate the proper buffer to hold the file data */
+ ret = stat (filename, &stbuf);
+ if (ret < 0){
+ gf_log (conn->this->name, GF_LOG_ERROR,
+ "Unable to stat %s (%s)",
+ filename, strerror (errno));
+ goto fail;
+ }
+
+ spec_fd = open (filename, O_RDONLY);
+ if (spec_fd < 0) {
+ gf_log (conn->this->name, GF_LOG_ERROR,
+ "Unable to open %s (%s)",
+ filename, strerror (errno));
+ goto fail;
+ }
+ ret = file_len = stbuf.st_size;
+
+ if (conf->verify_volfile) {
+ get_checksum_for_file (spec_fd, &checksum);
+ /* cache the checksum under the key the client requested */
+ _volfile_update_checksum (conn->this, args.key, checksum);
+ }
+ } else {
+ errno = ENOENT;
+ }
+
+ if (file_len) {
+ rsp.spec = GF_CALLOC (file_len, sizeof (char),
+ gf_server_mt_rsp_buf_t);
+ if (!rsp.spec) {
+ ret = -1;
+ op_errno = ENOMEM;
+ goto fail;
+ }
+ ret = read (spec_fd, rsp.spec, file_len);
+
+ close (spec_fd);
+ }
+
+ /* convert to XDR */
+fail:
+ op_errno = errno;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+ rsp.op_ret = ret;
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_getspec_rsp);
+
+ return 0;
+}
+
+
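+/* SETVOLUME handshake: unserialize the client's option dictionary, check the
+ * FOP/MGMT protocol versions, look up the requested remote-subvolume,
+ * optionally cross-check the client's volfile checksum, authenticate the
+ * peer and, on success, bind the connection to that subvolume (creating its
+ * inode table if needed) before sending the reply dictionary back. */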
+int
+server_setvolume (rpcsvc_request_t *req)
+{
+ gf_setvolume_req args = {0,};
+ gf_setvolume_rsp rsp = {0,};
+ server_connection_t *conn = NULL;
+ server_conf_t *conf = NULL;
+ peer_info_t *peerinfo = NULL;
+ dict_t *reply = NULL;
+ dict_t *config_params = NULL;
+ dict_t *params = NULL;
+ char *name = NULL;
+ char *process_uuid = NULL;
+ xlator_t *xl = NULL;
+ char *msg = NULL;
+ char *volfile_key = NULL;
+ xlator_t *this = NULL;
+ uint32_t checksum = 0;
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ int32_t fop_version = 0;
+ int32_t mgmt_version = 0;
+
+ params = dict_new ();
+ reply = dict_new ();
+ if (xdr_to_glusterfs_req (req, &args, xdr_to_setvolume_req)) {
+ /* failed to decode the request message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto fail;
+ }
+
+ this = req->conn->svc->mydata;
+
+ config_params = dict_copy_with_ref (this->options, NULL);
+ conf = this->private;
+
+ ret = dict_unserialize (args.dict.dict_val, args.dict.dict_len, &params);
+ if (ret < 0) {
+ ret = dict_set_str (reply, "ERROR",
+ "Internal error: failed to unserialize "
+ "request dictionary");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg \"%s\"",
+ "Internal error: failed to unserialize "
+ "request dictionary");
+
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto fail;
+ }
+
+ ret = dict_get_str (params, "process-uuid", &process_uuid);
+ if (ret < 0) {
+ ret = dict_set_str (reply, "ERROR",
+ "UUID not specified");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto fail;
+ }
+
+
+ conn = server_connection_get (this, process_uuid);
+ if (req->conn->trans->xl_private != conn)
+ req->conn->trans->xl_private = conn;
+
+ ret = dict_get_int32 (params, "fops-version", &fop_version);
+ if (ret < 0) {
+ ret = dict_set_str (reply, "ERROR",
+ "No FOP version number specified");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+ }
+
+ ret = dict_get_int32 (params, "mgmt-version", &mgmt_version);
+ if (ret < 0) {
+ ret = dict_set_str (reply, "ERROR",
+ "No MGMT version number specified");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+ }
+
+ ret = gf_compare_client_version (req, fop_version, mgmt_version);
+ if (ret != 0) {
+ ret = gf_asprintf (&msg, "version mismatch: client(%d)"
+ " - client-mgmt(%d)",
+ fop_version, mgmt_version);
+ /* get_supported_version (req)); */
+ if (-1 == ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "asprintf failed while setting up error msg");
+ goto fail;
+ }
+ ret = dict_set_dynstr (reply, "ERROR", msg);
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto fail;
+ }
+
+ ret = dict_get_str (params, "remote-subvolume", &name);
+ if (ret < 0) {
+ ret = dict_set_str (reply, "ERROR",
+ "No remote-subvolume option specified");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto fail;
+ }
+
+ xl = get_xlator_by_name (this, name);
+ if (xl == NULL) {
+ ret = gf_asprintf (&msg, "remote-subvolume \"%s\" is not found",
+ name);
+ if (-1 == ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "asprintf failed while setting error msg");
+ goto fail;
+ }
+ ret = dict_set_dynstr (reply, "ERROR", msg);
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ op_ret = -1;
+ op_errno = ENOENT;
+ goto fail;
+ }
+
+ if (conf->verify_volfile) {
+ ret = dict_get_uint32 (params, "volfile-checksum", &checksum);
+ if (ret == 0) {
+ ret = dict_get_str (params, "volfile-key",
+ &volfile_key);
+
+ ret = _validate_volfile_checksum (this, volfile_key,
+ checksum);
+ if (-1 == ret) {
+ ret = dict_set_str (reply, "ERROR",
+ "volume-file checksum "
+ "varies from earlier "
+ "access");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ op_ret = -1;
+ op_errno = ESTALE;
+ goto fail;
+ }
+ }
+ }
+
+
+ peerinfo = &req->conn->trans->peerinfo;
+ ret = dict_set_static_ptr (params, "peer-info", peerinfo);
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set peer-info");
+
+ if (conf->auth_modules == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Authentication module not initialized");
+ }
+
+ ret = gf_authenticate (params, config_params,
+ conf->auth_modules);
+
+ if (ret == AUTH_ACCEPT) {
+ gf_log (this->name, GF_LOG_INFO,
+ "accepted client from %s",
+ peerinfo->identifier);
+ op_ret = 0;
+ conn->bound_xl = xl;
+ ret = dict_set_str (reply, "ERROR", "Success");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+ } else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Cannot authenticate client from %s",
+ peerinfo->identifier);
+ op_ret = -1;
+ op_errno = EACCES;
+ ret = dict_set_str (reply, "ERROR", "Authentication failed");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ goto fail;
+ }
+
+ if (conn->bound_xl == NULL) {
+ ret = dict_set_str (reply, "ERROR",
+ "Check volfile and handshake "
+ "options in protocol/client");
+ if (ret < 0)
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to set error msg");
+
+ op_ret = -1;
+ op_errno = EACCES;
+ goto fail;
+ }
+
+ if ((conn->bound_xl != NULL) &&
+ (ret >= 0) &&
+ (conn->bound_xl->itable == NULL)) {
+ /* create inode table for this bound_xl, if one doesn't
+ already exist */
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "creating inode table with lru_limit=%"PRId32", "
+ "xlator=%s", conf->inode_lru_limit,
+ conn->bound_xl->name);
+
+ /* TODO: what is this ? */
+ conn->bound_xl->itable = inode_table_new (conf->inode_lru_limit,
+ conn->bound_xl);
+ }
+
+ ret = dict_set_str (reply, "process-uuid",
+ this->ctx->process_uuid);
+
+ ret = dict_set_uint64 (reply, "transport-ptr",
+ ((uint64_t) (long) req->conn->trans));
+
+
+fail:
+ ret = dict_serialized_length (reply);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to get serialized length of reply dict");
+ op_ret = -1;
+ op_errno = EINVAL;
+ ret = 0;
+ }
+ rsp.dict.dict_len = ret;
+
+ if (rsp.dict.dict_len) {
+ rsp.dict.dict_val = GF_CALLOC (1, rsp.dict.dict_len, 0);
+ if (rsp.dict.dict_val) {
+ ret = dict_serialize (reply, rsp.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "failed to serialize reply dict");
+ op_ret = -1;
+ op_errno = -ret;
+ }
+ }
+ }
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_setvolume_rsp);
+
+
+ if (args.dict.dict_val)
+ free (args.dict.dict_val);
+
+ if (rsp.dict.dict_val)
+ GF_FREE (rsp.dict.dict_val);
+
+ dict_unref (params);
+ dict_unref (reply);
+ dict_unref (config_params);
+
+ return 0;
+}
+
+
+int
+server_ping (rpcsvc_request_t *req)
+{
+ gf_common_rsp rsp = {0,};
+
+ rsp.gfs_id = req->gfs_id;
+ /* Accepted */
+ rsp.op_ret = 0;
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+
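+/* Handshake program: maps GF_HNDSK_* procedure numbers to their handlers. */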
+rpcsvc_actor_t gluster_handshake_actors[] = {
+ [GF_HNDSK_NULL] = {"NULL", GF_HNDSK_NULL, server_null, NULL, NULL },
+ [GF_HNDSK_DUMP_VERSION] = {"VERSION", GF_HNDSK_DUMP_VERSION, server_dump_version, NULL, NULL },
+ [GF_HNDSK_SETVOLUME] = {"SETVOLUME", GF_HNDSK_SETVOLUME, server_setvolume, NULL, NULL },
+ [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, NULL },
+ [GF_HNDSK_PING] = {"PING", GF_HNDSK_PING, server_ping, NULL, NULL },
+};
+
+
+struct rpcsvc_program gluster_handshake_prog = {
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+
+ .actors = gluster_handshake_actors,
+ .numactors = 5,
+ .progport = 7008,
+};
diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c
new file mode 100644
index 00000000000..5cae205d76f
--- /dev/null
+++ b/xlators/protocol/server/src/server-helpers.c
@@ -0,0 +1,1392 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "server.h"
+#include "server-helpers.h"
+
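+/* Copy the auxiliary group IDs carried in the RPC credentials into the call
+ * frame so server-side operations run with the client's group membership. */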
+int
+server_decode_groups (call_frame_t *frame, rpcsvc_request_t *req)
+{
+ int i = 0;
+
+ if ((!frame) || (!req))
+ return 0;
+
+ frame->root->ngrps = req->auxgidcount;
+ if (frame->root->ngrps == 0)
+ return 0;
+
+ if (frame->root->ngrps > GF_REQUEST_MAXGROUPS)
+ return -1;
+
+ for (; i < frame->root->ngrps; ++i)
+ frame->root->groups[i] = req->auxgids[i];
+
+ return 0;
+}
+
+/* server_loc_fill - derive a loc_t for a given inode number
+ *
+ * NOTE: @loc must be empty when passed in; any referenced pointers it
+ * already holds would be leaked after returning from here.
+ */
+int
+server_loc_fill (loc_t *loc, server_state_t *state,
+ ino_t ino, ino_t par,
+ const char *name, const char *path)
+{
+ inode_t *inode = NULL;
+ inode_t *parent = NULL;
+ int32_t ret = -1;
+ char *dentry_path = NULL;
+
+
+ GF_VALIDATE_OR_GOTO ("server", loc, out);
+ GF_VALIDATE_OR_GOTO ("server", state, out);
+ GF_VALIDATE_OR_GOTO ("server", path, out);
+
+ /* anything beyond this point is success */
+ ret = 0;
+ loc->ino = ino;
+ inode = loc->inode;
+ if (inode == NULL) {
+ if (ino)
+ inode = inode_search (state->itable, ino, NULL);
+
+ if ((inode == NULL) &&
+ (par && name))
+ inode = inode_search (state->itable, par, name);
+
+ loc->inode = inode;
+ if (inode)
+ loc->ino = inode->ino;
+ }
+
+ parent = loc->parent;
+ if (parent == NULL) {
+ if (inode)
+ parent = inode_parent (inode, par, name);
+ else
+ parent = inode_search (state->itable, par, NULL);
+ loc->parent = parent;
+ }
+
+ if (name && parent) {
+ ret = inode_path (parent, name, &dentry_path);
+ if (ret < 0) {
+ gf_log (state->conn->bound_xl->name, GF_LOG_DEBUG,
+ "failed to build path for %"PRId64"/%s: %s",
+ parent->ino, name, strerror (-ret));
+ }
+ } else if (inode) {
+ ret = inode_path (inode, NULL, &dentry_path);
+ if (ret < 0) {
+ gf_log (state->conn->bound_xl->name, GF_LOG_DEBUG,
+ "failed to build path for %"PRId64": %s",
+ inode->ino, strerror (-ret));
+ }
+ }
+
+ if (dentry_path) {
+ if (strcmp (dentry_path, path)) {
+ gf_log (state->conn->bound_xl->name, GF_LOG_DEBUG,
+ "paths differ for inode(%"PRId64"): "
+ "client path = %s. dentry path = %s",
+ ino, path, dentry_path);
+ }
+
+ loc->path = dentry_path;
+ loc->name = strrchr (loc->path, '/');
+ if (loc->name)
+ loc->name++;
+ } else {
+ loc->path = gf_strdup (path);
+ loc->name = strrchr (loc->path, '/');
+ if (loc->name)
+ loc->name++;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * stat_to_str - convert struct iatt to an ASCII string
+ * @stbuf: struct iatt pointer
+ *
+ * not for external reference
+ */
+char *
+stat_to_str (struct iatt *stbuf)
+{
+ int ret = 0;
+ char *tmp_buf = NULL;
+
+ uint64_t dev = stbuf->ia_gen;
+ uint64_t ino = stbuf->ia_ino;
+ uint32_t mode = st_mode_from_ia (stbuf->ia_prot, stbuf->ia_type);
+ uint32_t nlink = stbuf->ia_nlink;
+ uint32_t uid = stbuf->ia_uid;
+ uint32_t gid = stbuf->ia_gid;
+ uint64_t rdev = stbuf->ia_rdev;
+ uint64_t size = stbuf->ia_size;
+ uint32_t blksize = stbuf->ia_blksize;
+ uint64_t blocks = stbuf->ia_blocks;
+ uint32_t atime = stbuf->ia_atime;
+ uint32_t mtime = stbuf->ia_mtime;
+ uint32_t ctime = stbuf->ia_ctime;
+
+ uint32_t atime_nsec = stbuf->ia_atime_nsec;
+ uint32_t mtime_nsec = stbuf->ia_mtime_nsec;
+ uint32_t ctime_nsec = stbuf->ia_ctime_nsec;
+
+
+ ret = gf_asprintf (&tmp_buf,
+ GF_STAT_PRINT_FMT_STR,
+ dev,
+ ino,
+ mode,
+ nlink,
+ uid,
+ gid,
+ rdev,
+ size,
+ blksize,
+ blocks,
+ atime,
+ atime_nsec,
+ mtime,
+ mtime_nsec,
+ ctime,
+ ctime_nsec);
+ if (-1 == ret) {
+ gf_log ("protocol/server", GF_LOG_DEBUG,
+ "asprintf failed while setting up stat buffer string");
+ return NULL;
+ }
+ return tmp_buf;
+}
+
+
+void
+server_loc_wipe (loc_t *loc)
+{
+ if (loc->parent) {
+ inode_unref (loc->parent);
+ loc->parent = NULL;
+ }
+
+ if (loc->inode) {
+ inode_unref (loc->inode);
+ loc->inode = NULL;
+ }
+
+ if (loc->path)
+ GF_FREE ((void *)loc->path);
+}
+
+
+void
+server_resolve_wipe (server_resolve_t *resolve)
+{
+ struct resolve_comp *comp = NULL;
+ int i = 0;
+
+ if (resolve->path)
+ GF_FREE ((void *)resolve->path);
+
+ if (resolve->bname)
+ GF_FREE ((void *)resolve->bname);
+
+ if (resolve->resolved)
+ GF_FREE ((void *)resolve->resolved);
+
+ loc_wipe (&resolve->deep_loc);
+
+ comp = resolve->components;
+ if (comp) {
+ for (i = 0; comp[i].basename; i++) {
+ if (comp[i].inode)
+ inode_unref (comp[i].inode);
+ }
+ GF_FREE ((void *)resolve->components);
+ }
+}
+
+
+void
+free_state (server_state_t *state)
+{
+ if (state->conn) {
+ //xprt_svc_unref (state->conn);
+ state->conn = NULL;
+ }
+
+ if (state->fd) {
+ fd_unref (state->fd);
+ state->fd = NULL;
+ }
+
+ if (state->iobref) {
+ iobref_unref (state->iobref);
+ state->iobref = NULL;
+ }
+
+ if (state->iobuf) {
+ iobuf_unref (state->iobuf);
+ state->iobuf = NULL;
+ }
+
+ if (state->dict) {
+ dict_unref (state->dict);
+ state->dict = NULL;
+ }
+
+ if (state->volume)
+ GF_FREE ((void *)state->volume);
+
+ if (state->name)
+ GF_FREE ((void *)state->name);
+
+ server_loc_wipe (&state->loc);
+ server_loc_wipe (&state->loc2);
+
+ server_resolve_wipe (&state->resolve);
+ server_resolve_wipe (&state->resolve2);
+
+ GF_FREE (state);
+}
+
+
+call_frame_t *
+server_copy_frame (call_frame_t *frame)
+{
+ call_frame_t *new_frame = NULL;
+ server_state_t *state = NULL, *new_state = NULL;
+
+ state = frame->root->state;
+
+ new_frame = copy_frame (frame);
+
+ new_state = GF_CALLOC (1, sizeof (server_state_t), 0);
+
+ new_frame->root->op = frame->root->op;
+ new_frame->root->type = frame->root->type;
+ new_frame->root->trans = state->conn;
+ new_frame->root->state = new_state;
+
+ new_state->itable = state->itable;
+ new_state->conn = state->conn;
+ //new_state->conn = xprt_ref (state->conn);
+
+ new_state->resolve.fd_no = -1;
+ new_state->resolve2.fd_no = -1;
+
+ return new_frame;
+}
+
+
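+/* Lock-table bookkeeping: remember which inode/fd a client holds a lock on,
+ * so the locks can be released when the client disconnects (see
+ * do_lock_table_cleanup and server_connection_destroy). */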
+int
+gf_add_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc, fd_t *fd, pid_t pid)
+{
+ int32_t ret = -1;
+ struct _locker *new = NULL;
+ uint8_t dir = 0;
+
+ new = GF_CALLOC (1, sizeof (struct _locker), 0);
+ if (new == NULL) {
+ gf_log ("server", GF_LOG_ERROR,
+ "failed to allocate memory for \'struct _locker\'");
+ goto out;
+ }
+ INIT_LIST_HEAD (&new->lockers);
+
+ new->volume = gf_strdup (volume);
+
+ if (fd == NULL) {
+ loc_copy (&new->loc, loc);
+ dir = IA_ISDIR (new->loc.inode->ia_type);
+ } else {
+ new->fd = fd_ref (fd);
+ dir = IA_ISDIR (fd->inode->ia_type);
+ }
+
+ new->pid = pid;
+
+ LOCK (&table->lock);
+ {
+ if (dir)
+ list_add_tail (&new->lockers, &table->dir_lockers);
+ else
+ list_add_tail (&new->lockers, &table->file_lockers);
+ }
+ UNLOCK (&table->lock);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int
+gf_del_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc, fd_t *fd, pid_t pid)
+{
+ struct _locker *locker = NULL;
+ struct _locker *tmp = NULL;
+ int32_t ret = 0;
+ uint8_t dir = 0;
+ struct list_head *head = NULL;
+ struct list_head del;
+
+ INIT_LIST_HEAD (&del);
+
+ if (fd) {
+ dir = IA_ISDIR (fd->inode->ia_type);
+ } else {
+ dir = IA_ISDIR (loc->inode->ia_type);
+ }
+
+ LOCK (&table->lock);
+ {
+ if (dir) {
+ head = &table->dir_lockers;
+ } else {
+ head = &table->file_lockers;
+ }
+
+ list_for_each_entry_safe (locker, tmp, head, lockers) {
+ if (locker->fd && fd &&
+ (locker->fd == fd) && (locker->pid == pid)
+ && !strcmp (locker->volume, volume)) {
+ list_move_tail (&locker->lockers, &del);
+ } else if (locker->loc.inode &&
+ loc &&
+ (locker->loc.inode == loc->inode) &&
+ (locker->pid == pid)
+ && !strcmp (locker->volume, volume)) {
+ list_move_tail (&locker->lockers, &del);
+ }
+ }
+ }
+ UNLOCK (&table->lock);
+
+ tmp = NULL;
+ locker = NULL;
+
+ list_for_each_entry_safe (locker, tmp, &del, lockers) {
+ list_del_init (&locker->lockers);
+ if (locker->fd)
+ fd_unref (locker->fd);
+ else
+ loc_wipe (&locker->loc);
+
+ GF_FREE (locker->volume);
+ GF_FREE (locker);
+ }
+
+ return ret;
+}
+
+
+int
+gf_direntry_to_bin (dir_entry_t *head, char *buffer)
+{
+ dir_entry_t *trav = NULL;
+ uint32_t len = 0;
+ uint32_t this_len = 0;
+ size_t buflen = -1;
+ char *ptr = NULL;
+ char *tmp_buf = NULL;
+
+ trav = head->next;
+ while (trav) {
+ len += strlen (trav->name);
+ len += 1;
+ len += strlen (trav->link);
+ len += 1; /* for '\n' */
+ len += 256; // max possible for statbuf;
+ trav = trav->next;
+ }
+
+ ptr = buffer;
+ trav = head->next;
+ while (trav) {
+ tmp_buf = stat_to_str (&trav->buf);
+ /* tmp_buf will have \n before \0 */
+
+ this_len = sprintf (ptr, "%s/%s%s\n",
+ trav->name, tmp_buf,
+ trav->link);
+
+ GF_FREE (tmp_buf);
+ trav = trav->next;
+ ptr += this_len;
+ }
+
+ buflen = strlen (buffer);
+
+ return buflen;
+}
+
+
+static struct _lock_table *
+gf_lock_table_new (void)
+{
+ struct _lock_table *new = NULL;
+
+ new = GF_CALLOC (1, sizeof (struct _lock_table), 0);
+ if (new == NULL) {
+ gf_log ("server-protocol", GF_LOG_CRITICAL,
+ "failed to allocate memory for new lock table");
+ goto out;
+ }
+ INIT_LIST_HEAD (&new->dir_lockers);
+ INIT_LIST_HEAD (&new->file_lockers);
+ LOCK_INIT (&new->lock);
+out:
+ return new;
+}
+
+static int
+server_nop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE(frame);
+
+ if (state)
+ free_state (state);
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+
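+/* Release every lock still recorded in @ltable for this connection by
+ * winding unlock requests with pid 0, which posix-locks interprets as
+ * "drop all locks held by this transport". */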
+int
+do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
+ call_frame_t *frame, struct _lock_table *ltable)
+{
+ struct list_head file_lockers, dir_lockers;
+ call_frame_t *tmp_frame = NULL;
+ struct flock flock = {0, };
+ xlator_t *bound_xl = NULL;
+ struct _locker *locker = NULL, *tmp = NULL;
+ int ret = -1;
+
+ bound_xl = conn->bound_xl;
+ INIT_LIST_HEAD (&file_lockers);
+ INIT_LIST_HEAD (&dir_lockers);
+
+ LOCK (&ltable->lock);
+ {
+ list_splice_init (&ltable->file_lockers,
+ &file_lockers);
+
+ list_splice_init (&ltable->dir_lockers, &dir_lockers);
+ }
+ UNLOCK (&ltable->lock);
+
+ GF_FREE (ltable);
+
+ flock.l_type = F_UNLCK;
+ flock.l_start = 0;
+ flock.l_len = 0;
+ list_for_each_entry_safe (locker,
+ tmp, &file_lockers, lockers) {
+ tmp_frame = copy_frame (frame);
+ if (tmp_frame == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "out of memory");
+ goto out;
+ }
+ /*
+ pid = 0 is a special case that tells posix-locks
+ to release all locks from this transport
+ */
+ tmp_frame->root->pid = 0;
+ tmp_frame->root->trans = conn;
+
+ if (locker->fd) {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->finodelk,
+ locker->volume,
+ locker->fd, F_SETLK, &flock);
+ fd_unref (locker->fd);
+ } else {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->inodelk,
+ locker->volume,
+ &(locker->loc), F_SETLK, &flock);
+ loc_wipe (&locker->loc);
+ }
+
+ GF_FREE (locker->volume);
+
+ list_del_init (&locker->lockers);
+ GF_FREE (locker);
+ }
+
+ tmp = NULL;
+ locker = NULL;
+ list_for_each_entry_safe (locker, tmp, &dir_lockers, lockers) {
+ tmp_frame = copy_frame (frame);
+
+ tmp_frame->root->pid = 0;
+ tmp_frame->root->trans = conn;
+
+ if (locker->fd) {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->fentrylk,
+ locker->volume,
+ locker->fd, NULL,
+ ENTRYLK_UNLOCK, ENTRYLK_WRLCK);
+ fd_unref (locker->fd);
+ } else {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->entrylk,
+ locker->volume,
+ &(locker->loc), NULL,
+ ENTRYLK_UNLOCK, ENTRYLK_WRLCK);
+ loc_wipe (&locker->loc);
+ }
+
+ GF_FREE (locker->volume);
+
+ list_del_init (&locker->lockers);
+ GF_FREE (locker);
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static int
+server_connection_cleanup_flush_cbk (call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno)
+{
+ fd_t *fd = NULL;
+
+ fd = frame->local;
+
+ fd_unref (fd);
+ frame->local = NULL;
+
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+
+
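+/* Flush every fd the disconnected client left open; each fd is unref'd in
+ * server_connection_cleanup_flush_cbk once the flush completes. */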
+int
+do_fd_cleanup (xlator_t *this, server_connection_t *conn, call_frame_t *frame,
+ fdentry_t *fdentries, int fd_count)
+{
+ fd_t *fd = NULL;
+ int i = 0, ret = -1;
+ call_frame_t *tmp_frame = NULL;
+ xlator_t *bound_xl = NULL;
+
+ bound_xl = conn->bound_xl;
+ for (i = 0;i < fd_count; i++) {
+ fd = fdentries[i].fd;
+
+ if (fd != NULL) {
+ tmp_frame = copy_frame (frame);
+ if (tmp_frame == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "out of memory");
+ goto out;
+ }
+ tmp_frame->local = fd;
+
+ tmp_frame->root->pid = 0;
+ tmp_frame->root->trans = conn;
+ tmp_frame->root->lk_owner = 0;
+ STACK_WIND (tmp_frame,
+ server_connection_cleanup_flush_cbk,
+ bound_xl, bound_xl->fops->flush, fd);
+ }
+ }
+
+ GF_FREE (fdentries);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+do_connection_cleanup (xlator_t *this, server_connection_t *conn,
+ struct _lock_table *ltable, fdentry_t *fdentries, int fd_count)
+{
+ int ret = 0;
+ int saved_ret = 0;
+ call_frame_t *frame = NULL;
+ server_state_t *state = NULL;
+
+ frame = create_frame (this, this->ctx->pool);
+ if (frame == NULL) {
+ gf_log (this->name, GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ saved_ret = do_lock_table_cleanup (this, conn, frame, ltable);
+
+ if (fdentries != NULL) {
+ ret = do_fd_cleanup (this, conn, frame, fdentries, fd_count);
+ }
+
+ state = CALL_STATE (frame);
+ if (state)
+ GF_FREE (state);
+
+ STACK_DESTROY (frame->root);
+
+ if (saved_ret || ret) {
+ ret = -1;
+ }
+
+out:
+ return ret;
+}
+
+
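+/* Called when one transport of a connection goes away; the lock table and
+ * the open fds are cleaned up only once the last active transport of the
+ * connection has disconnected. */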
+int
+server_connection_cleanup (xlator_t *this, server_connection_t *conn)
+{
+ char do_cleanup = 0;
+ struct _lock_table *ltable = NULL;
+ fdentry_t *fdentries = NULL;
+ uint32_t fd_count = 0;
+ int ret = 0;
+
+ if (conn == NULL) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ conn->active_transports--;
+ if (conn->active_transports == 0) {
+ if (conn->ltable) {
+ ltable = conn->ltable;
+ conn->ltable = gf_lock_table_new ();
+ }
+
+ if (conn->fdtable) {
+ fdentries = gf_fd_fdtable_get_all_fds (conn->fdtable,
+ &fd_count);
+ }
+ do_cleanup = 1;
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ if (do_cleanup && conn->bound_xl)
+ ret = do_connection_cleanup (this, conn, ltable, fdentries, fd_count);
+
+out:
+ return ret;
+}
+
+
+int
+server_connection_destroy (xlator_t *this, server_connection_t *conn)
+{
+ call_frame_t *frame = NULL, *tmp_frame = NULL;
+ xlator_t *bound_xl = NULL;
+ int32_t ret = -1;
+ server_state_t *state = NULL;
+ struct list_head file_lockers;
+ struct list_head dir_lockers;
+ struct _lock_table *ltable = NULL;
+ struct _locker *locker = NULL, *tmp = NULL;
+ struct flock flock = {0,};
+ fd_t *fd = NULL;
+ int32_t i = 0;
+ fdentry_t *fdentries = NULL;
+ uint32_t fd_count = 0;
+
+ if (conn == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ bound_xl = (xlator_t *) (conn->bound_xl);
+
+ if (bound_xl) {
+ /* trans will have ref_count = 1 after this call, but it's
+ ok since this function is called in
+ GF_EVENT_TRANSPORT_CLEANUP */
+ frame = create_frame (this, this->ctx->pool);
+
+ pthread_mutex_lock (&(conn->lock));
+ {
+ if (conn->ltable) {
+ ltable = conn->ltable;
+ conn->ltable = NULL;
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ INIT_LIST_HEAD (&file_lockers);
+ INIT_LIST_HEAD (&dir_lockers);
+
+ LOCK (&ltable->lock);
+ {
+ list_splice_init (&ltable->file_lockers,
+ &file_lockers);
+
+ list_splice_init (&ltable->dir_lockers, &dir_lockers);
+ }
+ UNLOCK (&ltable->lock);
+ GF_FREE (ltable);
+
+ flock.l_type = F_UNLCK;
+ flock.l_start = 0;
+ flock.l_len = 0;
+ list_for_each_entry_safe (locker,
+ tmp, &file_lockers, lockers) {
+ tmp_frame = copy_frame (frame);
+ /*
+ pid = 0 is a special case that tells posix-locks
+ to release all locks from this transport
+ */
+ tmp_frame->root->pid = 0;
+ tmp_frame->root->trans = conn;
+
+ if (locker->fd) {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->finodelk,
+ locker->volume,
+ locker->fd, F_SETLK, &flock);
+ fd_unref (locker->fd);
+ } else {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->inodelk,
+ locker->volume,
+ &(locker->loc), F_SETLK, &flock);
+ loc_wipe (&locker->loc);
+ }
+
+ GF_FREE (locker->volume);
+
+ list_del_init (&locker->lockers);
+ GF_FREE (locker);
+ }
+
+ tmp = NULL;
+ locker = NULL;
+ list_for_each_entry_safe (locker, tmp, &dir_lockers, lockers) {
+ tmp_frame = copy_frame (frame);
+
+ tmp_frame->root->pid = 0;
+ tmp_frame->root->trans = conn;
+
+ if (locker->fd) {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->fentrylk,
+ locker->volume,
+ locker->fd, NULL,
+ ENTRYLK_UNLOCK, ENTRYLK_WRLCK);
+ fd_unref (locker->fd);
+ } else {
+ STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
+ bound_xl->fops->entrylk,
+ locker->volume,
+ &(locker->loc), NULL,
+ ENTRYLK_UNLOCK, ENTRYLK_WRLCK);
+ loc_wipe (&locker->loc);
+ }
+
+ GF_FREE (locker->volume);
+
+ list_del_init (&locker->lockers);
+ GF_FREE (locker);
+ }
+
+ pthread_mutex_lock (&(conn->lock));
+ {
+ if (conn->fdtable) {
+ fdentries = gf_fd_fdtable_get_all_fds (conn->fdtable,
+ &fd_count);
+ gf_fd_fdtable_destroy (conn->fdtable);
+ conn->fdtable = NULL;
+ }
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ if (fdentries != NULL) {
+ for (i = 0; i < fd_count; i++) {
+ fd = fdentries[i].fd;
+ if (fd != NULL) {
+ tmp_frame = copy_frame (frame);
+ tmp_frame->local = fd;
+
+ STACK_WIND (tmp_frame,
+ server_connection_cleanup_flush_cbk,
+ bound_xl,
+ bound_xl->fops->flush,
+ fd);
+ }
+ }
+ GF_FREE (fdentries);
+ }
+ }
+
+ if (frame) {
+ state = CALL_STATE (frame);
+ if (state)
+ GF_FREE (state);
+ STACK_DESTROY (frame->root);
+ }
+
+ gf_log (this->name, GF_LOG_INFO, "destroyed connection of %s",
+ conn->id);
+
+ GF_FREE (conn->id);
+ GF_FREE (conn);
+
+out:
+ return ret;
+}
+
+
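+/* Look up (or create) the connection object identified by @id (the client's
+ * process-uuid), so a reconnecting client gets back its fd and lock tables.
+ * Every call takes a reference; drop it with server_connection_put(). */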
+server_connection_t *
+server_connection_get (xlator_t *this, const char *id)
+{
+ server_connection_t *conn = NULL;
+ server_connection_t *trav = NULL;
+ server_conf_t *conf = NULL;
+
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->mutex);
+ {
+ list_for_each_entry (trav, &conf->conns, list) {
+ if (!strcmp (id, trav->id)) {
+ conn = trav;
+ break;
+ }
+ }
+
+ if (!conn) {
+ conn = (void *) GF_CALLOC (1, sizeof (*conn), 0);
+
+ conn->id = gf_strdup (id);
+ conn->fdtable = gf_fd_fdtable_alloc ();
+ conn->ltable = gf_lock_table_new ();
+ conn->this = this;
+ pthread_mutex_init (&conn->lock, NULL);
+
+ list_add (&conn->list, &conf->conns);
+ }
+
+ conn->ref++;
+ conn->active_transports++;
+ }
+ pthread_mutex_unlock (&conf->mutex);
+
+ return conn;
+}
+
+
+void
+server_connection_put (xlator_t *this, server_connection_t *conn)
+{
+ server_conf_t *conf = NULL;
+ server_connection_t *todel = NULL;
+
+ if (conn == NULL) {
+ goto out;
+ }
+
+ conf = this->private;
+
+ pthread_mutex_lock (&conf->mutex);
+ {
+ conn->ref--;
+
+ if (!conn->ref) {
+ list_del_init (&conn->list);
+ todel = conn;
+ }
+ }
+ pthread_mutex_unlock (&conf->mutex);
+
+ if (todel) {
+ server_connection_destroy (this, todel);
+ }
+
+out:
+ return;
+}
+
+static call_frame_t *
+server_alloc_frame (rpcsvc_request_t *req)
+{
+ call_frame_t *frame = NULL;
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+
+ GF_VALIDATE_OR_GOTO("server", req, out);
+
+ conn = (server_connection_t *)req->conn->trans->xl_private;
+ if (!conn)
+ goto out;
+ frame = create_frame (conn->this, req->conn->svc->ctx->pool);
+ GF_VALIDATE_OR_GOTO("server", frame, out);
+
+ state = GF_CALLOC (1, sizeof (*state), 0);
+ GF_VALIDATE_OR_GOTO("server", state, out);
+
+ if (conn->bound_xl)
+ state->itable = conn->bound_xl->itable;
+
+ state->xprt = req->conn->trans;
+ state->conn = conn;
+
+ state->resolve.fd_no = -1;
+ state->resolve2.fd_no = -1;
+
+ frame->root->state = state; /* which socket */
+ frame->root->unique = 0; /* which call */
+
+ frame->this = conn->this;
+out:
+ return frame;
+}
+
+
+
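+/* Build a call frame for an incoming RPC request, carrying over the caller's
+ * credentials (uid, gid, pid, lk-owner, auxiliary groups) and the xid. */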
+call_frame_t *
+get_frame_from_request (rpcsvc_request_t *req)
+{
+ call_frame_t *frame = NULL;
+
+ frame = server_alloc_frame (req);
+ if (!frame)
+ goto out;
+
+ frame->root->op = req->procnum;
+ frame->root->type = req->type;
+
+ frame->root->unique = req->xid;
+
+ frame->root->uid = req->uid;
+ frame->root->gid = req->gid;
+ frame->root->pid = req->pid;
+ frame->root->lk_owner = req->lk_owner;
+
+ server_decode_groups (frame, req);
+
+ frame->local = req;
+out:
+ return frame;
+}
+
+
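+/* Parse the server xlator's own options (inode-lru-limit,
+ * verify-volfile-checksum, trace, limits.transaction-size, config-directory)
+ * into @conf, validating the configured directory along the way. */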
+int
+server_build_config (xlator_t *this, server_conf_t *conf)
+{
+ data_t *data = NULL;
+ int ret = -1;
+ struct stat buf = {0,};
+
+ ret = dict_get_int32 (this->options, "inode-lru-limit",
+ &conf->inode_lru_limit);
+ if (ret < 0) {
+ conf->inode_lru_limit = 1024;
+ }
+
+ conf->verify_volfile = 1;
+ data = dict_get (this->options, "verify-volfile-checksum");
+ if (data) {
+ ret = gf_string2boolean(data->data, &conf->verify_volfile);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "wrong value for 'verify-volfile-checksum', "
+ "Neglecting option");
+ }
+ }
+
+ data = dict_get (this->options, "trace");
+ if (data) {
+ ret = gf_string2boolean (data->data, &conf->trace);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "'trace' takes on only boolean values. "
+ "Neglecting option");
+ }
+ }
+
+ /* TODO: build_rpc_config (); */
+ ret = dict_get_int32 (this->options, "limits.transaction-size",
+ &conf->rpc_conf.max_block_size);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "defaulting limits.transaction-size to %d",
+ DEFAULT_BLOCK_SIZE);
+ conf->rpc_conf.max_block_size = DEFAULT_BLOCK_SIZE;
+ }
+
+ data = dict_get (this->options, "config-directory");
+ if (data) {
+ /* Check that the specified config directory exists
+ and is in fact a directory */
+ ret = stat (data->data, &buf);
+ if ((ret != 0) || !S_ISDIR (buf.st_mode)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Directory '%s' doesn't exist, exiting.",
+ data->data);
+ ret = -1;
+ goto out;
+ }
+ /* Make sure that conf-dir doesn't contain ".." in path */
+ if ((gf_strstr (data->data, "/", "..")) == -1) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s: invalid conf_dir", data->data);
+ goto out;
+ }
+
+ conf->conf_dir = gf_strdup (data->data);
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+server_connection_t *
+get_server_conn_state (xlator_t *this, rpc_transport_t *xprt)
+{
+ return (server_connection_t *)xprt->xl_private;
+}
+
+server_connection_t *
+create_server_conn_state (xlator_t *this, rpc_transport_t *xprt)
+{
+ server_connection_t *conn = NULL;
+ int ret = -1;
+
+ conn = GF_CALLOC (1, sizeof (*conn), 0);
+ if (!conn)
+ goto out;
+
+ pthread_mutex_init (&conn->lock, NULL);
+
+ conn->fdtable = gf_fd_fdtable_alloc ();
+ if (!conn->fdtable)
+ goto out;
+
+ conn->ltable = gf_lock_table_new ();
+ if (!conn->ltable)
+ goto out;
+
+ conn->this = this;
+
+ xprt->xl_private = conn;
+
+ ret = 0;
+out:
+ if (ret)
+ destroy_server_conn_state (conn);
+
+ return conn;
+}
+
+void
+destroy_server_conn_state (server_connection_t *conn)
+{
+ if (!conn) {
+ return;
+ }
+
+ if (conn->ltable) {
+ /* TODO */
+ //FREE (conn->ltable);
+ ;
+ }
+
+ if (conn->fdtable)
+ gf_fd_fdtable_destroy (conn->fdtable);
+
+ pthread_mutex_destroy (&conn->lock);
+
+ GF_FREE (conn);
+
+ return;
+}
+
+
+void
+print_caller (char *str, int size, call_frame_t *frame)
+{
+ int filled = 0;
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ filled += snprintf (str + filled, size - filled,
+ " Callid=%"PRId64", Client=%s",
+ frame->root->unique,
+ state->xprt->peerinfo.identifier);
+
+ return;
+}
+
+
+void
+server_print_resolve (char *str, int size, server_resolve_t *resolve)
+{
+ int filled = 0;
+
+ if (!resolve) {
+ snprintf (str, size, "<nul>");
+ return;
+ }
+
+ filled += snprintf (str + filled, size - filled,
+ " Resolve={");
+ if (resolve->fd_no != -1)
+ filled += snprintf (str + filled, size - filled,
+ "fd=%"PRId64",", (uint64_t) resolve->fd_no);
+ if (resolve->ino)
+ filled += snprintf (str + filled, size - filled,
+ "ino=%"PRIu64",", (uint64_t) resolve->ino);
+ if (resolve->par)
+ filled += snprintf (str + filled, size - filled,
+ "par=%"PRIu64",", (uint64_t) resolve->par);
+ if (resolve->gen)
+ filled += snprintf (str + filled, size - filled,
+ "gen=%"PRIu64",", (uint64_t) resolve->gen);
+ if (resolve->bname)
+ filled += snprintf (str + filled, size - filled,
+ "bname=%s,", resolve->bname);
+ if (resolve->path)
+ filled += snprintf (str + filled, size - filled,
+ "path=%s", resolve->path);
+
+ filled += snprintf (str + filled, size - filled, "}");
+}
+
+
+void
+server_print_loc (char *str, int size, loc_t *loc)
+{
+ int filled = 0;
+
+ if (!loc) {
+ snprintf (str, size, "<nul>");
+ return;
+ }
+
+ filled += snprintf (str + filled, size - filled,
+ " Loc={");
+
+ if (loc->path)
+ filled += snprintf (str + filled, size - filled,
+ "path=%s,", loc->path);
+ if (loc->inode)
+ filled += snprintf (str + filled, size - filled,
+ "inode=%p,", loc->inode);
+ if (loc->parent)
+ filled += snprintf (str + filled, size - filled,
+ "parent=%p", loc->parent);
+
+ filled += snprintf (str + filled, size - filled, "}");
+}
+
+
+void
+server_print_params (char *str, int size, server_state_t *state)
+{
+ int filled = 0;
+
+ filled += snprintf (str + filled, size - filled,
+ " Params={");
+
+ if (state->fd)
+ filled += snprintf (str + filled, size - filled,
+ "fd=%p,", state->fd);
+ if (state->valid)
+ filled += snprintf (str + filled, size - filled,
+ "valid=%d,", state->valid);
+ if (state->flags)
+ filled += snprintf (str + filled, size - filled,
+ "flags=%d,", state->flags);
+ if (state->wbflags)
+ filled += snprintf (str + filled, size - filled,
+ "wbflags=%d,", state->wbflags);
+ if (state->size)
+ filled += snprintf (str + filled, size - filled,
+ "size=%zu,", state->size);
+ if (state->offset)
+ filled += snprintf (str + filled, size - filled,
+ "offset=%"PRId64",", state->offset);
+ if (state->cmd)
+ filled += snprintf (str + filled, size - filled,
+ "cmd=%d,", state->cmd);
+ if (state->type)
+ filled += snprintf (str + filled, size - filled,
+ "type=%d,", state->type);
+ if (state->name)
+ filled += snprintf (str + filled, size - filled,
+ "name=%s,", state->name);
+ if (state->mask)
+ filled += snprintf (str + filled, size - filled,
+ "mask=%d,", state->mask);
+ if (state->volume)
+ filled += snprintf (str + filled, size - filled,
+ "volume=%s,", state->volume);
+
+ filled += snprintf (str + filled, size - filled,
+ "bound_xl=%s}", state->conn->bound_xl->name);
+}
+
+int
+server_resolve_is_empty (server_resolve_t *resolve)
+{
+ if (resolve->fd_no != -1)
+ return 0;
+
+ if (resolve->ino != 0)
+ return 0;
+
+ if (resolve->gen != 0)
+ return 0;
+
+ if (resolve->par != 0)
+ return 0;
+
+ if (resolve->path != 0)
+ return 0;
+
+ if (resolve->bname != 0)
+ return 0;
+
+ return 1;
+}
+
+void
+server_print_reply (call_frame_t *frame, int op_ret, int op_errno)
+{
+ server_conf_t *conf = NULL;
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ char caller[512];
+ char fdstr[32];
+ char *op = "UNKNOWN";
+
+ this = frame->this;
+ conf = this->private;
+
+ if (!conf->trace)
+ return;
+
+ state = CALL_STATE (frame);
+
+ print_caller (caller, 256, frame);
+
+ switch (frame->root->type) {
+ case GF_OP_TYPE_FOP:
+ op = gf_fop_list[frame->root->op];
+ break;
+ case GF_OP_TYPE_MGMT:
+ op = gf_mgmt_list[frame->root->op];
+ break;
+ default:
+ op = "";
+ }
+
+ fdstr[0] = '\0';
+ if (state->fd)
+ snprintf (fdstr, 32, " fd=%p", state->fd);
+
+ gf_log (this->name, GF_LOG_NORMAL,
+ "%s%s => (%d, %d)%s",
+ op, caller, op_ret, op_errno, fdstr);
+}
+
+
+void
+server_print_request (call_frame_t *frame)
+{
+ server_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ server_state_t *state = NULL;
+ char resolve_vars[256];
+ char resolve2_vars[256];
+ char loc_vars[256];
+ char loc2_vars[256];
+ char other_vars[512];
+ char caller[512];
+ char *op = "UNKNOWN";
+
+ this = frame->this;
+ conf = this->private;
+
+ state = CALL_STATE (frame);
+
+ if (!conf->trace)
+ return;
+
+ memset (resolve_vars, '\0', 256);
+ memset (resolve2_vars, '\0', 256);
+ memset (loc_vars, '\0', 256);
+ memset (loc2_vars, '\0', 256);
+ memset (other_vars, '\0', 512);
+
+ print_caller (caller, 256, frame);
+
+ if (!server_resolve_is_empty (&state->resolve)) {
+ server_print_resolve (resolve_vars, 256, &state->resolve);
+ server_print_loc (loc_vars, 256, &state->loc);
+ }
+
+ if (!server_resolve_is_empty (&state->resolve2)) {
+ server_print_resolve (resolve2_vars, 256, &state->resolve2);
+ server_print_loc (loc2_vars, 256, &state->loc2);
+ }
+
+ server_print_params (other_vars, 512, state);
+
+ switch (frame->root->type) {
+ case GF_OP_TYPE_FOP:
+ op = gf_fop_list[frame->root->op];
+ break;
+ case GF_OP_TYPE_MGMT:
+ op = gf_mgmt_list[frame->root->op];
+ break;
+ default:
+ op = "";
+ break;
+ }
+
+ gf_log (this->name, GF_LOG_NORMAL,
+ "%s%s%s%s%s%s%s",
+ op, caller,
+ resolve_vars, loc_vars, resolve2_vars, loc2_vars, other_vars);
+}
diff --git a/xlators/protocol/server/src/server-helpers.h b/xlators/protocol/server/src/server-helpers.h
new file mode 100644
index 00000000000..4897336af69
--- /dev/null
+++ b/xlators/protocol/server/src/server-helpers.h
@@ -0,0 +1,89 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _SERVER_HELPERS_H
+#define _SERVER_HELPERS_H
+
+#include "server.h"
+
+#define CALL_STATE(frame) ((server_state_t *)frame->root->state)
+
+#define BOUND_XL(frame) ((xlator_t *) CALL_STATE(frame)->conn->bound_xl)
+
+#define XPRT_FROM_FRAME(frame) ((rpc_transport_t *) CALL_STATE(frame)->xprt)
+
+#define SERVER_CONNECTION(frame) \
+ ((server_connection_t *) CALL_STATE(frame)->conn)
+
+#define SERVER_CONF(frame) \
+ ((server_conf_t *)XPRT_FROM_FRAME(frame)->this->private)
+
+#define XPRT_FROM_XLATOR(this) ((((server_conf_t *)this->private))->listen)
+
+#define INODE_LRU_LIMIT(this) \
+ (((server_conf_t *)(this->private))->config.inode_lru_limit)
+
+#define IS_ROOT_INODE(inode) (inode == inode->table->root)
+
+#define IS_NOT_ROOT(pathlen) ((pathlen > 2)? 1 : 0)
+
+char *
+stat_to_str (struct iatt *stbuf);
+
+call_frame_t *
+server_copy_frame (call_frame_t *frame);
+
+void free_state (server_state_t *state);
+
+void server_loc_wipe (loc_t *loc);
+
+int32_t
+gf_add_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc,
+ fd_t *fd,
+ pid_t pid);
+
+int32_t
+gf_del_locker (struct _lock_table *table, const char *volume,
+ loc_t *loc,
+ fd_t *fd,
+ pid_t pid);
+
+int32_t
+gf_direntry_to_bin (dir_entry_t *head, char *bufferp);
+
+void
+server_print_request (call_frame_t *frame);
+
+call_frame_t *
+get_frame_from_request (rpcsvc_request_t *req);
+
+server_connection_t *
+get_server_conn_state (xlator_t *this, rpc_transport_t *xptr);
+
+server_connection_t *
+create_server_conn_state (xlator_t *this, rpc_transport_t *xptr);
+
+void
+destroy_server_conn_state (server_connection_t *conn);
+
+int
+server_build_config (xlator_t *this, server_conf_t *conf);
+
+#endif /* !_SERVER_HELPERS_H */
diff --git a/xlators/protocol/server/src/server-mem-types.h b/xlators/protocol/server/src/server-mem-types.h
new file mode 100644
index 00000000000..76c5ae1ac0c
--- /dev/null
+++ b/xlators/protocol/server/src/server-mem-types.h
@@ -0,0 +1,37 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef __SERVER_MEM_TYPES_H__
+#define __SERVER_MEM_TYPES_H__
+
+#include "mem-types.h"
+
+enum gf_server_mem_types_ {
+ gf_server_mt_server_conf_t = gf_common_mt_end + 1,
+ gf_server_mt_resolv_comp_t,
+ gf_server_mt_state_t,
+ gf_server_mt_locker_t,
+ gf_server_mt_lock_table_t,
+ gf_server_mt_conn_t,
+ gf_server_mt_rsp_buf_t,
+ gf_server_mt_volfile_ctx_t,
+ gf_server_mt_end,
+};
+#endif /* __SERVER_MEM_TYPES_H__ */
diff --git a/xlators/protocol/server/src/server-resolve.c b/xlators/protocol/server/src/server-resolve.c
new file mode 100644
index 00000000000..77336216f19
--- /dev/null
+++ b/xlators/protocol/server/src/server-resolve.c
@@ -0,0 +1,655 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "server.h"
+#include "server-helpers.h"
+
+
+int
+server_resolve_all (call_frame_t *frame);
+int
+resolve_entry_simple (call_frame_t *frame);
+int
+resolve_inode_simple (call_frame_t *frame);
+int
+resolve_path_simple (call_frame_t *frame);
+
+int
+component_count (const char *path)
+{
+ int count = 0;
+ const char *trav = NULL;
+
+ for (trav = path; *trav; trav++) {
+ if (*trav == '/')
+ count++;
+ }
+
+ return count + 2;
+}
+
+
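+/* Split resolve->path into its components: a private copy of the path is
+ * taken, every '/' in it is replaced with '\0', and components[] records the
+ * basenames, with the root ("", inode 1) at index 0. */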
+int
+prepare_components (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ char *resolved = NULL;
+ int count = 0;
+ struct resolve_comp *components = NULL;
+ int i = 0;
+ char *trav = NULL;
+
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+
+ resolved = gf_strdup (resolve->path);
+ resolve->resolved = resolved;
+
+ count = component_count (resolve->path);
+ components = GF_CALLOC (count, sizeof (*components),
+ gf_server_mt_resolv_comp_t);
+ resolve->components = components;
+
+ components[0].basename = "";
+ components[0].ino = 1;
+ components[0].gen = 0;
+ components[0].inode = state->itable->root;
+
+ i = 1;
+ for (trav = resolved; *trav; trav++) {
+ if (*trav == '/') {
+ components[i].basename = trav + 1;
+ *trav = 0;
+ i++;
+ }
+ }
+
+ return 0;
+}
+
+
+int
+resolve_loc_touchup (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ server_resolve_t *resolve = NULL;
+ loc_t *loc = NULL;
+ char *path = NULL;
+ int ret = 0;
+
+ state = CALL_STATE (frame);
+
+ resolve = state->resolve_now;
+ loc = state->loc_now;
+
+ if (!loc->path) {
+ if (loc->parent) {
+ ret = inode_path (loc->parent, resolve->bname, &path);
+ } else if (loc->inode) {
+ ret = inode_path (loc->inode, NULL, &path);
+ }
+
+ if (!path)
+ path = gf_strdup (resolve->path);
+
+ loc->path = path;
+ }
+
+ loc->name = strrchr (loc->path, '/');
+ if (loc->name)
+ loc->name++;
+
+ if (!loc->parent && loc->inode) {
+ loc->parent = inode_parent (loc->inode, 0, NULL);
+ }
+
+ return 0;
+}
+
+
+int
+resolve_deep_continue (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ int ret = 0;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+
+ resolve->op_ret = 0;
+ resolve->op_errno = 0;
+
+ if (resolve->par)
+ ret = resolve_entry_simple (frame);
+ else if (resolve->ino)
+ ret = resolve_inode_simple (frame);
+ else if (resolve->path)
+ ret = resolve_path_simple (frame);
+
+ resolve_loc_touchup (frame);
+
+ server_resolve_all (frame);
+
+ return 0;
+}
+
+
+int
+resolve_deep_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, inode_t *inode, struct iatt *buf,
+ dict_t *xattr, struct iatt *postparent)
+{
+ server_state_t *state = NULL;
+ server_resolve_t *resolve = NULL;
+ struct resolve_comp *components = NULL;
+ int i = 0;
+ inode_t *link_inode = NULL;
+
+ state = CALL_STATE (frame);
+ resolve = state->resolve_now;
+ components = resolve->components;
+
+ i = (long) cookie;
+
+ if (op_ret == -1) {
+ goto get_out_of_here;
+ }
+
+ if (i != 0) {
+ /* no linking for root inode */
+ link_inode = inode_link (inode, resolve->deep_loc.parent,
+ resolve->deep_loc.name, buf);
+ inode_lookup (link_inode);
+ components[i].inode = link_inode;
+ link_inode = NULL;
+ }
+
+ loc_wipe (&resolve->deep_loc);
+
+ i++; /* next component */
+
+ if (!components[i].basename) {
+ /* all components of the path are resolved */
+ goto get_out_of_here;
+ }
+
+ /* join the current component with the path resolved until now */
+ *(components[i].basename - 1) = '/';
+
+ resolve->deep_loc.path = gf_strdup (resolve->resolved);
+ resolve->deep_loc.parent = inode_ref (components[i-1].inode);
+ resolve->deep_loc.inode = inode_new (state->itable);
+ resolve->deep_loc.name = components[i].basename;
+
+ STACK_WIND_COOKIE (frame, resolve_deep_cbk, (void *) (long) i,
+ BOUND_XL (frame), BOUND_XL (frame)->fops->lookup,
+ &resolve->deep_loc, NULL);
+ return 0;
+
+get_out_of_here:
+ resolve_deep_continue (frame);
+ return 0;
+}
+
+
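+/* Deep resolution: walk the path one component at a time starting from the
+ * root, issuing a lookup per component (resolve_deep_cbk links each inode as
+ * it is found), then retry the simple resolvers once the walk is done. */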
+int
+resolve_path_deep (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ int i = 0;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+
+ gf_log (BOUND_XL (frame)->name, GF_LOG_TRACE,
+ "RESOLVE %s() seeking deep resolution of %s",
+ gf_fop_list[frame->root->op], resolve->path);
+
+ prepare_components (frame);
+
+ /* start from the root */
+ resolve->deep_loc.inode = state->itable->root;
+ resolve->deep_loc.path = gf_strdup ("/");
+ resolve->deep_loc.name = "";
+
+ STACK_WIND_COOKIE (frame, resolve_deep_cbk, (void *) (long) i,
+ BOUND_XL (frame), BOUND_XL (frame)->fops->lookup,
+ &resolve->deep_loc, NULL);
+ return 0;
+}
+
+
+int
+resolve_path_simple (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ struct resolve_comp *components = NULL;
+ int ret = -1;
+ int par_idx = 0;
+ int ino_idx = 0;
+ int i = 0;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+ components = resolve->components;
+
+ if (!components) {
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ goto out;
+ }
+
+ for (i = 0; components[i].basename; i++) {
+ par_idx = ino_idx;
+ ino_idx = i;
+ }
+
+ if (!components[par_idx].inode) {
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ goto out;
+ }
+
+ if (!components[ino_idx].inode &&
+ (resolve->type == RESOLVE_MUST || resolve->type == RESOLVE_EXACT)) {
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ goto out;
+ }
+
+ if (components[ino_idx].inode && resolve->type == RESOLVE_NOT) {
+ resolve->op_ret = -1;
+ resolve->op_errno = EEXIST;
+ goto out;
+ }
+
+ if (components[ino_idx].inode)
+ state->loc_now->inode = inode_ref (components[ino_idx].inode);
+ state->loc_now->parent = inode_ref (components[par_idx].inode);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/*
+ Check if the requirements are fulfilled by entries in the inode cache itself
+ Return value:
+ <= 0 - simple resolution was decisive and complete (either success or failure)
+ > 0 - indecisive, need to perform deep resolution
+*/
+
+int
+resolve_entry_simple (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ inode_t *parent = NULL;
+ inode_t *inode = NULL;
+ int ret = 0;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+
+ parent = inode_get (state->itable, resolve->par, 0);
+ if (!parent) {
+ /* simple resolution is indecisive. need to perform
+ deep resolution */
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ ret = 1;
+
+ inode = inode_grep (state->itable, parent, resolve->bname);
+ if (inode != NULL) {
+ gf_log (this->name, GF_LOG_DEBUG, "%"PRId64": inode "
+ "(pointer:%p ino: %"PRIu64") present but parent"
+ " is NULL for path (%s)", frame->root->unique,
+ inode, inode->ino, resolve->path);
+ inode_unref (inode);
+ }
+ goto out;
+ }
+
+ if (parent->ino != 1 && parent->generation != resolve->gen) {
+ /* simple resolution is decisive - request was for a
+ stale handle */
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ ret = -1;
+ goto out;
+ }
+
+ /* expected @parent was found from the inode cache */
+ state->loc_now->parent = inode_ref (parent);
+
+ inode = inode_grep (state->itable, parent, resolve->bname);
+ if (!inode) {
+ switch (resolve->type) {
+ case RESOLVE_DONTCARE:
+ case RESOLVE_NOT:
+ ret = 0;
+ break;
+ case RESOLVE_MAY:
+ ret = 1;
+ break;
+ default:
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ ret = 1;
+ break;
+ }
+
+ goto out;
+ }
+
+ if (resolve->type == RESOLVE_NOT) {
+ gf_log (this->name, GF_LOG_DEBUG, "inode (pointer: %p ino:%"
+ PRIu64") found for path (%s) while type is RESOLVE_NOT",
+ inode, inode->ino, resolve->path);
+ resolve->op_ret = -1;
+ resolve->op_errno = EEXIST;
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+
+ state->loc_now->inode = inode_ref (inode);
+
+out:
+ if (parent)
+ inode_unref (parent);
+
+ if (inode)
+ inode_unref (inode);
+
+ return ret;
+}
+
+
+int
+server_resolve_entry (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ int ret = 0;
+ loc_t *loc = NULL;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+ loc = state->loc_now;
+
+ ret = resolve_entry_simple (frame);
+
+ if (ret > 0) {
+ loc_wipe (loc);
+ resolve_path_deep (frame);
+ return 0;
+ }
+
+ if (ret == 0)
+ resolve_loc_touchup (frame);
+
+ server_resolve_all (frame);
+
+ return 0;
+}
+
+
+int
+resolve_inode_simple (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ inode_t *inode = NULL;
+ int ret = 0;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+
+ if (resolve->type == RESOLVE_EXACT) {
+ inode = inode_get (state->itable, resolve->ino, resolve->gen);
+ } else {
+ inode = inode_get (state->itable, resolve->ino, 0);
+ }
+
+ if (!inode) {
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ ret = 1;
+ goto out;
+ }
+
+ if (inode->ino != 1 && inode->generation != resolve->gen) {
+ resolve->op_ret = -1;
+ resolve->op_errno = ENOENT;
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+
+ state->loc_now->inode = inode_ref (inode);
+
+out:
+ if (inode)
+ inode_unref (inode);
+
+ return ret;
+}
+
+
+int
+server_resolve_inode (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ int ret = 0;
+ loc_t *loc = NULL;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+ loc = state->loc_now;
+
+ ret = resolve_inode_simple (frame);
+
+ if (ret > 0) {
+ loc_wipe (loc);
+ resolve_path_deep (frame);
+ return 0;
+ }
+
+ if (ret == 0)
+ resolve_loc_touchup (frame);
+
+ server_resolve_all (frame);
+
+ return 0;
+}
+
+
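+/* Map the fd number sent by the client back to the server-side fd_t via the
+ * connection's fdtable; a stale fd number resolves to EBADF. */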
+int
+server_resolve_fd (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+ server_connection_t *conn = NULL;
+ uint64_t fd_no = -1;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+ conn = SERVER_CONNECTION (frame);
+
+ fd_no = resolve->fd_no;
+
+ state->fd = gf_fd_fdptr_get (conn->fdtable, fd_no);
+
+ if (!state->fd) {
+ resolve->op_ret = -1;
+ resolve->op_errno = EBADF;
+ }
+
+ server_resolve_all (frame);
+
+ return 0;
+}
+
+
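+/* Dispatch to the right resolver for the current request: an fd number, a
+ * (parent inode, basename) pair, a bare inode number, or a plain path, in
+ * that order of preference. */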
+int
+server_resolve (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+ server_resolve_t *resolve = NULL;
+
+ state = CALL_STATE (frame);
+ this = frame->this;
+ resolve = state->resolve_now;
+
+ if (resolve->fd_no != -1) {
+
+ server_resolve_fd (frame);
+
+ } else if (resolve->par) {
+
+ server_resolve_entry (frame);
+
+ } else if (resolve->ino) {
+
+ server_resolve_inode (frame);
+
+ } else if (resolve->path) {
+
+ resolve_path_deep (frame);
+
+ } else {
+
+ resolve->op_ret = -1;
+ resolve->op_errno = EINVAL;
+
+ server_resolve_all (frame);
+ }
+
+ return 0;
+}
+
+
+int
+server_resolve_done (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *bound_xl = NULL;
+
+ state = CALL_STATE (frame);
+ bound_xl = BOUND_XL (frame);
+
+ server_print_request (frame);
+
+ state->resume_fn (frame, bound_xl);
+
+ return 0;
+}
+
+
+/*
+ * This function is called multiple times, once for each location/fd that has
+ * to be resolved; state->resolve_now points at the one being resolved in the
+ * current pass.
+ */
+int
+server_resolve_all (call_frame_t *frame)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+
+ this = frame->this;
+ state = CALL_STATE (frame);
+
+ if (state->resolve_now == NULL) {
+
+ state->resolve_now = &state->resolve;
+ state->loc_now = &state->loc;
+
+ server_resolve (frame);
+
+ } else if (state->resolve_now == &state->resolve) {
+
+ state->resolve_now = &state->resolve2;
+ state->loc_now = &state->loc2;
+
+ server_resolve (frame);
+
+ } else if (state->resolve_now == &state->resolve2) {
+
+ server_resolve_done (frame);
+
+ } else {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid pointer for state->resolve_now");
+ }
+
+ return 0;
+}
+
+
+int
+resolve_and_resume (call_frame_t *frame, server_resume_fn_t fn)
+{
+ server_state_t *state = NULL;
+ xlator_t *this = NULL;
+
+ state = CALL_STATE (frame);
+ state->resume_fn = fn;
+
+ this = frame->this;
+
+ server_resolve_all (frame);
+
+ return 0;
+}
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
new file mode 100644
index 00000000000..18be607a94d
--- /dev/null
+++ b/xlators/protocol/server/src/server.c
@@ -0,0 +1,687 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include "server.h"
+#include "server-helpers.h"
+#include "glusterfs-xdr.h"
+#include "call-stub.h"
+#include "statedump.h"
+#include "defaults.h"
+#include "authenticate.h"
+#include "rpcsvc.h"
+
+struct iobuf *
+gfs_serialize_reply (rpcsvc_request_t *req, void *arg, gfs_serialize_t sfunc,
+ struct iovec *outmsg)
+{
+ struct iobuf *iob = NULL;
+ ssize_t retlen = -1;
+
+ /* First, get the io buffer into which the reply in arg will
+ * be serialized.
+ */
+ iob = iobuf_get (req->conn->svc->ctx->iobuf_pool);
+ if (!iob) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get iobuf");
+ goto ret;
+ }
+
+ iobuf_to_iovec (iob, outmsg);
+ /* Use the given serializer to translate the given C structure in arg
+ * to XDR format, which is written into the buffer in outmsg.
+ */
+ /* retlen is used to receive the error, since size_t is unsigned and we
+ * need -1 to signal an encoding failure.
+ */
+ retlen = sfunc (*outmsg, arg);
+ if (retlen == -1) {
+ gf_log ("", GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+
+ outmsg->iov_len = retlen;
+ret:
+ if (retlen == -1) {
+ iobuf_unref (iob);
+ iob = NULL;
+ }
+
+ return iob;
+}
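+
+/* Note (sketch only, no functional change): sfunc is a gfs_serialize_t as
+ * declared in server.h,
+ *
+ *     typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *args);
+ *
+ * i.e. a routine that XDR-encodes the reply structure into the iovec and
+ * returns the encoded length, or -1 on failure. server_submit_reply() below
+ * passes the per-fop encoder here, e.g. xdr_serialize_statfs_rsp for a
+ * gfs3_statfs_rsp.
+ */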
+
+
+
+/* Generic reply submission routine for GlusterFS protocol replies. */
+int
+server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, gfs_serialize_t sfunc)
+{
+ struct iobuf *iob = NULL;
+ int ret = -1;
+ struct iovec rsp = {0,};
+ server_state_t *state = NULL;
+ char new_iobref = 0;
+
+ if (!req) {
+ goto ret;
+ }
+
+ if (frame) {
+ state = CALL_STATE (frame);
+ }
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ gf_log ("", GF_LOG_ERROR, "out of memory");
+ goto ret;
+ }
+
+ new_iobref = 1;
+ }
+
+ iob = gfs_serialize_reply (req, arg, sfunc, &rsp);
+ if (!iob) {
+ gf_log ("", GF_LOG_ERROR, "Failed to serialize reply");
+ goto ret;
+ }
+
+ iobref_add (iobref, iob);
+
+ /* Then, submit the message for transmission. */
+ ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
+ iobref);
+
+ /* Now that we have handed the message to the RPC layer, we can safely
+ * unref the iob: the RPC layer takes its own ref when it queues the
+ * message onto its transmission list.
+ */
+ iobuf_unref (iob);
+ if (ret == -1) {
+ gf_log ("", GF_LOG_ERROR, "Reply submission failed");
+ goto ret;
+ }
+
+ ret = 0;
+ret:
+ if (state) {
+ free_state (state);
+ }
+
+ if (frame) {
+ STACK_DESTROY (frame->root);
+ }
+
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ return ret;
+}
+
+/* Decode the XDR-encoded request payload in req->msg[0] into the C structure in arg. */
+int
+xdr_to_glusterfs_req (rpcsvc_request_t *req, void *arg, gfs_serialize_t sfunc)
+{
+ int ret = -1;
+
+ if (!req)
+ return -1;
+
+ ret = sfunc (req->msg[0], arg);
+
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
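+
+/* Illustrative decode path (the request structure and decoder names here are
+ * assumptions; the real ones come from glusterfs-xdr.h / msg-xdr.h): an actor
+ * unpacks its arguments from req->msg[0] before resolving, roughly
+ *
+ *     gfs3_statfs_req args = {0,};
+ *
+ *     if (xdr_to_glusterfs_req (req, &args, xdr_to_statfs_req)) {
+ *             // XDR decoding failed; reply with EINVAL
+ *     }
+ */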
+
+
+#if 0
+/*
+ * prototype of operations function for each of mop and
+ * fop at server protocol level
+ *
+ * @frame: call frame pointer
+ * @bound_xl: the xlator that this frame is bound to
+ * @params: parameters dictionary
+ *
+ * to be used by the protocol interpreter, _not_ for external reference
+ */
+typedef int32_t (*gf_op_t) (call_frame_t *frame, xlator_t *bound_xl,
+ gf_hdr_common_t *hdr, size_t hdrlen,
+ struct iobuf *iobuf);
+
+
+static gf_op_t gf_fops[] = {
+ [GF_FOP_STAT] = server_stat,
+ [GF_FOP_READLINK] = server_readlink,
+ [GF_FOP_MKNOD] = server_mknod,
+ [GF_FOP_MKDIR] = server_mkdir,
+ [GF_FOP_UNLINK] = server_unlink,
+ [GF_FOP_RMDIR] = server_rmdir,
+ [GF_FOP_SYMLINK] = server_symlink,
+ [GF_FOP_RENAME] = server_rename,
+ [GF_FOP_LINK] = server_link,
+ [GF_FOP_TRUNCATE] = server_truncate,
+ [GF_FOP_OPEN] = server_open,
+ [GF_FOP_READ] = server_readv,
+ [GF_FOP_WRITE] = server_writev,
+ [GF_FOP_STATFS] = server_statfs,
+ [GF_FOP_FLUSH] = server_flush,
+ [GF_FOP_FSYNC] = server_fsync,
+ [GF_FOP_SETXATTR] = server_setxattr,
+ [GF_FOP_GETXATTR] = server_getxattr,
+ [GF_FOP_FGETXATTR] = server_fgetxattr,
+ [GF_FOP_FSETXATTR] = server_fsetxattr,
+ [GF_FOP_REMOVEXATTR] = server_removexattr,
+ [GF_FOP_OPENDIR] = server_opendir,
+ [GF_FOP_FSYNCDIR] = server_fsyncdir,
+ [GF_FOP_ACCESS] = server_access,
+ [GF_FOP_CREATE] = server_create,
+ [GF_FOP_FTRUNCATE] = server_ftruncate,
+ [GF_FOP_FSTAT] = server_fstat,
+ [GF_FOP_LK] = server_lk,
+ [GF_FOP_LOOKUP] = server_lookup,
+ [GF_FOP_READDIR] = server_readdir,
+ [GF_FOP_READDIRP] = server_readdirp,
+ [GF_FOP_INODELK] = server_inodelk,
+ [GF_FOP_FINODELK] = server_finodelk,
+ [GF_FOP_ENTRYLK] = server_entrylk,
+ [GF_FOP_FENTRYLK] = server_fentrylk,
+ [GF_FOP_CHECKSUM] = server_checksum,
+ [GF_FOP_RCHECKSUM] = server_rchecksum,
+ [GF_FOP_XATTROP] = server_xattrop,
+ [GF_FOP_FXATTROP] = server_fxattrop,
+ [GF_FOP_SETATTR] = server_setattr,
+ [GF_FOP_FSETATTR] = server_fsetattr,
+ [GF_FOP_SETDENTS] = server_setdents,
+ [GF_FOP_GETDENTS] = server_getdents,
+ [GF_FOP_LOCK_NOTIFY] = server_lock_notify,
+ [GF_FOP_LOCK_FNOTIFY] = server_lock_fnotify,
+};
+
+static gf_op_t gf_cbks[] = {
+ [GF_CBK_FORGET] = server_forget,
+ [GF_CBK_RELEASE] = server_release,
+ [GF_CBK_RELEASEDIR] = server_releasedir
+};
+
+#endif
+
+int
+server_fd (xlator_t *this)
+{
+ server_conf_t *conf = NULL;
+ server_connection_t *trav = NULL;
+ char key[GF_DUMP_MAX_BUF_LEN];
+ int i = 1;
+ int ret = -1;
+
+ if (!this)
+ return -1;
+
+ conf = this->private;
+ if (!conf) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "conf null in xlator");
+ return -1;
+ }
+
+ gf_proc_dump_add_section("xlator.protocol.server.conn");
+
+ ret = pthread_mutex_trylock (&conf->mutex);
+ if (ret) {
+ gf_log("", GF_LOG_WARNING, "Unable to dump fdtable"
+ " errno: %d", errno);
+ return -1;
+ }
+
+ list_for_each_entry (trav, &conf->conns, list) {
+ if (trav->id) {
+ gf_proc_dump_build_key(key,
+ "xlator.protocol.server.conn",
+ "%d.id", i);
+ gf_proc_dump_write(key, "%s", trav->id);
+ }
+
+ gf_proc_dump_build_key(key,"xlator.protocol.server.conn",
+ "%d.ref",i)
+ gf_proc_dump_write(key, "%d", trav->ref);
+ if (trav->bound_xl) {
+ gf_proc_dump_build_key(key,
+ "xlator.protocol.server.conn",
+ "%d.bound_xl", i);
+ gf_proc_dump_write(key, "%s", trav->bound_xl->name);
+ }
+
+ gf_proc_dump_build_key(key,
+ "xlator.protocol.server.conn",
+ "%d.id", i);
+ fdtable_dump(trav->fdtable,key);
+ i++;
+ }
+ pthread_mutex_unlock (&conf->mutex);
+
+
+ return 0;
+}
+
+int
+server_priv (xlator_t *this)
+{
+ return 0;
+}
+
+int
+server_inode (xlator_t *this)
+{
+ server_conf_t *conf = NULL;
+ server_connection_t *trav = NULL;
+ char key[GF_DUMP_MAX_BUF_LEN];
+ int i = 1;
+ int ret = -1;
+
+ if (!this)
+ return -1;
+
+ conf = this->private;
+ if (!conf) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "conf null in xlator");
+ return -1;
+ }
+
+ ret = pthread_mutex_trylock (&conf->mutex);
+ if (ret) {
+ gf_log("", GF_LOG_WARNING, "Unable to dump itable"
+ " errno: %d", errno);
+ return -1;
+ }
+
+ list_for_each_entry (trav, &conf->conns, list) {
+ if (trav->bound_xl && trav->bound_xl->itable) {
+ gf_proc_dump_build_key(key,
+ "xlator.protocol.server.conn",
+ "%d.bound_xl.%s",
+ i, trav->bound_xl->name);
+ inode_table_dump(trav->bound_xl->itable,key);
+ i++;
+ }
+ }
+ pthread_mutex_unlock (&conf->mutex);
+
+
+ return 0;
+}
+
+
+static void
+get_auth_types (dict_t *this, char *key, data_t *value, void *data)
+{
+ dict_t *auth_dict = NULL;
+ char *saveptr = NULL;
+ char *tmp = NULL;
+ char *key_cpy = NULL;
+ int32_t ret = -1;
+
+ auth_dict = data;
+ key_cpy = gf_strdup (key);
+ GF_VALIDATE_OR_GOTO("server", key_cpy, out);
+
+ tmp = strtok_r (key_cpy, ".", &saveptr);
+ ret = strcmp (tmp, "auth");
+ if (ret == 0) {
+ tmp = strtok_r (NULL, ".", &saveptr);
+ if (strcmp (tmp, "ip") == 0) {
+ /* TODO: backward compatibility, remove when
+ newer versions are available */
+ tmp = "addr";
+ gf_log ("server", GF_LOG_WARNING,
+ "assuming 'auth.ip' to be 'auth.addr'");
+ }
+ ret = dict_set_dynptr (auth_dict, tmp, NULL, 0);
+ if (ret < 0) {
+ gf_log ("server", GF_LOG_DEBUG,
+ "failed to dict_set_dynptr");
+ }
+ }
+
+ GF_FREE (key_cpy);
+out:
+ return;
+}
+
+
+int
+validate_auth_options (xlator_t *this, dict_t *dict)
+{
+ int ret = -1;
+ int error = 0;
+ xlator_list_t *trav = NULL;
+ data_pair_t *pair = NULL;
+ char *saveptr = NULL;
+ char *tmp = NULL;
+ char *key_cpy = NULL;
+
+ trav = this->children;
+ while (trav) {
+ error = -1;
+ for (pair = dict->members_list; pair; pair = pair->next) {
+ key_cpy = gf_strdup (pair->key);
+ tmp = strtok_r (key_cpy, ".", &saveptr);
+ ret = strcmp (tmp, "auth");
+ if (ret == 0) {
+ /* for module type */
+ tmp = strtok_r (NULL, ".", &saveptr);
+ /* for volume name */
+ tmp = strtok_r (NULL, ".", &saveptr);
+ }
+
+ if (strcmp (tmp, trav->xlator->name) == 0) {
+ error = 0;
+ GF_FREE (key_cpy);
+ break;
+ }
+ GF_FREE (key_cpy);
+ }
+ if (-1 == error) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "volume '%s' defined as subvolume, but no "
+ "authentication defined for the same",
+ trav->xlator->name);
+ break;
+ }
+ trav = trav->next;
+ }
+
+ return error;
+}
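+
+/* For reference (the volfile snippet is illustrative; subvolume name and
+ * address are placeholders): the two functions above consume options of the
+ * form "auth.<module>.<subvolume>.<key>", e.g.
+ *
+ *     volume brick-server
+ *       type protocol/server
+ *       option transport-type tcp
+ *       option auth.addr.brick.allow 192.168.1.*
+ *       subvolumes brick
+ *     end-volume
+ *
+ * get_auth_types() records the module name ("addr", with legacy "auth.ip"
+ * mapped to it), and validate_auth_options() checks that every subvolume
+ * ("brick" here) has at least one auth.* rule defined for it.
+ */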
+
+
+int
+server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ rpc_transport_t *xprt = NULL;
+ server_connection_t *conn = NULL;
+
+ if (!xl || !data) {
+ gf_log ("server", GF_LOG_WARNING,
+ "Calling rpc_notify without initializing");
+ goto out;
+ }
+
+ this = xl;
+ xprt = data;
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ {
+ /* Have a structure per new connection */
+ /* TODO: Should we create anything here at all?
+ *
+ * conn = create_server_conn_state (this, xprt);
+ * if (!conn)
+ * goto out;
+ *
+ * xprt->protocol_private = conn;
+ */
+ xprt->mydata = this;
+ break;
+ }
+ case RPCSVC_EVENT_DISCONNECT:
+ conn = get_server_conn_state (this, xprt);
+ if (conn)
+ destroy_server_conn_state (conn);
+
+ break;
+ default:
+ break;
+ }
+
+out:
+ return 0;
+}
+
+static int32_t
+mem_acct_init (xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init (this, gf_server_mt_end + 1);
+
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Memory accounting init"
+ "failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+init (xlator_t *this)
+{
+ int32_t ret = -1;
+ server_conf_t *conf = NULL;
+
+ if (this->children == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "protocol/server should have subvolume");
+ goto out;
+ }
+
+ if (this->parents != NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "protocol/server should not have parent volumes");
+ goto out;
+ }
+
+ ret = mem_acct_init (this);
+ if (ret)
+ goto out;
+
+ conf = GF_CALLOC (1, sizeof (server_conf_t), 0);
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ INIT_LIST_HEAD (&conf->conns);
+ pthread_mutex_init (&conf->mutex, NULL);
+
+ this->private = conf;
+
+ ret = server_build_config (this, conf);
+ if (ret)
+ goto out;
+
+ /* Authentication modules */
+ conf->auth_modules = dict_new ();
+ GF_VALIDATE_OR_GOTO(this->name, conf->auth_modules, out);
+
+ dict_foreach (this->options, get_auth_types, conf->auth_modules);
+ ret = validate_auth_options (this, this->options);
+ if (ret == -1) {
+ /* logging already done in validate_auth_options function. */
+ goto out;
+ }
+
+ ret = gf_auth_init (this, conf->auth_modules);
+ if (ret) {
+ dict_unref (conf->auth_modules);
+ goto out;
+ }
+
+ /* RPC related */
+ //conf->rpc = rpc_svc_init (&conf->rpc_conf);
+ conf->rpc = rpcsvc_init (this->ctx, this->options);
+ if (!conf->rpc) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpcsvc_register_notify (conf->rpc, server_rpc_notify, this);
+ if (ret)
+ goto out;
+
+ glusterfs3_1_fop_prog.options = this->options;
+ ret = rpcsvc_program_register (conf->rpc, glusterfs3_1_fop_prog);
+ if (ret)
+ goto out;
+
+ gluster_handshake_prog.options = this->options;
+ ret = rpcsvc_program_register (conf->rpc, gluster_handshake_prog);
+ if (ret)
+ goto out;
+
+#ifndef GF_DARWIN_HOST_OS
+ {
+ struct rlimit lim;
+
+ lim.rlim_cur = 1048576;
+ lim.rlim_max = 1048576;
+
+ if (setrlimit (RLIMIT_NOFILE, &lim) == -1) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "WARNING: Failed to set 'ulimit -n 1M': %s",
+ strerror(errno));
+ lim.rlim_cur = 65536;
+ lim.rlim_max = 65536;
+
+ if (setrlimit (RLIMIT_NOFILE, &lim) == -1) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to set max open fd to 64k: %s",
+ strerror(errno));
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "max open fd set to 64k");
+ }
+ }
+ }
+#endif
+
+ ret = 0;
+out:
+ if (ret)
+ this->fini (this);
+
+
+ return ret;
+}
+
+
+void
+fini (xlator_t *this)
+{
+ server_conf_t *conf = NULL;
+
+ conf = this->private;
+
+ if (conf) {
+ if (conf->rpc) {
+ /* TODO: memory leak here, have to free RPC */
+ /*
+ if (conf->rpc->conn) {
+ rpcsvc_conn_destroy (conf->rpc->conn);
+ }
+ rpcsvc_fini (conf->rpc);
+ */
+ ;
+ }
+
+ if (conf->conf_dir)
+ GF_FREE (conf->conf_dir);
+
+ if (conf->auth_modules)
+ dict_unref (conf->auth_modules);
+
+ GF_FREE (conf);
+ }
+
+ this->private = NULL;
+
+ return;
+}
+
+int
+notify (xlator_t *this, int32_t event, void *data, ...)
+{
+ int ret = 0;
+ switch (event) {
+ default:
+ default_notify (this, event, data);
+ break;
+ }
+
+ return ret;
+}
+
+
+struct xlator_fops fops = {
+};
+
+struct xlator_cbks cbks = {
+};
+
+struct xlator_dumpops dumpops = {
+ .priv = server_priv,
+ .fd = server_fd,
+ .inode = server_inode,
+};
+
+
+struct volume_options options[] = {
+ { .key = {"transport-type"},
+ .value = {"rpc", "rpc-over-rdma", "tcp", "socket", "ib-verbs",
+ "unix", "ib-sdp", "tcp/server", "ib-verbs/server"},
+ .type = GF_OPTION_TYPE_STR
+ },
+ { .key = {"volume-filename.*"},
+ .type = GF_OPTION_TYPE_PATH,
+ },
+ { .key = {"inode-lru-limit"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = (1 * GF_UNIT_MB)
+ },
+ { .key = {"verify-volfile-checksum"},
+ .type = GF_OPTION_TYPE_BOOL
+ },
+ { .key = {"trace"},
+ .type = GF_OPTION_TYPE_BOOL
+ },
+ { .key = {"config-directory",
+ "conf-dir"},
+ .type = GF_OPTION_TYPE_PATH,
+ },
+
+ { .key = {NULL} },
+};
diff --git a/xlators/protocol/server/src/server.h b/xlators/protocol/server/src/server.h
new file mode 100644
index 00000000000..aaa036e83b6
--- /dev/null
+++ b/xlators/protocol/server/src/server.h
@@ -0,0 +1,203 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _SERVER_H
+#define _SERVER_H
+
+#include <pthread.h>
+
+#include "rpcsvc.h"
+
+#include "fd.h"
+#include "protocol-common.h"
+
+#include "server-mem-types.h"
+
+#define DEFAULT_BLOCK_SIZE 4194304 /* 4MB */
+#define DEFAULT_VOLUME_FILE_PATH CONFDIR "/glusterfs.vol"
+
+typedef struct _server_state server_state_t;
+
+struct _locker {
+ struct list_head lockers;
+ char *volume;
+ loc_t loc;
+ fd_t *fd;
+ pid_t pid;
+};
+
+struct _lock_table {
+ struct list_head file_lockers;
+ struct list_head dir_lockers;
+ gf_lock_t lock;
+ size_t count;
+};
+
+
+/* private structure per connection (transport object)
+ * used as transport_t->xl_private
+ */
+struct _server_connection {
+ struct list_head list;
+ char *id;
+ int ref;
+ int active_transports;
+ pthread_mutex_t lock;
+ char disconnected;
+ fdtable_t *fdtable;
+ struct _lock_table *ltable;
+ xlator_t *bound_xl;
+ xlator_t *this;
+};
+
+typedef struct _server_connection server_connection_t;
+
+
+server_connection_t *
+server_connection_get (xlator_t *this, const char *id);
+
+void
+server_connection_put (xlator_t *this, server_connection_t *conn);
+
+int
+server_connection_destroy (xlator_t *this, server_connection_t *conn);
+
+int
+server_connection_cleanup (xlator_t *this, server_connection_t *conn);
+
+int server_null (rpcsvc_request_t *req);
+
+struct _volfile_ctx {
+ struct _volfile_ctx *next;
+ char *key;
+ uint32_t checksum;
+};
+
+struct server_conf {
+ rpcsvc_t *rpc;
+ struct rpcsvc_config rpc_conf;
+ int inode_lru_limit;
+ gf_boolean_t verify_volfile;
+ gf_boolean_t trace;
+ char *conf_dir;
+ struct _volfile_ctx *volfile;
+
+ dict_t *auth_modules;
+ pthread_mutex_t mutex;
+ struct list_head conns;
+};
+typedef struct server_conf server_conf_t;
+
+
+typedef enum {
+ RESOLVE_MUST = 1,
+ RESOLVE_NOT,
+ RESOLVE_MAY,
+ RESOLVE_DONTCARE,
+ RESOLVE_EXACT
+} server_resolve_type_t;
+
+
+struct resolve_comp {
+ char *basename;
+ ino_t ino;
+ uint64_t gen;
+ inode_t *inode;
+};
+
+typedef struct {
+ server_resolve_type_t type;
+ uint64_t fd_no;
+ ino_t ino;
+ uint64_t gen;
+ ino_t par;
+ char *path;
+ char *bname;
+ char *resolved;
+ int op_ret;
+ int op_errno;
+ loc_t deep_loc;
+ struct resolve_comp *components;
+ int comp_count;
+} server_resolve_t;
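+
+/* Field usage, summarizing the server_resolve() dispatch elsewhere in this
+ * patch (descriptive comment only): fd_no is set for fd-based fops, par/bname
+ * for entry operations, ino/gen for inode operations, and path is the
+ * fallback that triggers deep (component-by-component) resolution;
+ * op_ret/op_errno carry the resolution result back to the resume function.
+ */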
+
+
+typedef int (*server_resume_fn_t) (call_frame_t *frame, xlator_t *bound_xl);
+
+int
+resolve_and_resume (call_frame_t *frame, server_resume_fn_t fn);
+
+struct _server_state {
+ server_connection_t *conn;
+ rpc_transport_t *xprt;
+ inode_table_t *itable;
+
+ server_resume_fn_t resume_fn;
+
+ loc_t loc;
+ loc_t loc2;
+ server_resolve_t resolve;
+ server_resolve_t resolve2;
+
+ /* used within resolve_and_resume */
+ loc_t *loc_now;
+ server_resolve_t *resolve_now;
+
+ struct iatt stbuf;
+ int valid;
+
+ fd_t *fd;
+ int flags;
+ int wbflags;
+ struct iobuf *iobuf;
+ struct iobref *iobref;
+
+ size_t size;
+ off_t offset;
+ mode_t mode;
+ dev_t dev;
+ size_t nr_count;
+ int cmd;
+ int type;
+ char *name;
+ int name_len;
+
+ int mask;
+ char is_revalidate;
+ dict_t *dict;
+ struct flock flock;
+ const char *volume;
+ dir_entry_t *entry;
+};
+
+extern struct rpcsvc_program gluster_handshake_prog;
+extern struct rpcsvc_program glusterfs3_1_fop_prog;
+extern struct rpcsvc_program gluster_ping_prog;
+
+typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *args);
+
+int
+server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, gfs_serialize_t sfunc);
+
+int xdr_to_glusterfs_req (rpcsvc_request_t *req, void *arg,
+ gfs_serialize_t sfunc);
+
+#endif /* !_SERVER_H */
diff --git a/xlators/protocol/server/src/server3_1-fops.c b/xlators/protocol/server/src/server3_1-fops.c
new file mode 100644
index 00000000000..4156e7be96f
--- /dev/null
+++ b/xlators/protocol/server/src/server3_1-fops.c
@@ -0,0 +1,4839 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "server.h"
+#include "server-helpers.h"
+#include "glusterfs-xdr.h"
+#include "msg-xdr.h"
+#include "compat-errno.h"
+
+#include "md5.h"
+
+#define SERVER_PATH_MAX (16 * 1024)
+
+/* Callback function section */
+int
+server_statfs_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct statvfs *buf)
+{
+ gfs3_statfs_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ if (op_ret >= 0) {
+ gf_statfs_from_statfs (&rsp.statfs, buf);
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_statfs_rsp);
+
+ return 0;
+}
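+
+/* The callbacks that follow share the shape of server_statfs_cbk above
+ * (sketch only, not additional code): pick up the rpcsvc request stashed in
+ * frame->local, translate op_errno with gf_errno_to_error(), convert any
+ * iatt/flock/dict payload into its gfs3_*_rsp XDR form, and hand the reply
+ * to server_submit_reply() with the matching xdr_serialize_*_rsp routine:
+ *
+ *     req = frame->local;
+ *     frame->local = NULL;
+ *
+ *     rsp.op_ret   = op_ret;
+ *     rsp.op_errno = gf_errno_to_error (op_errno);
+ *
+ *     server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ *                          xdr_serialize_<fop>_rsp);
+ */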
+
+int
+server_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *stbuf, dict_t *dict,
+ struct iatt *postparent)
+{
+ rpcsvc_request_t *req = NULL;
+ server_state_t *state = NULL;
+ inode_t *root_inode = NULL;
+ inode_t *link_inode = NULL;
+ loc_t fresh_loc = {0,};
+ gfs3_lookup_rsp rsp = {0, };
+ int32_t ret = -1;
+
+ state = CALL_STATE(frame);
+
+ req = frame->local;
+ frame->local = NULL;
+
+ if (state->is_revalidate == 1 && op_ret == -1) {
+ state->is_revalidate = 2;
+ loc_copy (&fresh_loc, &state->loc);
+ inode_unref (fresh_loc.inode);
+ fresh_loc.inode = inode_new (state->itable);
+
+ STACK_WIND (frame, server_lookup_cbk, BOUND_XL (frame),
+ BOUND_XL (frame)->fops->lookup,
+ &fresh_loc, state->dict);
+
+ loc_wipe (&fresh_loc);
+ return 0;
+ }
+
+ if (dict) {
+ rsp.dict.dict_len = dict_serialized_length (dict);
+ if (rsp.dict.dict_len < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to get serialized "
+ "length of reply dict",
+ state->loc.path, state->loc.inode->ino);
+ op_ret = -1;
+ op_errno = EINVAL;
+ rsp.dict.dict_len = 0;
+ }
+ }
+
+ if ((op_ret >= 0) && dict) {
+ rsp.dict.dict_val = GF_CALLOC (1, rsp.dict.dict_len, 0);
+ if (!rsp.dict.dict_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ rsp.dict.dict_len = 0;
+ goto out;
+ }
+ ret = dict_serialize (dict, rsp.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to serialize reply dict",
+ state->loc.path, state->loc.inode->ino);
+ op_ret = -1;
+ op_errno = -ret;
+ rsp.dict.dict_len = 0;
+ }
+ }
+
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+
+ if (op_ret == 0) {
+ root_inode = BOUND_XL(frame)->itable->root;
+ if (inode == root_inode) {
+ /* we just looked up root ("/") */
+ stbuf->ia_ino = 1;
+ if (inode->ia_type == 0)
+ inode->ia_type = stbuf->ia_type;
+ }
+
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+
+ if (inode->ino != 1) {
+ link_inode = inode_link (inode, state->loc.parent,
+ state->loc.name, stbuf);
+ inode_lookup (link_inode);
+ inode_unref (link_inode);
+ }
+ } else {
+ if (state->is_revalidate && op_errno == ENOENT) {
+ if (state->loc.inode->ino != 1) {
+ inode_unlink (state->loc.inode,
+ state->loc.parent,
+ state->loc.name);
+ }
+ }
+
+ gf_log (this->name,
+ (op_errno == ENOENT ? GF_LOG_TRACE : GF_LOG_DEBUG),
+ "%"PRId64": LOOKUP %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+out:
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_lookup_rsp);
+
+ if (rsp.dict.dict_val)
+ GF_FREE (rsp.dict.dict_val);
+
+ return 0;
+}
+
+
+int
+server_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct flock *lock)
+{
+ gfs3_lk_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+ server_state_t *state = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret == 0) {
+ gf_flock_from_flock (&rsp.flock, lock);
+ } else if (op_errno != ENOSYS) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": LK %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_lk_rsp);
+
+ return 0;
+}
+
+
+int
+server_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ conn = SERVER_CONNECTION(frame);
+ state = CALL_STATE(frame);
+
+ if (op_ret >= 0) {
+ if (state->flock.l_type == F_UNLCK)
+ gf_del_locker (conn->ltable, state->volume,
+ &state->loc, NULL, frame->root->pid);
+ else
+ gf_add_locker (conn->ltable, state->volume,
+ &state->loc, NULL, frame->root->pid);
+ } else if (op_errno != ENOSYS) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": INODELK %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+
+int
+server_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ conn = SERVER_CONNECTION(frame);
+ state = CALL_STATE(frame);
+
+ if (op_ret >= 0) {
+ if (state->flock.l_type == F_UNLCK)
+ gf_del_locker (conn->ltable, state->volume,
+ NULL, state->fd,
+ frame->root->pid);
+ else
+ gf_add_locker (conn->ltable, state->volume,
+ NULL, state->fd,
+ frame->root->pid);
+ } else if (op_errno != ENOSYS) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": FINODELK %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+ gf_common_rsp rsp = {0,};
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ conn = SERVER_CONNECTION(frame);
+ state = CALL_STATE(frame);
+
+ if (op_ret >= 0) {
+ if (state->cmd == ENTRYLK_UNLOCK)
+ gf_del_locker (conn->ltable, state->volume,
+ &state->loc, NULL, frame->root->pid);
+ else
+ gf_add_locker (conn->ltable, state->volume,
+ &state->loc, NULL, frame->root->pid);
+ } else if (op_errno != ENOSYS) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": INODELK %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+ return 0;
+}
+
+
+int
+server_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ conn = SERVER_CONNECTION(frame);
+ state = CALL_STATE(frame);
+ if (op_ret >= 0) {
+ if (state->cmd == ENTRYLK_UNLOCK)
+ gf_del_locker (conn->ltable, state->volume,
+ NULL, state->fd, frame->root->pid);
+ else
+ gf_add_locker (conn->ltable, state->volume,
+ NULL, state->fd, frame->root->pid);
+ } else if (op_errno != ENOSYS) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": FENTRYLK %"PRId64" (%"PRId64") "
+ " ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+
+int
+server_access_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_rmdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent)
+{
+ gfs3_rmdir_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ inode_t *parent = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret == 0) {
+ inode_unlink (state->loc.inode, state->loc.parent,
+ state->loc.name);
+ parent = inode_parent (state->loc.inode, 0, NULL);
+ if (parent)
+ inode_unref (parent);
+ else
+ inode_forget (state->loc.inode, 0);
+
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": RMDIR %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_rmdir_rsp);
+
+ return 0;
+}
+
+int
+server_mkdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *stbuf, struct iatt *preparent,
+ struct iatt *postparent)
+{
+ gfs3_mkdir_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ inode_t *link_inode = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+ if (op_ret >= 0) {
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+
+ link_inode = inode_link (inode, state->loc.parent,
+ state->loc.name, stbuf);
+ inode_lookup (link_inode);
+ inode_unref (link_inode);
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": MKDIR %s ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_mkdir_rsp);
+
+ return 0;
+}
+
+int
+server_mknod_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ inode_t *inode, struct iatt *stbuf, struct iatt *preparent,
+ struct iatt *postparent)
+{
+ gfs3_mknod_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ inode_t *link_inode = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+ if (op_ret >= 0) {
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+
+ link_inode = inode_link (inode, state->loc.parent,
+ state->loc.name, stbuf);
+ inode_lookup (link_inode);
+ inode_unref (link_inode);
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": MKNOD %s ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_mknod_rsp);
+
+
+ return 0;
+}
+
+int
+server_fsyncdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret < 0) {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": FSYNCDIR %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_readdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *entries)
+{
+ gfs3_readdir_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ state = CALL_STATE(frame);
+ if (op_ret > 0) {
+ rsp.buf.buf_len = gf_dirent_serialize (entries, NULL, 0);
+ if (rsp.buf.buf_len > 0) {
+ rsp.buf.buf_val = GF_CALLOC (1, rsp.buf.buf_len, 0);
+ if (!rsp.buf.buf_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ gf_dirent_serialize (entries, rsp.buf.buf_val,
+ rsp.buf.buf_len);
+ }
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": READDIR %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+unwind:
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_readdir_rsp);
+
+ if (rsp.buf.buf_val)
+ GF_FREE (rsp.buf.buf_val);
+
+ return 0;
+}
+
+
+int
+server_releasedir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_opendir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd)
+{
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+ gfs3_opendir_rsp rsp = {0,};
+ uint64_t fd_no = 0;
+
+ conn = SERVER_CONNECTION (frame);
+ state = CALL_STATE (frame);
+
+ if (op_ret >= 0) {
+ fd_bind (fd);
+
+ fd_no = gf_fd_unused_get (conn->fdtable, fd);
+ fd_ref (fd); // on behalf of the client
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": OPENDIR %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.fd = fd_no;
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_opendir_rsp);
+
+ return 0;
+}
+
+int
+server_removexattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_getxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gfs3_getxattr_rsp rsp = {0,};
+ int32_t len = 0;
+ int32_t ret = -1;
+ rpcsvc_request_t *req = NULL;
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (op_ret >= 0) {
+ len = dict_serialized_length (dict);
+ if (len < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to get serialized length of "
+ "reply dict",
+ state->loc.path, state->resolve.ino);
+ op_ret = -1;
+ op_errno = EINVAL;
+ len = 0;
+ goto out;
+ }
+
+ rsp.dict.dict_val = GF_CALLOC (len, sizeof (char), 0);
+ if (!rsp.dict.dict_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ len = 0;
+ goto out;
+ }
+ ret = dict_serialize (dict, rsp.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to serialize reply dict",
+ state->loc.path, state->resolve.ino);
+ op_ret = -1;
+ op_errno = EINVAL;
+ len = 0;
+ }
+ }
+out:
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+ rsp.dict.dict_len = len;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_getxattr_rsp);
+
+ if (rsp.dict.dict_val)
+ GF_FREE (rsp.dict.dict_val);
+
+ return 0;
+}
+
+
+int
+server_fgetxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gfs3_fgetxattr_rsp rsp = {0,};
+ int32_t len = 0;
+ int32_t ret = -1;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (op_ret >= 0) {
+ len = dict_serialized_length (dict);
+ if (len < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to get serialized "
+ "length of reply dict",
+ state->loc.path, state->resolve.ino);
+ op_ret = -1;
+ op_errno = EINVAL;
+ len = 0;
+ goto out;
+ }
+ rsp.dict.dict_val = GF_CALLOC (1, len, 0);
+ if (!rsp.dict.dict_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ len = 0;
+ goto out;
+ }
+ ret = dict_serialize (dict, rsp.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to serialize reply dict",
+ state->loc.path, state->resolve.ino);
+ op_ret = -1;
+ op_errno = -ret;
+ len = 0;
+ }
+ }
+
+out:
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+ rsp.dict.dict_len = len;
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_fgetxattr_rsp);
+
+ if (rsp.dict.dict_val)
+ GF_FREE (rsp.dict.dict_val);
+
+ return 0;
+}
+
+int
+server_setxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+
+int
+server_fsetxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_rename_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *stbuf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent)
+{
+ gfs3_rename_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret == 0) {
+ stbuf->ia_ino = state->loc.inode->ino;
+ stbuf->ia_type = state->loc.inode->ia_type;
+
+ gf_log (state->conn->bound_xl->name, GF_LOG_TRACE,
+ "%"PRId64": RENAME_CBK (%"PRId64") %"PRId64"/%s "
+ "==> %"PRId64"/%s",
+ frame->root->unique, state->loc.inode->ino,
+ state->loc.parent->ino, state->loc.name,
+ state->loc2.parent->ino, state->loc2.name);
+
+ inode_rename (state->itable,
+ state->loc.parent, state->loc.name,
+ state->loc2.parent, state->loc2.name,
+ state->loc.inode, stbuf);
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+
+ gf_stat_from_iatt (&rsp.preoldparent, preoldparent);
+ gf_stat_from_iatt (&rsp.postoldparent, postoldparent);
+
+ gf_stat_from_iatt (&rsp.prenewparent, prenewparent);
+ gf_stat_from_iatt (&rsp.postnewparent, postnewparent);
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_rename_rsp);
+
+ return 0;
+}
+
+int
+server_unlink_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent)
+{
+ gfs3_unlink_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ inode_t *parent = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret == 0) {
+ gf_log (state->conn->bound_xl->name, GF_LOG_TRACE,
+ "%"PRId64": UNLINK_CBK %"PRId64"/%s (%"PRId64")",
+ frame->root->unique, state->loc.parent->ino,
+ state->loc.name, state->loc.inode->ino);
+
+ inode_unlink (state->loc.inode, state->loc.parent,
+ state->loc.name);
+
+ parent = inode_parent (state->loc.inode, 0, NULL);
+ if (parent)
+ inode_unref (parent);
+ else
+ inode_forget (state->loc.inode, 0);
+
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": UNLINK %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_unlink_rsp);
+
+ return 0;
+}
+
+int
+server_symlink_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *stbuf, struct iatt *preparent,
+ struct iatt *postparent)
+{
+ gfs3_symlink_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ inode_t *link_inode = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+ if (op_ret >= 0) {
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+
+ link_inode = inode_link (inode, state->loc.parent,
+ state->loc.name, stbuf);
+ inode_lookup (link_inode);
+ inode_unref (link_inode);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": SYMLINK %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_symlink_rsp);
+
+ return 0;
+}
+
+
+int
+server_link_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *stbuf, struct iatt *preparent,
+ struct iatt *postparent)
+{
+ gfs3_link_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ inode_t *link_inode = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret == 0) {
+ stbuf->ia_ino = state->loc.inode->ino;
+
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+
+ gf_log (state->conn->bound_xl->name, GF_LOG_TRACE,
+ "%"PRId64": LINK (%"PRId64") %"PRId64"/%s ==> %"PRId64"/%s",
+ frame->root->unique, inode->ino,
+ state->loc2.parent->ino,
+ state->loc2.name, state->loc.parent->ino,
+ state->loc.name);
+
+ link_inode = inode_link (inode, state->loc2.parent,
+ state->loc2.name, stbuf);
+ inode_unref (link_inode);
+ } else {
+ gf_log (state->conn->bound_xl->name, GF_LOG_DEBUG,
+ "%"PRId64": LINK (%"PRId64") %"PRId64"/%s ==> %"PRId64"/%s "
+ " ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve2.ino,
+ state->resolve2.par,
+ state->resolve2.bname, state->resolve.par,
+ state->resolve.bname,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_link_rsp);
+
+ return 0;
+}
+
+int
+server_truncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf)
+{
+ gfs3_truncate_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE (frame);
+
+ if (op_ret == 0) {
+ gf_stat_from_iatt (&rsp.prestat, prebuf);
+ gf_stat_from_iatt (&rsp.poststat, postbuf);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": TRUNCATE %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_truncate_rsp);
+
+ return 0;
+}
+
+int
+server_fstat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *stbuf)
+{
+ gfs3_fstat_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret == 0) {
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": FSTAT %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_fstat_rsp);
+
+ return 0;
+}
+
+int
+server_ftruncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf)
+{
+ gfs3_ftruncate_rsp rsp = {0};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE (frame);
+
+ if (op_ret == 0) {
+ gf_stat_from_iatt (&rsp.prestat, prebuf);
+ gf_stat_from_iatt (&rsp.poststat, postbuf);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": FTRUNCATE %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_ftruncate_rsp);
+
+ return 0;
+}
+
+int
+server_flush_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+ if (op_ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": FLUSH %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+
+
+ return 0;
+}
+
+int
+server_fsync_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf)
+{
+ gfs3_fsync_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+
+ if (op_ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": FSYNC %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ } else {
+ gf_stat_from_iatt (&(rsp.prestat), prebuf);
+ gf_stat_from_iatt (&(rsp.poststat), postbuf);
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_fsync_rsp);
+
+ return 0;
+}
+
+int
+server_release_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ gf_common_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+ return 0;
+}
+
+
+int
+server_writev_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf)
+{
+ gfs3_write_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+ if (op_ret >= 0) {
+ gf_stat_from_iatt (&rsp.prestat, prebuf);
+ gf_stat_from_iatt (&rsp.poststat, postbuf);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": WRITEV %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_writev_rsp);
+
+ return 0;
+}
+
+
+int
+server_readv_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref)
+{
+ gfs3_read_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE(frame);
+ if (op_ret >= 0) {
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ rsp.size = op_ret;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": READV %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, vector, count, iobref,
+ xdr_serialize_readv_rsp);
+
+ return 0;
+}
+
+int
+server_checksum_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ uint8_t *fchecksum, uint8_t *dchecksum)
+{
+ gfs3_checksum_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ if (op_ret >= 0) {
+ rsp.fchecksum.fchecksum_val = (char *)fchecksum;
+ rsp.fchecksum.fchecksum_len = NAME_MAX;
+ rsp.dchecksum.dchecksum_val = (char *)dchecksum;
+ rsp.dchecksum.dchecksum_len = NAME_MAX;
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_checksum_rsp);
+
+ return 0;
+}
+
+
+int
+server_rchecksum_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ uint32_t weak_checksum, uint8_t *strong_checksum)
+{
+ gfs3_rchecksum_rsp rsp = {0,};
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ if (op_ret >= 0) {
+ rsp.weak_checksum = weak_checksum;
+
+ rsp.strong_checksum.strong_checksum_val = (char *)strong_checksum;
+ rsp.strong_checksum.strong_checksum_len = MD5_DIGEST_LEN;
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_rchecksum_rsp);
+
+ return 0;
+}
+
+
+int
+server_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd)
+{
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+ uint64_t fd_no = 0;
+ gfs3_open_rsp rsp = {0,};
+
+ conn = SERVER_CONNECTION (frame);
+ state = CALL_STATE (frame);
+
+ if (op_ret >= 0) {
+ fd_bind (fd);
+ fd_no = gf_fd_unused_get (conn->fdtable, fd);
+ fd_ref (fd);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": OPEN %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.fd = fd_no;
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_open_rsp);
+ return 0;
+}
+
+
+int
+server_create_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ fd_t *fd, inode_t *inode, struct iatt *stbuf,
+ struct iatt *preparent, struct iatt *postparent)
+{
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ inode_t *link_inode = NULL;
+ rpcsvc_request_t *req = NULL;
+ uint64_t fd_no = 0;
+ gfs3_create_rsp rsp = {0,};
+
+ conn = SERVER_CONNECTION (frame);
+ state = CALL_STATE (frame);
+
+ if (op_ret >= 0) {
+ gf_log (state->conn->bound_xl->name, GF_LOG_TRACE,
+ "%"PRId64": CREATE %"PRId64"/%s (%"PRId64")",
+ frame->root->unique, state->loc.parent->ino,
+ state->loc.name, stbuf->ia_ino);
+
+ link_inode = inode_link (inode, state->loc.parent,
+ state->loc.name, stbuf);
+
+ if (link_inode != inode) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "create(%s) inode (ptr=%p, ino=%"PRId64", "
+ "gen=%"PRId64") found conflict (ptr=%p, "
+ "ino=%"PRId64", gen=%"PRId64")",
+ state->loc.path, inode, inode->ino,
+ inode->generation, link_inode,
+ link_inode->ino, link_inode->generation);
+
+ /*
+ VERY racy code (if used anywhere else)
+ -- don't do this without understanding
+ */
+
+ inode_unref (fd->inode);
+ fd->inode = inode_ref (link_inode);
+ }
+
+ inode_lookup (link_inode);
+ inode_unref (link_inode);
+
+ fd_bind (fd);
+
+ fd_no = gf_fd_unused_get (conn->fdtable, fd);
+ fd_ref (fd);
+
+ if ((fd_no < 0) || (fd == 0)) {
+ op_ret = fd_no;
+ op_errno = errno;
+ }
+
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ gf_stat_from_iatt (&rsp.preparent, preparent);
+ gf_stat_from_iatt (&rsp.postparent, postparent);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": CREATE %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.fd = fd_no;
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_create_rsp);
+
+ return 0;
+}
+
+int
+server_readlink_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, const char *buf,
+ struct iatt *stbuf)
+{
+ gfs3_readlink_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+
+ state = CALL_STATE(frame);
+
+ if (op_ret >= 0) {
+ gf_stat_from_iatt (&rsp.buf, stbuf);
+ rsp.path = (char *)buf;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": READLINK %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_readlink_rsp);
+
+ return 0;
+}
+
+int
+server_stat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *stbuf)
+{
+ gfs3_stat_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE (frame);
+
+ if (op_ret == 0) {
+ gf_stat_from_iatt (&rsp.stat, stbuf);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": STAT %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_stat_rsp);
+
+ return 0;
+}
+
+
+int
+server_setattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *statpre, struct iatt *statpost)
+{
+ gfs3_setattr_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ state = CALL_STATE (frame);
+
+ if (op_ret == 0) {
+ gf_stat_from_iatt (&rsp.statpre, statpre);
+ gf_stat_from_iatt (&rsp.statpost, statpost);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": SETATTR %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_setattr_rsp);
+
+ return 0;
+}
+
+int
+server_fsetattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *statpre, struct iatt *statpost)
+{
+ gfs3_fsetattr_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (op_ret == 0) {
+ gf_stat_from_iatt (&rsp.statpre, statpre);
+ gf_stat_from_iatt (&rsp.statpost, statpost);
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": FSETATTR %"PRId64" (%"PRId64") ==> "
+ "%"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0,
+ op_ret, strerror (op_errno));
+ }
+
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_fsetattr_rsp);
+
+ return 0;
+}
+
+
+int
+server_xattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gfs3_xattrop_rsp rsp = {0,};
+ int32_t len = 0;
+ int32_t ret = -1;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (op_ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": XATTROP %s (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->loc.path,
+ state->loc.inode ? state->loc.inode->ino : 0,
+ op_ret, strerror (op_errno));
+ goto out;
+ }
+
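+ /* serialize the returned xattr dict into rsp.dict so it can be
+ * carried back to the client inside the xattrop reply */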
+ if ((op_ret >= 0) && dict) {
+ len = dict_serialized_length (dict);
+ if (len < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to get serialized length"
+ " for reply dict",
+ state->loc.path, state->loc.inode->ino);
+ op_ret = -1;
+ op_errno = EINVAL;
+ len = 0;
+ goto out;
+ }
+ rsp.dict.dict_val = GF_CALLOC (1, len, 0);
+ if (!rsp.dict.dict_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ len = 0;
+ goto out;
+ }
+ ret = dict_serialize (dict, rsp.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s (%"PRId64"): failed to serialize reply dict",
+ state->loc.path, state->loc.inode->ino);
+ op_ret = -1;
+ op_errno = -ret;
+ len = 0;
+ }
+ }
+out:
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+ rsp.dict.dict_len = len;
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_xattrop_rsp);
+
+ if (rsp.dict.dict_val)
+ GF_FREE (rsp.dict.dict_val);
+
+ return 0;
+}
+
+
+int
+server_fxattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict)
+{
+ gfs3_xattrop_rsp rsp = {0,};
+ int32_t len = 0;
+ int32_t ret = -1;
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ state = CALL_STATE(frame);
+
+ if (op_ret < 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "%"PRId64": FXATTROP %"PRId64" (%"PRId64") ==> %"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ goto out;
+ }
+
+ if ((op_ret >= 0) && dict) {
+ len = dict_serialized_length (dict);
+ if (len < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "fd - %"PRId64" (%"PRId64"): failed to get "
+ "serialized length for reply dict",
+ state->resolve.fd_no, state->fd->inode->ino);
+ op_ret = -1;
+ op_errno = EINVAL;
+ len = 0;
+ goto out;
+ }
+ rsp.dict.dict_val = GF_CALLOC (1, len, 0);
+ if (!rsp.dict.dict_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ len = 0;
+ goto out;
+ }
+ ret = dict_serialize (dict, rsp.dict.dict_val);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "fd - %"PRId64" (%"PRId64"): failed to "
+ "serialize reply dict",
+ state->resolve.fd_no, state->fd->inode->ino);
+ op_ret = -1;
+ op_errno = -ret;
+ len = 0;
+ }
+ }
+out:
+ req = frame->local;
+ frame->local = NULL;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+ rsp.dict.dict_len = len;
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_fxattrop_rsp);
+
+ if (rsp.dict.dict_val)
+ GF_FREE (rsp.dict.dict_val);
+
+ return 0;
+}
+
+
+int
+server_readdirp_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *entries)
+{
+ gfs3_readdirp_rsp rsp = {0,};
+ server_state_t *state = NULL;
+ rpcsvc_request_t *req = NULL;
+
+ req = frame->local;
+ frame->local = NULL;
+
+ state = CALL_STATE(frame);
+ if (op_ret > 0) {
+ rsp.buf.buf_len = gf_dirent_serialize (entries, NULL, 0);
+ rsp.buf.buf_val = GF_CALLOC (1, rsp.buf.buf_len, 0);
+ if (!rsp.buf.buf_val) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ rsp.buf.buf_len = 0;
+ goto out;
+ }
+ gf_dirent_serialize (entries, rsp.buf.buf_val, rsp.buf.buf_len);
+ } else {
+ gf_log (this->name, GF_LOG_TRACE,
+ "%"PRId64": READDIRP %"PRId64" (%"PRId64") ==>"
+ "%"PRId32" (%s)",
+ frame->root->unique, state->resolve.fd_no,
+ state->fd ? state->fd->inode->ino : 0, op_ret,
+ strerror (op_errno));
+ }
+
+out:
+ rsp.op_ret = op_ret;
+ rsp.op_errno = gf_errno_to_error (op_errno);
+
+ server_submit_reply (frame, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_readdirp_rsp);
+
+ if (rsp.buf.buf_val)
+ GF_FREE (rsp.buf.buf_val);
+
+ return 0;
+}
+
+/* Resume function section */
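+
+/*
+ * Each server_FOP_resume function below runs after the resolver has mapped
+ * the on-wire identifiers (ino/gen/path or fd number) onto a local inode/fd.
+ * If resolution failed (state->resolve.op_ret != 0), the fop is not wound;
+ * the matching server_FOP_cbk is invoked directly with the resolver's
+ * op_ret/op_errno so that an error reply still reaches the client.
+ */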
+
+int
+server_rchecksum_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0) {
+ op_ret = state->resolve.op_ret;
+ op_errno = state->resolve.op_errno;
+ goto err;
+ }
+
+ STACK_WIND (frame, server_rchecksum_cbk, bound_xl,
+ bound_xl->fops->rchecksum, state->fd,
+ state->offset, state->size);
+
+ return 0;
+err:
+ server_rchecksum_cbk (frame, NULL, frame->this, op_ret, op_errno,
+ 0, NULL);
+
+ return 0;
+}
+
+int
+server_checksum_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0) {
+ op_ret = state->resolve.op_ret;
+ op_errno = state->resolve.op_errno;
+ goto err;
+ }
+
+ STACK_WIND (frame, server_checksum_cbk, bound_xl,
+ bound_xl->fops->checksum, &state->loc, state->flags);
+
+ return 0;
+err:
+ server_checksum_cbk (frame, NULL, frame->this, op_ret, op_errno,
+ NULL, NULL);
+
+ return 0;
+}
+
+int
+server_lk_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_lk_cbk, bound_xl, bound_xl->fops->lk,
+ state->fd, state->cmd, &state->flock);
+
+ return 0;
+
+err:
+ server_lk_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+int
+server_rename_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0) {
+ op_ret = state->resolve.op_ret;
+ op_errno = state->resolve.op_errno;
+ goto err;
+ }
+
+ if (state->resolve2.op_ret != 0) {
+ op_ret = state->resolve2.op_ret;
+ op_errno = state->resolve2.op_errno;
+ goto err;
+ }
+
+ STACK_WIND (frame, server_rename_cbk,
+ bound_xl, bound_xl->fops->rename,
+ &state->loc, &state->loc2);
+ return 0;
+err:
+ server_rename_cbk (frame, NULL, frame->this, op_ret, op_errno,
+ NULL, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_link_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0) {
+ op_ret = state->resolve.op_ret;
+ op_errno = state->resolve.op_errno;
+ goto err;
+ }
+
+ if (state->resolve2.op_ret != 0) {
+ op_ret = state->resolve2.op_ret;
+ op_errno = state->resolve2.op_errno;
+ goto err;
+ }
+
+ state->loc2.inode = inode_ref (state->loc.inode);
+
+ STACK_WIND (frame, server_link_cbk, bound_xl, bound_xl->fops->link,
+ &state->loc, &state->loc2);
+
+ return 0;
+err:
+ server_link_cbk (frame, NULL, frame->this, op_ret, op_errno,
+ NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+int
+server_symlink_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ state->loc.inode = inode_new (state->itable);
+
+ STACK_WIND (frame, server_symlink_cbk,
+ bound_xl, bound_xl->fops->symlink,
+ state->name, &state->loc);
+
+ return 0;
+err:
+ server_symlink_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_access_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_access_cbk,
+ bound_xl, bound_xl->fops->access,
+ &state->loc, state->mask);
+ return 0;
+err:
+ server_access_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+ return 0;
+}
+
+int
+server_fentrylk_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fentrylk_cbk, bound_xl,
+ bound_xl->fops->fentrylk,
+ state->volume, state->fd, state->name,
+ state->cmd, state->type);
+
+ return 0;
+err:
+ server_fentrylk_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+ return 0;
+}
+
+
+int
+server_entrylk_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_entrylk_cbk,
+ bound_xl, bound_xl->fops->entrylk,
+ state->volume, &state->loc, state->name,
+ state->cmd, state->type);
+ return 0;
+err:
+ server_entrylk_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+ return 0;
+}
+
+
+int
+server_finodelk_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_finodelk_cbk, bound_xl,
+ bound_xl->fops->finodelk,
+ state->volume, state->fd, state->cmd, &state->flock);
+
+ return 0;
+err:
+ server_finodelk_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+
+ return 0;
+}
+
+int
+server_inodelk_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_inodelk_cbk,
+ bound_xl, bound_xl->fops->inodelk,
+ state->volume, &state->loc, state->cmd, &state->flock);
+ return 0;
+err:
+ server_inodelk_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+ return 0;
+}
+
+int
+server_rmdir_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_rmdir_cbk,
+ bound_xl, bound_xl->fops->rmdir, &state->loc);
+ return 0;
+err:
+ server_rmdir_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+ return 0;
+}
+
+int
+server_mkdir_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ state->loc.inode = inode_new (state->itable);
+
+ STACK_WIND (frame, server_mkdir_cbk,
+ bound_xl, bound_xl->fops->mkdir,
+ &(state->loc), state->mode);
+
+ return 0;
+err:
+ server_mkdir_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_mknod_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ state->loc.inode = inode_new (state->itable);
+
+ STACK_WIND (frame, server_mknod_cbk,
+ bound_xl, bound_xl->fops->mknod,
+ &(state->loc), state->mode, state->dev);
+
+ return 0;
+err:
+ server_mknod_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_fsyncdir_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fsyncdir_cbk,
+ bound_xl,
+ bound_xl->fops->fsyncdir,
+ state->fd, state->flags);
+ return 0;
+
+err:
+ server_fsyncdir_cbk (frame, NULL, frame->this,
+ state->resolve.op_ret,
+ state->resolve.op_errno);
+ return 0;
+}
+
+
+int
+server_readdir_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_readdir_cbk,
+ bound_xl,
+ bound_xl->fops->readdir,
+ state->fd, state->size, state->offset);
+
+ return 0;
+err:
+ server_readdir_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+int
+server_readdirp_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_readdirp_cbk, bound_xl,
+ bound_xl->fops->readdirp, state->fd, state->size,
+ state->offset);
+
+ return 0;
+err:
+ server_readdirp_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_opendir_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ state->fd = fd_create (state->loc.inode, frame->root->pid);
+
+ STACK_WIND (frame, server_opendir_cbk,
+ bound_xl, bound_xl->fops->opendir,
+ &state->loc, state->fd);
+ return 0;
+err:
+ server_opendir_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_statfs_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_statfs_cbk,
+ bound_xl, bound_xl->fops->statfs,
+ &state->loc);
+ return 0;
+
+err:
+ server_statfs_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_removexattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_removexattr_cbk,
+ bound_xl, bound_xl->fops->removexattr,
+ &state->loc, state->name);
+ return 0;
+err:
+ server_removexattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+ return 0;
+}
+
+int
+server_fgetxattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fgetxattr_cbk,
+ bound_xl, bound_xl->fops->fgetxattr,
+ state->fd, state->name);
+ return 0;
+err:
+ server_fgetxattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_xattrop_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_xattrop_cbk,
+ bound_xl, bound_xl->fops->xattrop,
+ &state->loc, state->flags, state->dict);
+ return 0;
+err:
+ server_xattrop_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+int
+server_fxattrop_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fxattrop_cbk,
+ bound_xl, bound_xl->fops->fxattrop,
+ state->fd, state->flags, state->dict);
+ return 0;
+err:
+ server_fxattrop_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+int
+server_fsetxattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fsetxattr_cbk,
+ bound_xl, bound_xl->fops->fsetxattr,
+ state->fd, state->dict, state->flags);
+ return 0;
+err:
+ server_fsetxattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+
+ return 0;
+}
+
+int
+server_unlink_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_unlink_cbk,
+ bound_xl, bound_xl->fops->unlink,
+ &state->loc);
+ return 0;
+err:
+ server_unlink_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+ return 0;
+}
+
+int
+server_truncate_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_truncate_cbk,
+ bound_xl, bound_xl->fops->truncate,
+ &state->loc, state->offset);
+ return 0;
+err:
+ server_truncate_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+ return 0;
+}
+
+
+
+int
+server_fstat_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fstat_cbk,
+ bound_xl, bound_xl->fops->fstat,
+ state->fd);
+ return 0;
+err:
+ server_fstat_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_setxattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_setxattr_cbk,
+ bound_xl, bound_xl->fops->setxattr,
+ &state->loc, state->dict, state->flags);
+ return 0;
+err:
+ server_setxattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+
+ return 0;
+}
+
+
+int
+server_getxattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_getxattr_cbk,
+ bound_xl, bound_xl->fops->getxattr,
+ &state->loc, state->name);
+ return 0;
+err:
+ server_getxattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_ftruncate_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_ftruncate_cbk,
+ bound_xl, bound_xl->fops->ftruncate,
+ state->fd, state->offset);
+ return 0;
+err:
+ server_ftruncate_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+
+ return 0;
+}
+
+
+int
+server_flush_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_flush_cbk,
+ bound_xl, bound_xl->fops->flush, state->fd);
+ return 0;
+err:
+ server_flush_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno);
+
+ return 0;
+}
+
+
+int
+server_fsync_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fsync_cbk,
+ bound_xl, bound_xl->fops->fsync,
+ state->fd, state->flags);
+ return 0;
+err:
+ server_fsync_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+
+ return 0;
+}
+
+int
+server_writev_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+ struct iovec iov = {0, };
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ iov.iov_len = state->size;
+
+ if (state->iobuf) {
+ iov.iov_base = state->iobuf->ptr;
+ }
+
+ STACK_WIND (frame, server_writev_cbk,
+ bound_xl, bound_xl->fops->writev,
+ state->fd, &iov, 1, state->offset, state->iobref);
+
+ return 0;
+err:
+ server_writev_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_readv_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_readv_cbk,
+ bound_xl, bound_xl->fops->readv,
+ state->fd, state->size, state->offset);
+
+ return 0;
+err:
+ server_readv_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, 0, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_create_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ state->loc.inode = inode_new (state->itable);
+
+ state->fd = fd_create (state->loc.inode, frame->root->pid);
+ state->fd->flags = state->flags;
+
+ STACK_WIND (frame, server_create_cbk,
+ bound_xl, bound_xl->fops->create,
+ &(state->loc), state->flags, state->mode, state->fd);
+
+ return 0;
+err:
+ server_create_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL, NULL,
+ NULL, NULL);
+ return 0;
+}
+
+
+int
+server_open_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ state->fd = fd_create (state->loc.inode, frame->root->pid);
+ state->fd->flags = state->flags;
+
+ STACK_WIND (frame, server_open_cbk,
+ bound_xl, bound_xl->fops->open,
+ &state->loc, state->flags, state->fd, 0);
+
+ return 0;
+err:
+ server_open_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+
+int
+server_readlink_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_readlink_cbk,
+ bound_xl, bound_xl->fops->readlink,
+ &state->loc, state->size);
+ return 0;
+err:
+ server_readlink_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+ return 0;
+}
+
+
+int
+server_fsetattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_fsetattr_cbk,
+ bound_xl, bound_xl->fops->fsetattr,
+ state->fd, &state->stbuf, state->valid);
+ return 0;
+err:
+ server_fsetattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+
+ return 0;
+}
+
+
+int
+server_setattr_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_setattr_cbk,
+ bound_xl, bound_xl->fops->setattr,
+ &state->loc, &state->stbuf, state->valid);
+ return 0;
+err:
+ server_setattr_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL);
+
+ return 0;
+}
+
+
+int
+server_stat_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ STACK_WIND (frame, server_stat_cbk,
+ bound_xl, bound_xl->fops->stat, &state->loc);
+ return 0;
+err:
+ server_stat_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL);
+ return 0;
+}
+
+int
+server_lookup_resume (call_frame_t *frame, xlator_t *bound_xl)
+{
+ server_state_t *state = NULL;
+
+ state = CALL_STATE (frame);
+
+ if (state->resolve.op_ret != 0)
+ goto err;
+
+ if (!state->loc.inode)
+ state->loc.inode = inode_new (state->itable);
+ else
+ state->is_revalidate = 1;
+
+ STACK_WIND (frame, server_lookup_cbk,
+ bound_xl, bound_xl->fops->lookup,
+ &state->loc, state->dict);
+
+ return 0;
+err:
+ server_lookup_cbk (frame, NULL, frame->this, state->resolve.op_ret,
+ state->resolve.op_errno, NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+
+
+
+/* Fop section */
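+
+/*
+ * Each fop handler below decodes the XDR request from req->msg[0] into a
+ * stack-allocated args structure (string members are pointed at local
+ * buffers before decoding), builds a call frame for the request, fills in
+ * state->resolve with the identifiers the resolver needs, and then calls
+ * resolve_and_resume() with the matching server_FOP_resume function.
+ * A decode failure is reported back to the client as GARBAGE_ARGS.
+ */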
+
+int
+server_stat (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_stat_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ /* Initialize args first, then decode */
+ args.path = path;
+
+ if (!xdr_to_stat_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+ {
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ }
+
+ resolve_and_resume (frame, server_stat_resume);
+out:
+ return 0;
+}
+
+
+int
+server_setattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_setattr_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+
+ if (!xdr_to_setattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+
+ gf_stat_to_iatt (&args.stbuf, &state->stbuf);
+ state->valid = args.valid;
+
+ resolve_and_resume (frame, server_setattr_resume);
+out:
+ return 0;
+}
+
+
+int
+server_fsetattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_fsetattr_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ if (!xdr_to_fsetattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+
+ gf_stat_to_iatt (&args.stbuf, &state->stbuf);
+ state->valid = args.valid;
+
+ resolve_and_resume (frame, server_fsetattr_resume);
+out:
+ return 0;
+}
+
+
+int
+server_readlink (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_readlink_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+
+ if (!xdr_to_readlink_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+
+ state->size = args.size;
+
+ resolve_and_resume (frame, server_readlink_resume);
+out:
+ return 0;
+}
+
+
+int
+server_create (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_create_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char bname[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ args.bname = bname;
+
+ if (!xdr_to_create_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_NOT;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.bname = gf_strdup (args.bname);
+ state->mode = args.mode;
+ state->flags = gf_flags_to_flags (args.flags);
+
+ resolve_and_resume (frame, server_create_resume);
+out:
+ return 0;
+}
+
+
+int
+server_open (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_open_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+
+ if (!xdr_to_open_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+
+ state->flags = gf_flags_to_flags (args.flags);
+
+ resolve_and_resume (frame, server_open_resume);
+out:
+ return 0;
+}
+
+
+int
+server_readv (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_read_req args = {0,};
+
+ if (!req)
+ goto out;
+
+ if (!xdr_to_readv_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->size = args.size;
+ state->offset = args.offset;
+
+ resolve_and_resume (frame, server_readv_resume);
+out:
+ return 0;
+}
+
+
+int
+server_writev (rpcsvc_request_t *req)
+{
+ /* TODO: not implemented -- write payloads are delivered through
+ * server_writev_vec () below */
+ assert (0);
+ return 0;
+}
+
+
+int
+server_writev_vec (rpcsvc_request_t *req, struct iobuf *iobuf)
+{
+ server_state_t *state = NULL;
+ struct iobref *iobref = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_write_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ if (!xdr_to_writev_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->offset = args.offset;
+
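+ /* the write payload arrives out-of-band as an iobuf (req->msg[1]);
+ * hold a reference on it via an iobref until the fop completes and
+ * take the payload size from the iovec itself */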
+ if (iobuf) {
+ iobref = iobref_new ();
+ iobref_add (iobref, iobuf);
+
+ state->iobref = iobref;
+ state->iobuf = iobuf_ref (iobuf);
+
+ state->size = req->msg[1].iov_len;
+ }
+
+ resolve_and_resume (frame, server_writev_resume);
+out:
+ return 0;
+}
+
+
+int
+server_release (rpcsvc_request_t *req)
+{
+ server_connection_t *conn = NULL;
+ gfs3_release_req args = {0,};
+ gf_common_rsp rsp = {0,};
+
+ if (!xdr_to_release_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
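+ /* drop the fd from this connection's fd table; release needs no
+ * path resolution, so the common reply is sent immediately */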
+ conn = req->conn->trans->xl_private;
+ gf_fd_put (conn->fdtable, args.fd);
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+out:
+ return 0;
+}
+
+int
+server_releasedir (rpcsvc_request_t *req)
+{
+ server_connection_t *conn = NULL;
+ gfs3_releasedir_req args = {0,};
+ gf_common_rsp rsp = {0,};
+
+ if (!xdr_to_release_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ conn = req->conn->trans->xl_private;
+ gf_fd_put (conn->fdtable, args.fd);
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ xdr_serialize_common_rsp);
+out:
+ return 0;
+}
+
+
+int
+server_fsync (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_fsync_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ if (!xdr_to_fsync_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->flags = args.data;
+
+ resolve_and_resume (frame, server_fsync_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_flush (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_flush_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ if (!xdr_to_flush_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+
+ resolve_and_resume (frame, server_flush_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_ftruncate (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_ftruncate_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ if (!xdr_to_ftruncate_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->offset = args.offset;
+
+ resolve_and_resume (frame, server_ftruncate_resume);
+out:
+ return 0;
+}
+
+
+int
+server_fstat (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_fstat_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ if (!xdr_to_fstat_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+
+ resolve_and_resume (frame, server_fstat_resume);
+out:
+ return 0;
+}
+
+
+int
+server_truncate (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_truncate_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+ if (!xdr_to_truncate_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->offset = args.offset;
+
+ resolve_and_resume (frame, server_truncate_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_unlink (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_unlink_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char bname[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+ args.bname = bname;
+
+ if (!xdr_to_unlink_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.bname = gf_strdup (args.bname);
+
+ resolve_and_resume (frame, server_unlink_resume);
+out:
+ return 0;
+}
+
+
+int
+server_setxattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ dict_t *dict = NULL;
+ call_frame_t *frame = NULL;
+ server_connection_t *conn = NULL;
+ char *buf = NULL;
+ gfs3_setxattr_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char dict_val[(16 * 1024)] = {0, };
+ int32_t ret = -1;
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ args.dict.dict_val = dict_val;
+
+ if (!xdr_to_setxattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->flags = args.flags;
+
+ if (args.dict.dict_len) {
+ dict = dict_new ();
+ buf = memdup (args.dict.dict_val, args.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (conn->bound_xl->name, buf, out);
+
+ ret = dict_unserialize (buf, args.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "%"PRId64": %s (%"PRId64"): failed to "
+ "unserialize request buffer to dictionary",
+ frame->root->unique, state->loc.path,
+ state->resolve.ino);
+ goto err;
+ }
+
+ dict->extra_free = buf;
+ buf = NULL;
+
+ state->dict = dict;
+ }
+
+ resolve_and_resume (frame, server_setxattr_resume);
+
+ return 0;
+err:
+ if (dict)
+ dict_unref (dict);
+
+ server_setxattr_cbk (frame, NULL, frame->this, -1, EINVAL);
+out:
+ if (buf)
+ GF_FREE (buf);
+ return 0;
+
+}
+
+
+
+int
+server_fsetxattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ dict_t *dict = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ char *buf = NULL;
+ gfs3_fsetxattr_req args = {0,};
+ char dict_val[(16 *1024)] = {0,};
+ int32_t ret = -1;
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.dict.dict_val = dict_val;
+ if (!xdr_to_fsetxattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->flags = args.flags;
+
+ if (args.dict.dict_len) {
+ dict = dict_new ();
+ buf = memdup (args.dict.dict_val, args.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (conn->bound_xl->name, buf, out);
+
+ ret = dict_unserialize (buf, args.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "%"PRId64": %s (%"PRId64"): failed to "
+ "unserialize request buffer to dictionary",
+ frame->root->unique, state->loc.path,
+ state->resolve.ino);
+ goto err;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+ state->dict = dict;
+ }
+
+ resolve_and_resume (frame, server_fsetxattr_resume);
+
+ return 0;
+err:
+ if (dict)
+ dict_unref (dict);
+
+ server_fsetxattr_cbk (frame, NULL, frame->this, -1, EINVAL);
+out:
+ if (buf)
+ GF_FREE (buf);
+ return 0;
+}
+
+
+
+int
+server_fxattrop (rpcsvc_request_t *req)
+{
+ dict_t *dict = NULL;
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ char *buf = NULL;
+ gfs3_fxattrop_req args = {0,};
+ char dict_val[(16 *1024)] = {0,};
+ int32_t ret = -1;
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.dict.dict_val = dict_val;
+ if (!xdr_to_fxattrop_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->flags = args.flags;
+
+ if (args.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ buf = memdup (args.dict.dict_val, args.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (conn->bound_xl->name, buf, out);
+
+ ret = dict_unserialize (buf, args.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "fd - %"PRId64" (%"PRId64"): failed to unserialize "
+ "request buffer to dictionary",
+ state->resolve.fd_no, state->fd->inode->ino);
+ goto fail;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+
+ state->dict = dict;
+ }
+
+ resolve_and_resume (frame, server_fxattrop_resume);
+
+ return 0;
+
+fail:
+ if (dict)
+ dict_unref (dict);
+
+ server_fxattrop_cbk (frame, NULL, frame->this, -1, EINVAL, NULL);
+out:
+ return 0;
+}
+
+
+
+int
+server_xattrop (rpcsvc_request_t *req)
+{
+ dict_t *dict = NULL;
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ char *buf = NULL;
+ gfs3_xattrop_req args = {0,};
+ char dict_val[(16 *1024)] = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ int32_t ret = -1;
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.dict.dict_val = dict_val;
+ args.path = path;
+
+ if (!xdr_to_xattrop_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->flags = args.flags;
+
+ if (args.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ buf = memdup (args.dict.dict_val, args.dict.dict_len);
+ GF_VALIDATE_OR_GOTO (conn->bound_xl->name, buf, out);
+
+ ret = dict_unserialize (buf, args.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "fd - %"PRId64" (%"PRId64"): failed to unserialize "
+ "request buffer to dictionary",
+ state->resolve.fd_no, state->fd->inode->ino);
+ goto fail;
+ }
+ dict->extra_free = buf;
+ buf = NULL;
+
+ state->dict = dict;
+ }
+
+ resolve_and_resume (frame, server_xattrop_resume);
+
+ return 0;
+fail:
+ if (dict)
+ dict_unref (dict);
+
+ server_xattrop_cbk (frame, NULL, frame->this, -1, EINVAL, NULL);
+out:
+ return 0;
+}
+
+
+int
+server_getxattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_getxattr_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char name[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ args.name = name;
+
+ if (!xdr_to_getxattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+
+ if (args.namelen)
+ state->name = gf_strdup (args.name);
+
+ resolve_and_resume (frame, server_getxattr_resume);
+out:
+ return 0;
+}
+
+
+int
+server_fgetxattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_fgetxattr_req args = {0,};
+ char name[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.name = name;
+ if (!xdr_to_fgetxattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+
+ if (args.namelen)
+ state->name = gf_strdup (args.name);
+
+ resolve_and_resume (frame, server_fgetxattr_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_removexattr (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_removexattr_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char name[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ args.name = name;
+ if (!xdr_to_removexattr_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->name = gf_strdup (args.name);
+
+ resolve_and_resume (frame, server_removexattr_resume);
+out:
+ return 0;
+}
+
+
+
+
+int
+server_opendir (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_opendir_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+
+ if (!xdr_to_opendir_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+
+ resolve_and_resume (frame, server_opendir_resume);
+out:
+ return 0;
+}
+
+
+int
+server_readdirp (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_readdirp_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ if (!xdr_to_readdirp_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->size = args.size;
+ state->offset = args.offset;
+
+ resolve_and_resume (frame, server_readdirp_resume);
+out:
+ return 0;
+}
+
+int
+server_readdir (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_readdir_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ if (!xdr_to_readdir_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->size = args.size;
+ state->offset = args.offset;
+
+ resolve_and_resume (frame, server_readdir_resume);
+out:
+ return 0;
+}
+
+int
+server_fsyncdir (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_fsyncdir_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ if (!xdr_to_fsyncdir_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.fd_no = args.fd;
+ state->flags = args.data;
+
+ resolve_and_resume (frame, server_fsyncdir_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_mknod (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_mknod_req args = {0,};
+ char bname[SERVER_PATH_MAX] = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.path = path;
+ args.bname = bname;
+
+ if (!xdr_to_mknod_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_NOT;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.bname = gf_strdup (args.bname);
+
+ state->mode = args.mode;
+ state->dev = args.dev;
+
+ resolve_and_resume (frame, server_mknod_resume);
+out:
+ return 0;
+}
+
+
+int
+server_mkdir (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_mkdir_req args = {0,};
+ char bname[SERVER_PATH_MAX] = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.path = path;
+ args.bname = bname;
+
+ if (!xdr_to_mkdir_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_NOT;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.bname = gf_strdup (args.bname);
+
+ state->mode = args.mode;
+
+ resolve_and_resume (frame, server_mkdir_resume);
+out:
+ return 0;
+}
+
+
+int
+server_rmdir (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_rmdir_req args = {0,};
+ char bname[SERVER_PATH_MAX] = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.path = path;
+ args.bname = bname;
+
+ if (!xdr_to_rmdir_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.bname = gf_strdup (args.bname);
+
+ resolve_and_resume (frame, server_rmdir_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_inodelk (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_inodelk_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char volume[4096] = {0,};
+ int cmd = 0;
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.path = path;
+ args.volume = volume;
+
+ if (!xdr_to_inodelk_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_EXACT;
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+
+ cmd = args.cmd;
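+ /* translate the on-wire GF_LK_* lock command (and, below, the
+ * lock type) into the corresponding local fcntl(2) values */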
+ switch (cmd) {
+ case GF_LK_GETLK:
+ state->cmd = F_GETLK;
+ break;
+ case GF_LK_SETLK:
+ state->cmd = F_SETLK;
+ break;
+ case GF_LK_SETLKW:
+ state->cmd = F_SETLKW;
+ break;
+ }
+
+ state->type = args.type;
+ state->volume = gf_strdup (args.volume);
+
+ gf_flock_to_flock (&args.flock, &state->flock);
+
+ switch (state->type) {
+ case GF_LK_F_RDLCK:
+ state->flock.l_type = F_RDLCK;
+ break;
+ case GF_LK_F_WRLCK:
+ state->flock.l_type = F_WRLCK;
+ break;
+ case GF_LK_F_UNLCK:
+ state->flock.l_type = F_UNLCK;
+ break;
+ }
+
+ resolve_and_resume (frame, server_inodelk_resume);
+out:
+ return 0;
+}
+
+int
+server_finodelk (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_finodelk_req args = {0,};
+ char volume[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.volume = volume;
+ if (!xdr_to_finodelk_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_EXACT;
+ state->volume = gf_strdup (args.volume);
+ state->resolve.fd_no = args.fd;
+ state->cmd = args.cmd;
+
+ switch (state->cmd) {
+ case GF_LK_GETLK:
+ state->cmd = F_GETLK;
+ break;
+ case GF_LK_SETLK:
+ state->cmd = F_SETLK;
+ break;
+ case GF_LK_SETLKW:
+ state->cmd = F_SETLKW;
+ break;
+ }
+
+ state->type = args.type;
+
+ gf_flock_to_flock (&args.flock, &state->flock);
+
+ switch (state->type) {
+ case GF_LK_F_RDLCK:
+ state->flock.l_type = F_RDLCK;
+ break;
+ case GF_LK_F_WRLCK:
+ state->flock.l_type = F_WRLCK;
+ break;
+ case GF_LK_F_UNLCK:
+ state->flock.l_type = F_UNLCK;
+ break;
+ }
+
+ resolve_and_resume (frame, server_finodelk_resume);
+out:
+ return 0;
+}
+
+
+int
+server_entrylk (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_entrylk_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char name[4096] = {0,};
+ char volume[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ args.path = path;
+ args.volume = volume;
+ args.name = name;
+
+ conn = req->conn->trans->xl_private;
+
+ if (!xdr_to_entrylk_req (req->msg[0], &args)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ // something wrong, mostly insufficient memory
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_EXACT;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+
+ if (args.namelen)
+ state->name = gf_strdup (args.name);
+ state->volume = gf_strdup (args.volume);
+
+ state->cmd = args.cmd;
+ state->type = args.type;
+
+ resolve_and_resume (frame, server_entrylk_resume);
+out:
+ return 0;
+}
+
+int
+server_fentrylk (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_fentrylk_req args = {0,};
+ char name[4096] = {0,};
+ char volume[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.name = name;
+ args.volume = volume;
+ if (!xdr_to_fentrylk_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
+ state->resolve.type = RESOLVE_EXACT;
+ state->resolve.fd_no = args.fd;
+ state->cmd = args.cmd;
+ state->type = args.type;
+
+ if (args.namelen)
+ state->name = gf_strdup (args.name);
+ state->volume = gf_strdup (args.volume);
+
+ resolve_and_resume (frame, server_fentrylk_resume);
+out:
+ return 0;
+}
+
+int
+server_access (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_access_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ if (!xdr_to_access_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
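+ /* access() operates on an existing inode, so the resolver must find it (RESOLVE_MUST) */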
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.ino = args.ino;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->mask = args.mask;
+
+ resolve_and_resume (frame, server_access_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_symlink (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_symlink_req args = {0,};
+ char linkname[4096] = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+ char bname[4096] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.path = path;
+ args.bname = bname;
+ args.linkname = linkname;
+
+ if (!xdr_to_symlink_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
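+ /* the symlink itself must not exist yet (RESOLVE_NOT); only the parent and basename are resolved */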
+ state->resolve.type = RESOLVE_NOT;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.bname = gf_strdup (args.bname);
+ state->name = gf_strdup (args.linkname);
+
+ resolve_and_resume (frame, server_symlink_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_link (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_link_req args = {0,};
+ char oldpath[SERVER_PATH_MAX] = {0,};
+ char newpath[SERVER_PATH_MAX] = {0,};
+ char newbname[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.oldpath = oldpath;
+ args.newpath = newpath;
+ args.newbname = newbname;
+
+ if (!xdr_to_link_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
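+ /* resolve describes the existing source inode, resolve2 the not-yet-existing link name */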
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.oldpath);
+ state->resolve.ino = args.oldino;
+ state->resolve.gen = args.oldgen;
+
+ state->resolve2.type = RESOLVE_NOT;
+ state->resolve2.path = gf_strdup (args.newpath);
+ state->resolve2.bname = gf_strdup (args.newbname);
+ state->resolve2.par = args.newpar;
+ state->resolve2.gen = args.newgen;
+
+ resolve_and_resume (frame, server_link_resume);
+out:
+ return 0;
+}
+
+
+int
+server_rename (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_rename_req args = {0,};
+ char oldpath[SERVER_PATH_MAX] = {0,};
+ char oldbname[SERVER_PATH_MAX] = {0,};
+ char newpath[SERVER_PATH_MAX] = {0,};
+ char newbname[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.oldpath = oldpath;
+ args.oldbname = oldbname;
+ args.newpath = newpath;
+ args.newbname = newbname;
+ if (!xdr_to_rename_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
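+ /* the source must exist (RESOLVE_MUST); the destination may or may not (RESOLVE_MAY) */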
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.path = gf_strdup (args.oldpath);
+ state->resolve.bname = gf_strdup (args.oldbname);
+ state->resolve.par = args.oldpar;
+ state->resolve.gen = args.oldgen;
+
+ state->resolve2.type = RESOLVE_MAY;
+ state->resolve2.path = gf_strdup (args.newpath);
+ state->resolve2.bname = gf_strdup (args.newbname);
+ state->resolve2.par = args.newpar;
+ state->resolve2.gen = args.newgen;
+
+ resolve_and_resume (frame, server_rename_resume);
+out:
+ return 0;
+}
+
+int
+server_lk (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_lk_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ if (!xdr_to_lk_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.fd_no = args.fd;
+ state->cmd = args.cmd;
+ state->type = args.type;
+
+ switch (state->cmd) {
+ case GF_LK_GETLK:
+ state->cmd = F_GETLK;
+ break;
+ case GF_LK_SETLK:
+ state->cmd = F_SETLK;
+ break;
+ case GF_LK_SETLKW:
+ state->cmd = F_SETLKW;
+ break;
+ }
+
+ gf_flock_to_flock (&args.flock, &state->flock);
+
+ switch (state->type) {
+ case GF_LK_F_RDLCK:
+ state->flock.l_type = F_RDLCK;
+ break;
+ case GF_LK_F_WRLCK:
+ state->flock.l_type = F_WRLCK;
+ break;
+ case GF_LK_F_UNLCK:
+ state->flock.l_type = F_UNLCK;
+ break;
+ default:
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "fd - %"PRId64" (%"PRId64"): Unknown lock type: %"PRId32"!",
+ state->resolve.fd_no, state->fd->inode->ino, state->type);
+ break;
+ }
+
+
+ resolve_and_resume (frame, server_lk_resume);
+out:
+ return 0;
+}
+
+int
+server_checksum (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_checksum_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ if (!xdr_to_checksum_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MAY;
+ state->resolve.path = gf_strdup (args.path);
+ state->resolve.gen = args.gen;
+ state->resolve.ino = args.ino;
+ state->flags = args.flag;
+
+ resolve_and_resume (frame, server_checksum_resume);
+out:
+ return 0;
+}
+
+
+
+int
+server_rchecksum (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_rchecksum_req args = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ if (!xdr_to_rchecksum_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE(frame);
+
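+ /* checksum is computed over [offset, offset+len) of the fd looked up from fd_no */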
+ state->resolve.type = RESOLVE_MAY;
+ state->resolve.fd_no = args.fd;
+ state->offset = args.offset;
+ state->size = args.len;
+
+ resolve_and_resume (frame, server_rchecksum_resume);
+out:
+ return 0;
+}
+
+int
+server_null (rpcsvc_request_t *req)
+{
+ gf_common_rsp rsp = {0,};
+
+ rsp.gfs_id = req->gfs_id;
+ /* Accepted */
+ rsp.op_ret = 0;
+
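+ /* NULL is a no-op ping: acknowledge directly, no frame or resolution is needed */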
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_common_rsp);
+
+ return 0;
+}
+
+int
+server_lookup (rpcsvc_request_t *req)
+{
+ call_frame_t *frame = NULL;
+ server_connection_t *conn = NULL;
+ server_state_t *state = NULL;
+ dict_t *xattr_req = NULL;
+ char *buf = NULL;
+ gfs3_lookup_req args = {0,};
+ int ret = 0;
+ char path[SERVER_PATH_MAX] = {0,};
+ char bname[SERVER_PATH_MAX] = {0,};
+ char dict_val[(16 * 1024)] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+
+ args.path = path;
+ args.bname = bname;
+ args.dict.dict_val = dict_val;
+
+ if (!xdr_to_lookup_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto err;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS;
+ goto err;
+ }
+
+ /* NOTE: lookup() uses args.ino only to identify whether the
+ * lookup() is requested for the root inode or not
+ */
+
+ state = CALL_STATE (frame);
+ state->resolve.ino = args.ino;
+ if (state->resolve.ino != 1)
+ state->resolve.ino = 0;
+
+ state->resolve.type = RESOLVE_DONTCARE;
+ state->resolve.par = args.par;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+
+ if (IS_NOT_ROOT (STRLEN_0 (args.path))) {
+ state->resolve.bname = gf_strdup (args.bname);
+ }
+
+ if (args.dict.dict_len) {
+ /* Unserialize the dictionary */
+ xattr_req = dict_new ();
+
+ buf = memdup (args.dict.dict_val, args.dict.dict_len);
+ if (buf == NULL) {
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "out of memory");
+ goto err;
+ }
+
+ ret = dict_unserialize (buf, args.dict.dict_len,
+ &xattr_req);
+ if (ret < 0) {
+ gf_log (conn->bound_xl->name, GF_LOG_ERROR,
+ "%"PRId64": %s (%"PRId64"): failed to "
+ "unserialize req-buffer to dictionary",
+ frame->root->unique, state->resolve.path,
+ state->resolve.ino);
+ goto err;
+ }
+
+ state->dict = xattr_req;
+
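+ /* the unserialized dict still references buf, so hand the buffer to the dict (extra_free) to be released along with it */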
+ xattr_req->extra_free = buf;
+
+ buf = NULL;
+ }
+
+ resolve_and_resume (frame, server_lookup_resume);
+
+ return 0;
+err:
+ if (xattr_req)
+ dict_unref (xattr_req);
+
+ if (buf) {
+ GF_FREE (buf);
+ }
+
+ if (frame)
+ server_lookup_cbk (frame, NULL, frame->this, -1, EINVAL,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+int
+server_statfs (rpcsvc_request_t *req)
+{
+ server_state_t *state = NULL;
+ server_connection_t *conn = NULL;
+ call_frame_t *frame = NULL;
+ gfs3_statfs_req args = {0,};
+ char path[SERVER_PATH_MAX] = {0,};
+
+ if (!req)
+ return 0;
+
+ conn = req->conn->trans->xl_private;
+ args.path = path;
+ if (!xdr_to_statfs_req (req->msg[0], &args)) {
+ /* failed to decode the message */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ frame = get_frame_from_request (req);
+ if (!frame) {
+ /* something went wrong, most likely insufficient memory */
+ req->rpc_err = GARBAGE_ARGS; /* TODO */
+ goto out;
+ }
+
+ state = CALL_STATE (frame);
+
+ state->resolve.type = RESOLVE_MUST;
+ state->resolve.ino = args.ino;
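+ /* default to the root inode (ino 1) when the client did not send one */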
+ if (!state->resolve.ino)
+ state->resolve.ino = 1;
+ state->resolve.gen = args.gen;
+ state->resolve.path = gf_strdup (args.path);
+
+ resolve_and_resume (frame, server_statfs_resume);
+out:
+ return 0;
+}
+
+
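+ /* actor table for the GlusterFS 3.1 FOP program, indexed by GFS3_OP_* procedure number; */
+ /* WRITE additionally registers a vectored actor (server_writev_vec) for its payload */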
+rpcsvc_actor_t glusterfs3_1_fop_actors[] = {
+ [GFS3_OP_NULL] = { "NULL", GFS3_OP_NULL, server_null, NULL, NULL},
+ [GFS3_OP_STAT] = { "STAT", GFS3_OP_STAT, server_stat, NULL, NULL },
+ [GFS3_OP_READLINK] = { "READLINK", GFS3_OP_READLINK, server_readlink, NULL, NULL },
+ [GFS3_OP_MKNOD] = { "MKNOD", GFS3_OP_MKNOD, server_mknod, NULL, NULL },
+ [GFS3_OP_MKDIR] = { "MKDIR", GFS3_OP_MKDIR, server_mkdir, NULL, NULL },
+ [GFS3_OP_UNLINK] = { "UNLINK", GFS3_OP_UNLINK, server_unlink, NULL, NULL },
+ [GFS3_OP_RMDIR] = { "RMDIR", GFS3_OP_RMDIR, server_rmdir, NULL, NULL },
+ [GFS3_OP_SYMLINK] = { "SYMLINK", GFS3_OP_SYMLINK, server_symlink, NULL, NULL },
+ [GFS3_OP_RENAME] = { "RENAME", GFS3_OP_RENAME, server_rename, NULL, NULL },
+ [GFS3_OP_LINK] = { "LINK", GFS3_OP_LINK, server_link, NULL, NULL },
+ [GFS3_OP_TRUNCATE] = { "TRUNCATE", GFS3_OP_TRUNCATE, server_truncate, NULL, NULL },
+ [GFS3_OP_OPEN] = { "OPEN", GFS3_OP_OPEN, server_open, NULL, NULL },
+ [GFS3_OP_READ] = { "READ", GFS3_OP_READ, server_readv, NULL, NULL },
+ [GFS3_OP_WRITE] = { "WRITE", GFS3_OP_WRITE, server_writev, server_writev_vec, NULL },
+ [GFS3_OP_STATFS] = { "STATFS", GFS3_OP_STATFS, server_statfs, NULL, NULL },
+ [GFS3_OP_FLUSH] = { "FLUSH", GFS3_OP_FLUSH, server_flush, NULL, NULL },
+ [GFS3_OP_FSYNC] = { "FSYNC", GFS3_OP_FSYNC, server_fsync, NULL, NULL },
+ [GFS3_OP_SETXATTR] = { "SETXATTR", GFS3_OP_SETXATTR, server_setxattr, NULL, NULL },
+ [GFS3_OP_GETXATTR] = { "GETXATTR", GFS3_OP_GETXATTR, server_getxattr, NULL, NULL },
+ [GFS3_OP_REMOVEXATTR] = { "REMOVEXATTR", GFS3_OP_REMOVEXATTR, server_removexattr, NULL, NULL },
+ [GFS3_OP_OPENDIR] = { "OPENDIR", GFS3_OP_OPENDIR, server_opendir, NULL, NULL },
+ [GFS3_OP_FSYNCDIR] = { "FSYNCDIR", GFS3_OP_FSYNCDIR, server_fsyncdir, NULL, NULL },
+ [GFS3_OP_ACCESS] = { "ACCESS", GFS3_OP_ACCESS, server_access, NULL, NULL },
+ [GFS3_OP_CREATE] = { "CREATE", GFS3_OP_CREATE, server_create, NULL, NULL },
+ [GFS3_OP_FTRUNCATE] = { "FTRUNCATE", GFS3_OP_FTRUNCATE, server_ftruncate, NULL, NULL },
+ [GFS3_OP_FSTAT] = { "FSTAT", GFS3_OP_FSTAT, server_fstat, NULL, NULL },
+ [GFS3_OP_LK] = { "LK", GFS3_OP_LK, server_lk, NULL, NULL },
+ [GFS3_OP_LOOKUP] = { "LOOKUP", GFS3_OP_LOOKUP, server_lookup, NULL, NULL },
+ [GFS3_OP_READDIR] = { "READDIR", GFS3_OP_READDIR, server_readdir, NULL, NULL },
+ [GFS3_OP_INODELK] = { "INODELK", GFS3_OP_INODELK, server_inodelk, NULL, NULL },
+ [GFS3_OP_FINODELK] = { "FINODELK", GFS3_OP_FINODELK, server_finodelk, NULL, NULL },
+ [GFS3_OP_ENTRYLK] = { "ENTRYLK", GFS3_OP_ENTRYLK, server_entrylk, NULL, NULL },
+ [GFS3_OP_FENTRYLK] = { "FENTRYLK", GFS3_OP_FENTRYLK, server_fentrylk, NULL, NULL },
+ [GFS3_OP_CHECKSUM] = { "CHECKSUM", GFS3_OP_CHECKSUM, server_checksum, NULL, NULL },
+ [GFS3_OP_XATTROP] = { "XATTROP", GFS3_OP_XATTROP, server_xattrop, NULL, NULL },
+ [GFS3_OP_FXATTROP] = { "FXATTROP", GFS3_OP_FXATTROP, server_fxattrop, NULL, NULL },
+ [GFS3_OP_FGETXATTR] = { "FGETXATTR", GFS3_OP_FGETXATTR, server_fgetxattr, NULL, NULL },
+ [GFS3_OP_FSETXATTR] = { "FSETXATTR", GFS3_OP_FSETXATTR, server_fsetxattr, NULL, NULL },
+ [GFS3_OP_RCHECKSUM] = { "RCHECKSUM", GFS3_OP_RCHECKSUM, server_rchecksum, NULL, NULL },
+ [GFS3_OP_SETATTR] = { "SETATTR", GFS3_OP_SETATTR, server_setattr, NULL, NULL },
+ [GFS3_OP_FSETATTR] = { "FSETATTR", GFS3_OP_FSETATTR, server_fsetattr, NULL, NULL },
+ [GFS3_OP_READDIRP] = { "READDIRP", GFS3_OP_READDIRP, server_readdirp, NULL, NULL },
+ [GFS3_OP_RELEASE] = { "RELEASE", GFS3_OP_RELEASE, server_release, NULL, NULL },
+ [GFS3_OP_RELEASEDIR] = { "RELEASEDIR", GFS3_OP_RELEASEDIR, server_releasedir, NULL, NULL },
+};
+
+
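+ /* program descriptor handed to rpcsvc; clients select it by program number and version */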
+struct rpcsvc_program glusterfs3_1_fop_prog = {
+ .progname = "GlusterFS-3.1.0",
+ .prognum = GLUSTER3_1_FOP_PROGRAM,
+ .progver = GLUSTER3_1_FOP_VERSION,
+ .numactors = GLUSTER3_1_FOP_PROCCNT,
+ .actors = glusterfs3_1_fop_actors,
+ .progport = 7007,
+};