author    Raghavendra G <rgowdapp@redhat.com>  2019-05-13 12:54:52 +0530
committer Raghavendra G <rgowdapp@redhat.com>  2019-09-12 20:25:30 +0530
commit    59f3c790048c9036adb81d56329a66fc3ffb255a (patch)
tree      f25e39fa8243b979b64118d7f712007dbcc96a7e /xlators/protocol/client/src
parent    61d438e73857776a1f96a7334f56b132275a587b (diff)
protocol/client: don't reopen fds on which POSIX locks are held after a reconnect
Bricks clean up any granted locks after a client disconnects, and currently
these locks are not healed after a reconnect. This means that, post
reconnect, a competing process could be granted a lock even though the first
process, which was granted locks earlier, has not unlocked. By not reopening
fds, subsequent operations on such fds will fail, forcing the application to
close the current fd and reopen a new one. This way we prevent any silent
corruption.

A new option "client.strict-locks" is introduced to control this behaviour.
This option is set to "off" by default.

Change-Id: Ieed545efea466cb5e8f5a36199aa26380c301b9e
Signed-off-by: Raghavendra G <rgowdapp@redhat.com>
updates: bz#1694920
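For context, the locks in question are ordinary POSIX record locks taken
through fcntl(2), using the same commands that client_is_setlk() in the diff
below recognizes. A minimal sketch of a client holding such a lock follows;
the mount path is a hypothetical example, and enabling the new tunable
presumably follows the usual client-option pattern, e.g.
"gluster volume set <volname> client.strict-locks on".

/* Sketch: take a POSIX write lock on a file, the kind of lock this patch
 * tracks in the client's per-fd lock_list. The path is hypothetical. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Assumes a GlusterFS volume mounted at /mnt/glusterfs. */
    int fd = open("/mnt/glusterfs/data.db", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct flock fl = {
        .l_type = F_WRLCK,    /* exclusive write lock */
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,           /* 0 = lock the whole file */
    };

    /* F_SETLKW is one of the commands client_is_setlk() treats as setlk. */
    if (fcntl(fd, F_SETLKW, &fl) < 0) {
        perror("fcntl(F_SETLKW)");
        close(fd);
        return 1;
    }

    /* If the brick disconnects here, it drops this lock. With
     * client.strict-locks on, the client will not silently reopen this fd
     * after reconnect; subsequent fops on it fail instead. */
    pause(); /* hold the lock until interrupted */
    close(fd);
    return 0;
}

With the option off (the default), the client keeps today's behaviour of
reopening saved fds, including the FALLBACK_TO_ANON_FD path that the patch
additionally guards in client_get_remote_fd() below.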
Diffstat (limited to 'xlators/protocol/client/src')
-rw-r--r--  xlators/protocol/client/src/client-handshake.c    |  3
-rw-r--r--  xlators/protocol/client/src/client-helpers.c      |  5
-rw-r--r--  xlators/protocol/client/src/client-lk.c           |  2
-rw-r--r--  xlators/protocol/client/src/client-rpc-fops.c     | 45
-rw-r--r--  xlators/protocol/client/src/client-rpc-fops_v2.c  | 32
-rw-r--r--  xlators/protocol/client/src/client.c              | 13
-rw-r--r--  xlators/protocol/client/src/client.h              | 15
7 files changed, 109 insertions, 6 deletions
diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c
index 649b113db14..337b2f856af 100644
--- a/xlators/protocol/client/src/client-handshake.c
+++ b/xlators/protocol/client/src/client-handshake.c
@@ -681,7 +681,8 @@ client_post_handshake(call_frame_t *frame, xlator_t *this)
{
list_for_each_entry_safe(fdctx, tmp, &conf->saved_fds, sfd_pos)
{
- if (fdctx->remote_fd != -1)
+ if (fdctx->remote_fd != -1 ||
+ (!list_empty(&fdctx->lock_list) && conf->strict_locks))
continue;
fdctx->reopen_done = client_child_up_reopen_done;
diff --git a/xlators/protocol/client/src/client-helpers.c b/xlators/protocol/client/src/client-helpers.c
index 52e1089900b..156f1cd3d9b 100644
--- a/xlators/protocol/client/src/client-helpers.c
+++ b/xlators/protocol/client/src/client-helpers.c
@@ -412,6 +412,7 @@ client_get_remote_fd(xlator_t *this, fd_t *fd, int flags, int64_t *remote_fd)
{
clnt_fd_ctx_t *fdctx = NULL;
clnt_conf_t *conf = NULL;
+ gf_boolean_t locks_held = _gf_false;
GF_VALIDATE_OR_GOTO(this->name, fd, out);
GF_VALIDATE_OR_GOTO(this->name, remote_fd, out);
@@ -433,11 +434,13 @@ client_get_remote_fd(xlator_t *this, fd_t *fd, int flags, int64_t *remote_fd)
*remote_fd = -1;
else
*remote_fd = fdctx->remote_fd;
+
+ locks_held = !list_empty(&fdctx->lock_list);
}
}
pthread_spin_unlock(&conf->fd_lock);
- if ((flags & FALLBACK_TO_ANON_FD) && (*remote_fd == -1))
+ if ((flags & FALLBACK_TO_ANON_FD) && (*remote_fd == -1) && (!locks_held))
*remote_fd = GF_ANON_FD_NO;
return 0;
diff --git a/xlators/protocol/client/src/client-lk.c b/xlators/protocol/client/src/client-lk.c
index 679e1982f49..c1fb055fc0e 100644
--- a/xlators/protocol/client/src/client-lk.c
+++ b/xlators/protocol/client/src/client-lk.c
@@ -351,7 +351,7 @@ delete_granted_locks_owner(fd_t *fd, gf_lkowner_t *owner)
list_for_each_entry_safe(lock, tmp, &fdctx->lock_list, list)
{
- if (!is_same_lkowner(&lock->owner, owner)) {
+ if (is_same_lkowner(&lock->owner, owner)) {
list_del_init(&lock->list);
list_add_tail(&lock->list, &delete_list);
count++;
diff --git a/xlators/protocol/client/src/client-rpc-fops.c b/xlators/protocol/client/src/client-rpc-fops.c
index 8ca6cbedc85..247d49728a3 100644
--- a/xlators/protocol/client/src/client-rpc-fops.c
+++ b/xlators/protocol/client/src/client-rpc-fops.c
@@ -21,8 +21,18 @@ int32_t
client3_getspec(call_frame_t *frame, xlator_t *this, void *data);
rpc_clnt_prog_t clnt3_3_fop_prog;
-/* CBK */
+int
+client_is_setlk(int32_t cmd)
+{
+ if ((cmd == F_SETLK) || (cmd == F_SETLK64) || (cmd == F_SETLKW) ||
+ (cmd == F_SETLKW64)) {
+ return 1;
+ }
+ return 0;
+}
+
+/* CBK */
int
client3_3_symlink_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
@@ -815,7 +825,8 @@ client3_3_flush_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- if (rsp.op_ret >= 0 && !fd_is_anonymous(local->fd)) {
+ if ((rsp.op_ret >= 0 || (rsp.op_errno == ENOTCONN)) &&
+ !fd_is_anonymous(local->fd)) {
/* Delete all saved locks of the owner issuing flush */
ret = delete_granted_locks_owner(local->fd, &local->owner);
gf_msg_trace(this->name, 0, "deleting locks of owner (%s) returned %d",
@@ -2387,10 +2398,12 @@ client3_3_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = 0;
xlator_t *this = NULL;
dict_t *xdata = NULL;
+ clnt_local_t *local = NULL;
this = THIS;
frame = myframe;
+ local = frame->local;
if (-1 == req->rpc_status) {
rsp.op_ret = -1;
@@ -2411,6 +2424,18 @@ client3_3_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = client_post_lk(this, &rsp, &lock, &xdata);
if (ret < 0)
goto out;
+
+ /* Save the lock to the client lock cache to be able
+ to recover in the case of server reboot.*/
+
+ if (client_is_setlk(local->cmd)) {
+ ret = client_add_lock_for_recovery(local->fd, &lock, &local->owner,
+ local->cmd);
+ if (ret < 0) {
+ rsp.op_ret = -1;
+ rsp.op_errno = -ret;
+ }
+ }
}
out:
@@ -4184,8 +4209,16 @@ client3_3_flush(call_frame_t *frame, xlator_t *this, void *data)
ret = client_pre_flush(this, &req, args->fd, args->xdata);
if (ret) {
op_errno = -ret;
+ if (op_errno == EBADF) {
+ ret = delete_granted_locks_owner(local->fd, &local->owner);
+ gf_msg_trace(this->name, 0,
+ "deleting locks of owner (%s) returned %d",
+ lkowner_utoa(&local->owner), ret);
+ }
+
goto unwind;
}
+
ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_FLUSH,
client3_3_flush_cbk, NULL,
(xdrproc_t)xdr_gfs3_flush_req);
@@ -5120,8 +5153,16 @@ client3_3_lk(call_frame_t *frame, xlator_t *this, void *data)
args->xdata);
if (ret) {
op_errno = -ret;
+
+ if ((op_errno == EBADF) && (args->flock->l_type == F_UNLCK) &&
+ client_is_setlk(local->cmd)) {
+ client_add_lock_for_recovery(local->fd, args->flock, &local->owner,
+ local->cmd);
+ }
+
goto unwind;
}
+
ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_LK,
client3_3_lk_cbk, NULL,
(xdrproc_t)xdr_gfs3_lk_req);
diff --git a/xlators/protocol/client/src/client-rpc-fops_v2.c b/xlators/protocol/client/src/client-rpc-fops_v2.c
index 5743543fa7a..92461026b68 100644
--- a/xlators/protocol/client/src/client-rpc-fops_v2.c
+++ b/xlators/protocol/client/src/client-rpc-fops_v2.c
@@ -717,7 +717,8 @@ client4_0_flush_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- if (rsp.op_ret >= 0 && !fd_is_anonymous(local->fd)) {
+ if ((rsp.op_ret >= 0 || (rsp.op_errno == ENOTCONN)) &&
+ !fd_is_anonymous(local->fd)) {
/* Delete all saved locks of the owner issuing flush */
ret = delete_granted_locks_owner(local->fd, &local->owner);
gf_msg_trace(this->name, 0, "deleting locks of owner (%s) returned %d",
@@ -2172,10 +2173,12 @@ client4_0_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = 0;
xlator_t *this = NULL;
dict_t *xdata = NULL;
+ clnt_local_t *local = NULL;
this = THIS;
frame = myframe;
+ local = frame->local;
if (-1 == req->rpc_status) {
rsp.op_ret = -1;
@@ -2196,6 +2199,18 @@ client4_0_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = client_post_lk_v2(this, &rsp, &lock, &xdata);
if (ret < 0)
goto out;
+
+ /* Save the lock to the client lock cache to be able
+ to recover in the case of server reboot.*/
+
+ if (client_is_setlk(local->cmd)) {
+ ret = client_add_lock_for_recovery(local->fd, &lock, &local->owner,
+ local->cmd);
+ if (ret < 0) {
+ rsp.op_ret = -1;
+ rsp.op_errno = -ret;
+ }
+ }
}
out:
@@ -3972,6 +3987,13 @@ client4_0_flush(call_frame_t *frame, xlator_t *this, void *data)
ret = client_pre_flush_v2(this, &req, args->fd, args->xdata);
if (ret) {
op_errno = -ret;
+ if (op_errno == EBADF) {
+ ret = delete_granted_locks_owner(local->fd, &local->owner);
+ gf_msg_trace(this->name, 0,
+ "deleting locks of owner (%s) returned %d",
+ lkowner_utoa(&local->owner), ret);
+ }
+
goto unwind;
}
ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_FLUSH,
@@ -4745,8 +4767,16 @@ client4_0_lk(call_frame_t *frame, xlator_t *this, void *data)
args->xdata);
if (ret) {
op_errno = -ret;
+
+ if ((op_errno == EBADF) && (args->flock->l_type == F_UNLCK) &&
+ client_is_setlk(local->cmd)) {
+ client_add_lock_for_recovery(local->fd, args->flock, &local->owner,
+ local->cmd);
+ }
+
goto unwind;
}
+
ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_LK,
client4_0_lk_cbk, NULL,
(xdrproc_t)xdr_gfx_lk_req);
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 5ef866fe03f..e8b5cd82e9f 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -2588,6 +2588,7 @@ build_client_config(xlator_t *this, clnt_conf_t *conf)
GF_OPTION_INIT("send-gids", conf->send_gids, bool, out);
GF_OPTION_INIT("testing.old-protocol", conf->old_protocol, bool, out);
+ GF_OPTION_INIT("strict-locks", conf->strict_locks, bool, out);
conf->client_id = glusterfs_leaf_position(this);
@@ -2773,6 +2774,7 @@ reconfigure(xlator_t *this, dict_t *options)
out);
GF_OPTION_RECONF("send-gids", conf->send_gids, options, bool, out);
+ GF_OPTION_RECONF("strict-locks", conf->strict_locks, options, bool, out);
ret = 0;
out:
@@ -3153,6 +3155,17 @@ struct volume_options options[] = {
.op_version = {GD_OP_VERSION_7_0},
.flags = OPT_FLAG_SETTABLE,
},
+ {.key = {"strict-locks"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .op_version = {GD_OP_VERSION_7_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "When set, doesn't reopen saved fds after reconnect "
+ "if POSIX locks are held on them. Hence subsequent "
+ "operations on these fds will fail. This is "
+ "necessary for stricter lock compliance as bricks "
+ "clean up any granted locks when a client "
+ "disconnects."},
{.key = {NULL}},
};
diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h
index 833c232fb78..879a7d530d3 100644
--- a/xlators/protocol/client/src/client.h
+++ b/xlators/protocol/client/src/client.h
@@ -155,6 +155,14 @@ typedef struct clnt_conf {
compltely, ie client_fini_complete
to return*/
gf_boolean_t fini_completed;
+ gf_boolean_t strict_locks; /* When set, doesn't reopen saved fds after
+ reconnect if POSIX locks are held on them.
+ Hence subsequent operations on these fds will
+ fail. This is necessary for stricter lock
+ compliance as bricks clean up any granted
+ locks when a client disconnects.
+ */
+
} clnt_conf_t;
typedef struct _client_fd_ctx {
@@ -386,4 +394,11 @@ clnt_readdir_rsp_cleanup_v2(gfx_readdir_rsp *rsp);
int
clnt_readdirp_rsp_cleanup_v2(gfx_readdirp_rsp *rsp);
+int
+client_add_lock_for_recovery(fd_t *fd, struct gf_flock *flock,
+ gf_lkowner_t *owner, int32_t cmd);
+
+int
+client_is_setlk(int32_t cmd);
+
#endif /* !_CLIENT_H */
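A closing usage note: with client.strict-locks enabled, the contract from the
commit message is that operations on a fd whose locks were dropped by the
brick fail rather than silently succeed, and the application is expected to
close the stale fd and reopen. Below is a hedged sketch of such
application-side handling; the path, the reuse of a single retry, and the
choice to treat any write error as "stale fd" are illustrative assumptions,
not part of this patch.

/* Illustrative sketch (not part of the patch): application-side recovery
 * when strict-locks makes a write on a stale fd fail after a reconnect. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int open_and_lock(const char *path)
{
    int fd = open(path, O_RDWR);
    if (fd < 0)
        return -1;

    struct flock fl = {
        .l_type = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0, /* whole file */
    };
    if (fcntl(fd, F_SETLKW, &fl) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}

int main(void)
{
    const char *path = "/mnt/glusterfs/data.db"; /* hypothetical mount */
    int fd = open_and_lock(path);
    if (fd < 0) {
        perror("open_and_lock");
        return 1;
    }

    const char buf[] = "record";
    if (write(fd, buf, sizeof(buf)) < 0) {
        /* With client.strict-locks on, a fd that lost its locks across a
         * reconnect is not reopened, so this write fails. Close it and
         * start over with a fresh fd and a freshly acquired lock rather
         * than risk writing without mutual exclusion. */
        fprintf(stderr, "write failed (%s); reopening\n", strerror(errno));
        close(fd);
        fd = open_and_lock(path);
        if (fd < 0) {
            perror("reopen");
            return 1;
        }
    }
    close(fd);
    return 0;
}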