-rw-r--r--  tests/bugs/bug-1694920.t                          | 63
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c   | 13
-rw-r--r--  xlators/protocol/client/src/client-handshake.c    |  3
-rw-r--r--  xlators/protocol/client/src/client-helpers.c      |  5
-rw-r--r--  xlators/protocol/client/src/client-lk.c           |  2
-rw-r--r--  xlators/protocol/client/src/client-rpc-fops.c     | 45
-rw-r--r--  xlators/protocol/client/src/client-rpc-fops_v2.c  | 32
-rw-r--r--  xlators/protocol/client/src/client.c              | 13
-rw-r--r--  xlators/protocol/client/src/client.h              | 15
9 files changed, 185 insertions(+), 6 deletions(-)
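
This patch introduces a client.strict-locks volume option (enabled with gluster volume set <VOLNAME> client.strict-locks on). When it is on, the client xlator neither reopens saved fds after a reconnect nor falls back to anonymous fds while POSIX locks are held on those fds, so applications fail fast instead of silently continuing without the lock protection the brick discarded at disconnect. The locks in question are ordinary POSIX record locks; the following minimal sketch (the mount path is only an example) shows the fcntl() calls that arrive at the client's lk fop and are classified by the new client_is_setlk() helper:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Example path on a GlusterFS FUSE mount. */
    int fd = open("/mnt/glusterfs/a", O_RDWR | O_CREAT, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Exclusive POSIX record lock over the whole file; it reaches the
       brick through the client xlator's lk fop with an F_SETLKW cmd,
       which client_is_setlk() recognises. */
    struct flock fl = {
        .l_type = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0, /* 0 means "to end of file" */
    };
    if (fcntl(fd, F_SETLKW, &fl) < 0) {
        perror("fcntl(F_SETLKW)");
        return 1;
    }

    /* ... I/O done under the lock ... */

    fl.l_type = F_UNLCK; /* release the lock */
    fcntl(fd, F_SETLK, &fl);
    close(fd);
    return 0;
}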
diff --git a/tests/bugs/bug-1694920.t b/tests/bugs/bug-1694920.t
new file mode 100644
index 00000000000..5bf93c92f94
--- /dev/null
+++ b/tests/bugs/bug-1694920.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=300
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0};
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/a
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up, operations on an fd that held locks should succeed by default
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_write $fd1 "data"
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up, operations on an fd that held locks should fail when client.strict-locks is on
+TEST $CLI volume set $V0 client.strict-locks on
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST ! fd_write $fd1 "data"
+TEST fd_close $fd1
+
+cleanup
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index e4427069263..f7bfaa2ba3e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1514,6 +1514,19 @@ struct volopt_map_entry glusterd_volopt_map[] = {
      .op_version = GD_OP_VERSION_3_10_2,
      .value = "9",
      .flags = VOLOPT_FLAG_CLIENT_OPT},
+    {.key = "client.strict-locks",
+     .voltype = "protocol/client",
+     .option = "strict-locks",
+     .value = "off",
+     .op_version = GD_OP_VERSION_8_0,
+     .validate_fn = validate_boolean,
+     .type = GLOBAL_DOC,
+     .description = "When set, doesn't reopen saved fds after reconnect "
+                    "if POSIX locks are held on them. Hence subsequent "
+                    "operations on these fds will fail. This is "
+                    "necessary for stricter lock compliance as bricks "
+                    "clean up any granted locks when a client "
+                    "disconnects."},
     /* Although the following option is named ta-remote-port but it will be
      * added as remote-port in client volfile for ta-bricks only.
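
The client-side changes that follow all manipulate the per-fd list of granted locks the client keeps at fdctx->lock_list. For orientation, here is a hypothetical sketch of one cached entry; the real type is client_posix_lock_t in the client xlator and its exact fields may differ — the field set below is inferred only from the usage visible in this diff, with GlusterFS types (struct gf_flock, gf_lkowner_t, struct list_head) assumed to be in scope:

/* Hypothetical sketch; the real type is client_posix_lock_t and may
 * differ. */
struct saved_posix_lock {
    struct gf_flock user_flock; /* granted range and lock type */
    int32_t cmd;                /* F_SETLK/F_SETLKW variant that won it */
    gf_lkowner_t owner;         /* matched via is_same_lkowner() on flush */
    struct list_head list;      /* linked on clnt_fd_ctx_t->lock_list */
};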
diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c
index 649b113db14..337b2f856af 100644
--- a/xlators/protocol/client/src/client-handshake.c
+++ b/xlators/protocol/client/src/client-handshake.c
@@ -681,7 +681,8 @@ client_post_handshake(call_frame_t *frame, xlator_t *this)
     {
         list_for_each_entry_safe(fdctx, tmp, &conf->saved_fds, sfd_pos)
         {
-            if (fdctx->remote_fd != -1)
+            if (fdctx->remote_fd != -1 ||
+                (!list_empty(&fdctx->lock_list) && conf->strict_locks))
                 continue;
 
             fdctx->reopen_done = client_child_up_reopen_done;
diff --git a/xlators/protocol/client/src/client-helpers.c b/xlators/protocol/client/src/client-helpers.c
index 52e1089900b..156f1cd3d9b 100644
--- a/xlators/protocol/client/src/client-helpers.c
+++ b/xlators/protocol/client/src/client-helpers.c
@@ -412,6 +412,7 @@ client_get_remote_fd(xlator_t *this, fd_t *fd, int flags, int64_t *remote_fd)
 {
     clnt_fd_ctx_t *fdctx = NULL;
     clnt_conf_t *conf = NULL;
+    gf_boolean_t locks_held = _gf_false;
 
     GF_VALIDATE_OR_GOTO(this->name, fd, out);
     GF_VALIDATE_OR_GOTO(this->name, remote_fd, out);
@@ -433,11 +434,13 @@ client_get_remote_fd(xlator_t *this, fd_t *fd, int flags, int64_t *remote_fd)
                 *remote_fd = -1;
             else
                 *remote_fd = fdctx->remote_fd;
+
+            locks_held = !list_empty(&fdctx->lock_list);
         }
     }
     pthread_spin_unlock(&conf->fd_lock);
 
-    if ((flags & FALLBACK_TO_ANON_FD) && (*remote_fd == -1))
+    if ((flags & FALLBACK_TO_ANON_FD) && (*remote_fd == -1) && (!locks_held))
         *remote_fd = GF_ANON_FD_NO;
 
     return 0;
diff --git a/xlators/protocol/client/src/client-lk.c b/xlators/protocol/client/src/client-lk.c
index 679e1982f49..c1fb055fc0e 100644
--- a/xlators/protocol/client/src/client-lk.c
+++ b/xlators/protocol/client/src/client-lk.c
@@ -351,7 +351,7 @@ delete_granted_locks_owner(fd_t *fd, gf_lkowner_t *owner)
 
     list_for_each_entry_safe(lock, tmp, &fdctx->lock_list, list)
     {
-        if (!is_same_lkowner(&lock->owner, owner)) {
+        if (is_same_lkowner(&lock->owner, owner)) {
             list_del_init(&lock->list);
             list_add_tail(&lock->list, &delete_list);
             count++;
diff --git a/xlators/protocol/client/src/client-rpc-fops.c b/xlators/protocol/client/src/client-rpc-fops.c
index 8ca6cbedc85..247d49728a3 100644
--- a/xlators/protocol/client/src/client-rpc-fops.c
+++ b/xlators/protocol/client/src/client-rpc-fops.c
@@ -21,8 +21,18 @@ int32_t
 client3_getspec(call_frame_t *frame, xlator_t *this, void *data);
 rpc_clnt_prog_t clnt3_3_fop_prog;
 
-/* CBK */
+int
+client_is_setlk(int32_t cmd)
+{
+    if ((cmd == F_SETLK) || (cmd == F_SETLK64) || (cmd == F_SETLKW) ||
+        (cmd == F_SETLKW64)) {
+        return 1;
+    }
+    return 0;
+}
+
+/* CBK */
 int
 client3_3_symlink_cbk(struct rpc_req *req, struct iovec *iov, int count,
                       void *myframe)
@@ -815,7 +825,8 @@ client3_3_flush_cbk(struct rpc_req *req, struct iovec *iov, int count,
         goto out;
     }
 
-    if (rsp.op_ret >= 0 && !fd_is_anonymous(local->fd)) {
+    if ((rsp.op_ret >= 0 || (rsp.op_errno == ENOTCONN)) &&
+        !fd_is_anonymous(local->fd)) {
         /* Delete all saved locks of the owner issuing flush */
         ret = delete_granted_locks_owner(local->fd, &local->owner);
         gf_msg_trace(this->name, 0, "deleting locks of owner (%s) returned %d",
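
Two notes on the hunks above. The client-lk.c change fixes an inverted condition: delete_granted_locks_owner() is meant to collect the flushing owner's own locks onto the delete list, not everyone else's. And client3_3_flush_cbk() now purges the owner's cached locks on ENOTCONN as well as on success, since the brick has already dropped those locks at disconnect and the cache must agree. The lk callbacks below record granted locks through client_add_lock_for_recovery(), whose body is not part of this diff; what follows is a hedged sketch of the "remember a granted lock" half only (the real helper also has to drop or split ranges on F_UNLCK), reusing names visible elsewhere in the patch plus the hypothetical saved_posix_lock from above:

/* Hedged sketch, not the actual implementation. this_fd_get_ctx() and
 * conf->fd_lock are used exactly as in client-helpers.c. */
int
client_add_lock_for_recovery(fd_t *fd, struct gf_flock *flock,
                             gf_lkowner_t *owner, int32_t cmd)
{
    clnt_conf_t *conf = THIS->private;
    struct saved_posix_lock *lock = NULL;
    clnt_fd_ctx_t *fdctx = NULL;

    lock = GF_CALLOC(1, sizeof(*lock), 0 /* mem-type elided */);
    if (!lock)
        return -ENOMEM;

    lock->user_flock = *flock;
    lock->owner = *owner;
    lock->cmd = cmd;
    INIT_LIST_HEAD(&lock->list);

    pthread_spin_lock(&conf->fd_lock);
    {
        fdctx = this_fd_get_ctx(fd, THIS);
        if (fdctx)
            list_add_tail(&lock->list, &fdctx->lock_list);
    }
    pthread_spin_unlock(&conf->fd_lock);

    if (!fdctx) {
        GF_FREE(lock);
        return -EBADFD; /* fd no longer tracked; callers map -ret to errno */
    }
    return 0;
}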
@@ -2387,10 +2398,12 @@ client3_3_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
     int ret = 0;
     xlator_t *this = NULL;
     dict_t *xdata = NULL;
+    clnt_local_t *local = NULL;
 
     this = THIS;
     frame = myframe;
+    local = frame->local;
 
     if (-1 == req->rpc_status) {
         rsp.op_ret = -1;
@@ -2411,6 +2424,18 @@ client3_3_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
         ret = client_post_lk(this, &rsp, &lock, &xdata);
         if (ret < 0)
             goto out;
+
+        /* Save the lock to the client lock cache to be able
+           to recover in the case of server reboot.*/
+
+        if (client_is_setlk(local->cmd)) {
+            ret = client_add_lock_for_recovery(local->fd, &lock, &local->owner,
+                                               local->cmd);
+            if (ret < 0) {
+                rsp.op_ret = -1;
+                rsp.op_errno = -ret;
+            }
+        }
     }
 
 out:
@@ -4184,8 +4209,16 @@ client3_3_flush(call_frame_t *frame, xlator_t *this, void *data)
     ret = client_pre_flush(this, &req, args->fd, args->xdata);
     if (ret) {
         op_errno = -ret;
+        if (op_errno == EBADF) {
+            ret = delete_granted_locks_owner(local->fd, &local->owner);
+            gf_msg_trace(this->name, 0,
+                         "deleting locks of owner (%s) returned %d",
+                         lkowner_utoa(&local->owner), ret);
+        }
+
         goto unwind;
     }
+
     ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_FLUSH,
                                 client3_3_flush_cbk, NULL,
                                 (xdrproc_t)xdr_gfs3_flush_req);
@@ -5120,8 +5153,16 @@ client3_3_lk(call_frame_t *frame, xlator_t *this, void *data)
                         args->xdata);
     if (ret) {
         op_errno = -ret;
+
+        if ((op_errno == EBADF) && (args->flock->l_type == F_UNLCK) &&
+            client_is_setlk(local->cmd)) {
+            client_add_lock_for_recovery(local->fd, args->flock, &local->owner,
+                                         local->cmd);
+        }
+
         goto unwind;
     }
+
     ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_LK,
                                 client3_3_lk_cbk, NULL,
                                 (xdrproc_t)xdr_gfs3_lk_req);
diff --git a/xlators/protocol/client/src/client-rpc-fops_v2.c b/xlators/protocol/client/src/client-rpc-fops_v2.c
index 5743543fa7a..92461026b68 100644
--- a/xlators/protocol/client/src/client-rpc-fops_v2.c
+++ b/xlators/protocol/client/src/client-rpc-fops_v2.c
@@ -717,7 +717,8 @@ client4_0_flush_cbk(struct rpc_req *req, struct iovec *iov, int count,
         goto out;
     }
 
-    if (rsp.op_ret >= 0 && !fd_is_anonymous(local->fd)) {
+    if ((rsp.op_ret >= 0 || (rsp.op_errno == ENOTCONN)) &&
+        !fd_is_anonymous(local->fd)) {
         /* Delete all saved locks of the owner issuing flush */
         ret = delete_granted_locks_owner(local->fd, &local->owner);
         gf_msg_trace(this->name, 0, "deleting locks of owner (%s) returned %d",
@@ -2172,10 +2173,12 @@ client4_0_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
     int ret = 0;
     xlator_t *this = NULL;
     dict_t *xdata = NULL;
+    clnt_local_t *local = NULL;
 
     this = THIS;
     frame = myframe;
+    local = frame->local;
 
     if (-1 == req->rpc_status) {
         rsp.op_ret = -1;
@@ -2196,6 +2199,18 @@ client4_0_lk_cbk(struct rpc_req *req, struct iovec *iov, int count,
         ret = client_post_lk_v2(this, &rsp, &lock, &xdata);
         if (ret < 0)
             goto out;
+
+        /* Save the lock to the client lock cache to be able
+           to recover in the case of server reboot.*/
+
+        if (client_is_setlk(local->cmd)) {
+            ret = client_add_lock_for_recovery(local->fd, &lock, &local->owner,
+                                               local->cmd);
+            if (ret < 0) {
+                rsp.op_ret = -1;
+                rsp.op_errno = -ret;
+            }
+        }
     }
 
 out:
@@ -3972,6 +3987,13 @@ client4_0_flush(call_frame_t *frame, xlator_t *this, void *data)
     ret = client_pre_flush_v2(this, &req, args->fd, args->xdata);
     if (ret) {
         op_errno = -ret;
+        if (op_errno == EBADF) {
+            ret = delete_granted_locks_owner(local->fd, &local->owner);
+            gf_msg_trace(this->name, 0,
+                         "deleting locks of owner (%s) returned %d",
+                         lkowner_utoa(&local->owner), ret);
+        }
+
         goto unwind;
     }
     ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_FLUSH,
@@ -4745,8 +4767,16 @@ client4_0_lk(call_frame_t *frame, xlator_t *this, void *data)
                            args->xdata);
     if (ret) {
         op_errno = -ret;
+
+        if ((op_errno == EBADF) && (args->flock->l_type == F_UNLCK) &&
+            client_is_setlk(local->cmd)) {
+            client_add_lock_for_recovery(local->fd, args->flock, &local->owner,
+                                         local->cmd);
+        }
+
         goto unwind;
     }
+
     ret = client_submit_request(this, &req, frame, conf->fops, GFS3_OP_LK,
                                 client4_0_lk_cbk, NULL,
                                 (xdrproc_t)xdr_gfx_lk_req);
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 5ef866fe03f..e8b5cd82e9f 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -2588,6 +2588,7 @@ build_client_config(xlator_t *this, clnt_conf_t *conf)
     GF_OPTION_INIT("send-gids", conf->send_gids, bool, out);
     GF_OPTION_INIT("testing.old-protocol", conf->old_protocol, bool, out);
+    GF_OPTION_INIT("strict-locks", conf->strict_locks, bool, out);
 
     conf->client_id = glusterfs_leaf_position(this);
@@ -2773,6 +2774,7 @@ reconfigure(xlator_t *this, dict_t *options)
                      out);
 
     GF_OPTION_RECONF("send-gids", conf->send_gids, options, bool, out);
+    GF_OPTION_RECONF("strict-locks", conf->strict_locks, options, bool, out);
 
     ret = 0;
 out:
@@ -3153,6 +3155,17 @@ struct volume_options options[] = {
         .op_version = {GD_OP_VERSION_7_0},
         .flags = OPT_FLAG_SETTABLE,
     },
+    {.key = {"strict-locks"},
+     .type = GF_OPTION_TYPE_BOOL,
+     .default_value = "off",
+     .op_version = {GD_OP_VERSION_7_0},
+     .flags = OPT_FLAG_SETTABLE,
+     .description = "When set, doesn't reopen saved fds after reconnect "
+                    "if POSIX locks are held on them. Hence subsequent "
+                    "operations on these fds will fail. This is "
+                    "necessary for stricter lock compliance as bricks "
+                    "clean up any granted locks when a client "
+                    "disconnects."},
     {.key = {NULL}},
 };
diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h
index 833c232fb78..879a7d530d3 100644
--- a/xlators/protocol/client/src/client.h
+++ b/xlators/protocol/client/src/client.h
@@ -155,6 +155,14 @@ typedef struct clnt_conf {
                                           completely, i.e. client_fini_complete
                                           to return*/
     gf_boolean_t fini_completed;
+    gf_boolean_t strict_locks; /* When set, doesn't reopen saved fds after
+                                  reconnect if POSIX locks are held on them.
+                                  Hence subsequent operations on these fds will
+                                  fail. This is necessary for stricter lock
+                                  compliance as bricks clean up any granted
+                                  locks when a client disconnects.
+                               */
+
 } clnt_conf_t;
 
 typedef struct _client_fd_ctx {
@@ -386,4 +394,11 @@ clnt_readdir_rsp_cleanup_v2(gfx_readdir_rsp *rsp);
 int
 clnt_readdirp_rsp_cleanup_v2(gfx_readdirp_rsp *rsp);
 
+int
+client_add_lock_for_recovery(fd_t *fd, struct gf_flock *flock,
+                             gf_lkowner_t *owner, int32_t cmd);
+
+int
+client_is_setlk(int32_t cmd);
+
 #endif /* !_CLIENT_H */
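
At the syscall level, the last test case in bug-1694920.t reduces to the scenario below. With client.strict-locks on, the write after the brick restart is expected to fail: the client never reopens the fd and, per the client-helpers.c hunk, refuses the anonymous-fd fallback while locks are cached. The mount path and the exact errno (likely EBADFD) are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Example path on a FUSE mount of a volume with
       client.strict-locks enabled. */
    int fd = open("/mnt/glusterfs/a", O_RDWR | O_CREAT, 0644);
    struct flock fl = {.l_type = F_WRLCK, .l_whence = SEEK_SET};

    if (fd < 0 || fcntl(fd, F_SETLKW, &fl) < 0) {
        perror("setup");
        return 1;
    }

    puts("lock held; restart the brick, then press Enter");
    getchar();

    /* The brick dropped the lock at disconnect. With strict-locks the
       client leaves this fd unopened after reconnect, so the write
       should fail (likely EBADFD) instead of running unprotected. */
    if (write(fd, "data", 4) < 0)
        perror("write after brick restart");

    close(fd);
    return 0;
}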
