| author    | Meghana Madhusudhan <mmadhusu@redhat.com> | 2015-04-20 10:41:47 +0530 |
|-----------|-------------------------------------------|---------------------------|
| committer | Niels de Vos <ndevos@redhat.com>          | 2015-05-07 02:08:43 -0700 |
| commit    | 4aad69a8f88acf384c812316aaa985cde2229cd7  |                           |
| tree      | 12318360918c95e89f7a40a24855e626cae0a014  |                           |
| parent    | 6d82215ab95d95ace13465a3efd384e50942ea67  |                           |
NFS-Ganesha : Locking global options file
The global option 'gluster features.ganesha enable'
writes into the global options file. The snapshot
feature also writes into the same file.
To handle concurrent transactions on this file
correctly, a new lock type has to be introduced.
Every operation that uses this file must
contend for the new lock type.
This is a backport of the patch
http://review.gluster.org/#/c/10130/
Change-Id: I1fdd285814e615a13dbf8c88ad2b7ee311247f90
BUG: 1218963
Signed-off-by: Meghana Madhusudhan <mmadhusu@redhat.com>
Reviewed-on: http://review.gluster.org/10606
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Tested-by: NetBSD Build System
Reviewed-by: Avra Sengupta <asengupt@redhat.com>
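
For context, the patch plugs the global options file into glusterd's existing mgmt_v3 locking framework: a new entity type "global" is registered in valid_types[], the CLI marks the transaction with the keys "globalname" (set to "All") and "hold_global_locks", and glusterd then takes and releases the mgmt_v3 lock for that entity around the write. The sketch below only illustrates that call pattern and is not code from the patch; the helper name set_global_option_locked and its signature are hypothetical.

```c
/* Illustrative sketch only (hypothetical helper, not part of the patch):
 * shows how a glusterd code path would contend for the new "global"
 * mgmt_v3 lock before updating and persisting the global options file. */
static int
set_global_option_locked (xlator_t *this, glusterd_conf_t *priv,
                          char *key, char *value)
{
        int ret = -1;

        /* Contend for the "global" lock on the name "All"; this fails if
         * another transaction (e.g. a snapshot config change) holds it. */
        ret = glusterd_mgmt_v3_lock ("All", MY_UUID, "global");
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Another transaction is in progress for the "
                        "global options file");
                goto out;
        }

        ret = dict_set_str (priv->opts, key, value);
        if (ret)
                goto unlock;

        /* Persist the in-memory global options to the options file. */
        ret = glusterd_store_options (this, priv->opts);

unlock:
        if (glusterd_mgmt_v3_unlock ("All", MY_UUID, "global"))
                gf_log (this->name, GF_LOG_ERROR,
                        "Unable to release lock for the global options file");
out:
        return ret;
}
```

In the actual patch the lock is taken in the op state machine and syncop paths (glusterd_op_ac_lock, gd_sync_task_begin) rather than in a helper like this, but the contend-write-release pattern is the same.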
| -rw-r--r-- | cli/src/cli-cmd-parser.c                     | 25 |
| -rw-r--r-- | cli/src/cli-rpc-ops.c                        |  4 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-ganesha.c | 21 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-locks.c   |  3 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c   | 26 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c  | 51 |
6 files changed, 111 insertions, 19 deletions
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index d4aa61ca6b8..4fd205ed5fe 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -903,6 +903,20 @@ cli_cmd_ganesha_parse (struct cli_state *state,
                 goto out;
         }
 
+        ret = dict_set_str (dict, "globalname", "All");
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "dict set on global"
+                        " key failed.");
+                goto out;
+        }
+
+        ret = dict_set_int32 (dict, "hold_global_locks", _gf_true);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "dict set on global key "
+                        "failed.");
+                goto out;
+        }
+
         *options = dict;
 out:
         if (ret)
@@ -4240,6 +4254,17 @@ cli_snap_config_limit_parse (const char **words, dict_t *dict,
                 goto out;
         }
 
+        ret = dict_set_dynstr_with_alloc (dict, "globalname", "All");
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Could not set global key");
+                goto out;
+        }
+        ret = dict_set_int32 (dict, "hold_global_locks", _gf_true);
+        if (ret) {
+                gf_log ("cli", GF_LOG_ERROR, "Could not set global locks");
+                goto out;
+        }
+
 out:
         return ret;
 }
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 07fb8dc7e67..3c32e6d8a09 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1870,9 +1870,9 @@ gf_cli_ganesha_cbk (struct rpc_req *req, struct iovec *iov,
 
         if (rsp.op_ret) {
                 if (strcmp (rsp.op_errstr, ""))
-                        cli_err ("ganesha enable: failed: %s", rsp.op_errstr);
+                        cli_err ("nfs-ganesha: failed: %s", rsp.op_errstr);
                 else
-                        cli_err ("ganesha enable: failed");
+                        cli_err ("nfs-ganesha: failed");
         }
 
         else {
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index a200a64d7c6..d4ab77ca5f9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -219,17 +219,12 @@ int
 glusterd_op_set_ganesha (dict_t *dict, char **errstr)
 {
         int                                      ret = 0;
-        int                                      flags = 0;
-        glusterd_volinfo_t                      *volinfo = NULL;
-        char                                    *volname = NULL;
         xlator_t                                *this = NULL;
         glusterd_conf_t                         *priv = NULL;
         char                                    *key = NULL;
         char                                    *value = NULL;
-        char                                     str[50] = {0, };
-        int32_t                                  dict_count = 0;
         dict_t                                  *vol_opts = NULL;
-        int count                                = 0;
+        char                                    *next_version =  NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -268,9 +263,17 @@ glusterd_op_set_ganesha (dict_t *dict, char **errstr)
                         " nfs-ganesha in dict.");
                 goto out;
         }
-
-        /* To do : Lock the global options file before writing */
-        /* into this file. Bug ID : 1200254    */
+        ret = glusterd_get_next_global_opt_version_str (priv->opts,
+                                                        &next_version);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_DEBUG, "Could not fetch "
+                        " global op version");
+                goto out;
+        }
+        ret = dict_set_str (priv->opts, GLUSTERD_GLOBAL_OPT_VERSION,
+                            next_version);
+        if (ret)
+                goto out;
 
         ret = glusterd_store_options (this, priv->opts);
         if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 0703777bdcb..c86dc8069da 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -26,7 +26,7 @@
 
 #include <signal.h>
 
-#define GF_MAX_LOCKING_ENTITIES 2
+#define GF_MAX_LOCKING_ENTITIES 3
 
 /* Valid entities that the mgmt_v3 lock can hold locks upon    *
  * To add newer entities to be locked, we can just add more    *
@@ -34,6 +34,7 @@
 glusterd_valid_entities   valid_types[] = {
         { "vol",  _gf_true  },
         { "snap", _gf_false },
+        { "global", _gf_false},
         { NULL              },
 };
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index da922e3d674..cb8080693c6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -3162,7 +3162,9 @@ static int
 glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
 {
         int32_t                         ret             = 0;
+        int32_t                         err             = 0;
         char                           *volname         = NULL;
+        char                           *globalname      = NULL;
         glusterd_op_lock_ctx_t         *lock_ctx        = NULL;
         glusterd_conf_t                *priv            = NULL;
         xlator_t                       *this            = NULL;
@@ -3193,8 +3195,19 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
                                 gf_log (this->name, GF_LOG_ERROR,
                                         "Unable to acquire lock for %s",
                                         volname);
+                        goto out;
                 }
 
+                ret = dict_get_str (lock_ctx->dict, "globalname", &globalname);
+                if (!ret) {
+                        ret = glusterd_mgmt_v3_lock (globalname, lock_ctx->uuid,
+                                                     "global");
+                        if (ret)
+                                gf_log (this->name, GF_LOG_ERROR,
+                                        "Unable to acquire lock for %s",
+                                        globalname);
+                }
+out:
                 glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req,
                                                    &event->txn_id, ret);
 
@@ -3210,6 +3223,7 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
 {
         int32_t                         ret             = 0;
         char                           *volname         = NULL;
+        char                           *globalname      = NULL;
         glusterd_op_lock_ctx_t         *lock_ctx        = NULL;
         glusterd_conf_t                *priv            = NULL;
         xlator_t                       *this            = NULL;
@@ -3241,8 +3255,20 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
                                 gf_log (this->name, GF_LOG_ERROR,
                                         "Unable to release lock for %s",
                                         volname);
+                        goto out;
                 }
 
+                ret = dict_get_str (lock_ctx->dict, "globalname", &globalname);
+                if (!ret) {
+                        ret = glusterd_mgmt_v3_unlock (globalname, lock_ctx->uuid,
+                                                      "global");
+                        if (ret)
+                                gf_log (this->name, GF_LOG_ERROR,
+                                        "Unable to release lock for %s",
+                                        globalname);
+
+                }
+out:
                 glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req,
                                                      &event->txn_id, ret);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index deb38a7dea9..859aa636885 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1440,6 +1440,8 @@ gd_unlock_op_phase (glusterd_conf_t  *conf, glusterd_op_t op, int *op_ret,
         int                     ret         = -1;
         xlator_t               *this        = NULL;
         struct syncargs         args        = {0};
+        int32_t                 global      = 0;
+        char                   *type        = NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1479,7 +1481,13 @@ gd_unlock_op_phase (glusterd_conf_t  *conf, glusterd_op_t op, int *op_ret,
                 }
                 rcu_read_unlock ();
         } else {
-                if (volname) {
+
+                ret = dict_get_int32 (op_ctx, "hold_global_locks", &global);
+                if (global)
+                        type = "global";
+                else
+                        type = "vol";
+                if (volname || global) {
                         rcu_read_lock ();
                         cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
                                                      uuid_list) {
@@ -1537,9 +1545,9 @@ out:
                 if (conf->op_version < GD_OP_VERSION_3_6_0)
                         glusterd_unlock (MY_UUID);
                 else {
-                        if (volname) {
+                        if (type) {
                                 ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
-                                                               "vol");
+                                                               type);
                                 if (ret)
                                         gf_log (this->name, GF_LOG_ERROR,
                                                 "Unable to release lock for %s",
@@ -1655,9 +1663,11 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         int32_t                     tmp_op           = 0;
         char                        *op_errstr       = NULL;
         char                        *tmp             = NULL;
+        char                        *global          = NULL;
         char                        *volname         = NULL;
         xlator_t                    *this            = NULL;
         gf_boolean_t                is_acquired      = _gf_false;
+        gf_boolean_t                is_global        = _gf_false;
         uuid_t                      *txn_id          = NULL;
         glusterd_op_info_t          txn_opinfo       = {{0},};
 
@@ -1714,6 +1724,12 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 }
         } else {
+                ret = dict_get_str (op_ctx, "globalname", &global);
+                if (!ret) {
+                        is_global = _gf_true;
+                        goto global;
+                }
+
                 /* If no volname is given as a part of the command, locks will
                  * not be held */
                 ret = dict_get_str (op_ctx, "volname", &tmp);
@@ -1742,13 +1758,28 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 }
         }
 
+global:
+        if (is_global) {
+                ret = glusterd_mgmt_v3_lock (global, MY_UUID, "global");
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to acquire lock for %s", global);
+                        gf_asprintf (&op_errstr,
+                                     "Another transaction is in progress "
+                                     "for %s. Please try again after sometime.",
+                                     global);
+                        is_global = _gf_false;
+                        goto out;
+                }
+        }
+
         is_acquired = _gf_true;
 
 local_locking_done:
 
         /* If no volname is given as a part of the command, locks will
          * not be held */
-        if (volname || (conf->op_version < GD_OP_VERSION_3_6_0)) {
+        if (volname || (conf->op_version < GD_OP_VERSION_3_6_0) || is_global) {
                 ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr, *txn_id,
                                         &txn_opinfo);
                 if (ret) {
@@ -1784,9 +1815,15 @@ local_locking_done:
 out:
         op_ret = ret;
         if (txn_id) {
-                (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
-                                           op_errstr, volname, is_acquired,
-                                           *txn_id, &txn_opinfo);
+                if (volname)
+                        (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
+                                                   op_errstr, volname, is_acquired,
+                                                   *txn_id, &txn_opinfo);
+                if (global)
+                        (void) gd_unlock_op_phase (conf, op, &op_ret, req, op_ctx,
+                                                   op_errstr, global, is_acquired,
+                                                   *txn_id, &txn_opinfo);
+
                 /* Clearing the transaction opinfo */
                 ret = glusterd_clear_txn_opinfo (txn_id);
