Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-handler.c | 491
1 file changed, 456 insertions, 35 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ab3fa94c0fa..797141decd1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -33,6 +33,7 @@
 #include "glusterd-op-sm.h"
 #include "glusterd-utils.h"
 #include "glusterd-store.h"
+#include "glusterd-locks.h"
 #include "glusterd1-xdr.h"
 #include "cli1-xdr.h"
@@ -55,6 +56,7 @@
 #endif

 extern glusterd_op_info_t opinfo;
+extern uuid_t global_txn_id;

 int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
                                 rpc_clnt_event_t event,
@@ -590,10 +592,16 @@ int32_t
 glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                        char *err_str, size_t err_len)
 {
-        int32_t                  ret    = -1;
-        xlator_t                *this   = NULL;
-        glusterd_conf_t         *priv   = NULL;
-        int32_t                  locked = 0;
+        int32_t                     ret             = -1;
+        dict_t                     *dict            = NULL;
+        xlator_t                   *this            = NULL;
+        glusterd_conf_t            *priv            = NULL;
+        int32_t                     locked          = 0;
+        char                       *tmp             = NULL;
+        char                       *volname         = NULL;
+        uuid_t                     *txn_id          = NULL;
+        glusterd_op_info_t          txn_op_info     = {{0},};
+        glusterd_op_sm_event_type_t event_type      = GD_OP_EVENT_NONE;

         GF_ASSERT (req);
         GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
@@ -604,33 +612,122 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
         priv = this->private;
         GF_ASSERT (priv);

-        ret = glusterd_lock (MY_UUID);
+        dict = ctx;
+
+        /* Generate a transaction-id for this operation and
+         * save it in the dict. This transaction id distinguishes
+         * each transaction, and helps separate opinfos in the
+         * op state machine. */
+        ret = glusterd_generate_txn_id (dict, &txn_id);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to generate transaction id");
+                goto out;
+
+        }
+
+        /* Save the MY_UUID as the originator_uuid. This originator_uuid
+         * will be used by is_origin_glusterd() to determine if a node
+         * is the originator node for a command. */
+        ret = glusterd_set_originator_uuid (dict);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to acquire lock on localhost, ret: %d", ret);
-                snprintf (err_str, err_len, "Another transaction is in progress. "
-                          "Please try again after sometime.");
+                        "Failed to set originator_uuid.");
                 goto out;
         }

+        /* Based on the op_version, acquire a cluster or volume lock */
+        if (priv->op_version < GD_OP_VERSION_4) {
+                ret = glusterd_lock (MY_UUID);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to acquire lock on localhost, ret: %d",
+                                ret);
+                        snprintf (err_str, err_len,
+                                  "Another transaction is in progress. "
+                                  "Please try again after sometime.");
+                        goto out;
+                }
+        } else {
+                /* If no volname is given as a part of the command, locks will
+                 * not be held */
+                ret = dict_get_str (dict, "volname", &tmp);
+                if (ret) {
+                        gf_log ("", GF_LOG_INFO,
+                                "No Volume name present. "
+                                "Locks not being held.");
+                        goto local_locking_done;
+                } else {
+                        /* Use a copy of volname, as cli response will be
+                         * sent before the unlock, and the volname in the
+                         * dict, might be removed */
+                        volname = gf_strdup (tmp);
+                        if (!volname)
+                                goto out;
+                }
+
+                ret = glusterd_volume_lock (volname, MY_UUID);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to acquire lock for %s", volname);
+                        snprintf (err_str, err_len,
+                                  "Another transaction is in progress for %s. "
+                                  "Please try again after sometime.", volname);
+                        goto out;
+                }
+        }
+
         locked = 1;
         gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");

-        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL);
+local_locking_done:
+
+        /* If no volname is given as a part of the command, locks will
+         * not be held, hence sending stage event. */
+        if (volname)
+                event_type = GD_OP_EVENT_START_LOCK;
+        else {
+                txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
+                event_type = GD_OP_EVENT_ALL_ACC;
+        }
+
+        /* Save opinfo for this transaction with the transaction id */
+        glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);
+
+        ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Unable to set transaction's opinfo");
+                if (ctx)
+                        dict_unref (ctx);
+                goto out;
+        }
+
+        ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR, "Failed to acquire cluster"
                         " lock.");
                 goto out;
         }

-        glusterd_op_set_op (op);
-        glusterd_op_set_ctx (ctx);
-        glusterd_op_set_req (req);
-
-
 out:
-        if (locked && ret)
-                glusterd_unlock (MY_UUID);
+        if (locked && ret) {
+                /* Based on the op-version, we release the
+                 * cluster or volume lock */
+                if (priv->op_version < GD_OP_VERSION_4)
+                        glusterd_unlock (MY_UUID);
+                else {
+                        ret = glusterd_volume_unlock (volname, MY_UUID);
+                        if (ret)
+                                gf_log (this->name, GF_LOG_ERROR,
+                                        "Unable to release lock for %s",
+                                        volname);
+                        ret = -1;
+                }
+        }
+
+        if (volname)
+                GF_FREE (volname);

         gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
@@ -639,11 +736,15 @@ out:
 int
 __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
 {
-        gd1_mgmt_cluster_lock_req       lock_req = {{0},};
-        int32_t                         ret = -1;
-        glusterd_op_lock_ctx_t          *ctx = NULL;
-        glusterd_peerinfo_t             *peerinfo = NULL;
-        xlator_t                        *this = NULL;
+        dict_t                         *op_ctx      = NULL;
+        int32_t                         ret         = -1;
+        gd1_mgmt_cluster_lock_req       lock_req    = {{0},};
+        glusterd_op_lock_ctx_t         *ctx         = NULL;
+        glusterd_op_t                   op          = GD_OP_EVENT_LOCK;
+        glusterd_peerinfo_t            *peerinfo    = NULL;
+        glusterd_op_info_t              txn_op_info = {{0},};
+        uuid_t                         *txn_id      = &global_txn_id;
+        xlator_t                       *this        = NULL;

         this = THIS;
         GF_ASSERT (this);
@@ -678,8 +779,29 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)

         uuid_copy (ctx->uuid, lock_req.uuid);
         ctx->req = req;
+        ctx->dict = NULL;
+
+        op_ctx =  dict_new ();
+        if (!op_ctx) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Unable to set new dict");
+                goto out;
+        }

-        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx);
+        glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);
+
+        ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Unable to set transaction's opinfo");
+                dict_unref (txn_op_info.op_ctx);
+                goto out;
+        }
+
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to inject event GD_OP_EVENT_LOCK");

 out:
         gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -697,6 +819,186 @@ glusterd_handle_cluster_lock (rpcsvc_request_t *req)
                                             __glusterd_handle_cluster_lock);
 }

+static int
+glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
+{
+        gd1_mgmt_volume_lock_req        lock_req    = {{0},};
+        int32_t                         ret         = -1;
+        glusterd_op_lock_ctx_t         *ctx         = NULL;
+        glusterd_peerinfo_t            *peerinfo    = NULL;
+        xlator_t                       *this        = NULL;
+        glusterd_op_info_t              txn_op_info = {{0},};
+
+        this = THIS;
+        GF_ASSERT (this);
+        GF_ASSERT (req);
+
+        ret = xdr_to_generic (req->msg[0], &lock_req,
+                              (xdrproc_t)xdr_gd1_mgmt_volume_lock_req);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+                        "request received from peer");
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        gf_log (this->name, GF_LOG_DEBUG, "Received volume lock req "
+                "from uuid: %s txn_id: %s", uuid_utoa (lock_req.uuid),
+                uuid_utoa (lock_req.txn_id));
+
+        if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+                        "belong to the cluster. Ignoring request.",
+                        uuid_utoa (lock_req.uuid));
+                ret = -1;
+                goto out;
+        }
+
+        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+        if (!ctx) {
+                ret = -1;
+                goto out;
+        }
+
+        uuid_copy (ctx->uuid, lock_req.uuid);
+        ctx->req = req;
+
+        ctx->dict = dict_new ();
+        if (!ctx->dict) {
+                ret = -1;
+                goto out;
+        }
+
+        ret = dict_unserialize (lock_req.dict.dict_val,
+                                lock_req.dict.dict_len, &ctx->dict);
+        if (ret) {
+                gf_log (this->name, GF_LOG_WARNING,
+                        "failed to unserialize the dictionary");
+                goto out;
+        }
+
+        glusterd_txn_opinfo_init (&txn_op_info, NULL, &lock_req.op,
+                                  ctx->dict, req);
+
+        ret = glusterd_set_txn_opinfo (&lock_req.txn_id, &txn_op_info);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Unable to set transaction's opinfo");
+                goto out;
+        }
+
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK,
+                                           &lock_req.txn_id, ctx);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to inject event GD_OP_EVENT_LOCK");
+
+out:
+        if (ret) {
+                if (ctx->dict)
+                        dict_destroy (ctx->dict);
+                if (ctx)
+                        GF_FREE (ctx);
+        }
+
+        glusterd_friend_sm ();
+        glusterd_op_sm ();
+
+        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int
+glusterd_handle_volume_lock (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            glusterd_handle_volume_lock_fn);
+}
+
+static int
+glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
+{
+        gd1_mgmt_volume_unlock_req        lock_req = {{0},};
+        int32_t                           ret      = -1;
+        glusterd_op_lock_ctx_t           *ctx      = NULL;
+        glusterd_peerinfo_t              *peerinfo = NULL;
+        xlator_t                         *this     = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        GF_ASSERT (req);
+
+        ret = xdr_to_generic (req->msg[0], &lock_req,
+                              (xdrproc_t)xdr_gd1_mgmt_volume_unlock_req);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
+                        "request received from peer");
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
+                "from uuid: %s", uuid_utoa (lock_req.uuid));
+
+        if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+                        "belong to the cluster. Ignoring request.",
+                        uuid_utoa (lock_req.uuid));
+                ret = -1;
+                goto out;
+        }
+
+        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+        if (!ctx) {
+                ret = -1;
+                goto out;
+        }
+
+        uuid_copy (ctx->uuid, lock_req.uuid);
+        ctx->req = req;
+
+        ctx->dict = dict_new ();
+        if (!ctx->dict) {
+                ret = -1;
+                goto out;
+        }
+
+        ret = dict_unserialize (lock_req.dict.dict_val,
+                                lock_req.dict.dict_len, &ctx->dict);
+        if (ret) {
+                gf_log (this->name, GF_LOG_WARNING,
+                        "failed to unserialize the dictionary");
+                goto out;
+        }
+
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK,
+                                           &lock_req.txn_id, ctx);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to inject event GD_OP_EVENT_UNLOCK");
+
+out:
+        if (ret) {
+                if (ctx->dict)
+                        dict_destroy (ctx->dict);
+                if (ctx)
+                        GF_FREE (ctx);
+        }
+
+        glusterd_friend_sm ();
+        glusterd_op_sm ();
+
+        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
+int
+glusterd_handle_volume_unlock (rpcsvc_request_t *req)
+{
+        return glusterd_big_locked_handler (req,
+                                            glusterd_handle_volume_unlock_fn);
+}
+
 int
 glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
                          glusterd_op_t op, uuid_t uuid,
@@ -755,6 +1057,9 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
         gd1_mgmt_stage_op_req           op_req = {{0},};
         glusterd_peerinfo_t             *peerinfo = NULL;
         xlator_t                        *this = NULL;
+        uuid_t                          *txn_id = &global_txn_id;
+        glusterd_op_info_t              txn_op_info = {{0},};
+        glusterd_op_sm_state_info_t     state = {0,};

         this = THIS;
         GF_ASSERT (this);
@@ -783,7 +1088,36 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
         if (ret)
                 goto out;

-        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, req_ctx);
+        ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+        gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
+        /* In cases where there is no volname, the receivers won't have a
+         * transaction opinfo created, as for those operations, the locking
+         * phase where the transaction opinfos are created, won't be called. */
+        ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+        if (ret) {
+                gf_log (this->name, GF_LOG_DEBUG,
+                        "No transaction's opinfo set");
+
+                state.state = GD_OP_STATE_LOCKED;
+                glusterd_txn_opinfo_init (&txn_op_info, &state,
+                                          &op_req.op, req_ctx->dict, req);
+
+                ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to set transaction's opinfo");
+                        dict_unref (req_ctx->dict);
+                        goto out;
+                }
+        }
+
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
+                                           txn_id, req_ctx);
+        if (ret)
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to inject event GD_OP_EVENT_STAGE_OP");

  out:
         free (op_req.buf.buf_val);//malloced by xdr
@@ -807,6 +1141,7 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
         gd1_mgmt_commit_op_req          op_req = {{0},};
         glusterd_peerinfo_t             *peerinfo = NULL;
         xlator_t                        *this = NULL;
+        uuid_t                          *txn_id = &global_txn_id;

         this = THIS;
         GF_ASSERT (this);
@@ -837,11 +1172,12 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
         if (ret)
                 goto out;

-        ret = glusterd_op_init_ctx (op_req.op);
-        if (ret)
-                goto out;
+        ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+        gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));

-        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, req_ctx);
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
+                                           txn_id, req_ctx);

 out:
         free (op_req.buf.buf_val);//malloced by xdr
@@ -1892,6 +2228,56 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
 }

 int
+glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+                                   int32_t status)
+{
+
+        gd1_mgmt_volume_lock_rsp       rsp = {{0},};
+        int                            ret = -1;
+
+        GF_ASSERT (req);
+        GF_ASSERT (txn_id);
+        glusterd_get_uuid (&rsp.uuid);
+        rsp.op_ret = status;
+        if (rsp.op_ret)
+               rsp.op_errno = errno;
+        uuid_copy (rsp.txn_id, *txn_id);
+
+        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                                     (xdrproc_t)xdr_gd1_mgmt_volume_lock_rsp);
+
+        gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume lock, ret: %d",
+                ret);
+
+        return ret;
+}
+
+int
+glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+                                     int32_t status)
+{
+
+        gd1_mgmt_volume_unlock_rsp      rsp = {{0},};
+        int                             ret = -1;
+
+        GF_ASSERT (req);
+        GF_ASSERT (txn_id);
+        rsp.op_ret = status;
+        if (rsp.op_ret)
+               rsp.op_errno = errno;
+        glusterd_get_uuid (&rsp.uuid);
+        uuid_copy (rsp.txn_id, *txn_id);
+
+        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+                                     (xdrproc_t)xdr_gd1_mgmt_volume_unlock_rsp);
+
+        gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume unlock, ret: %d",
+                ret);
+
+        return ret;
+}
+
+int
 __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
 {
         gd1_mgmt_cluster_unlock_req     unlock_req = {{0}, };
@@ -1899,6 +2285,7 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
         glusterd_op_lock_ctx_t          *ctx = NULL;
         glusterd_peerinfo_t             *peerinfo = NULL;
         xlator_t                        *this = NULL;
+        uuid_t                          *txn_id = &global_txn_id;

         this = THIS;
         GF_ASSERT (this);
@@ -1933,8 +2320,9 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
         }
         uuid_copy (ctx->uuid, unlock_req.uuid);
         ctx->req = req;
+        ctx->dict = NULL;

-        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, ctx);
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);

 out:
         glusterd_friend_sm ();
@@ -3838,6 +4226,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
         glusterd_peerinfo_t  *peerinfo    = NULL;
         glusterd_peerctx_t   *peerctx     = NULL;
         gf_boolean_t         quorum_action = _gf_false;
+        glusterd_volinfo_t   *volinfo     = NULL;
         uuid_t               uuid;

         peerctx = mydata;
@@ -3866,6 +4255,30 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                 gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d",
                         peerinfo->state.state);

+                if (peerinfo->connected) {
+                        if (conf->op_version < GD_OP_VERSION_4) {
+                                glusterd_get_lock_owner (&uuid);
+                                if (!uuid_is_null (uuid) &&
+                                    !uuid_compare (peerinfo->uuid, uuid))
+                                        glusterd_unlock (peerinfo->uuid);
+                        } else {
+                                list_for_each_entry (volinfo, &conf->volumes,
+                                                     vol_list) {
+                                        ret = glusterd_volume_unlock
+                                                    (volinfo->volname,
+                                                     peerinfo->uuid);
+                                        if (ret)
+                                                gf_log (this->name,
+                                                        GF_LOG_TRACE,
+                                                        "Lock not released "
+                                                        "for %s",
+                                                         volinfo->volname);
+                                }
+                        }
+
+                        ret = 0;
+                }
+
                 if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
                     (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
                         peerinfo->quorum_contrib = QUORUM_DOWN;
@@ -3880,13 +4293,6 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                         glusterd_friend_remove_notify (peerctx);
                         goto out;
                 }
-                glusterd_get_lock_owner (&uuid);
-                if (!uuid_is_null (uuid) &&
-                    !uuid_compare (peerinfo->uuid, uuid)) {
-                        glusterd_unlock (peerinfo->uuid);
-                        if (opinfo.state.state != GD_OP_STATE_DEFAULT)
-                                opinfo.state.state = GD_OP_STATE_DEFAULT;
-                }

                 peerinfo->connected = 0;
                 break;
@@ -4021,3 +4427,18 @@ struct rpcsvc_program gd_svc_cli_prog_ro = {
         .actors    = gd_svc_cli_actors_ro,
 	.synctask  = _gf_true,
 };
+
+rpcsvc_actor_t gd_svc_mgmt_v3_actors[] = {
+        [GLUSTERD_MGMT_V3_NULL]          = { "NULL",       GLUSTERD_MGMT_V3_NULL,          glusterd_null,                 NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_VOLUME_LOCK]   = { "VOL_LOCK",   GLUSTERD_MGMT_V3_VOLUME_LOCK,   glusterd_handle_volume_lock,   NULL, 0, DRC_NA},
+        [GLUSTERD_MGMT_V3_VOLUME_UNLOCK] = { "VOL_UNLOCK", GLUSTERD_MGMT_V3_VOLUME_UNLOCK, glusterd_handle_volume_unlock, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_svc_mgmt_v3_prog = {
+        .progname  = "GlusterD svc mgmt v3",
+        .prognum   = GD_MGMT_PROGRAM,
+        .progver   = GD_MGMT_V3_VERSION,
+        .numactors = GLUSTERD_MGMT_V3_MAXVALUE,
+        .actors    = gd_svc_mgmt_v3_actors,
+        .synctask  = _gf_true,
+};
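
For readers skimming the diff, the sketch below is illustrative only: it is not part of the patch, and all names in it (demo_*, DEMO_OP_VERSION_4, TXN_TABLE_SIZE) are hypothetical stand-ins rather than glusterd APIs. It models the two mechanisms the change introduces: per-transaction opinfo looked up by a transaction id instead of a single global opinfo, and an op-version gate that keeps the old cluster-wide lock for clusters below GD_OP_VERSION_4, while newer clusters take a per-volume lock or skip locking entirely when no volume name is present.

/* Minimal, self-contained sketch of the patch's two ideas, under the
 * assumptions stated above. Compile with any C99 compiler. */

#include <stdio.h>

#define DEMO_OP_VERSION_4 4
#define TXN_TABLE_SIZE    8

struct demo_txn_opinfo {
        char id[64];   /* transaction id (uuid string) keying the entry */
        int  state;    /* per-transaction state machine state           */
};

/* tiny stand-in for the txn-id -> opinfo map kept by the daemon */
static struct demo_txn_opinfo txn_table[TXN_TABLE_SIZE];

static int
demo_set_txn_opinfo (const char *txn_id, int state)
{
        for (int i = 0; i < TXN_TABLE_SIZE; i++) {
                if (txn_table[i].id[0] == '\0') {
                        snprintf (txn_table[i].id, sizeof (txn_table[i].id),
                                  "%s", txn_id);
                        txn_table[i].state = state;
                        return 0;
                }
        }
        return -1;   /* table full */
}

/* op-version gate: older clusters take one cluster-wide lock, newer ones
 * take a per-volume lock, and skip locking when no volname is present */
static int
demo_take_lock (int op_version, const char *volname)
{
        if (op_version < DEMO_OP_VERSION_4) {
                printf ("acquiring cluster-wide lock\n");
                return 0;
        }
        if (!volname) {
                printf ("no volume name: locks not being held\n");
                return 0;
        }
        printf ("acquiring volume lock on %s\n", volname);
        return 0;
}

int
main (void)
{
        demo_take_lock (3, "vol0");                /* cluster lock       */
        demo_take_lock (4, "vol0");                /* per-volume lock    */
        demo_take_lock (4, NULL);                  /* lockless path      */
        demo_set_txn_opinfo ("demo-txn-0001", 0);  /* opinfo keyed by id */
        return 0;
}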
