-rw-r--r--   cli/src/cli-rpc-ops.c                          |  20
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-messages.h  |  11
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.c     |  67
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.h     |   2
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-rpc-ops.c   | 172

5 files changed, 198 insertions, 74 deletions
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 5fd465fdf2f..e9db57259b2 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -1508,14 +1508,18 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
                 if (rsp.op_ret && strcmp (rsp.op_errstr, "")) {
                         snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
                 } else {
-                        if (!rsp.op_ret) {
+                         if (!rsp.op_ret) {
+                                /* append errstr in the cli msg for successful
+                                 * case since unlock failures can be highlighted
+                                 * event though rebalance command was successful
+                                 */
                                 snprintf (msg, sizeof (msg),
                                           "Rebalance on %s has been started "
                                           "successfully. Use rebalance status "
                                           "command to check status of the "
-                                          "rebalance process.\nID: %s",
-                                          volname, task_id_str);
-                        } else {
+                                          "rebalance process.\nID: %s\n%s",
+                                          volname, task_id_str, rsp.op_errstr);
+                         } else {
                                 snprintf (msg, sizeof (msg),
                                           "Starting rebalance on volume %s has "
                                           "been unsuccessful.", volname);
@@ -1535,13 +1539,17 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
                                           volname);
                         goto done;
                 } else {
+                        /* append errstr in the cli msg for successful case
+                         * since unlock failures can be highlighted event though
+                         * rebalance command was successful */
                         snprintf (msg, sizeof (msg),
                                   "rebalance process may be in the middle of a "
                                   "file migration.\nThe process will be fully "
                                   "stopped once the migration of the file is "
                                   "complete.\nPlease check rebalance process "
                                   "for completion before doing any further "
-                                  "brick related tasks on the volume.");
+                                  "brick related tasks on the volume.\n%s",
+                                  rsp.op_errstr);
                 }
         }
         if (cmd == GF_DEFRAG_CMD_STATUS) {
@@ -1554,6 +1562,8 @@ gf_cli_defrag_volume_cbk (struct rpc_req *req, struct iovec *iov,
                                           "Failed to get the status of "
                                           "rebalance process");
                         goto done;
+                } else {
+                        snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
                 }
         }
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 604743ef5e7..b4f8585097a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -45,7 +45,7 @@
  */
 
 #define GLUSTERD_COMP_BASE      GLFS_MSGID_GLUSTERD
-#define GLFS_NUM_MESSAGES       19
+#define GLFS_NUM_MESSAGES       20
 #define GLFS_MSGID_END          (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
 /* Messaged with message IDs */
 #define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages"
@@ -206,6 +206,15 @@
  *                    failure to set default options
  */
 #define GD_MSG_FAIL_DEFAULT_OPT_SET (GLUSTERD_COMP_BASE + 19)
+
+/*!
+ * @messageid 106020
+ * @diagnosis Failed to release cluster wide lock for one of the peer
+ * @recommendedaction Restart the glusterd service on the node where the command
+ * was issued
+ */
+#define GD_MSG_CLUSTER_UNLOCK_FAILED (GLUSTERD_COMP_BASE + 20)
+
 /*------------*/
 
 #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index f39c0ea555f..d68901ee4e9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2796,6 +2796,20 @@ glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
 }
 
 static int
+glusterd_op_sm_locking_failed (uuid_t *txn_id)
+{
+        int ret = -1;
+
+        opinfo.op_ret = -1;
+        opinfo.op_errstr = gf_strdup ("locking failed for one of the peer.");
+
+        /* Inject a reject event such that unlocking gets triggered right away*/
+        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, txn_id, NULL);
+
+        return ret;
+}
+
+static int
 glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
 {
         int                   ret      = 0;
@@ -2832,8 +2846,10 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
                                                 "peer %s",
                                                 gd_op_list[opinfo.op],
                                                 peerinfo->hostname);
-                                        continue;
+                                        goto out;
                                 }
+                                /* Mark the peer as locked*/
+                                peerinfo->locked = _gf_true;
                                 pending_count++;
                         }
                 } else {
@@ -2861,8 +2877,10 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
                                                gd_op_list[opinfo.op],
                                                peerinfo->hostname);
                                         dict_unref (dict);
-                                        continue;
+                                        goto out;
                                 }
+                                /* Mark the peer as locked*/
+                                peerinfo->locked = _gf_true;
                                 pending_count++;
                         }
                 }
@@ -2873,6 +2891,9 @@
                 ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
 
 out:
+        if (ret)
+                ret = glusterd_op_sm_locking_failed (&event->txn_id);
+
         gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
         return ret;
 }
@@ -2895,12 +2916,12 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
         list_for_each_entry (peerinfo, &priv->xaction_peers, op_peers_list) {
                 GF_ASSERT (peerinfo);
 
-                if (!peerinfo->connected || !peerinfo->mgmt)
+                if (!peerinfo->connected || !peerinfo->mgmt ||
+                    !peerinfo->locked)
                         continue;
                 if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
                     (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
                         continue;
-
                 /* Based on the op_version,
                  * release the cluster or mgmt_v3 lock */
                 if (priv->op_version < GD_OP_VERSION_3_6_0) {
@@ -2909,15 +2930,19 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
                         if (proc->fn) {
                                 ret = proc->fn (NULL, this, peerinfo);
                                 if (ret) {
-                                        gf_log (this->name, GF_LOG_WARNING,
-                                                "Failed to send unlock request "
-                                                "for operation 'Volume %s' to "
-                                                "peer %s",
+                                        opinfo.op_errstr = gf_strdup
+                                               ("Unlocking failed for one of "
+                                                "the peer.");
+                                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                                GD_MSG_CLUSTER_UNLOCK_FAILED,
+                                                "Unlocking failed for operation"
+                                                " volume %s on peer %s",
                                                 gd_op_list[opinfo.op],
                                                 peerinfo->hostname);
                                         continue;
                                 }
                                 pending_count++;
+                                peerinfo->locked = _gf_false;
                         }
                 } else {
                         dict = glusterd_op_get_ctx ();
@@ -2929,24 +2954,35 @@
                                 ret = dict_set_static_ptr (dict, "peerinfo",
                                                            peerinfo);
                                 if (ret) {
-                                        gf_log (this->name, GF_LOG_ERROR,
-                                                "failed to set peerinfo");
+                                        opinfo.op_errstr = gf_strdup
+                                          ("Unlocking failed for one of the "
+                                           "peer.");
+                                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                                GD_MSG_CLUSTER_UNLOCK_FAILED,
+                                                "Unlocking failed for operation"
+                                                " volume %s on peer %s",
+                                                gd_op_list[opinfo.op],
+                                                peerinfo->hostname);
                                         dict_unref (dict);
-                                        goto out;
+                                        continue;
                                 }
 
                                 ret = proc->fn (NULL, this, dict);
                                 if (ret) {
-                                        gf_log (this->name, GF_LOG_WARNING,
-                                                "Failed to send volume unlock "
-                                                "request for operation "
-                                                "'Volume %s' to peer %s",
+                                        opinfo.op_errstr = gf_strdup
+                                          ("Unlocking failed for one of the "
+                                           "peer.");
+                                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                                GD_MSG_CLUSTER_UNLOCK_FAILED,
+                                                "Unlocking failed for operation"
+                                                " volume %s on peer %s",
                                                 gd_op_list[opinfo.op],
                                                 peerinfo->hostname);
                                         dict_unref (dict);
                                         continue;
                                 }
                                 pending_count++;
+                                peerinfo->locked = _gf_false;
                         }
                 }
         }
@@ -2955,7 +2991,6 @@
         if (!opinfo.pending_count)
                 ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
 
-out:
         gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
         return ret;
 }
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 229ee469598..88fe9ef4c04 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -311,4 +311,6 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id);
 
 int32_t
 glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id);
+void
+glusterd_set_opinfo (char *errstr, int32_t op_errno, int32_t op_ret);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 19b66ac06d8..ec2d850094a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -26,6 +26,7 @@
 #include "protocol-common.h"
 #include "glusterd-utils.h"
 #include "common-utils.h"
+#include "glusterd-messages.h"
 
 #include <sys/uio.h>
 
@@ -656,6 +657,7 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
         xlator_t                      *this = NULL;
         uuid_t                        *txn_id = NULL;
         glusterd_conf_t               *priv = NULL;
+        char                          *err_str = NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -666,21 +668,26 @@
         txn_id = &priv->global_txn_id;
 
         if (-1 == req->rpc_status) {
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                gf_log (this->name, GF_LOG_ERROR, "Lock response is not "
+                        "received from one of the peer");
+                err_str = "Lock response is not received from one of the peer";
+                glusterd_set_opinfo (err_str, ENETRESET, -1);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
-        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
+        ret = xdr_to_generic (*iov, &rsp,
+                              (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
         if (ret < 0) {
-                gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
-                        "response received from peer");
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode "
+                        "cluster lock response received from peer");
+                err_str = "Failed to decode cluster lock response received from"
+                          " peer";
+                glusterd_set_opinfo (err_str, EINVAL, -1);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
-out:
         op_ret = rsp.op_ret;
 
         gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
@@ -689,9 +696,12 @@
 
         peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
         if (peerinfo == NULL) {
-                ret = -1;
-                gf_log (this->name, GF_LOG_CRITICAL, "Lock response received "
-                        "from unknown peer: %s", uuid_utoa (rsp.uuid));
+                gf_log (this->name, GF_LOG_CRITICAL,
+                        "cluster lock response received from unknown peer: %s."
+                        "Ignoring response", uuid_utoa (rsp.uuid));
+                err_str = "cluster lock response received from unknown peer";
+                goto out;
+
         }
 
         if (op_ret) {
@@ -704,6 +714,7 @@
                 event_type = GD_OP_EVENT_RCVD_ACC;
         }
 
+out:
         ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
 
         if (!ret) {
@@ -723,9 +734,17 @@ glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
                                         __glusterd_cluster_lock_cbk);
 }
 
+void
+glusterd_set_opinfo (char *errstr, int32_t op_errno, int32_t op_ret)
+{
+        opinfo.op_errstr = gf_strdup (errstr);
+        opinfo.op_errno = op_errno;
+        opinfo.op_ret = op_ret;
+}
+
 static int32_t
 glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
-                            int count, void *myframe)
+                                    int count, void *myframe)
 {
         gd1_mgmt_v3_lock_rsp          rsp   = {{0},};
         int                           ret   = -1;
@@ -733,26 +752,36 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
         glusterd_op_sm_event_type_t   event_type = GD_OP_EVENT_NONE;
         glusterd_peerinfo_t           *peerinfo = NULL;
         xlator_t                      *this = NULL;
+        call_frame_t                  *frame  = NULL;
         uuid_t                        *txn_id = NULL;
+        char                          *err_str = NULL;
 
         this = THIS;
         GF_ASSERT (this);
         GF_ASSERT (req);
 
+        frame = myframe;
+        txn_id = frame->cookie;
+        frame->cookie = NULL;
+
         if (-1 == req->rpc_status) {
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                gf_log (this->name, GF_LOG_ERROR, "Lock response is not "
+                        "received from one of the peer");
+                err_str = "Lock response is not received from one of the peer";
+                glusterd_set_opinfo (err_str, ENETRESET, -1);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
         ret = xdr_to_generic (*iov, &rsp,
                               (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
         if (ret < 0) {
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Failed to decode mgmt_v3 lock "
-                        "response received from peer");
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                gf_log (this->name, GF_LOG_ERROR, "Failed to decode "
+                        "mgmt_v3 lock response received from peer");
+                err_str = "Failed to decode mgmt_v3 lock response received from"
+                          " peer";
+                glusterd_set_opinfo (err_str, EINVAL, -1);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
@@ -766,7 +795,6 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
 
         peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
         if (peerinfo == NULL) {
-                ret = -1;
                 gf_log (this->name, GF_LOG_CRITICAL,
                         "mgmt_v3 lock response received "
                         "from unknown peer: %s. Ignoring response",
@@ -784,15 +812,15 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
                 event_type = GD_OP_EVENT_RCVD_ACC;
         }
 
+out:
         ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
-
         if (!ret) {
                 glusterd_friend_sm ();
                 glusterd_op_sm ();
         }
 
-out:
-        GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+        GF_FREE (frame->cookie);
+        GLUSTERD_STACK_DESTROY (frame);
 
         return ret;
 }
@@ -814,26 +842,39 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
         glusterd_op_sm_event_type_t   event_type = GD_OP_EVENT_NONE;
         glusterd_peerinfo_t           *peerinfo = NULL;
         xlator_t                      *this = NULL;
+        call_frame_t                  *frame = NULL;
         uuid_t                        *txn_id = NULL;
+        char                          *err_str = NULL;
 
         this = THIS;
         GF_ASSERT (this);
         GF_ASSERT (req);
 
+        frame = myframe;
+        txn_id = frame->cookie;
+        frame->cookie = NULL;
+
         if (-1 == req->rpc_status) {
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                err_str = "Unlock response not received from one of the peer.";
+                gf_msg (this->name, GF_LOG_ERROR, 0,
+                        GD_MSG_CLUSTER_UNLOCK_FAILED,
+                        "UnLock response is not received from one of the peer");
+                glusterd_set_opinfo (err_str, 0, 0);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
         ret = xdr_to_generic (*iov, &rsp,
                               (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
         if (ret < 0) {
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Failed to decode mgmt_v3 unlock "
-                        "response received from peer");
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                gf_msg (this->name, GF_LOG_ERROR, 0,
+                        GD_MSG_CLUSTER_UNLOCK_FAILED,
+                        "Failed to decode mgmt_v3 unlock response received from"
+                        "peer");
+                err_str = "Failed to decode mgmt_v3 unlock response received "
+                          "from peer";
+                glusterd_set_opinfo (err_str, 0, 0);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
@@ -848,8 +889,8 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
 
         peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
         if (peerinfo == NULL) {
-                ret = -1;
-                gf_log (this->name, GF_LOG_CRITICAL,
+                gf_msg (this->name, GF_LOG_CRITICAL, 0,
+                        GD_MSG_CLUSTER_UNLOCK_FAILED,
                         "mgmt_v3 unlock response received "
                         "from unknown peer: %s. Ignoring response",
                         uuid_utoa (rsp.uuid));
@@ -866,6 +907,7 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
                 event_type = GD_OP_EVENT_RCVD_ACC;
         }
 
+out:
         ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
 
         if (!ret) {
@@ -873,8 +915,8 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
                 glusterd_op_sm ();
         }
 
-out:
-        GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+        GF_FREE (frame->cookie);
+        GLUSTERD_STACK_DESTROY (frame);
         return ret;
 }
@@ -898,6 +940,7 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
         xlator_t                      *this = NULL;
         uuid_t                        *txn_id = NULL;
         glusterd_conf_t               *priv = NULL;
+        char                          *err_str = NULL;
 
         this = THIS;
         GF_ASSERT (this);
@@ -908,21 +951,28 @@
         txn_id = &priv->global_txn_id;
 
         if (-1 == req->rpc_status) {
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                err_str = "Unlock response not received from one of the peer.";
+                gf_msg (this->name, GF_LOG_ERROR, 0,
+                        GD_MSG_CLUSTER_UNLOCK_FAILED,
+                        "UnLock response is not received from one of the peer");
+                glusterd_set_opinfo (err_str, 0, 0);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
-        ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
+        ret = xdr_to_generic (*iov, &rsp,
+                              (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
         if (ret < 0) {
-                gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
-                        "response received from peer");
-                rsp.op_ret   = -1;
-                rsp.op_errno = EINVAL;
+                gf_msg (this->name, GF_LOG_ERROR, 0,
+                        GD_MSG_CLUSTER_UNLOCK_FAILED,
+                        "Failed to decode unlock response received from peer");
+                err_str = "Failed to decode cluster unlock response received "
+                          "from peer";
+                glusterd_set_opinfo (err_str, 0, 0);
+                event_type = GD_OP_EVENT_RCVD_RJT;
                 goto out;
         }
 
-out:
         op_ret = rsp.op_ret;
 
         gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
@@ -931,8 +981,11 @@
 
         peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
         if (peerinfo == NULL) {
-                gf_log (this->name, GF_LOG_CRITICAL, "Unlock response received "
-                        "from unknown peer %s", uuid_utoa (rsp.uuid));
+                gf_msg (this->name, GF_LOG_CRITICAL, 0,
+                        GD_MSG_CLUSTER_UNLOCK_FAILED,
+                        "Unlock response received from unknown peer %s",
+                        uuid_utoa (rsp.uuid));
+                goto out;
         }
 
         if (op_ret) {
@@ -942,6 +995,7 @@
                 event_type = GD_OP_EVENT_RCVD_ACC;
         }
 
+out:
         ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
 
         if (!ret) {
@@ -1516,7 +1570,6 @@ glusterd_mgmt_v3_lock_peers (call_frame_t *frame, xlator_t *this,
         int                              ret         = -1;
         glusterd_peerinfo_t             *peerinfo    = NULL;
         glusterd_conf_t                 *priv        = NULL;
-        call_frame_t                    *dummy_frame = NULL;
         dict_t                          *dict        = NULL;
         uuid_t                          *txn_id      = NULL;
 
@@ -1558,13 +1611,21 @@
                 uuid_copy (req.txn_id, *txn_id);
         }
 
-        dummy_frame = create_frame (this, this->ctx->pool);
-        if (!dummy_frame) {
+        if (!frame)
+                frame = create_frame (this, this->ctx->pool);
+
+        if (!frame) {
                 ret = -1;
                 goto out;
         }
+        frame->cookie = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+        if (!frame->cookie) {
+                ret = -1;
+                goto out;
+        }
+        uuid_copy (frame->cookie, req.txn_id);
 
-        ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+        ret = glusterd_submit_request (peerinfo->rpc, &req, frame,
                                        peerinfo->mgmt_v3,
                                        GLUSTERD_MGMT_V3_LOCK, NULL,
                                        this, glusterd_mgmt_v3_lock_peers_cbk,
@@ -1582,7 +1643,6 @@ glusterd_mgmt_v3_unlock_peers (call_frame_t *frame, xlator_t *this,
         int                              ret         = -1;
         glusterd_peerinfo_t             *peerinfo    = NULL;
         glusterd_conf_t                 *priv        = NULL;
-        call_frame_t                    *dummy_frame = NULL;
         dict_t                          *dict        = NULL;
         uuid_t                          *txn_id      = NULL;
 
@@ -1624,13 +1684,21 @@
                 uuid_copy (req.txn_id, *txn_id);
         }
 
-        dummy_frame = create_frame (this, this->ctx->pool);
-        if (!dummy_frame) {
+        if (!frame)
+                frame = create_frame (this, this->ctx->pool);
+
+        if (!frame) {
                 ret = -1;
                 goto out;
        }
+        frame->cookie = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+        if (!frame->cookie) {
+                ret = -1;
+                goto out;
+        }
+        uuid_copy (frame->cookie, req.txn_id);
 
-        ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+        ret = glusterd_submit_request (peerinfo->rpc, &req, frame,
                                        peerinfo->mgmt_v3,
                                        GLUSTERD_MGMT_V3_UNLOCK, NULL,
                                        this, glusterd_mgmt_v3_unlock_peers_cbk,
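The common thread in the callback changes above is a single error-propagation pattern: on any RPC failure the handler records the error with the new glusterd_set_opinfo() helper, selects GD_OP_EVENT_RCVD_RJT instead of silently returning, and falls through to the one injection point at out:, so the transaction state machine always advances and the unlock path can surface the failure to the CLI. The transaction id travels to the callback in frame->cookie, allocated by the sender. Below is a condensed, hypothetical sketch of that flow; example_lock_cbk and the elided decode step are illustrative only and are not code from the patch, while glusterd_set_opinfo(), glusterd_op_sm_inject_event() and the frame->cookie hand-off are taken from it.

/* Hypothetical callback skeleton showing the error path the patch adds to
 * the lock/unlock cbk functions. Assumes it is built inside the glusterd
 * source tree so the usual glusterd types and macros are available. */
#include <errno.h>

#include "glusterd-op-sm.h"        /* opinfo, event types, inject_event */
#include "glusterd-messages.h"     /* GD_MSG_CLUSTER_UNLOCK_FAILED */

static int32_t
example_lock_cbk (struct rpc_req *req, struct iovec *iov, int count,
                  void *myframe)
{
        int32_t                       ret        = -1;
        call_frame_t                 *frame      = myframe;
        uuid_t                       *txn_id     = frame->cookie; /* set by sender */
        glusterd_op_sm_event_type_t   event_type = GD_OP_EVENT_NONE;

        if (-1 == req->rpc_status) {
                /* Record the failure so the CLI can show it, then reject */
                glusterd_set_opinfo ("Lock response is not received from "
                                     "one of the peer", ENETRESET, -1);
                event_type = GD_OP_EVENT_RCVD_RJT;
                goto out;
        }

        /* ... decode the response here and pick RCVD_ACC or RCVD_RJT ... */
        event_type = GD_OP_EVENT_RCVD_ACC;

out:
        /* Always inject, so unlock and cleanup run even on failure */
        ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
        if (!ret) {
                glusterd_friend_sm ();
                glusterd_op_sm ();
        }

        GF_FREE (frame->cookie);
        GLUSTERD_STACK_DESTROY (frame);
        return ret;
}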
