| author | Avra Sengupta <asengupt@redhat.com> | 2013-09-15 17:55:31 +0530 |
|---|---|---|
| committer | Anand Avati <avati@redhat.com> | 2013-09-20 11:48:48 -0700 |
| commit | 78b0b59285b03af65c10a1fd976836bc5f53c167 | |
| tree | 71aeded1d140b8a3b03d374f033c7e7ad11d8dab | |
| parent | fe16eaa5104cf015461c7b4b8f0d97e613344b1e | |
glusterd: Adding transaction checks for cluster unlock.
While a gluster command holding the lock is in execution,
any other gluster command which tries to run will fail to
acquire the lock. As a result, command#2 will follow the
cleanup code flow, which also includes unlocking the held
locks. As both commands are run from the same node,
command#2 will end up releasing the locks held by command#1
even before command#1 reaches completion.
Now the unlock routine in that code path is called only if
the cluster has been locked during the same transaction.
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Change-Id: I7b7aa4d4c7e565e982b75b8ed1e550fca528c834
BUG: 1008172
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/5937
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-by: Anand Avati <avati@redhat.com>
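
The gist of the fix is a local flag that records whether this transaction actually acquired the cluster lock, so that the shared cleanup path releases the lock only when it owns it. Below is a minimal, self-contained sketch of that pattern; `try_lock`, `unlock`, `run_command` and `cluster_lock_holder` are illustrative stand-ins, not glusterd APIs:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the cluster-wide lock that
 * glusterd_lock()/glusterd_unlock() manage; "" means not held. */
static char cluster_lock_holder[64] = "";

static bool
try_lock (const char *uuid)
{
        if (cluster_lock_holder[0] != '\0')
                return false;   /* already held by another command */
        snprintf (cluster_lock_holder, sizeof (cluster_lock_holder),
                  "%s", uuid);
        return true;
}

static void
unlock (void)
{
        cluster_lock_holder[0] = '\0';
}

/* Mirrors the shape of gd_sync_task_begin(): success and failure share
 * one cleanup path, so a local is_locked flag decides whether this
 * transaction may release the lock there. */
static void
run_command (const char *name, const char *uuid)
{
        bool is_locked = false;

        if (!try_lock (uuid)) {
                printf ("%s: failed to acquire cluster lock\n", name);
                goto out;
        }
        is_locked = true;

        printf ("%s: lock held, running the operation\n", name);

out:
        /* Without this check, a command that never got the lock would
         * release the lock held by the other command here. */
        if (is_locked)
                unlock ();
}

int
main (void)
{
        /* command#1 takes the cluster lock and is still running ... */
        if (!try_lock ("uuid-of-command-1"))
                return 1;

        /* ... when command#2 arrives, fails to acquire the lock, and
         * goes through its cleanup path without touching it. */
        run_command ("command#2", "uuid-of-command-2");

        printf ("lock still held by: %s\n", cluster_lock_holder);

        unlock ();              /* command#1 completes and unlocks */
        return 0;
}
```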
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c | 15 |

1 file changed, 12 insertions(+), 3 deletions(-)
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index a694cae84..a854e0530 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1018,7 +1018,7 @@ out:
 int
 gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
                     rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
-                    int npeers)
+                    int npeers, gf_boolean_t is_locked)
 {
         glusterd_peerinfo_t *peerinfo   = NULL;
         glusterd_peerinfo_t *tmp        = NULL;
@@ -1033,6 +1033,11 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
                 goto out;
         }
 
+        /* If the lock has not been held during this
+         * transaction, do not send unlock requests */
+        if (!is_locked)
+                goto out;
+
         this = THIS;
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -1056,7 +1061,8 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
 out:
         glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
         glusterd_op_clear_op (op);
-        glusterd_unlock (MY_UUID);
+        if (is_locked)
+                glusterd_unlock (MY_UUID);
 
         return 0;
 }
@@ -1153,6 +1159,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         int32_t                     tmp_op          = 0;
         char                        *op_errstr      = NULL;
         xlator_t                    *this           = NULL;
+        gf_boolean_t                is_locked       = _gf_false;
 
         this = THIS;
         GF_ASSERT (this);
@@ -1175,6 +1182,8 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
                 goto out;
         }
 
+        is_locked = _gf_true;
+
         /* storing op globally to access in synctask code paths
          * This is still acceptable, as we are performing this under
          * the 'cluster' lock*/
@@ -1212,7 +1221,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         ret = 0;
 out:
         (void) gd_unlock_op_phase (&conf->xaction_peers, op, ret, req,
-                                   op_ctx, op_errstr, npeers);
+                                   op_ctx, op_errstr, npeers, is_locked);
 
         if (req_dict)
                 dict_unref (req_dict);
```
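
Note that the shared `out:` label in `gd_unlock_op_phase ()` still sends the CLI response and clears the op even when `is_locked` is false; only the unlock requests to peers and the local `glusterd_unlock (MY_UUID)` call are skipped, so a command that never obtained the lock still reports its failure back to the CLI.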
