author     Pranith Kumar K <pkarampu@redhat.com>     2015-07-15 06:16:54 +0530
committer  Xavier Hernandez <xhernandez@datalab.es>  2015-07-23 09:36:46 -0700
commit     75d50eaba3fd7d24874ba8acc9a776c863a932e2 (patch)
tree       84bbb2ca0391034b037c9349af294b4060cbc0f9 /xlators/cluster/ec
parent     2815a8c778fb6c17c6dfe908a82e840687c29a98 (diff)
cluster/ec: Handle race between unlock-timer, new lock
Problem: A new lock request can arrive while the timer is already on its way
to unlock. This was leading to a crash in the timer thread, because the thread
executing the new lock can free timer_link->fop, after which the timer thread
accesses structures that have already been freed.

Fix: If the timer event has already fired, set lock->release to true and wait
for the unlock to complete.

Thanks to Xavi and Bhaskar for helping confirm that this race is the root
cause. Thanks to Kritika for pointing out and explaining how Avati's patch can
be used to fix this bug.

> Change-Id: I45fa5470bbc1f03b5f3d133e26d1e0ab24303378
> BUG: 1243187
> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
> Reviewed-on: http://review.gluster.org/11670
> Tested-by: Gluster Build System <jenkins@build.gluster.com>
> Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
> Tested-by: NetBSD Build System <jenkins@build.gluster.org>

Change-Id: I9af012e717493684b7cd7d1c63baf2fa401fb542
BUG: 1246121
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/11752
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
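For readers who want the pattern in isolation, the following is a minimal,
self-contained C sketch of the cancel-or-freeze logic that the new ec_lock()
hunk below applies. All names here (sketch_lock_t, sketch_timer_cancel,
sketch_acquire) are hypothetical stand-ins, not the real GlusterFS structures;
only the control flow, the "== 0 means cancelled" check, and the lock->release
flag mirror the actual change.

/* Hypothetical sketch of the cancel-or-freeze pattern used by the fix. */
typedef struct {
    void *timer;    /* pending delayed-unlock timer, or NULL          */
    int   refs;     /* references held by users of the lock           */
    int   release;  /* when set, the lock is frozen and must drain    */
} sketch_lock_t;

/* Stand-in for the timer-cancel call; returns 0 only if the timer was
 * cancelled before it fired (as the patch assumes for
 * gf_timer_call_cancel()). */
extern int sketch_timer_cancel(void *timer);

static void sketch_acquire(sketch_lock_t *lock)
{
    if (lock->timer != NULL) {
        if (sketch_timer_cancel(lock->timer) == 0) {
            /* Timer cancelled in time: reuse the reference it held. */
            lock->timer = NULL;
            lock->refs--;
        } else {
            /* Timer already fired: the delayed unlock is in flight and
             * owns its fop, so nothing it references may be freed or
             * reused here. Mark the lock for release instead, so this
             * request waits until the unlock completes. */
            lock->release = 1;
        }
    }
}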
Diffstat (limited to 'xlators/cluster/ec')
-rw-r--r--  xlators/cluster/ec/src/ec-common.c  33
-rw-r--r--  xlators/cluster/ec/src/ec-common.h   1
-rw-r--r--  xlators/cluster/ec/src/ec.c          33
3 files changed, 17 insertions(+), 50 deletions(-)
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 18770f259a4..e67b304002d 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -1361,13 +1361,20 @@ void ec_lock(ec_fop_data_t *fop)
if (lock->timer != NULL) {
GF_ASSERT (lock->release == _gf_false);
timer_link = lock->timer->data;
- ec_trace("UNLOCK_CANCELLED", timer_link->fop, "lock=%p", lock);
- gf_timer_call_cancel(fop->xl->ctx, lock->timer);
- lock->timer = NULL;
-
- lock->refs--;
- /* There should remain at least 1 ref, the current one. */
- GF_ASSERT(lock->refs > 0);
+ if (gf_timer_call_cancel(fop->xl->ctx, lock->timer) == 0) {
+ ec_trace("UNLOCK_CANCELLED", timer_link->fop,
+ "lock=%p", lock);
+ lock->timer = NULL;
+ lock->refs--;
+ /* There should remain at least 1 ref, the current one. */
+ GF_ASSERT(lock->refs > 0);
+ } else {
+ /* Timer expired and on the way to unlock.
+ * Set lock->release to _gf_true, so that this
+ * lock will be put in frozen list*/
+ timer_link = NULL;
+ lock->release = _gf_true;
+ }
}
GF_ASSERT(list_empty(&link->wait_list));
@@ -1818,18 +1825,6 @@ void ec_unlock(ec_fop_data_t *fop)
}
}
-void
-ec_unlock_force(ec_fop_data_t *fop)
-{
- int32_t i;
-
- for (i = 0; i < fop->lock_count; i++) {
- ec_trace("UNLOCK_FORCED", fop, "lock=%p", &fop->locks[i]);
-
- ec_unlock_timer_del(&fop->locks[i]);
- }
-}
-
void ec_flush_size_version(ec_fop_data_t *fop)
{
GF_ASSERT(fop->lock_count == 1);
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index 1d78f132a94..41e10e2f16f 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -91,7 +91,6 @@ void ec_lock_prepare_fd(ec_fop_data_t *fop, fd_t *fd, uint32_t flags);
void ec_lock(ec_fop_data_t * fop);
void ec_lock_reuse(ec_fop_data_t *fop);
void ec_unlock(ec_fop_data_t * fop);
-void ec_unlock_force(ec_fop_data_t *fop);
gf_boolean_t ec_get_inode_size(ec_fop_data_t *fop, inode_t *inode,
uint64_t *size);
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index e28f402e6fe..29ff09adf39 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -395,38 +395,11 @@ ec_handle_down (xlator_t *this, ec_t *ec, int32_t idx)
}
gf_boolean_t
-ec_force_unlocks(ec_t *ec)
+ec_disable_delays(ec_t *ec)
{
- struct list_head list;
- ec_fop_data_t *fop;
-
- if (list_empty(&ec->pending_fops)) {
- return _gf_true;
- }
-
- INIT_LIST_HEAD(&list);
-
- /* All pending fops when GF_EVENT_PARENT_DOWN is received should only
- * be fops waiting for a delayed unlock. However the unlock can
- * generate new fops. We don't want to trverse these new fops while
- * forcing unlocks, so we move all fops to a temporal list. To process
- * them without interferences.*/
- list_splice_init(&ec->pending_fops, &list);
-
- while (!list_empty(&list)) {
- fop = list_entry(list.next, ec_fop_data_t, pending_list);
- list_move_tail(&fop->pending_list, &ec->pending_fops);
-
- UNLOCK(&ec->lock);
-
- ec_unlock_force(fop);
-
- LOCK(&ec->lock);
- }
-
ec->shutdown = _gf_true;
- return list_empty(&ec->pending_fops);
+ return list_empty (&ec->pending_fops);
}
void
@@ -482,7 +455,7 @@ ec_notify (xlator_t *this, int32_t event, void *data, void *data2)
} else if (event == GF_EVENT_PARENT_DOWN) {
/* If there aren't pending fops running after we have waken up
* them, we immediately propagate the notification. */
- propagate = ec_force_unlocks(ec);
+ propagate = ec_disable_delays(ec);
goto unlock;
}