summaryrefslogtreecommitdiffstats
path: root/libglusterfs/src/syncop.c
diff options
context:
space:
mode:
authorAnand Avati <avati@redhat.com>2013-03-23 13:55:09 -0700
committerAnand Avati <avati@redhat.com>2013-04-02 16:02:33 -0700
commit87300be91cb9e1cd98ac5cba8998524d95c98d12 (patch)
treed536fa1d7033a3a4b338a794837cd27b89821201 /libglusterfs/src/syncop.c
parentaf939370ad20fe1be8e52ea953996e190e86c4ee (diff)
synctask: introduce synclocks for co-operative locking
This patch introduces synclocks - co-operative locks for synctasks. Synctasks yield themselves when a lock cannot be acquired at the time of the lock call, and the unlocker will wake the yielded locker at the time of unlock. The implementation is safe in a multi-threaded syncenv framework. It is also safe for sharing the lock between non-synctasks. i.e., the same lock can be used for synchronization between a synctask and a regular thread. In such a situation, waiting synctasks will yield themselves while non-synctasks will sleep on a cond variable. The unlocker (which could be either a synctask or a regular thread) will wake up any type of lock waiter (synctask or regular). Usage: Declaration and Initialization ------------------------------ synclock_t lock; ret = synclock_init (&lock); if (ret) { /* lock could not be allocated */ } Locking and non-blocking lock attempt ------------------------------------- ret = synclock_trylock (&lock); if (ret && (errno == EBUSY)) { /* lock is held by someone else */ return; } synclock_lock (&lock); { /* critical section */ } synclock_unlock (&lock); Change-Id: I081873edb536ddde69a20f4a7dc6558ebf19f5b2 BUG: 763820 Signed-off-by: Anand Avati <avati@redhat.com> Reviewed-on: http://review.gluster.org/4717 Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com> Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Raghavendra G <raghavendra@gluster.com> Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Diffstat (limited to 'libglusterfs/src/syncop.c')
-rw-r--r--libglusterfs/src/syncop.c150
1 files changed, 149 insertions, 1 deletions
diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
index 115debbfbf7..876977e2a01 100644
--- a/libglusterfs/src/syncop.c
+++ b/libglusterfs/src/syncop.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
@@ -507,6 +507,154 @@ syncenv_new (size_t stacksize)
}
+/*
+ * Initialize a synclock: the condvar on which non-synctask waiters
+ * sleep, the waitq on which synctask waiters queue, the held flag,
+ * and the guard mutex that protects all of them.
+ *
+ * Returns -1 if @lock is NULL, otherwise the result of
+ * pthread_mutex_init() on the guard (0 on success).
+ */
+int
+synclock_init (synclock_t *lock)
+{
+ if (!lock)
+ return -1;
+
+ pthread_cond_init (&lock->cond, 0);
+ lock->lock = 0;
+ INIT_LIST_HEAD (&lock->waitq);
+
+ return pthread_mutex_init (&lock->guard, 0);
+}
+
+
+/*
+ * Tear down a synclock created by synclock_init().  The caller is
+ * responsible for ensuring no waiters remain (pthread_cond_destroy /
+ * pthread_mutex_destroy on a busy object is undefined).
+ *
+ * Returns -1 if @lock is NULL, otherwise the result of
+ * pthread_mutex_destroy() on the guard.
+ */
+int
+synclock_destroy (synclock_t *lock)
+{
+ if (!lock)
+ return -1;
+
+ pthread_cond_destroy (&lock->cond);
+ return pthread_mutex_destroy (&lock->guard);
+}
+
+
+/*
+ * Core lock routine; caller MUST hold lock->guard.
+ *
+ * Loops while the lock is held, parking the caller according to its
+ * type:
+ *  - a synctask queues itself on lock->waitq, drops the guard and
+ *    yields; the unlocker wakes it via synctask_wake(), after which
+ *    it reacquires the guard, dequeues itself and re-checks the flag;
+ *  - a plain thread sleeps on lock->cond (pthread_cond_wait releases
+ *    and reacquires the guard atomically).
+ *
+ * On success, marks the lock held and records the owner (NULL when
+ * the new holder is not a synctask).  Returns 0, or -1 if @lock is
+ * NULL.
+ */
+static int
+__synclock_lock (struct synclock *lock)
+{
+ struct synctask *task = NULL;
+
+ if (!lock)
+ return -1;
+
+ task = synctask_get ();
+
+ while (lock->lock) {
+ if (task) {
+ /* called within a synctask */
+ list_add_tail (&task->waitq, &lock->waitq);
+ {
+ /* must not yield while holding the guard,
+ or the unlocker could never wake us */
+ pthread_mutex_unlock (&lock->guard);
+ synctask_yield (task);
+ pthread_mutex_lock (&lock->guard);
+ }
+ list_del_init (&task->waitq);
+ } else {
+ /* called by a non-synctask */
+ pthread_cond_wait (&lock->cond, &lock->guard);
+ }
+ }
+
+ lock->lock = _gf_true;
+ lock->owner = task;
+
+ return 0;
+}
+
+
+/*
+ * Public blocking lock: take the guard and run the core lock loop.
+ * Blocks (yielding if in a synctask, sleeping otherwise) until the
+ * lock is acquired.  Returns 0 on success, -1 if @lock is NULL.
+ */
+int
+synclock_lock (synclock_t *lock)
+{
+ int ret = 0;
+
+ pthread_mutex_lock (&lock->guard);
+ {
+ ret = __synclock_lock (lock);
+ }
+ pthread_mutex_unlock (&lock->guard);
+
+ return ret;
+}
+
+
+/*
+ * Non-blocking lock attempt.  If the lock is currently held, returns
+ * -1 with errno set to EBUSY and does not wait.  Otherwise acquires
+ * the lock via __synclock_lock() (which cannot block here, since the
+ * held flag was checked under the same guard hold) and returns 0.
+ */
+int
+synclock_trylock (synclock_t *lock)
+{
+ int ret = 0;
+
+ errno = 0;
+
+ pthread_mutex_lock (&lock->guard);
+ {
+ if (lock->lock) {
+ errno = EBUSY;
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = __synclock_lock (lock);
+ }
+unlock:
+ pthread_mutex_unlock (&lock->guard);
+
+ return ret;
+}
+
+
+/*
+ * Core unlock routine; caller MUST hold lock->guard.
+ *
+ * Clears the held flag, then wakes at most one sleeping thread (via
+ * the condvar) and the first queued synctask (via synctask_wake).
+ * The owner check below is currently a stub — unlocking a lock you
+ * do not own is silently tolerated.
+ *
+ * Returns 0, or -1 if @lock is NULL.
+ */
+static int
+__synclock_unlock (synclock_t *lock)
+{
+ struct synctask *task = NULL;
+ struct synctask *curr = NULL;
+
+ if (!lock)
+ return -1;
+
+ curr = synctask_get ();
+
+ if (lock->owner != curr) {
+ /* warn ? */
+ }
+
+ lock->lock = _gf_false;
+
+ /* There could be both synctasks and non-synctasks
+ waiting (or none, or either). As a mid-approach
+ between maintaining too many waiting counters at
+ one extreme and a thundering herd on unlock at
+ the other, call pthread_cond_signal (which wakes
+ one sleeping thread) and also wake the first
+ waiting synctask. So at most two contenders wake
+ up to grab the just-released lock.
+ */
+ pthread_cond_signal (&lock->cond);
+ if (!list_empty (&lock->waitq)) {
+ task = list_entry (lock->waitq.next, struct synctask, waitq);
+ synctask_wake (task);
+ }
+
+ return 0;
+}
+
+
+/*
+ * Public unlock: take the guard and run the core unlock routine,
+ * which releases the lock and wakes waiters.  Returns 0 on success,
+ * -1 if @lock is NULL.
+ */
+int
+synclock_unlock (synclock_t *lock)
+{
+ int ret = 0;
+
+ pthread_mutex_lock (&lock->guard);
+ {
+ ret = __synclock_unlock (lock);
+ }
+ pthread_mutex_unlock (&lock->guard);
+
+ return ret;
+}
+
+
/* FOPS */