path: root/libglusterfs/src/syncop.c
author    Anand Avati <avati@redhat.com>    2013-04-22 04:35:03 -0700
committer Anand Avati <avati@redhat.com>    2013-05-04 00:33:01 -0700
commit    83cedcd9be2676e63b1be72ecaf3316a781773cb (patch)
tree      70c3bd94e31db88f86ae78cd1efa4f031046ef1d /libglusterfs/src/syncop.c
parent    54b9cd3df357ac196af17f53282b3b1975dd4fb9 (diff)
synctask: implement barriers around yield, not the other way
In the current implementation, barriers sit in the core of the syncprocessors: wake()s are treated as syncbarrier wakes. This is delicate, however, as spurious wake()s of a synctask can mess up the barrier's accounting and wake it prematurely. The fix is to keep yield() and wake() as the basic primitives and implement the barrier as an object built on top of them. That way only an explicit barrier_wake() counts towards the barrier's accounting, and spurious wakes become truly safe.

Change-Id: I8087f0f446113e5b2d0853431c0354335ccda076
BUG: 948686
Signed-off-by: Anand Avati <avati@redhat.com>
Reviewed-on: http://review.gluster.org/4921
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
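To illustrate the intended use of the new API from inside a synctask, here is a minimal sketch (not part of the patch). It assumes the GlusterFS headers and the syncbarrier_init/wake/wait/destroy functions added below; struct op_state, start_async_op(), op_done() and do_two_ops() are hypothetical stand-ins for any callback-driven operation whose completion callback issues the explicit barrier wake.

/* Usage sketch only, not part of the patch.  start_async_op() and op_done()
 * are hypothetical stand-ins for a callback-driven operation (for example a
 * wound FOP whose cbk reports completion). */
#include "syncop.h"

struct op_state {
        struct syncbarrier *barrier;
        int                 ret;        /* per-operation result */
};

/* Hypothetical async starter: invokes done(state) when the op completes. */
void start_async_op (struct op_state *state, void (*done) (struct op_state *));

/* Completion callback: the only wake that counts towards the barrier. */
static void
op_done (struct op_state *state)
{
        syncbarrier_wake (state->barrier);
}

/* Body of a synctask (e.g. run via synctask_new()). */
static int
do_two_ops (void *opaque)
{
        struct syncbarrier barrier;
        struct op_state    ops[2];
        int                i = 0;

        syncbarrier_init (&barrier);

        for (i = 0; i < 2; i++) {
                ops[i].barrier = &barrier;
                start_async_op (&ops[i], op_done);
        }

        /* Inside a synctask this yields the processor until two explicit
         * barrier wakes have arrived; a spurious synctask_wake() only causes
         * the count to be re-checked, it can no longer trip the barrier. */
        syncbarrier_wait (&barrier, 2);

        syncbarrier_destroy (&barrier);
        return 0;
}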
Diffstat (limited to 'libglusterfs/src/syncop.c')
-rw-r--r--  libglusterfs/src/syncop.c  142
1 file changed, 120 insertions(+), 22 deletions(-)
diff --git a/libglusterfs/src/syncop.c b/libglusterfs/src/syncop.c
index f58bfcecab4..7dcdf3fefcb 100644
--- a/libglusterfs/src/syncop.c
+++ b/libglusterfs/src/syncop.c
@@ -80,24 +80,15 @@ __wait (struct synctask *task)
void
-synctask_waitfor (struct synctask *task, int waitfor)
+synctask_yield (struct synctask *task)
{
- struct syncenv *env = NULL;
xlator_t *oldTHIS = THIS;
- env = task->env;
-
#if defined(__NetBSD__) && defined(_UC_TLSBASE)
/* Preserve pthread private pointer through swapcontex() */
task->proc->sched.uc_flags &= ~_UC_TLSBASE;
#endif
- pthread_mutex_lock (&env->mutex);
- {
- task->waitfor = waitfor;
- }
- pthread_mutex_unlock (&env->mutex);
-
if (swapcontext (&task->ctx, &task->proc->sched) < 0) {
gf_log ("syncop", GF_LOG_ERROR,
"swapcontext failed (%s)", strerror (errno));
@@ -108,13 +99,6 @@ synctask_waitfor (struct synctask *task, int waitfor)
void
-synctask_yield (struct synctask *task)
-{
- synctask_waitfor (task, 1);
-}
-
-
-void
synctask_yawn (struct synctask *task)
{
struct syncenv *env = NULL;
@@ -124,7 +108,6 @@ synctask_yawn (struct synctask *task)
pthread_mutex_lock (&env->mutex);
{
task->woken = 0;
- task->waitfor = 0;
}
pthread_mutex_unlock (&env->mutex);
}
@@ -139,9 +122,9 @@ synctask_wake (struct synctask *task)
pthread_mutex_lock (&env->mutex);
{
- task->woken++;
+ task->woken = 1;
- if (task->slept && task->woken >= task->waitfor)
+ if (task->slept)
__run (task);
pthread_cond_broadcast (&env->cond);
@@ -352,7 +335,6 @@ syncenv_task (struct syncproc *proc)
task->woken = 0;
task->slept = 0;
- task->waitfor = 0;
task->proc = proc;
}
@@ -390,7 +372,7 @@ synctask_switchto (struct synctask *task)
pthread_mutex_lock (&env->mutex);
{
- if (task->woken >= task->waitfor) {
+ if (task->woken) {
__run (task);
} else {
task->slept = 1;
@@ -655,6 +637,122 @@ synclock_unlock (synclock_t *lock)
return ret;
}
+/* Barriers */
+
+int
+syncbarrier_init (struct syncbarrier *barrier)
+{
+ if (!barrier) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ pthread_cond_init (&barrier->cond, 0);
+ barrier->count = 0;
+ INIT_LIST_HEAD (&barrier->waitq);
+
+ return pthread_mutex_init (&barrier->guard, 0);
+}
+
+
+int
+syncbarrier_destroy (struct syncbarrier *barrier)
+{
+ if (!barrier) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ pthread_cond_destroy (&barrier->cond);
+ return pthread_mutex_destroy (&barrier->guard);
+}
+
+
+static int
+__syncbarrier_wait (struct syncbarrier *barrier, int waitfor)
+{
+ struct synctask *task = NULL;
+
+ if (!barrier) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ task = synctask_get ();
+
+ while (barrier->count < waitfor) {
+ if (task) {
+ /* called within a synctask */
+ list_add_tail (&task->waitq, &barrier->waitq);
+ {
+ pthread_mutex_unlock (&barrier->guard);
+ synctask_yield (task);
+ pthread_mutex_lock (&barrier->guard);
+ }
+ list_del_init (&task->waitq);
+ } else {
+ /* called by a non-synctask */
+ pthread_cond_wait (&barrier->cond, &barrier->guard);
+ }
+ }
+
+ barrier->count = 0;
+
+ return 0;
+}
+
+
+int
+syncbarrier_wait (struct syncbarrier *barrier, int waitfor)
+{
+ int ret = 0;
+
+ pthread_mutex_lock (&barrier->guard);
+ {
+ ret = __syncbarrier_wait (barrier, waitfor);
+ }
+ pthread_mutex_unlock (&barrier->guard);
+
+ return ret;
+}
+
+
+static int
+__syncbarrier_wake (struct syncbarrier *barrier)
+{
+ struct synctask *task = NULL;
+
+ if (!barrier) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ barrier->count++;
+
+ pthread_cond_signal (&barrier->cond);
+ if (!list_empty (&barrier->waitq)) {
+ task = list_entry (barrier->waitq.next, struct synctask, waitq);
+ synctask_wake (task);
+ }
+
+ return 0;
+}
+
+
+int
+syncbarrier_wake (struct syncbarrier *barrier)
+{
+ int ret = 0;
+
+ pthread_mutex_lock (&barrier->guard);
+ {
+ ret = __syncbarrier_wake (barrier);
+ }
+ pthread_mutex_unlock (&barrier->guard);
+
+ return ret;
+}
+
/* FOPS */
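The wait path above also covers callers that are not synctasks: when synctask_get() returns NULL, __syncbarrier_wait() falls back to pthread_cond_wait() on the barrier's condition variable. The following sketch (not part of the patch) shows that fallback path with plain pthreads; worker() and run_two_workers() are illustrative names only, and the example assumes the GlusterFS headers providing the syncbarrier_* API.

/* Sketch of the non-synctask path: syncbarrier_wait() called from an
 * ordinary thread blocks on the condition variable instead of yielding. */
#include <pthread.h>
#include "syncop.h"

static struct syncbarrier barrier;

static void *
worker (void *arg)
{
        /* ... do some work ... */
        syncbarrier_wake (&barrier);    /* explicit wake, counted once */
        return NULL;
}

int
run_two_workers (void)
{
        pthread_t t[2];
        int       i = 0;

        syncbarrier_init (&barrier);

        for (i = 0; i < 2; i++)
                pthread_create (&t[i], NULL, worker, NULL);

        /* Not inside a synctask, so this blocks the calling thread until
         * both workers have issued their explicit wakes. */
        syncbarrier_wait (&barrier, 2);

        for (i = 0; i < 2; i++)
                pthread_join (t[i], NULL);

        return syncbarrier_destroy (&barrier);
}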