path: root/libglusterfs
author     Prasanna Kumar Kalever <prasanna.kalever@redhat.com>    2016-02-11 23:45:37 +0530
committer  Jeff Darcy <jdarcy@redhat.com>                          2016-03-17 06:55:47 -0700
commit     7e44c783ad731856956929f6614bbe045c26ea3a (patch)
tree       85e699cc8926bda7d8bebef63581030817b0eedb /libglusterfs
parent     fb3d300617d3616e1b901dd5503ff330a542c7be (diff)
lock: use spinlock only on multicore systems
Using spinlocks on a single-core system usually makes no sense: as long as the spinning thread is occupying the only available CPU core, no other thread can run, and since no other thread can run, the lock will not be released until the spinning thread's time quantum expires and it gets de-scheduled. In other words, on such systems a spinlock wastes CPU time for no real benefit. If the thread were put to sleep instead, another thread could run at once, possibly unlocking the lock and allowing the first thread to continue processing once it woke up again.

Change-Id: I0ffc14e26c2e150b564bcb682a576859ab1d1872
BUG: 1306807
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Reviewed-on: http://review.gluster.org/13432
Smoke: Gluster Build System <jenkins@build.gluster.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
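As a rough illustrative sketch (not part of the patch): callers that already go through the gf_lock_t wrappers need no change at all. The hypothetical counter below is protected with the macros from locking.h; after this patch, LOCK_INIT()/LOCK()/UNLOCK() land on a spinlock when more than one core is online and on a mutex otherwise.

#include "locking.h"             /* gf_lock_t, LOCK_INIT, LOCK, UNLOCK, LOCK_DESTROY */

static gf_lock_t counter_lock;   /* hypothetical shared state, for illustration only */
static int       counter;

void
counter_init (void)
{
        LOCK_INIT (&counter_lock);      /* picks spinlock or mutex at runtime */
}

void
counter_bump (void)
{
        LOCK (&counter_lock);           /* spins on SMP, sleeps on single-core */
        counter++;
        UNLOCK (&counter_lock);
}

void
counter_fini (void)
{
        LOCK_DESTROY (&counter_lock);
}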
Diffstat (limited to 'libglusterfs')
-rw-r--r--  libglusterfs/src/Makefile.am |  2
-rw-r--r--  libglusterfs/src/locking.c   | 28
-rw-r--r--  libglusterfs/src/locking.h   | 59
3 files changed, 79 insertions(+), 10 deletions(-)
diff --git a/libglusterfs/src/Makefile.am b/libglusterfs/src/Makefile.am
index e0acc3c4e58..39ae9e05091 100644
--- a/libglusterfs/src/Makefile.am
+++ b/libglusterfs/src/Makefile.am
@@ -31,7 +31,7 @@ libglusterfs_la_SOURCES = dict.c xlator.c logging.c \
strfd.c parse-utils.c $(CONTRIBDIR)/mount/mntent.c \
$(CONTRIBDIR)/libexecinfo/execinfo.c quota-common-utils.c rot-buffs.c \
$(CONTRIBDIR)/timer-wheel/timer-wheel.c \
- $(CONTRIBDIR)/timer-wheel/find_last_bit.c tw.c default-args.c
+ $(CONTRIBDIR)/timer-wheel/find_last_bit.c tw.c default-args.c locking.c
nodist_libglusterfs_la_SOURCES = y.tab.c graph.lex.c defaults.c
nodist_libglusterfs_la_HEADERS = y.tab.h
diff --git a/libglusterfs/src/locking.c b/libglusterfs/src/locking.c
new file mode 100644
index 00000000000..d3b9754ef76
--- /dev/null
+++ b/libglusterfs/src/locking.c
@@ -0,0 +1,28 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#if defined(HAVE_SPINLOCK)
+/* None of this matters otherwise. */
+
+#include <pthread.h>
+#include <unistd.h>
+
+#define LOCKING_IMPL
+#include "locking.h"
+
+int use_spinlocks = 0;
+
+static void __attribute__((constructor))
+gf_lock_setup (void)
+{
+ use_spinlocks = (sysconf(_SC_NPROCESSORS_ONLN) > 1);
+}
+
+#endif
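As a side note, not part of the patch: the runtime detection done by gf_lock_setup() above can be checked on any machine with a few lines like these, using the same sysconf() call.

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
        long ncores = sysconf (_SC_NPROCESSORS_ONLN);

        /* Mirrors gf_lock_setup(): spinlocks only when more than one core is online. */
        printf ("online cores: %ld, use_spinlocks would be %d\n",
                ncores, ncores > 1);
        return 0;
}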
diff --git a/libglusterfs/src/locking.h b/libglusterfs/src/locking.h
index 24edf9aed44..71b6a286e6c 100644
--- a/libglusterfs/src/locking.h
+++ b/libglusterfs/src/locking.h
@@ -12,7 +12,8 @@
#define _LOCKING_H
#include <pthread.h>
-#ifdef GF_DARWIN_HOST_OS
+
+#if defined (GF_DARWIN_HOST_OS)
#include <libkern/OSAtomic.h>
#define pthread_spinlock_t OSSpinLock
#define pthread_spin_lock(l) OSSpinLockLock(l)
@@ -21,23 +22,63 @@
#define pthread_spin_init(l, v) (*l = v)
#endif
+#if defined (HAVE_SPINLOCK)
+
+typedef union {
+ pthread_spinlock_t spinlock;
+ pthread_mutex_t mutex;
+} gf_lock_t;
+
+#if !defined(LOCKING_IMPL)
+extern int use_spinlocks;
+
+/*
+ * Using a dispatch table would be unpleasant because we're dealing with two
+ * different types. If the dispatch contains direct pointers to pthread_xx
+ * or mutex_xxx then we have to hope that every possible union alternative
+ * starts at the same address as the union itself. I'm old enough to remember
+ * compilers where this was not the case (for alignment reasons) so I'm a bit
+ * paranoid about that. Also, I don't like casting arguments through "void *"
+ * which we'd also have to do to avoid type errors. The other alternative would
+ * be to define actual functions which pick out the right union member, and put
+ * those in the dispatch tables. Now we have a pointer dereference through the
+ * dispatch table plus a function call, which is likely to be worse than the
+ * branching here from the ?: construct. If it were a clear win it might be
+ * worth the extra complexity, but for now this way seems preferable.
+ */
+
+#define LOCK_INIT(x) (use_spinlocks \
+ ? pthread_spin_init (&((x)->spinlock), 0) \
+ : pthread_mutex_init (&((x)->mutex), 0))
+
+#define LOCK(x) (use_spinlocks \
+ ? pthread_spin_lock (&((x)->spinlock)) \
+ : pthread_mutex_lock (&((x)->mutex)))
+
+#define TRY_LOCK(x) (use_spinlocks \
+ ? pthread_spin_trylock (&((x)->spinlock)) \
+ : pthread_mutex_trylock (&((x)->mutex)))
-#if HAVE_SPINLOCK
-#define LOCK_INIT(x) pthread_spin_init (x, 0)
-#define LOCK(x) pthread_spin_lock (x)
-#define TRY_LOCK(x) pthread_spin_trylock (x)
-#define UNLOCK(x) pthread_spin_unlock (x)
-#define LOCK_DESTROY(x) pthread_spin_destroy (x)
+#define UNLOCK(x) (use_spinlocks \
+ ? pthread_spin_unlock (&((x)->spinlock)) \
+ : pthread_mutex_unlock (&((x)->mutex)))
+
+#define LOCK_DESTROY(x) (use_spinlocks \
+ ? pthread_spin_destroy (&((x)->spinlock)) \
+ : pthread_mutex_destroy (&((x)->mutex)))
+
+#endif
-typedef pthread_spinlock_t gf_lock_t;
#else
+
+typedef pthread_mutex_t gf_lock_t;
+
#define LOCK_INIT(x) pthread_mutex_init (x, 0)
#define LOCK(x) pthread_mutex_lock (x)
#define TRY_LOCK(x) pthread_mutex_trylock (x)
#define UNLOCK(x) pthread_mutex_unlock (x)
#define LOCK_DESTROY(x) pthread_mutex_destroy (x)
-typedef pthread_mutex_t gf_lock_t;
#endif /* HAVE_SPINLOCK */
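For comparison only, and not part of the patch: the comment added to locking.h argues against a function-pointer dispatch table. A minimal sketch of that rejected alternative, using wrapper functions that pick the right union member and a pointer filled in once at startup, could look like the following. Every lock operation then costs a pointer load plus an indirect call on top of the pthread call itself, which is the overhead the ?:-based macros avoid.

/* Sketch of the rejected dispatch-table alternative -- illustrative only. */
#include <pthread.h>
#include <unistd.h>

typedef union {
        pthread_spinlock_t spinlock;
        pthread_mutex_t    mutex;
} gf_lock_t;

/* Wrapper functions that pick out the right union member. */
static int
lock_spin (gf_lock_t *l)
{
        return pthread_spin_lock (&l->spinlock);
}

static int
lock_mutex (gf_lock_t *l)
{
        return pthread_mutex_lock (&l->mutex);
}

/* Dispatch pointer, filled in once based on the number of online cores. */
static int (*lock_fn) (gf_lock_t *);

static void __attribute__((constructor))
lock_dispatch_setup (void)
{
        lock_fn = (sysconf (_SC_NPROCESSORS_ONLN) > 1) ? lock_spin : lock_mutex;
}

#define LOCK(x) (lock_fn (x))   /* indirect call per lock operation */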