author     Niels de Vos <ndevos@redhat.com>           2017-03-29 13:44:03 +0200
committer  Jeff Darcy <jeff@pl.atyp.us>               2017-04-05 09:14:26 -0400
commit     93e3c9abce1a02ac724afa382751852fa5edf713 (patch)
tree       887c599c2a31ab65a2f0b2440e5f3fe8b6061afc
parent     d6b88e9b8b02813620c3c1a2ea49d58d29062b3e (diff)
libglusterfs: provide standardized atomic operations
The current macros INCREMENT_ATOMIC() and DECREMENT_ATOMIC() expect a lock as their first argument. There are at least two issues with this approach:

1. the lock is unused on architectures that have atomic operations
2. some structures use a single lock for multiple variables

By defining a gf_atomic_t type, the unused lock can be removed, saving a few bytes on modern architectures. Because the gf_atomic_t type co-locates the lock with the variable (on older architectures), each variable is protected the same way everywhere. This makes the behaviour more uniform across architectures: per-variable protection, either through a gf_lock_t or through the compiler's atomic builtins.

BUG: 1437037
Change-Id: Ic164892b06ea676e6a9566f8a98b7faf0efe76d6
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: https://review.gluster.org/16963
Smoke: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
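To illustrate the intended call pattern, here is a minimal sketch of a reference counter built on the new macros. It assumes only the GF_ATOMIC_* interface introduced in atomic.h below; the my_object_t type and helper functions are illustrative and not part of the patch.

#include <stdlib.h>

#include "atomic.h"

typedef struct my_object {
        gf_atomic_t refcnt;   /* replaces { gf_lock_t lock; volatile int count; } */
} my_object_t;

static my_object_t *
my_object_new (void)
{
        my_object_t *obj = calloc (1, sizeof (*obj));

        if (obj)
                GF_ATOMIC_INIT (obj->refcnt, 1);  /* one reference for the caller */

        return obj;
}

static void
my_object_ref (my_object_t *obj)
{
        GF_ATOMIC_INC (obj->refcnt);              /* no separate lock argument */
}

static void
my_object_unref (my_object_t *obj)
{
        /* GF_ATOMIC_DEC() returns the new value, so the last caller sees 0 */
        if (GF_ATOMIC_DEC (obj->refcnt) == 0)
                free (obj);
}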
-rw-r--r--  configure.ac                                 |  21
-rw-r--r--  libglusterfs/src/Makefile.am                 |   2
-rw-r--r--  libglusterfs/src/atomic.h                    | 109
-rw-r--r--  libglusterfs/src/client_t.c                  |  45
-rw-r--r--  libglusterfs/src/client_t.h                  |   8
-rw-r--r--  libglusterfs/src/globals.h                   |  17
-rw-r--r--  libglusterfs/src/logging.h                   |   1
-rw-r--r--  libglusterfs/src/mem-pool.c                  |   4
-rw-r--r--  libglusterfs/src/mem-pool.h                  |  10
-rw-r--r--  libglusterfs/src/xlator.c                    |   5
-rw-r--r--  xlators/debug/io-stats/src/io-stats.c        |   8
-rw-r--r--  xlators/performance/md-cache/src/md-cache.c  | 102
12 files changed, 216 insertions(+), 116 deletions(-)
diff --git a/configure.ac b/configure.ac
index c7b6c38c002..c2d03e7ee6a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -956,16 +956,19 @@ AC_SUBST(ARGP_STANDALONE_CPPFLAGS)
AC_SUBST(ARGP_STANDALONE_LDADD)
AC_SUBST(ARGP_STANDALONE_DIR)
-# Check for atomic operation support
-echo -n "checking for atomic operation support... "
-AC_LINK_IFELSE([AC_LANG_SOURCE([[int main() { long int a = 4; __sync_fetch_and_add_8 (&a, 1); }]])],
- [have_sync_fetch_and_add_8=yes], [have_sync_fetch_and_add_8=no])
-if test "x${have_sync_fetch_and_add_8}" = "xyes"; then
- echo "yes"
- AC_DEFINE(HAVE_ATOMIC_BUILTINS, 1, [have atomic builtins])
-else
- echo "no"
+dnl Check for atomic operation support
+AC_CHECK_FUNC([__atomic_load], [have_atomic_builtins=yes])
+if test "x${have_atomic_builtins}" = "xyes"; then
+ AC_DEFINE(HAVE_ATOMIC_BUILTINS, 1, [define if __atomic_*() builtins are available])
+fi
+AC_SUBST(HAVE_ATOMIC_BUILTINS)
+
+dnl __sync_*() will not be needed if __atomic_*() is available
+AC_CHECK_FUNC([__sync_fetch_and_add], [have_sync_builtins=yes])
+if test "x${have_sync_builtins}" = "xyes"; then
+ AC_DEFINE(HAVE_SYNC_BUILTINS, 1, [define if __sync_*() builtins are available])
fi
+AC_SUBST(HAVE_SYNC_BUILTINS)
AC_CHECK_HEADER([malloc.h], AC_DEFINE(HAVE_MALLOC_H, 1, [have malloc.h]))
diff --git a/libglusterfs/src/Makefile.am b/libglusterfs/src/Makefile.am
index e9e690ee4bd..2311c53645a 100644
--- a/libglusterfs/src/Makefile.am
+++ b/libglusterfs/src/Makefile.am
@@ -53,7 +53,7 @@ libglusterfs_la_HEADERS = common-utils.h defaults.h default-args.h \
syncop-utils.h parse-utils.h libglusterfs-messages.h tw.h \
lvm-defaults.h quota-common-utils.h rot-buffs.h \
compat-uuid.h upcall-utils.h throttle-tbf.h events.h\
- compound-fop-utils.h
+ compound-fop-utils.h atomic.h
libglusterfs_ladir = $(includedir)/glusterfs
diff --git a/libglusterfs/src/atomic.h b/libglusterfs/src/atomic.h
new file mode 100644
index 00000000000..71fcb1ee972
--- /dev/null
+++ b/libglusterfs/src/atomic.h
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) 2017 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _ATOMIC_H
+#define _ATOMIC_H
+
+#include <inttypes.h>
+
+#if defined(HAVE_ATOMIC_BUILTINS) || defined(HAVE_SYNC_BUILTINS)
+/* optimized implementation, macros only */
+
+typedef struct gf_atomic_t {
+ int64_t cnt;
+} gf_atomic_t;
+
+#if defined(HAVE_ATOMIC_BUILTINS)
+
+/* all macros have a 'gf_atomic_t' as 1st argument */
+#define GF_ATOMIC_INIT(op, n) __atomic_store_n (&(op.cnt), n, __ATOMIC_RELEASE)
+#define GF_ATOMIC_GET(op) __atomic_load_n (&(op.cnt), __ATOMIC_ACQUIRE)
+#define GF_ATOMIC_INC(op) __atomic_add_and_fetch (&(op.cnt), 1, \
+ __ATOMIC_ACQ_REL)
+#define GF_ATOMIC_DEC(op) __atomic_sub_and_fetch (&(op.cnt), 1, \
+ __ATOMIC_ACQ_REL)
+#define GF_ATOMIC_ADD(op, n) __atomic_add_and_fetch (&(op.cnt), n, \
+ __ATOMIC_ACQ_REL)
+#define GF_ATOMIC_SUB(op, n) __atomic_sub_and_fetch (&(op.cnt), n, \
+ __ATOMIC_ACQ_REL)
+
+#else /* !HAVE_ATOMIC_BUILTINS, but HAVE_SYNC_BUILTINS */
+
+/* all macros have a 'gf_atomic_t' as 1st argument */
+#define GF_ATOMIC_INIT(op, n) ({ op.cnt = n; __sync_synchronize (); })
+#define GF_ATOMIC_GET(op) __sync_add_and_fetch (&(op.cnt), 0)
+#define GF_ATOMIC_INC(op) __sync_add_and_fetch (&(op.cnt), 1)
+#define GF_ATOMIC_DEC(op) __sync_sub_and_fetch (&(op.cnt), 1)
+#define GF_ATOMIC_ADD(op, n) __sync_add_and_fetch (&(op.cnt), n)
+#define GF_ATOMIC_SUB(op, n) __sync_sub_and_fetch (&(op.cnt), n)
+
+#endif /* HAVE_ATOMIC_BUILTINS */
+
+#else /* no HAVE_(ATOMIC|SYNC)_BUILTINS */
+/* fallback implementation, using small inline functions to improve type
+ * checking while compiling */
+
+#include "locking.h"
+
+typedef struct gf_atomic_t {
+ int64_t cnt;
+ gf_lock_t lk;
+} gf_atomic_t;
+
+
+static inline void
+gf_atomic_init (gf_atomic_t *op, int64_t cnt)
+{
+ LOCK_INIT (&op->lk);
+ op->cnt = cnt;
+}
+
+
+static inline uint64_t
+gf_atomic_get (gf_atomic_t *op)
+{
+ uint64_t ret;
+
+ LOCK (&op->lk);
+ {
+ ret = op->cnt;
+ }
+ UNLOCK (&op->lk);
+
+ return ret;
+}
+
+
+static inline int64_t
+gf_atomic_add (gf_atomic_t *op, int64_t n)
+{
+ int64_t ret;
+
+ LOCK (&op->lk);
+ {
+ op->cnt += n;
+ ret = op->cnt;
+ }
+ UNLOCK (&op->lk);
+
+ return ret;
+}
+
+
+#define GF_ATOMIC_INIT(op, cnt) gf_atomic_init (&op, cnt)
+#define GF_ATOMIC_GET(op) gf_atomic_get (&op)
+#define GF_ATOMIC_INC(op) gf_atomic_add (&op, 1)
+#define GF_ATOMIC_DEC(op) gf_atomic_add (&op, -1)
+#define GF_ATOMIC_ADD(op, n) gf_atomic_add (&op, n)
+#define GF_ATOMIC_SUB(op, n) gf_atomic_add (&op, -n)
+
+#endif /* HAVE_ATOMIC_BUILTINS || HAVE_SYNC_BUILTINS */
+
+#endif /* _ATOMIC_H */
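The client_t.c and logging.h hunks that follow switch the debug messages from plain %d to a new GF_PRI_ATOMIC format macro. A minimal sketch of that pattern, assuming GF_PRI_ATOMIC expands to PRIu64 as defined in logging.h further down in this patch (the refcount variable here is illustrative):

#include <inttypes.h>
#include <stdio.h>

#define GF_PRI_ATOMIC PRIu64    /* mirrors the definition added to logging.h */

int
main (void)
{
        uint64_t refcount = 3;  /* e.g. the value returned by GF_ATOMIC_GET() */

        /* GF_PRI_ATOMIC only supplies the length/type part of the conversion
         * specifier (e.g. "lu" or "llu"); the caller still provides the '%'. */
        printf ("ref-count %"GF_PRI_ATOMIC"\n", refcount);

        return 0;
}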
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index c20c4089ec3..1adfef5c7e3 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -192,8 +192,7 @@ gf_client_get (xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid)
memcmp (cred->authdata,
client->auth.data,
client->auth.len) == 0))) {
- INCREMENT_ATOMIC (client->ref.lock,
- client->ref.bind);
+ GF_ATOMIC_INC (client->bind);
goto unlock;
}
}
@@ -207,7 +206,6 @@ gf_client_get (xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid)
client->this = this;
LOCK_INIT (&client->scratch_ctx.lock);
- LOCK_INIT (&client->ref.lock);
client->client_uid = gf_strdup (client_uid);
if (client->client_uid == NULL) {
@@ -229,8 +227,8 @@ gf_client_get (xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid)
goto unlock;
}
- /* no need to do these atomically here */
- client->ref.bind = client->ref.count = 1;
+ GF_ATOMIC_INIT (client->bind, 1);
+ GF_ATOMIC_INIT (client->count, 1);
client->auth.flavour = cred->flavour;
if (cred->flavour != AUTH_NONE) {
@@ -277,9 +275,10 @@ unlock:
if (client)
gf_msg_callingfn ("client_t", GF_LOG_DEBUG, 0, LG_MSG_BIND_REF,
- "%s: bind_ref: %d, ref: %d",
- client->client_uid, client->ref.bind,
- client->ref.count);
+ "%s: bind_ref: %"GF_PRI_ATOMIC", ref: "
+ "%"GF_PRI_ATOMIC, client->client_uid,
+ GF_ATOMIC_GET (client->bind),
+ GF_ATOMIC_GET (client->count));
return client;
}
@@ -295,14 +294,15 @@ gf_client_put (client_t *client, gf_boolean_t *detached)
if (detached)
*detached = _gf_false;
- bind_ref = DECREMENT_ATOMIC (client->ref.lock, client->ref.bind);
+ bind_ref = GF_ATOMIC_DEC (client->bind);
if (bind_ref == 0)
unref = _gf_true;
gf_msg_callingfn ("client_t", GF_LOG_DEBUG, 0, LG_MSG_BIND_REF, "%s: "
- "bind_ref: %d, ref: %d, unref: %d",
- client->client_uid, client->ref.bind,
- client->ref.count, unref);
+ "bind_ref: %"GF_PRI_ATOMIC", ref: %"GF_PRI_ATOMIC", "
+ "unref: %d", client->client_uid,
+ GF_ATOMIC_GET (client->bind),
+ GF_ATOMIC_GET (client->count), unref);
if (unref) {
if (detached)
*detached = _gf_true;
@@ -322,10 +322,10 @@ gf_client_ref (client_t *client)
return NULL;
}
- INCREMENT_ATOMIC (client->ref.lock, client->ref.count);
+ GF_ATOMIC_INC (client->count);
gf_msg_callingfn ("client_t", GF_LOG_DEBUG, 0, LG_MSG_REF_COUNT, "%s: "
- "ref-count %d", client->client_uid,
- client->ref.count);
+ "ref-count %"GF_PRI_ATOMIC, client->client_uid,
+ GF_ATOMIC_GET (client->count));
return client;
}
@@ -360,7 +360,6 @@ client_destroy (client_t *client)
clienttable = client->this->ctx->clienttable;
LOCK_DESTROY (&client->scratch_ctx.lock);
- LOCK_DESTROY (&client->ref.lock);
LOCK (&clienttable->lock);
{
@@ -419,7 +418,7 @@ gf_client_disconnect (client_t *client)
void
gf_client_unref (client_t *client)
{
- int refcount;
+ uint64_t refcount;
if (!client) {
gf_msg_callingfn ("client_t", GF_LOG_ERROR, EINVAL,
@@ -427,10 +426,10 @@ gf_client_unref (client_t *client)
return;
}
- refcount = DECREMENT_ATOMIC (client->ref.lock, client->ref.count);
+ refcount = GF_ATOMIC_DEC (client->count);
gf_msg_callingfn ("client_t", GF_LOG_DEBUG, 0, LG_MSG_REF_COUNT, "%s: "
- "ref-count %d", client->client_uid,
- (int)client->ref.count);
+ "ref-count %"GF_PRI_ATOMIC, client->client_uid,
+ refcount);
if (refcount == 0) {
gf_msg (THIS->name, GF_LOG_INFO, 0, LG_MSG_DISCONNECT_CLIENT,
"Shutting down connection %s", client->client_uid);
@@ -586,7 +585,8 @@ client_dump (client_t *client, char *prefix)
return;
memset(key, 0, sizeof key);
- gf_proc_dump_write("refcount", "%d", client->ref.count);
+ gf_proc_dump_write("refcount", GF_PRI_ATOMIC,
+ GF_ATOMIC_GET (client->count));
}
@@ -780,7 +780,8 @@ gf_client_dump_fdtables (xlator_t *this)
gf_proc_dump_build_key (key, "conn", "%d.ref",
count);
- gf_proc_dump_write (key, "%d", client->ref.count);
+ gf_proc_dump_write (key, "%"GF_PRI_ATOMIC,
+ GF_ATOMIC_GET (client->count));
if (client->bound_xl) {
gf_proc_dump_build_key (key, "conn",
"%d.bound_xl", count);
diff --git a/libglusterfs/src/client_t.h b/libglusterfs/src/client_t.h
index 29ea7f29ce8..31f1bd048ed 100644
--- a/libglusterfs/src/client_t.h
+++ b/libglusterfs/src/client_t.h
@@ -13,6 +13,7 @@
#include "glusterfs.h"
#include "locking.h" /* for gf_lock_t, not included by glusterfs.h */
+#include "atomic.h" /* for gf_atomic_t */
struct client_ctx {
void *ctx_key;
@@ -26,11 +27,8 @@ typedef struct _client_t {
unsigned short count;
struct client_ctx *ctx;
} scratch_ctx;
- struct {
- gf_lock_t lock;
- volatile int bind;
- volatile int count;
- } ref;
+ gf_atomic_t bind;
+ gf_atomic_t count;
xlator_t *bound_xl;
xlator_t *this;
int tbl_index;
diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index 1c8547265d1..24ce0683f7a 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -90,23 +90,6 @@
#define THIS (*__glusterfs_this_location())
#define DECLARE_OLD_THIS xlator_t *old_THIS = THIS
-/*
- * a more comprehensive feature test is shown at
- * http://lists.iptel.org/pipermail/semsdev/2010-October/005075.html
- * this is sufficient for RHEL5 i386 builds
- */
-#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined(__i386__)
-# define INCREMENT_ATOMIC(lk, op) __sync_add_and_fetch(&op, 1)
-# define DECREMENT_ATOMIC(lk, op) __sync_sub_and_fetch(&op, 1)
-#else
-/* These are only here for old gcc, e.g. on RHEL5 i386.
- * We're not ever going to use this in an if stmt,
- * but let's be pedantically correct for style points */
-# define INCREMENT_ATOMIC(lk, op) do { LOCK (&lk); ++op; UNLOCK (&lk); } while (0)
-/* this is a gcc 'statement expression', it works with llvm/clang too */
-# define DECREMENT_ATOMIC(lk, op) ({ LOCK (&lk); --op; UNLOCK (&lk); op; })
-#endif
-
xlator_t **__glusterfs_this_location (void);
xlator_t *glusterfs_this_get (void);
int glusterfs_this_set (xlator_t *);
diff --git a/libglusterfs/src/logging.h b/libglusterfs/src/logging.h
index a6e318dc3fa..78d57888f35 100644
--- a/libglusterfs/src/logging.h
+++ b/libglusterfs/src/logging.h
@@ -37,6 +37,7 @@
#endif
#define GF_PRI_BLKSIZE PRId32
#define GF_PRI_SIZET "zu"
+#define GF_PRI_ATOMIC PRIu64
#ifdef GF_DARWIN_HOST_OS
#define GF_PRI_TIME "ld"
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index af839099ff8..4b600f4681a 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -80,7 +80,7 @@ gf_mem_set_acct_info (xlator_t *xl, char **alloc_ptr, size_t size,
}
UNLOCK(&xl->mem_acct->rec[type].lock);
- INCREMENT_ATOMIC (xl->mem_acct->lock, xl->mem_acct->refcnt);
+ GF_ATOMIC_INC (xl->mem_acct->refcnt);
header = (struct mem_header *) ptr;
header->type = type;
@@ -326,7 +326,7 @@ __gf_free (void *free_ptr)
}
UNLOCK (&mem_acct->rec[header->type].lock);
- if (DECREMENT_ATOMIC (mem_acct->lock, mem_acct->refcnt) == 0) {
+ if (GF_ATOMIC_DEC (mem_acct->refcnt) == 0) {
FREE (mem_acct);
}
diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h
index 0dc186341b2..1b27119cf6c 100644
--- a/libglusterfs/src/mem-pool.h
+++ b/libglusterfs/src/mem-pool.h
@@ -13,6 +13,7 @@
#include "list.h"
#include "locking.h"
+#include "atomic.h"
#include "logging.h"
#include "mem-types.h"
#include <stdlib.h>
@@ -48,14 +49,7 @@ struct mem_acct_rec {
struct mem_acct {
uint32_t num_types;
- /*
- * The lock is only used on ancient platforms (e.g. RHEL5) to keep
- * refcnt increment/decrement atomic. We could even make its existence
- * conditional on the right set of version/feature checks, but it's so
- * lightweight that it's not worth the obfuscation.
- */
- gf_lock_t lock;
- unsigned int refcnt;
+ gf_atomic_t refcnt;
struct mem_acct_rec rec[0];
};
diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
index 0d09b3fbc82..408012e7846 100644
--- a/libglusterfs/src/xlator.c
+++ b/libglusterfs/src/xlator.c
@@ -596,8 +596,7 @@ xlator_mem_acct_init (xlator_t *xl, int num_types)
memset (xl->mem_acct, 0, sizeof(struct mem_acct));
xl->mem_acct->num_types = num_types;
- LOCK_INIT (&xl->mem_acct->lock);
- xl->mem_acct->refcnt = 1;
+ GF_ATOMIC_INIT (xl->mem_acct->refcnt, 1);
for (i = 0; i < num_types; i++) {
memset (&xl->mem_acct->rec[i], 0, sizeof(struct mem_acct_rec));
@@ -654,7 +653,7 @@ xlator_memrec_free (xlator_t *xl)
for (i = 0; i < mem_acct->num_types; i++) {
LOCK_DESTROY (&(mem_acct->rec[i].lock));
}
- if (DECREMENT_ATOMIC (mem_acct->lock, mem_acct->refcnt) == 0) {
+ if (GF_ATOMIC_DEC (mem_acct->refcnt) == 0) {
FREE (mem_acct);
xl->mem_acct = NULL;
}
diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c
index d3b3136d982..7af7b528f88 100644
--- a/xlators/debug/io-stats/src/io-stats.c
+++ b/xlators/debug/io-stats/src/io-stats.c
@@ -249,7 +249,13 @@ is_fop_latency_started (call_frame_t *frame)
conf->incremental.fop_hits[GF_FOP_##op]++; \
} while (0)
-#if defined(HAVE_ATOMIC_BUILTINS)
+#if defined(HAVE_SYNC_BUILTINS)
+/* FIXME: use gf_atomic_t from libglusterfs/src/atomic.h
+ *
+ * This is currently not behaving correctly. Values go out of sync when
+ * HAVE_SYNC_BUILTINS is available, but they are updated under a single
+ * lock (and stay consistent) in the other cases.
+ */
#define STATS_LOCK(x)
#define STATS_UNLOCK(x)
#define STATS_ADD(x,i) __sync_add_and_fetch (&x, i)
diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c
index b6969cda56e..62f64968aa7 100644
--- a/xlators/performance/md-cache/src/md-cache.c
+++ b/xlators/performance/md-cache/src/md-cache.c
@@ -23,6 +23,7 @@
#include <sys/time.h>
#include "md-cache-messages.h"
#include "statedump.h"
+#include "atomic.h"
/* TODO:
- cache symlink() link names and nuke symlink-cache
@@ -30,19 +31,25 @@
*/
struct mdc_statistics {
- uint64_t stat_hit; /* No. of times lookup/stat was served from mdc */
- uint64_t stat_miss; /* No. of times valid stat wasn't present in mdc */
- uint64_t xattr_hit; /* No. of times getxattr was served from mdc, Note:
- this doesn't count the xattr served from lookup */
- uint64_t xattr_miss; /* No. of times xattr req was WIND from mdc */
- uint64_t negative_lookup; /* No. of negative lookups */
- uint64_t nameless_lookup; /* No. of negative lookups that were sent
- sent to bricks */
- uint64_t stat_invals; /* No. of invalidates received from upcall*/
- uint64_t xattr_invals; /* No. of invalidates received from upcall*/
- uint64_t need_lookup; /* No. of lookups issued, because other xlators
- * requested for explicit lookup */
- gf_lock_t lock;
+ gf_atomic_t stat_hit; /* No. of times lookup/stat was served from
+ mdc */
+
+ gf_atomic_t stat_miss; /* No. of times valid stat wasn't present in
+ mdc */
+
+ gf_atomic_t xattr_hit; /* No. of times getxattr was served from mdc,
+ Note: this doesn't count the xattr served
+ from lookup */
+
+ gf_atomic_t xattr_miss; /* No. of times xattr req was WIND from mdc */
+ gf_atomic_t negative_lookup; /* No. of negative lookups */
+ gf_atomic_t nameless_lookup; /* No. of negative lookups that were sent
+ to bricks */
+
+ gf_atomic_t stat_invals; /* No. of invalidates received from upcall */
+ gf_atomic_t xattr_invals; /* No. of invalidates received from upcall */
+ gf_atomic_t need_lookup; /* No. of lookups issued, because other
+ xlators requested for explicit lookup */
};
struct mdc_conf {
@@ -1027,8 +1034,7 @@ mdc_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
if (op_ret != 0) {
if (op_errno == ENOENT)
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.negative_lookup);
+ GF_ATOMIC_INC (conf->mdc_counter.negative_lookup);
goto out;
}
@@ -1064,15 +1070,14 @@ mdc_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc,
local = mdc_local_get (frame);
if (!local) {
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.stat_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_miss);
goto uncached;
}
loc_copy (&local->loc, loc);
if (!loc->name) {
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.nameless_lookup);
+ GF_ATOMIC_INC (conf->mdc_counter.nameless_lookup);
gf_msg_trace ("md-cache", 0, "Nameless lookup(%s) sent to the "
"brick", uuid_utoa (loc->inode->gfid));
@@ -1084,34 +1089,30 @@ mdc_lookup (call_frame_t *frame, xlator_t *this, loc_t *loc,
}
if (mdc_inode_reset_need_lookup (this, loc->inode)) {
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.need_lookup);
+ GF_ATOMIC_INC (conf->mdc_counter.need_lookup);
goto uncached;
}
ret = mdc_inode_iatt_get (this, loc->inode, &stbuf);
if (ret != 0) {
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.stat_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_miss);
goto uncached;
}
if (xdata) {
ret = mdc_inode_xatt_get (this, loc->inode, &xattr_rsp);
if (ret != 0) {
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.xattr_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_miss);
goto uncached;
}
if (!mdc_xattr_satisfied (this, xdata, xattr_rsp)) {
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.xattr_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_miss);
goto uncached;
}
}
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.stat_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_hit);
MDC_STACK_UNWIND (lookup, frame, 0, 0, loc->inode, &stbuf,
xattr_rsp, &postparent);
@@ -1177,13 +1178,13 @@ mdc_stat (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
if (ret != 0)
goto uncached;
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.stat_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_hit);
MDC_STACK_UNWIND (stat, frame, 0, 0, &stbuf, xdata);
return 0;
uncached:
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.stat_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_miss);
STACK_WIND (frame, mdc_stat_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->stat,
loc, xdata);
@@ -1232,13 +1233,13 @@ mdc_fstat (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
if (ret != 0)
goto uncached;
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.stat_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_hit);
MDC_STACK_UNWIND (fstat, frame, 0, 0, &stbuf, xdata);
return 0;
uncached:
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.stat_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_miss);
STACK_WIND (frame, mdc_fstat_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->fstat,
fd, xdata);
@@ -2123,13 +2124,13 @@ mdc_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, const char *key,
op_errno = ENODATA;
}
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_hit);
MDC_STACK_UNWIND (getxattr, frame, ret, op_errno, xattr, xdata);
return 0;
uncached:
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_miss);
STACK_WIND (frame, mdc_getxattr_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->getxattr,
loc, key, xdata);
@@ -2188,13 +2189,13 @@ mdc_fgetxattr (call_frame_t *frame, xlator_t *this, fd_t *fd, const char *key,
op_errno = ENODATA;
}
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_hit);
MDC_STACK_UNWIND (fgetxattr, frame, ret, op_errno, xattr, xdata);
return 0;
uncached:
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_miss);
STACK_WIND (frame, mdc_fgetxattr_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->fgetxattr,
fd, key, xdata);
@@ -2256,13 +2257,13 @@ mdc_removexattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
op_errno = ENODATA;
}
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_hit);
MDC_STACK_UNWIND (removexattr, frame, ret, op_errno, xdata);
return 0;
uncached:
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_miss);
STACK_WIND (frame, mdc_removexattr_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->removexattr,
loc, name, xdata);
@@ -2325,12 +2326,12 @@ mdc_fremovexattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
op_errno = ENODATA;
}
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_hit);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_hit);
MDC_STACK_UNWIND (fremovexattr, frame, ret, op_errno, xdata);
return 0;
uncached:
- INCREMENT_ATOMIC (conf->mdc_counter.lock, conf->mdc_counter.xattr_miss);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_miss);
STACK_WIND (frame, mdc_fremovexattr_cbk,
FIRST_CHILD(this), FIRST_CHILD(this)->fops->fremovexattr,
fd, name, xdata);
@@ -2711,8 +2712,7 @@ mdc_invalidate (xlator_t *this, void *data)
(UP_NLINK | UP_RENAME_FLAGS | UP_FORGET | UP_INVAL_ATTR)) {
mdc_inode_iatt_invalidate (this, inode);
mdc_inode_xatt_invalidate (this, inode);
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.stat_invals);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_invals);
goto out;
}
@@ -2725,8 +2725,7 @@ mdc_invalidate (xlator_t *this, void *data)
*/
if (ret < 0)
goto out;
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.stat_invals);
+ GF_ATOMIC_INC (conf->mdc_counter.stat_invals);
}
if (up_ci->flags & UP_XATTR) {
@@ -2735,15 +2734,13 @@ mdc_invalidate (xlator_t *this, void *data)
else
ret = mdc_inode_xatt_invalidate (this, inode);
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.xattr_invals);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_invals);
} else if (up_ci->flags & UP_XATTR_RM) {
tmp.inode = inode;
tmp.this = this;
ret = dict_foreach (up_ci->dict, mdc_inval_xatt, &tmp);
- INCREMENT_ATOMIC (conf->mdc_counter.lock,
- conf->mdc_counter.xattr_invals);
+ GF_ATOMIC_INC (conf->mdc_counter.xattr_invals);
}
out:
@@ -2983,8 +2980,17 @@ init (xlator_t *this)
GF_OPTION_INIT("cache-invalidation", conf->mdc_invalidation, bool, out);
LOCK_INIT (&conf->lock);
- LOCK_INIT (&conf->mdc_counter.lock);
time (&conf->last_child_down);
+ /* initialize gf_atomic_t counters */
+ GF_ATOMIC_INIT (conf->mdc_counter.stat_hit, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.stat_miss, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.xattr_hit, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.xattr_miss, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.negative_lookup, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.nameless_lookup, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.stat_invals, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.xattr_invals, 0);
+ GF_ATOMIC_INIT (conf->mdc_counter.need_lookup, 0);
/* If timeout is greater than 60s (default before the patch that added
* cache invalidation support was added) then, cache invalidation