From 3f8d118e48f11f448f35aca0c48ad40e0fd34f5b Mon Sep 17 00:00:00 2001 From: Xavier Hernandez Date: Tue, 7 Nov 2017 13:45:03 +0100 Subject: libglusterfs/atomic: Improved atomic support This patch solves a detection problem in configure.ac that prevented compilation from detecting builtin __atomic or __sync functions. It also adds more atomic types and support for other atomic functions. A special case has been added to support 64-bit atomics on 32-bit systems. The solution is to fall back to the mutex solution only for 64-bit atomics, but smaller atomic types will still take advantage of builtins if available. Change-Id: I6b9afc7cd6e66b28a33278715583552872278801 BUG: 1510397 Signed-off-by: Xavier Hernandez --- xlators/debug/io-stats/src/io-stats.c | 4 ++-- xlators/performance/md-cache/src/md-cache.c | 16 ++++++++-------- xlators/performance/nl-cache/src/nl-cache-helper.c | 4 ++-- xlators/performance/nl-cache/src/nl-cache.c | 20 ++++++++++---------- 4 files changed, 22 insertions(+), 22 deletions(-) (limited to 'xlators') diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c index 892746c959a..5656e32a4f5 100644 --- a/xlators/debug/io-stats/src/io-stats.c +++ b/xlators/debug/io-stats/src/io-stats.c @@ -1578,8 +1578,8 @@ io_stats_dump (xlator_t *this, struct ios_dump_args *args, gf1_cli_info_op op, gf_boolean_t is_peek) { struct ios_conf *conf = NULL; - struct ios_global_stats cumulative = {{0,}, }; - struct ios_global_stats incremental = {{0,}, }; + struct ios_global_stats cumulative = { }; + struct ios_global_stats incremental = { }; int increment = 0; struct timeval now; diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c index 64a2867f5d9..6938b3150a6 100644 --- a/xlators/performance/md-cache/src/md-cache.c +++ b/xlators/performance/md-cache/src/md-cache.c @@ -2628,21 +2628,21 @@ mdc_priv_dump (xlator_t *this) gf_proc_dump_add_section(key_prefix); 
gf_proc_dump_write("stat_hit_count", "%"PRId64, - conf->mdc_counter.stat_hit.cnt); + GF_ATOMIC_GET(conf->mdc_counter.stat_hit)); gf_proc_dump_write("stat_miss_count", "%"PRId64, - conf->mdc_counter.stat_miss.cnt); + GF_ATOMIC_GET(conf->mdc_counter.stat_miss)); gf_proc_dump_write("xattr_hit_count", "%"PRId64, - conf->mdc_counter.xattr_hit.cnt); + GF_ATOMIC_GET(conf->mdc_counter.xattr_hit)); gf_proc_dump_write("xattr_miss_count", "%"PRId64, - conf->mdc_counter.xattr_miss.cnt); + GF_ATOMIC_GET(conf->mdc_counter.xattr_miss)); gf_proc_dump_write("nameless_lookup_count", "%"PRId64, - conf->mdc_counter.nameless_lookup.cnt); + GF_ATOMIC_GET(conf->mdc_counter.nameless_lookup)); gf_proc_dump_write("negative_lookup_count", "%"PRId64, - conf->mdc_counter.negative_lookup.cnt); + GF_ATOMIC_GET(conf->mdc_counter.negative_lookup)); gf_proc_dump_write("stat_invalidations_received", "%"PRId64, - conf->mdc_counter.stat_invals.cnt); + GF_ATOMIC_GET(conf->mdc_counter.stat_invals)); gf_proc_dump_write("xattr_invalidations_received", "%"PRId64, - conf->mdc_counter.xattr_invals.cnt); + GF_ATOMIC_GET(conf->mdc_counter.xattr_invals)); return 0; } diff --git a/xlators/performance/nl-cache/src/nl-cache-helper.c b/xlators/performance/nl-cache/src/nl-cache-helper.c index 0b6c884b0de..1556f9ec952 100644 --- a/xlators/performance/nl-cache/src/nl-cache-helper.c +++ b/xlators/performance/nl-cache/src/nl-cache-helper.c @@ -600,8 +600,8 @@ nlc_lru_prune (xlator_t *this, inode_t *inode) LOCK (&conf->lock); { - if ((conf->current_cache_size.cnt < conf->cache_size) && - (conf->refd_inodes.cnt < conf->inode_limit)) + if ((GF_ATOMIC_GET(conf->refd_inodes) < conf->inode_limit) && + (GF_ATOMIC_GET(conf->current_cache_size) < conf->cache_size)) goto unlock; list_for_each_entry_safe (lru_node, tmp, &conf->lru, list) { diff --git a/xlators/performance/nl-cache/src/nl-cache.c b/xlators/performance/nl-cache/src/nl-cache.c index 7dad8d95a53..9fa7ec87616 100644 --- a/xlators/performance/nl-cache/src/nl-cache.c +++ 
b/xlators/performance/nl-cache/src/nl-cache.c @@ -618,29 +618,29 @@ nlc_priv_dump (xlator_t *this) gf_proc_dump_add_section(key_prefix); gf_proc_dump_write("negative_lookup_hit_count", "%"PRId64, - conf->nlc_counter.nlc_hit.cnt); + GF_ATOMIC_GET(conf->nlc_counter.nlc_hit)); gf_proc_dump_write("negative_lookup_miss_count", "%"PRId64, - conf->nlc_counter.nlc_miss.cnt); + GF_ATOMIC_GET(conf->nlc_counter.nlc_miss)); gf_proc_dump_write("get_real_filename_hit_count", "%"PRId64, - conf->nlc_counter.getrealfilename_hit.cnt); + GF_ATOMIC_GET(conf->nlc_counter.getrealfilename_hit)); gf_proc_dump_write("get_real_filename_miss_count", "%"PRId64, - conf->nlc_counter.getrealfilename_miss.cnt); + GF_ATOMIC_GET(conf->nlc_counter.getrealfilename_miss)); gf_proc_dump_write("nameless_lookup_count", "%"PRId64, - conf->nlc_counter.nameless_lookup.cnt); + GF_ATOMIC_GET(conf->nlc_counter.nameless_lookup)); gf_proc_dump_write("inodes_with_positive_dentry_cache", "%"PRId64, - conf->nlc_counter.pe_inode_cnt.cnt); + GF_ATOMIC_GET(conf->nlc_counter.pe_inode_cnt)); gf_proc_dump_write("inodes_with_negative_dentry_cache", "%"PRId64, - conf->nlc_counter.ne_inode_cnt.cnt); + GF_ATOMIC_GET(conf->nlc_counter.ne_inode_cnt)); gf_proc_dump_write("dentry_invalidations_recieved", "%"PRId64, - conf->nlc_counter.nlc_invals.cnt); + GF_ATOMIC_GET(conf->nlc_counter.nlc_invals)); gf_proc_dump_write("cache_limit", "%"PRIu64, conf->cache_size); gf_proc_dump_write("consumed_cache_size", "%"PRId64, - conf->current_cache_size.cnt); + GF_ATOMIC_GET(conf->current_cache_size)); gf_proc_dump_write("inode_limit", "%"PRIu64, conf->inode_limit); gf_proc_dump_write("consumed_inodes", "%"PRId64, - conf->refd_inodes.cnt); + GF_ATOMIC_GET(conf->refd_inodes)); return 0; } -- cgit