author     Zhang Huan <zhanghuan@open-fs.com>        2018-01-09 16:58:22 +0800
committer  Raghavendra G <rgowdapp@redhat.com>       2018-01-15 05:08:28 +0000
commit     0f653fe4fb3a654af65fda736fa23cf0ec9f741c (patch)
tree       f851488dfbb5fef44e97dde0ca1d12ab4efd6b70 /xlators/performance
parent     ba149bac92d169ae2256dbc75202dc9e5d06538e (diff)
performance/readdir-ahead: fix cache usage update issue
Use atomic operations to modify cache-size, protecting it from
concurrent modification.

Change-Id: Ie73cdd4abbaf0232b1db4ac856c01d24603890ad
BUG: 1533804
Signed-off-by: Zhang Huan <zhanghuan@open-fs.com>
Diffstat (limited to 'xlators/performance')
-rw-r--r--  xlators/performance/readdir-ahead/src/readdir-ahead.c  13
-rw-r--r--  xlators/performance/readdir-ahead/src/readdir-ahead.h   2
2 files changed, 9 insertions, 6 deletions
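For context on the race being fixed: a plain "priv->rda_cache_size -=
ctx->cur_size" compiles to a separate load, subtract, and store, so two
threads updating the counter concurrently can read the same old value and
one update is silently lost. Below is a minimal standalone C11 analogue of
the GF_ATOMIC_* pattern the patch adopts, using <stdatomic.h> instead of
libglusterfs's atomic wrappers; names are illustrative and this is a
sketch, not GlusterFS code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Shared cache-usage counter, analogous to priv->rda_cache_size.
 * Declared _Atomic so each add/sub below is one indivisible
 * read-modify-write instead of a racy load/modify/store sequence. */
static _Atomic uint64_t cache_size;

/* Analogue of GF_ATOMIC_ADD(priv->rda_cache_size, n). */
static void
cache_grow(uint64_t n)
{
        atomic_fetch_add_explicit(&cache_size, n, memory_order_relaxed);
}

/* Analogue of GF_ATOMIC_SUB(priv->rda_cache_size, n). */
static void
cache_shrink(uint64_t n)
{
        atomic_fetch_sub_explicit(&cache_size, n, memory_order_relaxed);
}

/* Analogue of the GF_ATOMIC_GET(...) > rda_cache_limit check: the
 * read must be atomic too, or it races with concurrent writers. */
static int
cache_over_limit(uint64_t limit)
{
        return atomic_load_explicit(&cache_size, memory_order_relaxed) > limit;
}

int
main(void)
{
        cache_grow(4096);
        cache_shrink(1024);
        printf("over limit: %d\n", cache_over_limit(2048));
        return 0;
}

Relaxed ordering suffices in this sketch because the counter is a pure
usage gauge and is not used to synchronize access to other data; stronger
ordering would also be correct, just more conservative.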
diff --git a/xlators/performance/readdir-ahead/src/readdir-ahead.c b/xlators/performance/readdir-ahead/src/readdir-ahead.c
index 8827b8b51bf..82e877512c5 100644
--- a/xlators/performance/readdir-ahead/src/readdir-ahead.c
+++ b/xlators/performance/readdir-ahead/src/readdir-ahead.c
@@ -86,7 +86,7 @@ rda_reset_ctx(xlator_t *this, struct rda_fd_ctx *ctx)
ctx->op_errno = 0;
gf_dirent_free(&ctx->entries);
- priv->rda_cache_size -= ctx->cur_size;
+ GF_ATOMIC_SUB (priv->rda_cache_size, ctx->cur_size);
ctx->cur_size = 0;
if (ctx->xattrs) {
@@ -136,7 +136,7 @@ __rda_fill_readdirp (xlator_t *this, gf_dirent_t *entries, size_t request_size,
list_del_init(&dirent->list);
ctx->cur_size -= dirent_size;
- priv->rda_cache_size -= dirent_size;
+ GF_ATOMIC_SUB(priv->rda_cache_size, dirent_size);
list_add_tail(&dirent->list, &entries->list);
ctx->cur_offset = dirent->d_off;
@@ -324,7 +324,7 @@ rda_fill_fd_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
ctx->cur_size += dirent_size;
- priv->rda_cache_size += dirent_size;
+ GF_ATOMIC_ADD(priv->rda_cache_size, dirent_size);
ctx->next_offset = dirent->d_off;
}
@@ -363,8 +363,9 @@ out:
* If we have been marked for bypass and have no pending stub, clear the
* run state so we stop preloading the context with entries.
*/
- if (!ctx->stub && ((ctx->state & RDA_FD_BYPASS)
- || (priv->rda_cache_size > priv->rda_cache_limit)))
+ if (!ctx->stub &&
+ ((ctx->state & RDA_FD_BYPASS) ||
+ GF_ATOMIC_GET(priv->rda_cache_size) > priv->rda_cache_limit))
ctx->state &= ~RDA_FD_RUNNING;
if (!(ctx->state & RDA_FD_RUNNING)) {
@@ -667,6 +668,8 @@ init(xlator_t *this)
goto err;
this->private = priv;
+ GF_ATOMIC_INIT (priv->rda_cache_size, 0);
+
this->local_pool = mem_pool_new(struct rda_local, 32);
if (!this->local_pool)
goto err;
diff --git a/xlators/performance/readdir-ahead/src/readdir-ahead.h b/xlators/performance/readdir-ahead/src/readdir-ahead.h
index 8c663e091f1..dda1a69246c 100644
--- a/xlators/performance/readdir-ahead/src/readdir-ahead.h
+++ b/xlators/performance/readdir-ahead/src/readdir-ahead.h
@@ -44,7 +44,7 @@ struct rda_priv {
uint64_t rda_low_wmark;
uint64_t rda_high_wmark;
uint64_t rda_cache_limit;
- uint64_t rda_cache_size;
+ gf_atomic_t rda_cache_size;
};
#endif /* __READDIR_AHEAD_H */
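A note on what does and does not get the atomic treatment (an inference
from the surrounding code, not spelled out in the commit message):
ctx->cur_size keeps plain arithmetic because it is per-fd state accessed
under that fd's context lock, while priv->rda_cache_size is a single
counter shared by every fd on the xlator. The read in the cache-limit
check is converted as well, because a plain 64-bit load racing with atomic
writers is still a data race in C and can tear on 32-bit targets. A
compilable sketch of that ownership split, with hypothetical field names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

/* Per-fd state, loosely modelled on struct rda_fd_ctx: cur_size is
 * only touched while holding this fd's lock, so plain arithmetic on
 * it stays safe. */
struct fd_ctx_sketch {
        pthread_mutex_t lock;
        uint64_t        cur_size;
};

/* Xlator-wide state, loosely modelled on struct rda_priv: cache_size
 * is updated concurrently from every fd, so it alone needs to be
 * atomic; cache_limit is written once at init time and only read
 * afterwards. */
struct priv_sketch {
        uint64_t         cache_limit;
        _Atomic uint64_t cache_size;
};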