path: root/xlators
diff options
authorRaghavendra G <>2009-11-25 13:52:56 +0000
committerAnand V. Avati <>2009-11-26 09:26:53 -0800
commit71eae4bd2f5384bd79c4e0bb6ac679841a8dc042 (patch)
treecdffc314dabe223fb0179ed807e78d4694e102fe /xlators
parentfc9a8a76001b5d304d4589eb28ee903972dffcbb (diff)
performance/stat-prefetch: don't free the cache in readdir if the offset is not the expected one.
- Cache creation is an expensive operation. Also, the cache will be freed in releasedir. Hence, just remove all entries from the cache without freeing the cache itself. However, this is not entirely true, since sp_cache_remove_entry frees the old table and reinitializes a new table if all entries are being removed. When the rbtree-based hash table provides an interface to remove all entries, sp_cache_remove_entry should be modified not to destroy the table. - This patch also fixes a race condition wherein the cache being used in lookup gets freed in readdir if the offset is not equal to the expected offset. Signed-off-by: Raghavendra G <> Signed-off-by: Anand V. Avati <> BUG: 405 (Segmentation fault in stat-prefetch.) URL:
Diffstat (limited to 'xlators')
1 file changed, 2 insertions, 5 deletions
diff --git a/xlators/performance/stat-prefetch/src/stat-prefetch.c b/xlators/performance/stat-prefetch/src/stat-prefetch.c
index 2261b8948..75a74b531 100644
--- a/xlators/performance/stat-prefetch/src/stat-prefetch.c
+++ b/xlators/performance/stat-prefetch/src/stat-prefetch.c
@@ -917,10 +917,7 @@ sp_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
cache = sp_get_cache_fd (this, fd);
if (cache) {
if (off != cache->expected_offset) {
- cache = sp_del_cache_fd (this, fd);
- if (cache) {
- sp_cache_free (cache);
- }
+ sp_cache_remove_entry (cache, NULL, 1);
@@ -928,7 +925,7 @@ sp_readdir (call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
if (ret == -1) {
goto unwind;
ret = sp_cache_remove_parent_entry (frame, this, path);
if (ret < 0) {
errno = -ret;