Diffstat (limited to 'xlators/performance/write-behind/src/write-behind.c')
-rw-r--r--  xlators/performance/write-behind/src/write-behind.c  |  69
1 file changed, 45 insertions(+), 24 deletions(-)
diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c
index 98b2f4639d3..00cfca016e6 100644
--- a/xlators/performance/write-behind/src/write-behind.c
+++ b/xlators/performance/write-behind/src/write-behind.c
@@ -226,7 +226,7 @@ out:
}
static void
-wb_set_invalidate(wb_inode_t *wb_inode, int set)
+wb_set_invalidate(wb_inode_t *wb_inode)
{
int readdirps = 0;
inode_t *parent_inode = NULL;
@@ -240,21 +240,21 @@ wb_set_invalidate(wb_inode_t *wb_inode, int set)
LOCK(&wb_parent_inode->lock);
{
readdirps = GF_ATOMIC_GET(wb_parent_inode->readdirps);
- if (readdirps && set) {
- GF_ATOMIC_SWAP(wb_inode->invalidate, 1);
- list_del_init(&wb_inode->invalidate_list);
+ if (readdirps && list_empty(&wb_inode->invalidate_list)) {
+ inode_ref(wb_inode->inode);
+ GF_ATOMIC_INIT(wb_inode->invalidate, 1);
list_add(&wb_inode->invalidate_list,
&wb_parent_inode->invalidate_list);
- } else if (readdirps == 0) {
- GF_ATOMIC_SWAP(wb_inode->invalidate, 0);
- list_del_init(&wb_inode->invalidate_list);
}
}
UNLOCK(&wb_parent_inode->lock);
} else {
- GF_ATOMIC_SWAP(wb_inode->invalidate, 0);
+ GF_ATOMIC_INIT(wb_inode->invalidate, 0);
}
+ if (parent_inode)
+ inode_unref(parent_inode);
+
return;
}
@@ -718,6 +718,10 @@ wb_inode_destroy(wb_inode_t *wb_inode)
{
GF_VALIDATE_OR_GOTO("write-behind", wb_inode, out);
+ GF_ASSERT(list_empty(&wb_inode->todo));
+ GF_ASSERT(list_empty(&wb_inode->liability));
+ GF_ASSERT(list_empty(&wb_inode->temptation));
+
LOCK_DESTROY(&wb_inode->lock);
GF_FREE(wb_inode);
out:
@@ -967,8 +971,7 @@ __wb_modify_write_request(wb_request_t *req, int synced_size)
vector = req->stub->args.vector;
count = req->stub->args.count;
- req->stub->args.count = iov_subset(vector, count, synced_size,
- iov_length(vector, count), vector);
+ req->stub->args.count = iov_skip(vector, count, synced_size);
out:
return;
@@ -1092,7 +1095,7 @@ wb_fulfill_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
* In the above scenario, stat for the file is sent back in readdirp
* response but it is stale.
* </comment> */
- wb_set_invalidate(wb_inode, 1);
+ wb_set_invalidate(wb_inode);
if (op_ret == -1) {
wb_fulfill_err(head, op_errno);
@@ -1280,14 +1283,14 @@ __wb_pick_unwinds(wb_inode_t *wb_inode, list_head_t *lies)
wb_inode->window_current += req->orig_size;
+ wb_inode->gen++;
+
if (!req->ordering.fulfilled) {
/* burden increased */
list_add_tail(&req->lie, &wb_inode->liability);
req->ordering.lied = 1;
- wb_inode->gen++;
-
uuid_utoa_r(req->gfid, gfid);
gf_msg_debug(wb_inode->this->name, 0,
"(unique=%" PRIu64
@@ -1519,6 +1522,10 @@ __wb_handle_failed_conflict(wb_request_t *req, wb_request_t *conflict,
*/
req->op_ret = -1;
req->op_errno = conflict->op_errno;
+ if ((req->stub->fop == GF_FOP_TRUNCATE) ||
+ (req->stub->fop == GF_FOP_FTRUNCATE)) {
+ req->stub->frame->local = NULL;
+ }
list_del_init(&req->todo);
list_add_tail(&req->winds, tasks);
@@ -1809,6 +1816,12 @@ wb_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
frame->local = NULL;
wb_inode = req->wb_inode;
+ LOCK(&req->wb_inode->lock);
+ {
+ list_del_init(&req->wip);
+ }
+ UNLOCK(&req->wb_inode->lock);
+
wb_request_unref(req);
/* requests could be pending while this was in progress */
@@ -1928,6 +1941,9 @@ wb_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
unwind:
STACK_UNWIND_STRICT(readv, frame, -1, ENOMEM, NULL, 0, NULL, NULL, NULL);
+
+ if (stub)
+ call_stub_destroy(stub);
return 0;
noqueue:
@@ -2010,6 +2026,9 @@ wb_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
unwind:
STACK_UNWIND_STRICT(flush, frame, -1, ENOMEM, NULL);
+ if (stub)
+ call_stub_destroy(stub);
+
return 0;
noqueue:
@@ -2053,6 +2072,8 @@ wb_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
unwind:
STACK_UNWIND_STRICT(fsync, frame, -1, op_errno, NULL, NULL, NULL);
+ if (stub)
+ call_stub_destroy(stub);
return 0;
noqueue:
@@ -2468,7 +2489,7 @@ wb_mark_readdirp_start(xlator_t *this, inode_t *directory)
wb_directory_inode = wb_inode_create(this, directory);
- if (!wb_directory_inode || !wb_directory_inode->lock.spinlock)
+ if (!wb_directory_inode)
return;
LOCK(&wb_directory_inode->lock);
@@ -2488,7 +2509,7 @@ wb_mark_readdirp_end(xlator_t *this, inode_t *directory)
wb_directory_inode = wb_inode_ctx_get(this, directory);
- if (!wb_directory_inode || !wb_directory_inode->lock.spinlock)
+ if (!wb_directory_inode)
return;
LOCK(&wb_directory_inode->lock);
@@ -2502,7 +2523,8 @@ wb_mark_readdirp_end(xlator_t *this, inode_t *directory)
invalidate_list)
{
list_del_init(&wb_inode->invalidate_list);
- GF_ATOMIC_SWAP(wb_inode->invalidate, 0);
+ GF_ATOMIC_INIT(wb_inode->invalidate, 0);
+ inode_unref(wb_inode->inode);
}
}
unlock:
@@ -2544,16 +2566,19 @@ wb_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
entry->inode = NULL;
memset(&entry->d_stat, 0, sizeof(entry->d_stat));
-
- inode_unref(inode);
}
}
UNLOCK(&wb_inode->lock);
+
+ if (inode) {
+ inode_unref(inode);
+ inode = NULL;
+ }
}
+unwind:
wb_mark_readdirp_end(this, fd->inode);
-unwind:
frame->local = NULL;
STACK_UNWIND_STRICT(readdirp, frame, op_ret, op_errno, entries, xdata);
return 0;
@@ -2804,11 +2829,7 @@ wb_forget(xlator_t *this, inode_t *inode)
if (!wb_inode)
return 0;
- GF_ASSERT(list_empty(&wb_inode->todo));
- GF_ASSERT(list_empty(&wb_inode->liability));
- GF_ASSERT(list_empty(&wb_inode->temptation));
-
- GF_FREE(wb_inode);
+ wb_inode_destroy(wb_inode);
return 0;
}
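
The hunks above change wb_set_invalidate() and wb_mark_readdirp_end() so that an inode linked onto its parent's invalidate_list holds an extra reference for as long as it stays on that list: inode_ref() is taken when the entry is added (guarded by list_empty() so it happens at most once), and inode_unref() is dropped when the list is drained. The standalone sketch below is not GlusterFS code; it uses invented names and plain C to illustrate that ownership rule (ref on link, unref on unlink, idempotent linking).

#include <assert.h>
#include <stdio.h>

struct node {
    int refcount;            /* simplified stand-in for inode_ref/inode_unref */
    int invalidate;          /* simplified stand-in for the GF_ATOMIC_* flag  */
    struct node *next_inval; /* singly linked "invalidate" list               */
    int on_list;
};

struct parent {
    struct node *invalidate_head;
};

static void node_ref(struct node *n)   { n->refcount++; }
static void node_unref(struct node *n) { assert(n->refcount > 0); n->refcount--; }

/* Analogous to the "readdirps && list_empty(...)" branch: link once, ref once. */
static void mark_invalid(struct parent *p, struct node *n)
{
    if (n->on_list)
        return;              /* already linked: take no extra reference */
    node_ref(n);             /* the list now owns one reference         */
    n->invalidate = 1;
    n->next_inval = p->invalidate_head;
    p->invalidate_head = n;
    n->on_list = 1;
}

/* Analogous to draining the list in wb_mark_readdirp_end(): unlink and unref. */
static void drain_invalid(struct parent *p)
{
    while (p->invalidate_head) {
        struct node *n = p->invalidate_head;
        p->invalidate_head = n->next_inval;
        n->next_inval = NULL;
        n->on_list = 0;
        n->invalidate = 0;
        node_unref(n);       /* pairs with node_ref() in mark_invalid() */
    }
}

int main(void)
{
    struct parent dir = { 0 };
    struct node file = { .refcount = 1 };   /* caller's own reference */

    mark_invalid(&dir, &file);
    mark_invalid(&dir, &file);              /* idempotent: still one list reference */
    printf("refcount while listed: %d\n", file.refcount);  /* prints 2 */

    drain_invalid(&dir);
    printf("refcount after drain:  %d\n", file.refcount);  /* prints 1 */
    return 0;
}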