author     Raghavendra G <raghavendra@gluster.com>    2010-02-22 23:59:05 +0000
committer  Anand V. Avati <avati@dev.gluster.com>     2010-02-22 23:26:53 -0800
commit     fa598e1bbb33125f0463f4cfc382206fbabb352d (patch)
tree       5c466e240d76ba97c1673fb0b28f2793417b5e58 /xlators/performance
parent     b7c361c6c387fe858a51e65cbc6e3be0724bb088 (diff)
performance/write-behind: fix data corruption while aggregating adjacent contiguous iobufs into a single iobuf
- While aggregating, we should make sure that the destination has enough memory. __wb_collapse_write_bufs assumed that the destination vector's iov_base was aligned to the start of an iobuf and hence that page_size bytes of memory were available for aggregation. This assumption is not always true, for example in a configuration consisting of afr->write->io-cache (afr on top). Refer to the bug URL for more details.

Signed-off-by: Raghavendra G <raghavendra@gluster.com>
Signed-off-by: Anand V. Avati <avati@dev.gluster.com>
BUG: 542 (write-behind crashes)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=542
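The essence of the fix, as a simplified illustration: before any data is appended to a holder request, the holder's existing payload is first copied into a freshly allocated, page-sized buffer that the holder owns, so every later append targets memory of a known size. The sketch below renders that idea in plain libc; the type wb_holder_t and the helpers ensure_private_page()/append_write() are hypothetical names introduced here, and the real patch uses the GlusterFS iobuf/iobref machinery rather than malloc.

    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    #define WB_PAGE_SIZE 4096

    /* Hypothetical, simplified stand-in for the write-behind "holder" request. */
    typedef struct {
            struct iovec vector[1];   /* aggregated data lives in vector[0]     */
            size_t       write_size;  /* bytes currently held in vector[0]      */
            int          virgin;      /* 1 until the holder owns a private page */
    } wb_holder_t;

    /*
     * Give the holder a private, page-sized buffer and copy its current
     * contents into it.  Without this step, appending beyond write_size can
     * overrun a source buffer that is smaller than a full page -- the
     * corruption this patch fixes.
     */
    static int
    ensure_private_page (wb_holder_t *holder)
    {
            char *page = NULL;

            if (!holder->virgin)
                    return 0;         /* already backed by a full page */

            page = malloc (WB_PAGE_SIZE);
            if (page == NULL)
                    return -1;

            memcpy (page, holder->vector[0].iov_base, holder->write_size);
            holder->vector[0].iov_base = page;
            holder->virgin = 0;

            return 0;
    }

    /* Append a contiguous write into the holder, mirroring the copy step of
     * __wb_copy_into_holder (). */
    static int
    append_write (wb_holder_t *holder, const void *data, size_t size)
    {
            if (holder->write_size + size > WB_PAGE_SIZE)
                    return -1;        /* caller checks space_left first */

            if (ensure_private_page (holder) != 0)
                    return -1;

            memcpy ((char *) holder->vector[0].iov_base + holder->write_size,
                    data, size);
            holder->write_size += size;
            holder->vector[0].iov_len = holder->write_size;

            return 0;
    }

In the actual patch the private page comes from iobuf_get() on the translator's iobuf pool and is tracked through a new iobref attached to the holder's writev stub, and the "virgin" flag lives in request->flags.write_request, as shown in the diff below.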
Diffstat (limited to 'xlators/performance')
-rw-r--r--  xlators/performance/write-behind/src/write-behind.c  59
1 file changed, 55 insertions, 4 deletions
diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c
index f5227684c94..91651b80564 100644
--- a/xlators/performance/write-behind/src/write-behind.c
+++ b/xlators/performance/write-behind/src/write-behind.c
@@ -77,6 +77,7 @@ typedef struct wb_request {
char write_behind;
char stack_wound;
char got_reply;
+ char virgin;
}write_request;
struct {
@@ -236,6 +237,8 @@ wb_enqueue (wb_file_t *file, call_stub_t *stub)
request->write_size = iov_length (vector, count);
local->op_ret = request->write_size;
local->op_errno = 0;
+
+ request->flags.write_request.virgin = 1;
}
LOCK (&file->lock);
@@ -1653,10 +1656,51 @@ out:
}
-inline void
+inline int
__wb_copy_into_holder (wb_request_t *holder, wb_request_t *request)
{
- char *ptr = NULL;
+ char *ptr = NULL;
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ int ret = -1;
+
+ if (holder->flags.write_request.virgin) {
+ iobuf = iobuf_get (request->file->this->ctx->iobuf_pool);
+ if (iobuf == NULL) {
+ gf_log (request->file->this->name, GF_LOG_ERROR,
+ "out of memory");
+ goto out;
+ }
+
+ iobref = iobref_new ();
+ if (iobref == NULL) {
+ iobuf_unref (iobuf);
+ gf_log (request->file->this->name, GF_LOG_ERROR,
+ "out of memory");
+ goto out;
+ }
+
+ ret = iobref_add (iobref, iobuf);
+ if (ret != 0) {
+ iobuf_unref (iobuf);
+ iobref_unref (iobref);
+ gf_log (request->file->this->name, GF_LOG_DEBUG,
+ "cannot add iobuf (%p) into iobref (%p)",
+ iobuf, iobref);
+ goto out;
+ }
+
+ iov_unload (iobuf->ptr, holder->stub->args.writev.vector,
+ holder->stub->args.writev.count);
+ holder->stub->args.writev.vector[0].iov_base = iobuf->ptr;
+
+ iobref_unref (holder->stub->args.writev.iobref);
+ holder->stub->args.writev.iobref = iobref;
+
+ iobuf_unref (iobuf);
+
+ holder->flags.write_request.virgin = 0;
+ }
ptr = holder->stub->args.writev.vector[0].iov_base + holder->write_size;
@@ -1670,7 +1714,9 @@ __wb_copy_into_holder (wb_request_t *holder, wb_request_t *request)
request->flags.write_request.stack_wound = 1;
list_move_tail (&request->list, &request->file->passive_requests);
- return;
+ ret = 0;
+out:
+ return ret;
}
@@ -1681,6 +1727,7 @@ __wb_collapse_write_bufs (list_head_t *requests, size_t page_size)
off_t offset_expected = 0;
size_t space_left = 0;
wb_request_t *request = NULL, *tmp = NULL, *holder = NULL;
+ int ret = 0;
list_for_each_entry_safe (request, tmp, requests, list) {
if ((request->stub == NULL)
@@ -1707,7 +1754,11 @@ __wb_collapse_write_bufs (list_head_t *requests, size_t page_size)
space_left = page_size - holder->write_size;
if (space_left >= request->write_size) {
- __wb_copy_into_holder (holder, request);
+ ret = __wb_copy_into_holder (holder, request);
+ if (ret != 0) {
+ break;
+ }
+
__wb_request_unref (request);
} else {
holder = request;