author    Shehjar Tikoo <shehjart@gluster.com>    2009-05-19 04:34:11 +0000
committer Anand V. Avati <avati@amp.gluster.com>  2009-05-19 21:13:22 +0530
commit    a9d0be4f1b78d9c03e9379fc5cea0ead6114c1d0 (patch)
tree      723227b21d59c39c922f2fe8d3ef287d05fef4ad
parent    25dc191c51efb97ec970b137edfe4557302b7357 (diff)
mem-pool: Restructure mem-pool behaviour
This commit changes mem-pool behaviour to return a directly usable address by performing the required adjustment on the address before returning it. This differs from the previous behaviour, where we tried to fit the list_head (two pointers) into the requested size as well. The new scheme is less space-efficient, but hopefully works better than not having any mem-pool at all. Besides, I am not comfortable with the mem-pool metadata and the caller-usable memory area being the same, because of the potential for corrupting the mem-pool's data structures.

PS: Please do read the comments in the code for more info during review.

Signed-off-by: Anand V. Avati <avati@amp.gluster.com>
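To make the new layout concrete, here is a minimal standalone sketch of the address adjustment this commit introduces. The names (chunk_head, chunkhead2ptr, ptr2chunkhead) and the 24-byte object size are illustrative stand-ins for struct list_head and the real mem_pool_chunkhead2ptr/mem_pool_ptr2chunkhead macros, not the actual GlusterFS code:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct list_head: two pointers, kept at the
 * front of every chunk so the pool can chain free chunks. */
struct chunk_head {
        struct chunk_head *next;
        struct chunk_head *prev;
};

#define PAD_BOUNDARY (sizeof (struct chunk_head))

/* Same arithmetic as the new macros: step past the header when
 * handing memory out, step back when taking it in. */
static void *
chunkhead2ptr (void *head)
{
        return (char *)head + PAD_BOUNDARY;
}

static void *
ptr2chunkhead (void *ptr)
{
        return (char *)ptr - PAD_BOUNDARY;
}

int
main (void)
{
        unsigned long sizeof_type = 24;  /* caller's object size */
        unsigned long padded_sizeof_type = sizeof_type + PAD_BOUNDARY;

        void *chunk = calloc (1, padded_sizeof_type);
        if (!chunk)
                return 1;

        void *usable = chunkhead2ptr (chunk);   /* what mem_get returns */
        void *head   = ptr2chunkhead (usable);  /* what mem_put recovers */

        printf ("chunk=%p usable=%p head=%p\n", chunk, usable, head);
        free (chunk);
        return 0;
}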
-rw-r--r--  libglusterfs/src/mem-pool.c  85
-rw-r--r--  libglusterfs/src/mem-pool.h   1
2 files changed, 55 insertions, 31 deletions
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index 5e385c85674..56a4a1dc29f 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -23,6 +23,8 @@
#define GF_MEM_POOL_PAD_BOUNDARY (sizeof(struct list_head))
+#define mem_pool_chunkhead2ptr(head) ((head) + GF_MEM_POOL_PAD_BOUNDARY)
+#define mem_pool_ptr2chunkhead(ptr) ((ptr) - GF_MEM_POOL_PAD_BOUNDARY)
struct mem_pool *
@@ -30,7 +32,6 @@ mem_pool_new_fn (unsigned long sizeof_type,
unsigned long count)
{
struct mem_pool *mem_pool = NULL;
- int pad = 0;
unsigned long padded_sizeof_type = 0;
void *pool = NULL;
int i = 0;
@@ -40,10 +41,7 @@ mem_pool_new_fn (unsigned long sizeof_type,
gf_log ("mem-pool", GF_LOG_ERROR, "invalid argument");
return NULL;
}
-
- pad = GF_MEM_POOL_PAD_BOUNDARY -
- (sizeof_type % GF_MEM_POOL_PAD_BOUNDARY);
- padded_sizeof_type = sizeof_type + pad;
+ padded_sizeof_type = sizeof_type + GF_MEM_POOL_PAD_BOUNDARY;
mem_pool = CALLOC (sizeof (*mem_pool), 1);
if (!mem_pool)
@@ -54,21 +52,22 @@ mem_pool_new_fn (unsigned long sizeof_type,
mem_pool->padded_sizeof_type = padded_sizeof_type;
mem_pool->cold_count = count;
+ mem_pool->real_sizeof_type = sizeof_type;
- pool = CALLOC (count, sizeof_type + pad);
+ pool = CALLOC (count, padded_sizeof_type);
if (!pool) {
FREE (mem_pool);
return NULL;
}
for (i = 0; i < count; i++) {
- list = pool + (i * (sizeof_type + pad));
+ list = pool + (i * (padded_sizeof_type));
INIT_LIST_HEAD (list);
list_add_tail (list, &mem_pool->list);
}
mem_pool->pool = pool;
- mem_pool->pool_end = pool + (count * (sizeof_type + pad));
+ mem_pool->pool_end = pool + (count * (padded_sizeof_type));
return mem_pool;
}
@@ -95,23 +94,37 @@ mem_get (struct mem_pool *mem_pool)
mem_pool->cold_count--;
ptr = list;
- }
- }
- UNLOCK (&mem_pool->lock);
-
- if (ptr == NULL) {
- ptr = MALLOC (mem_pool->padded_sizeof_type);
-
- if (!ptr) {
- return NULL;
+ goto fwd_addr_out;
}
- LOCK (&mem_pool->lock);
- {
- mem_pool->hot_count ++;
- }
- UNLOCK (&mem_pool->lock);
+ /* This is a problem area. If we've run out of
+ * chunks in our slab above, we need to allocate
+ * enough memory to service this request.
+ * The problem is, these individual chunks will fail
+ * the first address range check in __is_member. Now, since
+ * we're not allocating a full second slab, we won't have
+ * enough info to perform the range check in __is_member.
+ *
+ * I am working around this by performing a regular allocation,
+ * just the way the caller would've done when not using the
+ * mem-pool. That also means we're not padding the size with
+ * the list_head structure because, this will not be added to
+ * the list of chunks that belong to the mem-pool allocated
+ * initially.
+ *
+ * This is the best we can do without adding functionality for
+ * managing multiple slabs. That does not interest us at present,
+ * because it is too much work given that a better slab
+ * allocator is coming RSN.
+ */
+ ptr = MALLOC (mem_pool->real_sizeof_type);
+ if (!ptr)
+ goto unlocked_out;
}
+fwd_addr_out:
+ ptr = mem_pool_chunkhead2ptr (ptr);
+unlocked_out:
+ UNLOCK (&mem_pool->lock);
return ptr;
}
@@ -128,7 +141,8 @@ __is_member (struct mem_pool *pool, void *ptr)
if (ptr < pool->pool || ptr >= pool->pool_end)
return 0;
- if ((ptr - pool->pool) % pool->padded_sizeof_type)
+ if ((mem_pool_ptr2chunkhead (ptr) - pool->pool)
+ % pool->padded_sizeof_type)
return -1;
return 1;
@@ -145,24 +159,36 @@ mem_put (struct mem_pool *pool, void *ptr)
return;
}
- list = ptr;
-
LOCK (&pool->lock);
{
- pool->hot_count--;
switch (__is_member (pool, ptr))
{
case 1:
+ list = mem_pool_ptr2chunkhead (ptr);
+ pool->hot_count--;
pool->cold_count++;
list_add (list, &pool->list);
break;
case -1:
- /* log error */
+ /* For some reason, the address given is within
+ * the address range of the mem-pool but does not align
+ * with the expected start of a chunk, which includes
+ * the list header as well. Sounds like a problem in
+ * the layers of clouds up above us. ;)
+ */
abort ();
break;
case 0:
- free (ptr);
+ /* The address is outside the range of the mem-pool. We
+ * assume here that this address was allocated in mem_get
+ * at a point when the mem-pool was out of chunks, or that
+ * the programmer made a mistake by calling the wrong
+ * de-allocation interface. We do not have enough info to
+ * distinguish between the two situations.
+ */
+ FREE (ptr);
break;
default:
/* log error */
@@ -170,7 +196,4 @@ mem_put (struct mem_pool *pool, void *ptr)
}
}
UNLOCK (&pool->lock);
-
- if (ptr)
- free (ptr);
}
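The mem_put switch above hinges on the three-way result of __is_member. The following self-contained restatement uses illustrative names and sizes (a 16-byte header, 40-byte padded chunks, a four-chunk slab), not the actual GlusterFS structures:

/* Illustrative pool descriptor; the real struct mem_pool carries
 * more fields (lock, counters, free list). */
struct pool {
        char          *start;        /* first byte of the slab */
        char          *end;          /* one past the last byte */
        unsigned long  pad;          /* size of the per-chunk list header */
        unsigned long  padded_size;  /* pad + caller-visible object size */
};

/* Mirrors the patched __is_member: the bounds check uses the
 * caller's pointer directly, the alignment check walks it back
 * to the chunk head first.
 *   1  -> a valid chunk inside the slab; put it back on the free list
 *   0  -> outside the slab: either a fallback MALLOC from mem_get
 *         or a caller error, so it is handed to FREE
 *  -1  -> inside the slab but misaligned, i.e. corruption: abort */
static int
is_member (struct pool *p, void *ptr)
{
        char *c = ptr;

        if (c < p->start || c >= p->end)
                return 0;

        if (((c - p->pad) - p->start) % p->padded_size)
                return -1;

        return 1;
}

int
main (void)
{
        char slab[4 * 40];
        struct pool p = { slab, slab + sizeof (slab), 16, 40 };

        /* A caller pointer 16 bytes into the first chunk is a member. */
        return is_member (&p, slab + 16) == 1 ? 0 : 1;
}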
diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h
index 492e868bc83..627945e1fe6 100644
--- a/libglusterfs/src/mem-pool.h
+++ b/libglusterfs/src/mem-pool.h
@@ -41,6 +41,7 @@ struct mem_pool {
unsigned long padded_sizeof_type;
void *pool;
void *pool_end;
+ int real_sizeof_type;
};
struct mem_pool *
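Finally, the space cost the commit message concedes can be read straight off the two sizing formulas in mem_pool_new_fn. A small sketch with a made-up 24-byte object on a 64-bit build, where struct list_head is two pointers (16 bytes):

#include <stdio.h>

#define PAD_BOUNDARY 16UL  /* sizeof (struct list_head) on 64-bit */

int
main (void)
{
        unsigned long sizeof_type = 24;  /* hypothetical object size */

        /* Old scheme: round the object size up to the next
         * list_head boundary; header and caller data overlap. */
        unsigned long pad = PAD_BOUNDARY - (sizeof_type % PAD_BOUNDARY);
        printf ("old padded size: %lu\n", sizeof_type + pad);           /* 32 */

        /* New scheme: always reserve a full header in front of the
         * object; bigger chunks, but metadata and caller data never
         * share bytes. */
        printf ("new padded size: %lu\n", sizeof_type + PAD_BOUNDARY);  /* 40 */

        return 0;
}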