Diffstat (limited to 'libglusterfs/src')
 -rw-r--r--  libglusterfs/src/mem-pool.c  85
 -rw-r--r--  libglusterfs/src/mem-pool.h   1
 2 files changed, 55 insertions(+), 31 deletions(-)
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index 5e385c856..56a4a1dc2 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -23,6 +23,8 @@
#define GF_MEM_POOL_PAD_BOUNDARY (sizeof(struct list_head))
+#define mem_pool_chunkhead2ptr(head) ((head) + GF_MEM_POOL_PAD_BOUNDARY)
+#define mem_pool_ptr2chunkhead(ptr) ((ptr) - GF_MEM_POOL_PAD_BOUNDARY)
struct mem_pool *
@@ -30,7 +32,6 @@ mem_pool_new_fn (unsigned long sizeof_type,
unsigned long count)
{
struct mem_pool *mem_pool = NULL;
- int pad = 0;
unsigned long padded_sizeof_type = 0;
void *pool = NULL;
int i = 0;
@@ -40,10 +41,7 @@ mem_pool_new_fn (unsigned long sizeof_type,
gf_log ("mem-pool", GF_LOG_ERROR, "invalid argument");
return NULL;
}
-
- pad = GF_MEM_POOL_PAD_BOUNDARY -
- (sizeof_type % GF_MEM_POOL_PAD_BOUNDARY);
- padded_sizeof_type = sizeof_type + pad;
+ padded_sizeof_type = sizeof_type + GF_MEM_POOL_PAD_BOUNDARY;
mem_pool = CALLOC (sizeof (*mem_pool), 1);
if (!mem_pool)
@@ -54,21 +52,22 @@ mem_pool_new_fn (unsigned long sizeof_type,
mem_pool->padded_sizeof_type = padded_sizeof_type;
mem_pool->cold_count = count;
+ mem_pool->real_sizeof_type = sizeof_type;
- pool = CALLOC (count, sizeof_type + pad);
+ pool = CALLOC (count, padded_sizeof_type);
if (!pool) {
FREE (mem_pool);
return NULL;
}
for (i = 0; i < count; i++) {
- list = pool + (i * (sizeof_type + pad));
+ list = pool + (i * (padded_sizeof_type));
INIT_LIST_HEAD (list);
list_add_tail (list, &mem_pool->list);
}
mem_pool->pool = pool;
- mem_pool->pool_end = pool + (count * (sizeof_type + pad));
+ mem_pool->pool_end = pool + (count * (padded_sizeof_type));
return mem_pool;
}
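
For reference, the new macros plus the fixed padding amount to a constant-offset conversion between a chunk's embedded list header and the data area handed to callers. A minimal standalone sketch (the struct list_head stand-in and the char * casts are added here for illustration; the patch itself relies on void * arithmetic):

#include <stdio.h>

/* Stand-in for the kernel-style list head embedded at the front of a chunk. */
struct list_head {
        struct list_head *next;
        struct list_head *prev;
};

#define GF_MEM_POOL_PAD_BOUNDARY      (sizeof (struct list_head))
#define mem_pool_chunkhead2ptr(head)  ((void *)((char *)(head) + GF_MEM_POOL_PAD_BOUNDARY))
#define mem_pool_ptr2chunkhead(ptr)   ((void *)((char *)(ptr) - GF_MEM_POOL_PAD_BOUNDARY))

int
main (void)
{
        char  chunk[128];   /* stands in for one padded chunk in the slab */
        void *user = mem_pool_chunkhead2ptr (chunk);

        /* The caller sees the data area; the pool sees the chunk head. */
        printf ("pad = %zu bytes, round-trip ok = %d\n",
                GF_MEM_POOL_PAD_BOUNDARY,
                mem_pool_ptr2chunkhead (user) == (void *) chunk);
        return 0;
}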
@@ -95,23 +94,37 @@ mem_get (struct mem_pool *mem_pool)
mem_pool->cold_count--;
ptr = list;
- }
- }
- UNLOCK (&mem_pool->lock);
-
- if (ptr == NULL) {
- ptr = MALLOC (mem_pool->padded_sizeof_type);
-
- if (!ptr) {
- return NULL;
+ goto fwd_addr_out;
}
- LOCK (&mem_pool->lock);
- {
- mem_pool->hot_count ++;
- }
- UNLOCK (&mem_pool->lock);
+ /* This is a problem area. If we've run out of
+ * chunks in our slab above, we need to allocate
+ * enough memory to service this request.
+ * The problem is that these individual chunks will fail
+ * the first address-range check in __is_member. And since
+ * we're not allocating a full second slab, we won't have
+ * enough info to perform the range check in __is_member.
+ *
+ * I am working around this by performing a regular
+ * allocation, just the way the caller would have done when
+ * not using the mem-pool. That also means we're not padding
+ * the size with the list_head structure, because this chunk
+ * will not be added to the list of chunks that belong to the
+ * mem-pool allocated initially.
+ *
+ * This is the best we can do without adding functionality
+ * for managing multiple slabs. That does not interest us at
+ * present because it is too much work, knowing that a better
+ * slab allocator is coming RSN.
+ */
+ ptr = MALLOC (mem_pool->real_sizeof_type);
+ if (!ptr)
+ goto unlocked_out;
}
+fwd_addr_out:
+ ptr = mem_pool_chunkhead2ptr (ptr);
+unlocked_out:
+ UNLOCK (&mem_pool->lock);
return ptr;
}
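
Reduced to its essentials, the pointer handling the comment describes looks like this (a simplified stand-in: locking, the list bookkeeping and GlusterFS' MALLOC wrapper are elided, and PAD is an illustrative constant):

#include <stdlib.h>

#define PAD  (2 * sizeof (void *))   /* stands in for sizeof (struct list_head) */

/* Model of mem_get()'s two exit paths after this patch: a chunk carved from
 * the slab carries a list header up front and is returned offset past it
 * (fwd_addr_out), while an overflow allocation is a plain malloc of the
 * un-padded real_sizeof_type and is returned as-is (unlocked_out). */
void *
model_mem_get (char *cold_chunk_head, size_t real_sizeof_type)
{
        if (cold_chunk_head)
                return cold_chunk_head + PAD;   /* pool chunk: skip the header */

        return malloc (real_sizeof_type);       /* slab exhausted: un-padded */
}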
@@ -128,7 +141,8 @@ __is_member (struct mem_pool *pool, void *ptr)
if (ptr < pool->pool || ptr >= pool->pool_end)
return 0;
- if ((ptr - pool->pool) % pool->padded_sizeof_type)
+ if ((mem_pool_ptr2chunkhead (ptr) - pool->pool)
+ % pool->padded_sizeof_type)
return -1;
return 1;
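
The point of this change is that the caller's pointer sits GF_MEM_POOL_PAD_BOUNDARY bytes past the chunk head, so that offset has to be removed before the modulo test. A self-contained sketch with made-up sizes exercises the three return values:

#include <stdio.h>

#define PAD          16UL             /* illustrative sizeof (struct list_head) */
#define PADDED_SIZE  (PAD + 48UL)     /* illustrative padded_sizeof_type */
#define COUNT        4UL

static char pool[COUNT * PADDED_SIZE];

/* Mirrors __is_member() after the patch: 1 = pool chunk, 0 = outside the
 * slab, -1 = inside the slab but not on a chunk's data area. */
static int
is_member (char *ptr)
{
        char *pool_end = pool + sizeof (pool);

        if (ptr < pool || ptr >= pool_end)
                return 0;

        if (((ptr - PAD) - pool) % PADDED_SIZE)
                return -1;

        return 1;
}

int
main (void)
{
        char foreign[8];

        printf ("chunk 2 data area  : %d\n",
                is_member (pool + 2 * PADDED_SIZE + PAD));          /* 1 */
        printf ("misaligned pointer : %d\n",
                is_member (pool + PADDED_SIZE + PAD + 3));          /* -1 */
        printf ("foreign allocation : %d\n", is_member (foreign));  /* 0 */
        return 0;
}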
@@ -145,24 +159,36 @@ mem_put (struct mem_pool *pool, void *ptr)
return;
}
- list = ptr;
-
LOCK (&pool->lock);
{
- pool->hot_count--;
switch (__is_member (pool, ptr))
{
case 1:
+ list = mem_pool_ptr2chunkhead (ptr);
+ pool->hot_count--;
pool->cold_count++;
list_add (list, &pool->list);
break;
case -1:
- /* log error */
+ /* For some reason, the address given is within
+ * the address range of the mem-pool but does not fall
+ * on the expected start of a chunk's data area (the
+ * part that follows the list header). Sounds like a
+ * problem in layers of clouds up above us. ;)
+ */
abort ();
break;
case 0:
- free (ptr);
+ /* The address is outside the range of the mem-pool. We
+ * assume that it was either allocated in mem_get at a
+ * point when the mem-pool was out of chunks, or that the
+ * programmer made a mistake and called the wrong
+ * de-allocation interface. We do not have enough info
+ * to distinguish between the two situations.
+ */
+ FREE (ptr);
break;
default:
/* log error */
@@ -170,7 +196,4 @@ mem_put (struct mem_pool *pool, void *ptr)
}
}
UNLOCK (&pool->lock);
-
- if (ptr)
- free (ptr);
}
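
From the caller's side nothing changes: the pointer returned by mem_get() is the one passed back to mem_put(), which re-lists a pool chunk, aborts on a misaligned address, or FREEs an overflow allocation. A hedged usage sketch (struct my_frame is a hypothetical user type; error handling is trimmed):

#include "mem-pool.h"

struct my_frame {                 /* hypothetical pooled object */
        int   op;
        void *cookie;
};

void
frame_example (void)
{
        struct mem_pool *pool  = NULL;
        struct my_frame *frame = NULL;

        /* Pre-allocate 1024 padded chunks up front. */
        pool = mem_pool_new_fn (sizeof (struct my_frame), 1024);
        if (!pool)
                return;

        frame = mem_get (pool);   /* data pointer, list header already skipped */
        if (frame) {
                frame->op = 1;
                mem_put (pool, frame);   /* back to the cold list, or FREE()d if
                                          * it was an overflow allocation */
        }
}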
diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h
index 492e868bc..627945e1f 100644
--- a/libglusterfs/src/mem-pool.h
+++ b/libglusterfs/src/mem-pool.h
@@ -41,6 +41,7 @@ struct mem_pool {
unsigned long padded_sizeof_type;
void *pool;
void *pool_end;
+ int real_sizeof_type;
};
struct mem_pool *