author    Niels de Vos <ndevos@redhat.com>    2017-08-04 16:29:51 +0200
committer Jeff Darcy <jeff@pl.atyp.us>        2017-08-28 12:46:16 +0000
commit    2645e730b79b44fc035170657e43bb52f3e855c5 (patch)
tree      21680ac3f87c74f60f6bb6454f39693fc0dcdef7 /libglusterfs/src/mem-pool.c
parent    c96cb9fc28e4358c5d7246ce77b676113a63ce85 (diff)
mem-pool: add tracking of mem_pool that requested the allocation
This renames the current 'struct mem_pool' to 'struct mem_pool_shared'.
The mem_pool_shared is globally allocated and not specific to particular
objects. A new 'struct mem_pool' gets allocated when mem_pool_new() is
called; it points to the mem_pool_shared that handles the actual
allocation requests. The 'struct mem_pool' is only used for accounting
of the objects that the caller requested and freed.

All of these changes will be used to collect all the memory pools a
glusterfs_ctx_t is consuming, so that statedumps can be collected per
context.

Updates: #307
Change-Id: I6355d3f0251c928e0bbfc71be3431307c6f3a3da
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: https://review.gluster.org/18073
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
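For orientation, here is a minimal sketch of the resulting split, reconstructed from the hunks below; only the fields visible in this diff are shown, and the authoritative definitions live in libglusterfs/src/mem-pool.h:

    /* Sketch only: reconstructed from the hunks below, not the full header.
     * 'struct mem_pool_shared' keeps the size-indexed allocation machinery
     * (it is the old 'struct mem_pool', including power_of_two); fields not
     * visible in this diff are elided. */
    struct mem_pool_shared;

    struct mem_pool {
            unsigned long           sizeof_type;  /* size the caller asked for */
            unsigned long           count;        /* requested object count */
            char                   *name;         /* name used for accounting/statedumps */
            struct mem_pool_shared *pool;         /* shared pool serving the allocations */
    };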
Diffstat (limited to 'libglusterfs/src/mem-pool.c')
-rw-r--r--  libglusterfs/src/mem-pool.c  60
1 file changed, 32 insertions(+), 28 deletions(-)
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index ba29137b176..8bd9779ce81 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -14,16 +14,6 @@
#include <stdlib.h>
#include <stdarg.h>
-#define GF_MEM_POOL_LIST_BOUNDARY (sizeof(struct list_head))
-#define GF_MEM_POOL_PTR (sizeof(struct mem_pool*))
-#define GF_MEM_POOL_PAD_BOUNDARY (GF_MEM_POOL_LIST_BOUNDARY + GF_MEM_POOL_PTR + sizeof(int))
-#define mem_pool_chunkhead2ptr(head) ((head) + GF_MEM_POOL_PAD_BOUNDARY)
-#define mem_pool_ptr2chunkhead(ptr) ((ptr) - GF_MEM_POOL_PAD_BOUNDARY)
-#define is_mem_chunk_in_use(ptr) (*ptr == 1)
-#define mem_pool_from_ptr(ptr) ((ptr) + GF_MEM_POOL_LIST_BOUNDARY)
-
-#define GLUSTERFS_ENV_MEM_ACCT_STR "GLUSTERFS_DISABLE_MEM_ACCT"
-
#include "unittest/unittest.h"
#include "libglusterfs-messages.h"
@@ -380,7 +370,7 @@ static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pool_threads;
static pthread_mutex_t pool_free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pool_free_threads;
-static struct mem_pool pools[NPOOLS];
+static struct mem_pool_shared pools[NPOOLS];
static size_t pool_list_size;
#if !defined(GF_DISABLE_MEMPOOL)
@@ -677,6 +667,8 @@ mem_pool_new_fn (unsigned long sizeof_type,
unsigned long count, char *name)
{
unsigned int i;
+ struct mem_pool *new = NULL;
+ struct mem_pool_shared *pool = NULL;
if (!sizeof_type) {
gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
@@ -686,13 +678,27 @@ mem_pool_new_fn (unsigned long sizeof_type,
for (i = 0; i < NPOOLS; ++i) {
if (sizeof_type <= AVAILABLE_SIZE(pools[i].power_of_two)) {
- return &pools[i];
+ pool = &pools[i];
+ break;
}
}
- gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
- LG_MSG_INVALID_ARG, "invalid argument");
- return NULL;
+ if (!pool) {
+ gf_msg_callingfn ("mem-pool", GF_LOG_ERROR, EINVAL,
+ LG_MSG_INVALID_ARG, "invalid argument");
+ return NULL;
+ }
+
+ new = GF_CALLOC (sizeof (struct mem_pool), 1, gf_common_mt_mem_pool);
+ if (!new)
+ return NULL;
+
+ new->sizeof_type = sizeof_type;
+ new->count = count;
+ new->name = name;
+ new->pool = pool;
+
+ return new;
}
void*
@@ -709,7 +715,7 @@ mem_get0 (struct mem_pool *mem_pool)
ptr = mem_get(mem_pool);
if (ptr) {
- memset (ptr, 0, AVAILABLE_SIZE(mem_pool->power_of_two));
+ memset (ptr, 0, AVAILABLE_SIZE(mem_pool->pool->power_of_two));
}
return ptr;
@@ -788,7 +794,7 @@ void *
mem_get (struct mem_pool *mem_pool)
{
#if defined(GF_DISABLE_MEMPOOL)
- return GF_CALLOC (1, AVAILABLE_SIZE (mem_pool->power_of_two),
+ return GF_CALLOC (1, AVAILABLE_SIZE (mem_pool->pool->power_of_two),
gf_common_mt_mem_pool);
#else
per_thread_pool_list_t *pool_list;
@@ -807,7 +813,7 @@ mem_get (struct mem_pool *mem_pool)
}
(void) pthread_spin_lock (&pool_list->lock);
- pt_pool = &pool_list->pools[mem_pool->power_of_two-POOL_SMALLEST];
+ pt_pool = &pool_list->pools[mem_pool->pool->power_of_two-POOL_SMALLEST];
retval = mem_get_from_pool (pt_pool);
(void) pthread_spin_unlock (&pool_list->lock);
@@ -816,9 +822,9 @@ mem_get (struct mem_pool *mem_pool)
}
retval->magic = GF_MEM_HEADER_MAGIC;
- retval->next = NULL;
- retval->pool_list = pool_list;;
- retval->power_of_two = mem_pool->power_of_two;
+ retval->pool = mem_pool;
+ retval->pool_list = pool_list;
+ retval->power_of_two = mem_pool->pool->power_of_two;
return retval + 1;
#endif /* GF_DISABLE_MEMPOOL */
@@ -861,14 +867,12 @@ mem_put (void *ptr)
void
mem_pool_destroy (struct mem_pool *pool)
{
- if (!pool)
- return;
+ GF_FREE (pool);
/*
- * Pools are now permanent, so this does nothing. Yes, this means we
- * can keep allocating from a pool after calling mem_destroy on it, but
- * that's kind of OK. All of the objects *in* the pool will eventually
- * be freed via the pool-sweeper thread, and this way we don't have to
- * add a lot of reference-counting complexity.
+ * Pools are now permanent, so the mem_pool->pool is kept around. All
+ * of the objects *in* the pool will eventually be freed via the
+ * pool-sweeper thread, and this way we don't have to add a lot of
+ * reference-counting complexity.
*/
}
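A hedged usage sketch of the API after this change, based only on the signatures visible above; 'my_obj_t' and 'use_pool' are hypothetical, and callers typically go through a mem_pool_new() wrapper rather than calling mem_pool_new_fn() directly:

    /* Usage sketch only; not taken from the tree. */
    #include "mem-pool.h"

    typedef struct { int id; } my_obj_t;   /* hypothetical caller-side type */

    static int
    use_pool (void)
    {
            struct mem_pool *pool = NULL;
            my_obj_t        *obj = NULL;

            /* allocates just the per-caller accounting struct; the matching
             * mem_pool_shared is looked up in the global pools[] table */
            pool = mem_pool_new_fn (sizeof (my_obj_t), 64, "my_obj_t");
            if (!pool)
                    return -1;

            obj = mem_get0 (pool);  /* zeroed object from the shared pool */
            if (obj)
                    mem_put (obj);  /* returns the object to the per-thread pool */

            /* after this patch: frees only the accounting struct; the shared
             * pool stays alive and the pool-sweeper reclaims idle objects */
            mem_pool_destroy (pool);
            return 0;
    }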