| author | Amar Tumballi <amarts@redhat.com> | 2019-01-04 07:04:50 +0000 |
|---|---|---|
| committer | Amar Tumballi <amarts@redhat.com> | 2019-01-08 11:16:03 +0000 |
| commit | 37653efdc7681d1b0f255054ec2f9c9ddd4c8b14 (patch) | |
| tree | 61688051f8f374ea6a1b661ef357844a477361f6 /libglusterfs/src/glusterfs/iobuf.h | |
| parent | 054c7ea91603acfcb01db8455b25dda7e5e831b2 (diff) | |
Revert "iobuf: Get rid of pre allocated iobuf_pool and use per thread mem pool"
This reverts commit b87c397091bac6a4a6dec4e45a7671fad4a11770.
There seems to be a performance regression with the patch, hence it is recommended to revert it.
Updates: #325
Change-Id: Id85d6203173a44fad6cf51d39b3e96f37afcec09
Diffstat (limited to 'libglusterfs/src/glusterfs/iobuf.h')
| -rw-r--r-- | libglusterfs/src/glusterfs/iobuf.h | 111 |
1 file changed, 97 insertions, 14 deletions
```diff
diff --git a/libglusterfs/src/glusterfs/iobuf.h b/libglusterfs/src/glusterfs/iobuf.h
index 7516bc8034b..6de0f13ae36 100644
--- a/libglusterfs/src/glusterfs/iobuf.h
+++ b/libglusterfs/src/glusterfs/iobuf.h
@@ -17,38 +17,118 @@
 #include <sys/mman.h>
 #include <sys/uio.h>
 
+#define GF_VARIABLE_IOBUF_COUNT 32
+
+#define GF_RDMA_DEVICE_COUNT 8
+
+/* Lets try to define the new anonymous mapping
+ * flag, in case the system is still using the
+ * now deprecated MAP_ANON flag.
+ *
+ * Also, this should ideally be in a centralized/common
+ * header which can be used by other source files also.
+ */
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
 #define GF_ALIGN_BUF(ptr, bound)                                               \
     ((void *)((unsigned long)(ptr + bound - 1) & (unsigned long)(~(bound - 1))))
 
 #define GF_IOBUF_ALIGN_SIZE 512
 
-#define GF_IOBUF_DEFAULT_PAGE_SIZE (128 * GF_UNIT_KB)
-
 /* one allocatable unit for the consumers of the IOBUF API */
 /* each unit hosts @page_size bytes of memory */
 struct iobuf;
 
+/* one region of memory mapped from the operating system */
+/* each region MMAPs @arena_size bytes of memory */
+/* each arena hosts @arena_size / @page_size IOBUFs */
+struct iobuf_arena;
+
 /* expandable and contractable pool of memory, internally broken into arenas */
 struct iobuf_pool;
 
+struct iobuf_init_config {
+    size_t pagesize;
+    int32_t num_pages;
+};
+
 struct iobuf {
-    gf_boolean_t stdalloc; /* indicates whether iobuf is allocated from
-                              mem pool or standard alloc*/
-    gf_lock_t lock;        /* for ->ptr and ->ref */
-    gf_atomic_t ref;       /* 0 == passive, >0 == active */
+    union {
+        struct list_head list;
+        struct {
+            struct iobuf *next;
+            struct iobuf *prev;
+        };
+    };
+    struct iobuf_arena *iobuf_arena;
+
+    gf_lock_t lock;  /* for ->ptr and ->ref */
+    gf_atomic_t ref; /* 0 == passive, >0 == active */
 
     void *ptr; /* usable memory region by the consumer */
 
-    void *free_ptr;                /* in case of stdalloc, this is the
-                                      one to be freed */
-    size_t page_size;              /* iobuf's page size */
-    struct iobuf_pool *iobuf_pool; /* iobuf_pool iobuf is associated with */
+    void *free_ptr; /* in case of stdalloc, this is the
+                       one to be freed */
+};
+
+struct iobuf_arena {
+    union {
+        struct list_head list;
+        struct {
+            struct iobuf_arena *next;
+            struct iobuf_arena *prev;
+        };
+    };
+
+    struct list_head all_list;
+    size_t page_size; /* size of all iobufs in this arena */
+    size_t arena_size;
+    /* this is equal to rounded_size * num_iobufs.
+       (rounded_size comes with gf_iobuf_get_pagesize().) */
+    size_t page_count;
+
+    struct iobuf_pool *iobuf_pool;
+
+    void *mem_base;
+    struct iobuf *iobufs; /* allocated iobufs list */
+
+    int active_cnt;
+    struct iobuf active; /* head node iobuf
+                            (unused by itself) */
+    int passive_cnt;
+    struct iobuf passive; /* head node iobuf
+                             (unused by itself) */
+    uint64_t alloc_cnt; /* total allocs in this pool */
+    int max_active;     /* max active buffers at a given time */
 };
 
 struct iobuf_pool {
-    gf_atomic_t mem_pool_hit;
-    gf_atomic_t mem_pool_miss;
-    gf_atomic_t active_cnt;
+    pthread_mutex_t mutex;
+    size_t arena_size;        /* size of memory region in
+                                 arena */
+    size_t default_page_size; /* default size of iobuf */
+
+    int arena_cnt;
+    struct list_head all_arenas;
+    struct list_head arenas[GF_VARIABLE_IOBUF_COUNT];
+    /* array of arenas. Each element of the array is a list of arenas
+       holding iobufs of particular page_size */
+
+    struct list_head filled[GF_VARIABLE_IOBUF_COUNT];
+    /* array of arenas without free iobufs */
+
+    struct list_head purge[GF_VARIABLE_IOBUF_COUNT];
+    /* array of arenas which can be purged */
+
+    uint64_t request_misses; /* mostly the requests for higher
+                                value of iobufs */
+    int rdma_device_count;
+    struct list_head *mr_list[GF_RDMA_DEVICE_COUNT];
+    void *device[GF_RDMA_DEVICE_COUNT];
+    int (*rdma_registration)(void **, void *);
+    int (*rdma_deregistration)(struct list_head **, struct iobuf_arena *);
 };
 
 struct iobuf_pool *
@@ -62,10 +142,13 @@ iobuf_unref(struct iobuf *iobuf);
 struct iobuf *
 iobuf_ref(struct iobuf *iobuf);
 void
+iobuf_pool_destroy(struct iobuf_pool *iobuf_pool);
+void
 iobuf_to_iovec(struct iobuf *iob, struct iovec *iov);
 
 #define iobuf_ptr(iob) ((iob)->ptr)
-#define iobuf_pagesize(iob) (iob->page_size)
+#define iobpool_default_pagesize(iobpool) ((iobpool)->default_page_size)
+#define iobuf_pagesize(iob) (iob->iobuf_arena->page_size)
 
 struct iobref {
     gf_lock_t lock;
```
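For context, the arena-based allocator restored by this revert hands out pages aligned to GF_IOBUF_ALIGN_SIZE using the GF_ALIGN_BUF macro that appears as unchanged context in the hunk above. Below is a minimal, self-contained sketch (plain C, not GlusterFS code; the malloc'ed buffer and the 8 KB page size are made up for illustration) of what that rounding computes:

```c
/* Standalone illustration of the GF_ALIGN_BUF rounding used by the
 * arena code: round a raw pointer up to the next `bound`-byte boundary. */
#include <stdio.h>
#include <stdlib.h>

#define GF_IOBUF_ALIGN_SIZE 512

#define GF_ALIGN_BUF(ptr, bound)                                               \
    ((void *)((unsigned long)(ptr + bound - 1) & (unsigned long)(~(bound - 1))))

int
main(void)
{
    /* Over-allocate by the alignment so the rounded-up pointer still lies
     * inside the allocation, mirroring how an arena keeps a raw mem_base
     * and hands out aligned iobuf pages. (Sizes here are hypothetical.) */
    size_t page_size = 8192;
    char *raw = malloc(page_size + GF_IOBUF_ALIGN_SIZE);
    if (!raw)
        return 1;

    void *aligned = GF_ALIGN_BUF(raw, GF_IOBUF_ALIGN_SIZE);

    printf("raw=%p aligned=%p (multiple of %d: %s)\n", (void *)raw, aligned,
           GF_IOBUF_ALIGN_SIZE,
           ((unsigned long)aligned % GF_IOBUF_ALIGN_SIZE) ? "no" : "yes");

    free(raw);
    return 0;
}
```

The bitmask trick only works when the bound is a power of two, which GF_IOBUF_ALIGN_SIZE (512) satisfies; over-allocating by the alignment is one common way to guarantee the aligned pointer remains within the buffer.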
