From 9f8802e8334c283694e430b3b53eedfea77f858f Mon Sep 17 00:00:00 2001 From: Jan Musial Date: Tue, 9 Feb 2021 10:53:43 +0100 Subject: [PATCH 1/6] Decrease memory requirements for metadata io Magic child metadata request count (33) was deducted experimentally. Signed-off-by: Jan Musial --- src/metadata/metadata_io.c | 10 ++++++---- src/metadata/metadata_io.h | 27 +++++++++++++-------------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index 0d521c7..5073fa6 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -179,10 +179,11 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv, static void metadata_io_req_fill(struct metadata_io_request *m_req) { ocf_cache_t cache = m_req->cache; + struct metadata_io_request_asynch *a_req = m_req->asynch; int i; for (i = 0; i < m_req->count; i++) { - m_req->on_meta_fill(cache, m_req->data, + a_req->on_meta_fill(cache, m_req->data, m_req->page + i, m_req->context); } } @@ -190,10 +191,11 @@ static void metadata_io_req_fill(struct metadata_io_request *m_req) static void metadata_io_req_drain(struct metadata_io_request *m_req) { ocf_cache_t cache = m_req->cache; + struct metadata_io_request_asynch *a_req = m_req->asynch; int i; for (i = 0; i < m_req->count; i++) { - m_req->on_meta_drain(cache, m_req->data, + a_req->on_meta_drain(cache, m_req->data, m_req->page + i, m_req->context); } } @@ -389,6 +391,8 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, a_req->page = page; a_req->count = count; a_req->flags = flags; + a_req->on_meta_fill = io_hndl; + a_req->on_meta_drain = io_hndl; /* IO Requests initialization */ for (i = 0; i < req_count; i++) { @@ -397,8 +401,6 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, m_req->asynch = a_req; m_req->cache = cache; m_req->context = context; - m_req->on_meta_fill = io_hndl; - m_req->on_meta_drain = io_hndl; m_req->req.io_if = &metadata_io_restart_if; m_req->req.io_queue = queue; m_req->req.cache = cache; diff --git a/src/metadata/metadata_io.h b/src/metadata/metadata_io.h index 30123c4..7109ce2 100644 --- a/src/metadata/metadata_io.h +++ b/src/metadata/metadata_io.h @@ -45,22 +45,19 @@ struct metadata_io_request_asynch; * IO request context */ struct metadata_io_request { - ocf_cache_t cache; - void *context; - uint32_t page; - uint32_t count; - ocf_metadata_io_event_t on_meta_fill; - ocf_metadata_io_event_t on_meta_drain; - ctx_data_t *data; - int error; - struct metadata_io_request_asynch *asynch; - env_atomic finished; - struct ocf_request req; struct list_head list; + ocf_cache_t cache; + void *context; + ctx_data_t *data; + struct metadata_io_request_asynch *asynch; + env_atomic finished; + uint32_t page; + uint32_t count; + int error; }; -#define METADATA_IO_REQS_LIMIT 128 +#define METADATA_IO_REQS_LIMIT 33 /* * Asynchronous IO request context @@ -68,14 +65,16 @@ struct metadata_io_request { struct metadata_io_request_asynch { struct metadata_io_request reqs[METADATA_IO_REQS_LIMIT]; void *context; - int error; env_atomic req_remaining; env_atomic req_active; env_atomic req_current; + ocf_metadata_io_event_t on_meta_fill; + ocf_metadata_io_event_t on_meta_drain; + ocf_metadata_io_end_t on_complete; uint32_t page; uint32_t count; int flags; - ocf_metadata_io_end_t on_complete; + int error; }; void metadata_io_req_complete(struct metadata_io_request *m_req); From b47ef2c386670fe3e53e448d9a8edf10b84efead Mon Sep 17 00:00:00 2001 
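PATCH 1/6 above does two things: it hoists the on_meta_fill/on_meta_drain callbacks out of every child struct metadata_io_request into the shared struct metadata_io_request_asynch, and it shrinks METADATA_IO_REQS_LIMIT from 128 to the experimentally chosen 33. A minimal stand-alone sketch of that layout change (hypothetical stand-in types, not the real OCF structs) shows where the per-batch bytes are saved:

/* Sketch of the PATCH 1/6 layout change; meta_io_event_t, child_req and
 * parent_req are illustrative stand-ins, not OCF types. */
#include <stdint.h>
#include <stdio.h>

typedef int (*meta_io_event_t)(void *cache, void *data, uint32_t page,
		void *ctx);

struct parent_req;

/* Stand-in for struct metadata_io_request: no per-child callback pointers. */
struct child_req {
	struct parent_req *asynch;	/* shared context now owns the callbacks */
	uint32_t page;
	uint32_t count;
};

/* Stand-in for struct metadata_io_request_asynch after the patch. */
struct parent_req {
	meta_io_event_t on_meta_fill;	/* stored once for the whole batch */
	meta_io_event_t on_meta_drain;
	struct child_req reqs[33];	/* METADATA_IO_REQS_LIMIT after the patch */
};

int main(void)
{
	/* Removing two callback pointers from each child saves
	 * 2 * sizeof(meta_io_event_t) * 33 bytes per batch. */
	printf("child: %zu B, batch context: %zu B\n",
			sizeof(struct child_req), sizeof(struct parent_req));
	return 0;
}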
From: Jan Musial Date: Tue, 9 Feb 2021 10:55:05 +0100 Subject: [PATCH 2/6] Change vmalloc in metadata asynch io to kmalloc Vmalloc is very slow in comparison to kmalloc Signed-off-by: Jan Musial --- src/metadata/metadata_io.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index 5073fa6..697207b 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -301,7 +301,7 @@ void metadata_io_req_finalize(struct metadata_io_request *m_req) struct metadata_io_request_asynch *a_req = m_req->asynch; if (env_atomic_dec_return(&a_req->req_active) == 0) - env_vfree(a_req); + env_free(a_req); } static uint32_t metadata_io_max_page(ocf_cache_t cache) @@ -379,7 +379,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, if (count == 0) return 0; - a_req = env_vzalloc_flags(sizeof(*a_req), ENV_MEM_NOIO); + a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO); if (!a_req) return -OCF_ERR_NO_MEM; @@ -427,14 +427,14 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, compl_hndl(cache, context, a_req->error); if (env_atomic_dec_return(&a_req->req_active) == 0) - env_vfree(a_req); + env_free(a_req); return 0; err: while (i--) ctx_data_free(cache->owner, a_req->reqs[i].data); - env_vfree(a_req); + env_free(a_req); return -OCF_ERR_NO_MEM; } From 8e21aa6441c2d457a07b170c287e9d265285342e Mon Sep 17 00:00:00 2001 From: Jan Musial Date: Tue, 9 Feb 2021 11:05:48 +0100 Subject: [PATCH 3/6] Remove not needed req allocator size table Signed-off-by: Jan Musial --- src/ocf_request.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ocf_request.c b/src/ocf_request.c index a48f42e..c3ac31f 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -37,7 +37,6 @@ enum ocf_req_size { struct ocf_req_allocator { env_allocator *allocator[ocf_req_size_max]; - size_t size[ocf_req_size_max]; }; static inline size_t ocf_req_sizeof_map(struct ocf_request *req) @@ -67,6 +66,7 @@ int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx) int i; struct ocf_req_allocator *req; char name[ALLOCATOR_NAME_MAX] = { '\0' }; + ssize_t size; OCF_DEBUG_TRACE(cache); @@ -78,20 +78,20 @@ int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx) goto err; for (i = 0; i < ARRAY_SIZE(req->allocator); i++) { - req->size[i] = ocf_req_sizeof(1 << i); + size = ocf_req_sizeof(1 << i); if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT, (1 << i)) < 0) { goto err; } - req->allocator[i] = env_allocator_create(req->size[i], name); + req->allocator[i] = env_allocator_create(size, name); if (!req->allocator[i]) goto err; OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, " - "size = %lu", 1 << i, req->size[i]); + "size = %lu", 1 << i, size); } return 0; From c243ad3df09b27be8116c2cd9f7fd2a0334164e5 Mon Sep 17 00:00:00 2001 From: Jan Musial Date: Thu, 25 Feb 2021 14:30:55 +0100 Subject: [PATCH 4/6] Use mpool to allocate ocf_requests Signed-off-by: Jan Musial --- src/ocf_ctx_priv.h | 2 +- src/ocf_request.c | 124 ++++++--------------------------------------- src/ocf_request.h | 3 -- 3 files changed, 17 insertions(+), 112 deletions(-) diff --git a/src/ocf_ctx_priv.h b/src/ocf_ctx_priv.h index 6ebbe26..76df528 100644 --- a/src/ocf_ctx_priv.h +++ b/src/ocf_ctx_priv.h @@ -30,7 +30,7 @@ struct ocf_ctx { } core_pool; struct { - struct ocf_req_allocator *req; + struct env_mpool *req; } resources; }; diff --git a/src/ocf_request.c b/src/ocf_request.c index c3ac31f..5f9f272 100644 --- 
a/src/ocf_request.c +++ b/src/ocf_request.c @@ -32,11 +32,6 @@ enum ocf_req_size { ocf_req_size_32, ocf_req_size_64, ocf_req_size_128, - ocf_req_size_max, -}; - -struct ocf_req_allocator { - env_allocator *allocator[ocf_req_size_max]; }; static inline size_t ocf_req_sizeof_map(struct ocf_request *req) @@ -48,113 +43,31 @@ static inline size_t ocf_req_sizeof_map(struct ocf_request *req) return size; } -static inline size_t ocf_req_sizeof(uint32_t lines) -{ - size_t size = sizeof(struct ocf_request) + - (lines * sizeof(struct ocf_map_info)); - - ENV_BUG_ON(lines == 0); - return size; -} - -#define ALLOCATOR_NAME_FMT "ocf_req_%u" -/* Max number of digits in decimal representation of unsigned int is 10 */ -#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10) - int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx) { - int i; - struct ocf_req_allocator *req; - char name[ALLOCATOR_NAME_MAX] = { '\0' }; - ssize_t size; + ocf_ctx->resources.req = env_mpool_create(sizeof(struct ocf_request), + sizeof(struct ocf_map_info), ENV_MEM_NORMAL, ocf_req_size_128, + false, NULL, "ocf_req"); - OCF_DEBUG_TRACE(cache); - - ocf_ctx->resources.req = env_zalloc(sizeof(*(ocf_ctx->resources.req)), - ENV_MEM_NORMAL); - req = ocf_ctx->resources.req; - - if (!req) - goto err; - - for (i = 0; i < ARRAY_SIZE(req->allocator); i++) { - size = ocf_req_sizeof(1 << i); - - if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT, - (1 << i)) < 0) { - goto err; - } - - req->allocator[i] = env_allocator_create(size, name); - - if (!req->allocator[i]) - goto err; - - OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, " - "size = %lu", 1 << i, size); - } + if (ocf_ctx->resources.req == NULL) + return -1; return 0; - -err: - ocf_req_allocator_deinit(ocf_ctx); - return -1; } void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx) { - int i; - struct ocf_req_allocator *req; - - OCF_DEBUG_TRACE(cache); - - - if (!ocf_ctx->resources.req) - return; - - req = ocf_ctx->resources.req; - - for (i = 0; i < ARRAY_SIZE(req->allocator); i++) { - if (req->allocator[i]) { - env_allocator_destroy(req->allocator[i]); - req->allocator[i] = NULL; - } - } - - env_free(req); + env_mpool_destroy(ocf_ctx->resources.req); ocf_ctx->resources.req = NULL; } -static inline env_allocator *_ocf_req_get_allocator_1( - struct ocf_cache *cache) -{ - return cache->owner->resources.req->allocator[0]; -} - -static env_allocator *_ocf_req_get_allocator( - struct ocf_cache *cache, uint32_t count) -{ - struct ocf_ctx *ocf_ctx = cache->owner; - unsigned int idx = 31 - __builtin_clz(count); - - if (__builtin_ffs(count) <= idx) - idx++; - - ENV_BUG_ON(count == 0); - - if (idx >= ocf_req_size_max) - return NULL; - - return ocf_ctx->resources.req->allocator[idx]; -} - struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, uint64_t addr, uint32_t bytes, int rw) { uint64_t core_line_first, core_line_last, core_line_count; ocf_cache_t cache = queue->cache; struct ocf_request *req; - env_allocator *allocator; + bool map_allocated = true; if (likely(bytes)) { core_line_first = ocf_bytes_2_lines(cache, addr); @@ -166,17 +79,17 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, core_line_count = 1; } - allocator = _ocf_req_get_allocator(cache, core_line_count); - if (allocator) { - req = env_allocator_new(allocator); - } else { - req = env_allocator_new(_ocf_req_get_allocator_1(cache)); + req = env_mpool_new(cache->owner->resources.req, core_line_count); + if (!req) { + map_allocated = false; + req = 
env_mpool_new(cache->owner->resources.req, 1); } + if (unlikely(!req)) return NULL; - if (allocator) + if (map_allocated) req->map = req->__map; OCF_DEBUG_TRACE(cache); @@ -197,7 +110,6 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, req->core_line_first = core_line_first; req->core_line_last = core_line_last; req->core_line_count = core_line_count; - req->alloc_core_line_count = core_line_count; req->rw = rw; req->part_id = PARTITION_DEFAULT; @@ -282,7 +194,6 @@ void ocf_req_get(struct ocf_request *req) void ocf_req_put(struct ocf_request *req) { - env_allocator *allocator; ocf_queue_t queue = req->io_queue; if (env_atomic_dec_return(&req->ref_count)) @@ -293,13 +204,10 @@ void ocf_req_put(struct ocf_request *req) if (!req->d2c && req->io_queue != req->cache->mngt_queue) ocf_refcnt_dec(&req->cache->refcnt.metadata); - allocator = _ocf_req_get_allocator(req->cache, - req->alloc_core_line_count); - if (allocator) { - env_allocator_del(allocator, req); - } else { + if (!env_mpool_del(req->cache->owner->resources.req, req, + req->core_line_count)) { + env_mpool_del(req->cache->owner->resources.req, req, 1); env_free(req->map); - env_allocator_del(_ocf_req_get_allocator_1(req->cache), req); } ocf_queue_put(queue); diff --git a/src/ocf_request.h b/src/ocf_request.h index 5899cb6..007746a 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -166,9 +166,6 @@ struct ocf_request { uint32_t core_line_count; /*! Core line count */ - uint32_t alloc_core_line_count; - /*! Core line count for which request was initially allocated */ - int error; /*!< This filed indicates an error for OCF request */ From 2dc36657bfe42656bae73da0415c285546ad9e17 Mon Sep 17 00:00:00 2001 From: Jan Musial Date: Thu, 25 Feb 2021 14:31:17 +0100 Subject: [PATCH 5/6] Use mpool to allocate metadata_io requests Signed-off-by: Jan Musial --- src/metadata/metadata_io.c | 56 +++++++++++++++++++++++++++++++++++--- src/metadata/metadata_io.h | 16 ++++++++--- src/ocf_ctx.c | 10 ++++++- src/ocf_ctx_priv.h | 1 + 4 files changed, 74 insertions(+), 9 deletions(-) diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index 697207b..06377b5 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -45,6 +45,19 @@ struct metadata_io_read_i_atomic_context { void *priv; }; +enum ocf_mio_size { + ocf_mio_size_1 = 0, + ocf_mio_size_2, + ocf_mio_size_4, + ocf_mio_size_8, + ocf_mio_size_16, + ocf_mio_size_32, + ocf_mio_size_64, + ocf_mio_size_max, +}; + +#define METADATA_IO_REQS_LIMIT 128 + static void metadata_io_read_i_atomic_complete( struct metadata_io_read_i_atomic_context *context, int error) { @@ -301,7 +314,8 @@ void metadata_io_req_finalize(struct metadata_io_request *m_req) struct metadata_io_request_asynch *a_req = m_req->asynch; if (env_atomic_dec_return(&a_req->req_active) == 0) - env_free(a_req); + env_mpool_del(m_req->cache->owner->resources.mio, a_req, + a_req->alloc_req_count); } static uint32_t metadata_io_max_page(ocf_cache_t cache) @@ -375,11 +389,13 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count); uint32_t req_count = OCF_MIN(io_count, METADATA_IO_REQS_LIMIT); int i; + struct env_mpool *mio_allocator = cache->owner->resources.mio; if (count == 0) return 0; - a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO); + + a_req = env_mpool_new(mio_allocator, req_count); if (!a_req) return -OCF_ERR_NO_MEM; @@ -389,6 +405,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t 
queue, int dir, a_req->on_complete = compl_hndl; a_req->context = context; a_req->page = page; + a_req->alloc_req_count = req_count; a_req->count = count; a_req->flags = flags; a_req->on_meta_fill = io_hndl; @@ -427,14 +444,15 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, compl_hndl(cache, context, a_req->error); if (env_atomic_dec_return(&a_req->req_active) == 0) - env_free(a_req); + env_mpool_del(mio_allocator, a_req, req_count); return 0; err: while (i--) ctx_data_free(cache->owner, a_req->reqs[i].data); - env_free(a_req); + + env_mpool_del(mio_allocator, a_req, req_count); return -OCF_ERR_NO_MEM; } @@ -457,6 +475,36 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue, page, count, flags, drain_hndl, compl_hndl); } +#define MIO_RPOOL_LIMIT 16 +#define MIO_RPOOL_THRESHOLD ocf_mio_size_16 /* This is statically determined to + not exceed one page (4096B). + Change if apropriate. */ + +int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx) +{ + uint32_t limits[] = { + [0 ... MIO_RPOOL_THRESHOLD - 1] = -1, + [MIO_RPOOL_THRESHOLD ... ocf_mio_size_max - 1] = MIO_RPOOL_LIMIT + }; + + ocf_ctx->resources.mio = env_mpool_create( + sizeof(struct metadata_io_request_asynch), + sizeof(struct metadata_io_request), + ENV_MEM_NOIO, ocf_mio_size_max - 1, true, + limits, + "ocf_mio"); + if (ocf_ctx->resources.mio == NULL) + return -1; + + return 0; +} + +void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx) +{ + env_mpool_destroy(ocf_ctx->resources.mio); + ocf_ctx->resources.mio = NULL; +} + int ocf_metadata_io_init(ocf_cache_t cache) { return ocf_metadata_updater_init(cache); diff --git a/src/metadata/metadata_io.h b/src/metadata/metadata_io.h index 7109ce2..c26adf8 100644 --- a/src/metadata/metadata_io.h +++ b/src/metadata/metadata_io.h @@ -54,16 +54,12 @@ struct metadata_io_request { env_atomic finished; uint32_t page; uint32_t count; - int error; }; -#define METADATA_IO_REQS_LIMIT 33 - /* * Asynchronous IO request context */ struct metadata_io_request_asynch { - struct metadata_io_request reqs[METADATA_IO_REQS_LIMIT]; void *context; env_atomic req_remaining; env_atomic req_active; @@ -73,8 +69,10 @@ struct metadata_io_request_asynch { ocf_metadata_io_end_t on_complete; uint32_t page; uint32_t count; + uint32_t alloc_req_count; /*< Number of allocated metadata_io_requests */ int flags; int error; + struct metadata_io_request reqs[]; }; void metadata_io_req_complete(struct metadata_io_request *m_req); @@ -144,6 +142,16 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue, ocf_metadata_io_event_t drain_hndl, ocf_metadata_io_end_t compl_hndl); +/** + * Initialize ocf_ctx related structures of metadata_io (mpool). + */ +int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx); + +/** + * Deinitialize ocf_ctx related structures of metadata_io + */ +void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx); + /** * Function for initializing metadata io. 
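PATCH 5/6 above replaces the fixed reqs[METADATA_IO_REQS_LIMIT] array in struct metadata_io_request_asynch with a flexible array member and allocates the whole batch from an mpool sized by the actual request count (recorded in alloc_req_count); the limits array handed to env_mpool_create() (note the GCC ranged designated initializers) additionally caps rpool preallocation at MIO_RPOOL_LIMIT entries for orders of MIO_RPOOL_THRESHOLD and above. A rough sketch of the sizing idea, using hypothetical simplified types and plain calloc() in place of env_mpool_new(), is shown below:

/* Sketch of sizing a batch context by request count; child_req, parent_req
 * and parent_new() are illustrative, and calloc() stands in for the mpool. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct child_req {		/* stand-in for struct metadata_io_request */
	uint32_t page;
	uint32_t count;
};

struct parent_req {		/* stand-in for metadata_io_request_asynch */
	uint32_t alloc_req_count;	/* number of trailing child requests */
	struct child_req reqs[];	/* flexible array member, sized at alloc time */
};

static struct parent_req *parent_new(uint32_t req_count)
{
	/* env_mpool_new() would pick the smallest power-of-two bucket holding
	 * hdr_size + req_count * elem_size; calloc() models that size here. */
	struct parent_req *p = calloc(1,
			sizeof(*p) + (size_t)req_count * sizeof(struct child_req));

	if (p)
		p->alloc_req_count = req_count;
	return p;
}

int main(void)
{
	struct parent_req *small = parent_new(2);
	struct parent_req *large = parent_new(64);

	if (!small || !large) {
		free(small);
		free(large);
		return 1;
	}

	printf("2 reqs: %zu B, 64 reqs: %zu B\n",
			sizeof(*small) + 2 * sizeof(struct child_req),
			sizeof(*large) + 64 * sizeof(struct child_req));

	free(small);
	free(large);
	return 0;
}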
*/ diff --git a/src/ocf_ctx.c b/src/ocf_ctx.c index d2397b5..3256889 100644 --- a/src/ocf_ctx.c +++ b/src/ocf_ctx.c @@ -11,6 +11,7 @@ #include "ocf_logger_priv.h" #include "ocf_core_priv.h" #include "mngt/ocf_mngt_core_pool_priv.h" +#include "metadata/metadata_io.h" /* * @@ -177,6 +178,10 @@ int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg) if (ret) goto err_logger; + ret = ocf_metadata_io_ctx_init(ocf_ctx); + if (ret) + goto err_mio; + ret = ocf_core_volume_type_init(ocf_ctx); if (ret) goto err_utils; @@ -188,6 +193,8 @@ int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg) return 0; err_utils: + ocf_metadata_io_ctx_deinit(ocf_ctx); +err_mio: ocf_req_allocator_deinit(ocf_ctx); err_logger: ocf_logger_close(&ocf_ctx->logger); @@ -234,7 +241,8 @@ void ocf_ctx_put(ocf_ctx_t ctx) ocf_mngt_core_pool_deinit(ctx); ocf_ctx_unregister_volume_types(ctx); env_rmutex_destroy(&ctx->lock); - + + ocf_metadata_io_ctx_deinit(ctx); ocf_req_allocator_deinit(ctx); ocf_logger_close(&ctx->logger); env_free(ctx); diff --git a/src/ocf_ctx_priv.h b/src/ocf_ctx_priv.h index 76df528..9cba8a5 100644 --- a/src/ocf_ctx_priv.h +++ b/src/ocf_ctx_priv.h @@ -31,6 +31,7 @@ struct ocf_ctx { struct { struct env_mpool *req; + struct env_mpool *mio; } resources; }; From 8756fe121f2f5eb295cff1643503f14d8b0e879b Mon Sep 17 00:00:00 2001 From: Jan Musial Date: Mon, 8 Mar 2021 10:21:22 +0100 Subject: [PATCH 6/6] Add mpools to POSIX env Signed-off-by: Jan Musial --- env/posix/ocf_env.h | 4 ++ env/posix/utils_mpool.c | 146 ++++++++++++++++++++++++++++++++++++++++ env/posix/utils_mpool.h | 91 +++++++++++++++++++++++++ 3 files changed, 241 insertions(+) create mode 100644 env/posix/utils_mpool.c create mode 100644 env/posix/utils_mpool.h diff --git a/env/posix/ocf_env.h b/env/posix/ocf_env.h index 5294989..2434efa 100644 --- a/env/posix/ocf_env.h +++ b/env/posix/ocf_env.h @@ -34,6 +34,7 @@ #include "ocf_env_list.h" #include "ocf_env_headers.h" #include "ocf/ocf_err.h" +#include "utils_mpool.h" /* linux sector 512-bytes */ #define ENV_SECTOR_SHIFT 9 @@ -201,6 +202,9 @@ typedef struct _env_allocator env_allocator; env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...); +#define env_allocator_create_extended(size, fmt_name, limit) \ + env_allocator_create(size, fmt_name) + void env_allocator_destroy(env_allocator *allocator); void *env_allocator_new(env_allocator *allocator); diff --git a/env/posix/utils_mpool.c b/env/posix/utils_mpool.c new file mode 100644 index 0000000..2b04304 --- /dev/null +++ b/env/posix/utils_mpool.c @@ -0,0 +1,146 @@ +/* + * Copyright(c) 2012-2021 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "utils_mpool.h" +#include "ocf_env.h" + +struct env_mpool { + int mpool_max; + /*!< Max mpool allocation order */ + + env_allocator *allocator[env_mpool_max]; + /*!< OS handle to memory pool */ + + uint32_t hdr_size; + /*!< Data header size (constant allocation part) */ + + uint32_t elem_size; + /*!< Per element size increment (variable allocation part) */ + + bool fallback; + /*!< Should mpool fallback to vmalloc */ + + int flags; + /*!< Allocation flags */ +}; + +struct env_mpool *env_mpool_create(uint32_t hdr_size, uint32_t elem_size, + int flags, int mpool_max, bool fallback, + const uint32_t limits[env_mpool_max], + const char *name_perfix) +{ + uint32_t i; + char name[MPOOL_ALLOCATOR_NAME_MAX] = { '\0' }; + int result; + struct env_mpool *mpool; + size_t size; + + mpool = env_zalloc(sizeof(*mpool), ENV_MEM_NORMAL); + if 
(!mpool) + return NULL; + + mpool->flags = flags; + mpool->fallback = fallback; + mpool->mpool_max = mpool_max; + mpool->hdr_size = hdr_size; + mpool->elem_size = elem_size; + + for (i = 0; i < min(env_mpool_max, mpool_max + 1); i++) { + result = snprintf(name, sizeof(name), "%s_%u", name_perfix, + (1 << i)); + if (result < 0 || result >= sizeof(name)) + goto err; + + size = hdr_size + (elem_size * (1 << i)); + + mpool->allocator[i] = env_allocator_create_extended( + size, name, limits ? limits[i] : -1); + + if (!mpool->allocator[i]) + goto err; + } + + return mpool; + +err: + env_mpool_destroy(mpool); + return NULL; +} + +void env_mpool_destroy(struct env_mpool *mallocator) +{ + if (mallocator) { + uint32_t i; + + for (i = 0; i < env_mpool_max; i++) + if (mallocator->allocator[i]) + env_allocator_destroy(mallocator->allocator[i]); + + env_free(mallocator); + } +} + +static env_allocator *env_mpool_get_allocator( + struct env_mpool *mallocator, uint32_t count) +{ + unsigned int idx; + + if (unlikely(count == 0)) + return env_mpool_1; + + idx = 31 - __builtin_clz(count); + + if (__builtin_ffs(count) <= idx) + idx++; + + if (idx >= env_mpool_max || idx > mallocator->mpool_max) + return NULL; + + return mallocator->allocator[idx]; +} + +void *env_mpool_new_f(struct env_mpool *mpool, uint32_t count, int flags) +{ + void *items = NULL; + env_allocator *allocator; + size_t size = mpool->hdr_size + (mpool->elem_size * count); + + allocator = env_mpool_get_allocator(mpool, count); + + if (allocator) { + items = env_allocator_new(allocator); + } else if(mpool->fallback) { + items = env_zalloc(size, 0); + } + +#ifdef ZERO_OR_NULL_PTR + if (ZERO_OR_NULL_PTR(items)) + return NULL; +#endif + + return items; +} + +void *env_mpool_new(struct env_mpool *mpool, uint32_t count) +{ + return env_mpool_new_f(mpool, count, mpool->flags); +} + +bool env_mpool_del(struct env_mpool *mpool, + void *items, uint32_t count) +{ + env_allocator *allocator; + + allocator = env_mpool_get_allocator(mpool, count); + + if (allocator) + env_allocator_del(allocator, items); + else if (mpool->fallback) + env_free(items); + else + return false; + + return true; +} diff --git a/env/posix/utils_mpool.h b/env/posix/utils_mpool.h new file mode 100644 index 0000000..00c7ff5 --- /dev/null +++ b/env/posix/utils_mpool.h @@ -0,0 +1,91 @@ +/* + * Copyright(c) 2012-2021 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_MPOOL_H_ +#define UTILS_MPOOL_H_ + +#include +#include + +#define MPOOL_ALLOCATOR_NAME_MAX 128 + +enum { + env_mpool_1, + env_mpool_2, + env_mpool_4, + env_mpool_8, + env_mpool_16, + env_mpool_32, + env_mpool_64, + env_mpool_128, + + env_mpool_max +}; + +struct env_mpool; + +/** + * @brief Create CAS memory pool + * + * @param hdr_size size of constant allocation part + * @param elem_size size increment for each element + * @param flags Allocation flags + * @param mpool_max Maximal allocator size (power of two) + * @param fallback Should allocations fall back to vmalloc if allocator fails + * @param limits Array of rpool preallocation limits per each mpool allocation + * order or NULL if defaults are to be used. 
Array should have + * mpool_max elements + * @param name_prefix Format name prefix + * + * @return CAS memory pool + */ +struct env_mpool *env_mpool_create(uint32_t hdr_size, uint32_t elem_size, + int flags, int mpool_max, bool fallback, + const uint32_t limits[env_mpool_max], + const char *name_perfix); + +/** + * @brief Destroy existing memory pool + * + * @param mpool memory pool + */ +void env_mpool_destroy(struct env_mpool *mpool); + +/** + * @brief Allocate new items of memory pool + * + * @note Allocation based on ATOMIC memory pool and this function can be called + * when IRQ disable + * + * @param mpool CAS memory pool reference + * @param count Count of elements to be allocated + * + * @return Pointer to the new items + */ +void *env_mpool_new(struct env_mpool *mpool, uint32_t count); + +/** + * @brief Allocate new items of memory pool with specified allocation flag + * + * @param mpool CAS memory pool reference + * @param count Count of elements to be allocated + * @param flags Kernel allocation falgs + * + * @return Pointer to the new items + */ +void *env_mpool_new_f(struct env_mpool *mpool, uint32_t count, int flags); + +/** + * @brief Free existing items of memory pool + * + * @param mpool CAS memory pool reference + * @param items Items to be freed + * @param count - Count of elements to be free + * + * @return Allocation was freed + */ +bool env_mpool_del(struct env_mpool *mpool, void *items, uint32_t count); + +#endif /* UTILS_MPOOL_H_ */