Use mpool to allocate metadata_io requests

Signed-off-by: Jan Musial <jan.musial@intel.com>
Author: Jan Musial <jan.musial@intel.com>
Date: 2021-02-25 14:31:17 +01:00
parent c243ad3df0
commit 2dc36657bf
4 changed files with 74 additions and 9 deletions
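This commit switches the allocation of asynchronous metadata IO contexts from a fixed-size env_zalloc (the old struct embedded METADATA_IO_REQS_LIMIT = 33 requests) to a per-context env_mpool named "ocf_mio" with power-of-two buckets of 1 to 64 requests. The context is allocated with env_mpool_new() sized by the actual req_count, the count is remembered in alloc_req_count, and the matching env_mpool_del() passes the same count back. The snippet below is a minimal, self-contained sketch of that bucketing idea only (assumed here to round the count up to the nearest bucket); it uses plain malloc and hypothetical names (mio_bucket_of, mio_req, mio_asynch), not the real env_mpool API, whose internals are outside this diff.

#include <stdio.h>
#include <stdlib.h>

struct mio_req { char payload[64]; };	/* stand-in for struct metadata_io_request */

struct mio_asynch {			/* stand-in for struct metadata_io_request_asynch */
	unsigned alloc_req_count;	/* remembered so the free can pass the same count */
	struct mio_req reqs[];		/* flexible array member, as in the patch */
};

#define MIO_BUCKETS 7			/* buckets for 1, 2, 4, ..., 64 requests */

/* Pick the smallest power-of-two bucket that holds req_count requests. */
static int mio_bucket_of(unsigned req_count)
{
	int bucket = 0;

	while ((1u << bucket) < req_count && bucket < MIO_BUCKETS - 1)
		bucket++;
	return bucket;
}

int main(void)
{
	unsigned req_count = 33;
	unsigned bucket_reqs = 1u << mio_bucket_of(req_count);	/* 33 -> 64-request bucket */

	/* One allocation covers the header plus the whole bucket. */
	struct mio_asynch *a_req =
		malloc(sizeof(*a_req) + bucket_reqs * sizeof(struct mio_req));
	if (!a_req)
		return 1;

	a_req->alloc_req_count = req_count;
	printf("req_count=%u served from a %u-request bucket\n", req_count, bucket_reqs);
	free(a_req);
	return 0;
}

Counts larger than the biggest bucket are possible (req_count is capped at the new METADATA_IO_REQS_LIMIT of 128), so they presumably fall back to a regular allocation, which seems to be what the true argument to env_mpool_create() in the diff below enables.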

File: src/metadata/metadata_io.c

@@ -45,6 +45,19 @@ struct metadata_io_read_i_atomic_context {
 	void *priv;
 };
 
+enum ocf_mio_size {
+	ocf_mio_size_1 = 0,
+	ocf_mio_size_2,
+	ocf_mio_size_4,
+	ocf_mio_size_8,
+	ocf_mio_size_16,
+	ocf_mio_size_32,
+	ocf_mio_size_64,
+	ocf_mio_size_max,
+};
+
+#define METADATA_IO_REQS_LIMIT 128
+
 static void metadata_io_read_i_atomic_complete(
 		struct metadata_io_read_i_atomic_context *context, int error)
 {
@@ -301,7 +314,8 @@ void metadata_io_req_finalize(struct metadata_io_request *m_req)
 	struct metadata_io_request_asynch *a_req = m_req->asynch;
 
 	if (env_atomic_dec_return(&a_req->req_active) == 0)
-		env_free(a_req);
+		env_mpool_del(m_req->cache->owner->resources.mio, a_req,
+				a_req->alloc_req_count);
 }
 
 static uint32_t metadata_io_max_page(ocf_cache_t cache)
@@ -375,11 +389,13 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 	uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count);
 	uint32_t req_count = OCF_MIN(io_count, METADATA_IO_REQS_LIMIT);
 	int i;
+	struct env_mpool *mio_allocator = cache->owner->resources.mio;
 
 	if (count == 0)
 		return 0;
 
-	a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO);
+	a_req = env_mpool_new(mio_allocator, req_count);
 	if (!a_req)
 		return -OCF_ERR_NO_MEM;
@@ -389,6 +405,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 	a_req->on_complete = compl_hndl;
 	a_req->context = context;
 	a_req->page = page;
+	a_req->alloc_req_count = req_count;
 	a_req->count = count;
 	a_req->flags = flags;
 	a_req->on_meta_fill = io_hndl;
@@ -427,14 +444,15 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 	compl_hndl(cache, context, a_req->error);
 
 	if (env_atomic_dec_return(&a_req->req_active) == 0)
-		env_free(a_req);
+		env_mpool_del(mio_allocator, a_req, req_count);
 
 	return 0;
 
 err:
 	while (i--)
 		ctx_data_free(cache->owner, a_req->reqs[i].data);
-	env_free(a_req);
+
+	env_mpool_del(mio_allocator, a_req, req_count);
 
 	return -OCF_ERR_NO_MEM;
 }
@@ -457,6 +475,36 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
 			page, count, flags, drain_hndl, compl_hndl);
 }
 
+#define MIO_RPOOL_LIMIT 16
+#define MIO_RPOOL_THRESHOLD ocf_mio_size_16 /* This is statically determined to
+						not exceed one page (4096B).
+						Change if appropriate. */
+
+int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx)
+{
+	uint32_t limits[] = {
+		[0 ... MIO_RPOOL_THRESHOLD - 1] = -1,
+		[MIO_RPOOL_THRESHOLD ... ocf_mio_size_max - 1] = MIO_RPOOL_LIMIT
+	};
+
+	ocf_ctx->resources.mio = env_mpool_create(
+			sizeof(struct metadata_io_request_asynch),
+			sizeof(struct metadata_io_request),
+			ENV_MEM_NOIO, ocf_mio_size_max - 1, true,
+			limits,
+			"ocf_mio");
+	if (ocf_ctx->resources.mio == NULL)
+		return -1;
+
+	return 0;
+}
+
+void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx)
+{
+	env_mpool_destroy(ocf_ctx->resources.mio);
+	ocf_ctx->resources.mio = NULL;
+}
+
 int ocf_metadata_io_init(ocf_cache_t cache)
 {
 	return ocf_metadata_updater_init(cache);
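The limits table above is built with GNU C range designators ([first ... last] = value), an extension supported by GCC and Clang: buckets below MIO_RPOOL_THRESHOLD get -1 and buckets from the threshold up to ocf_mio_size_max - 1 get MIO_RPOOL_LIMIT (16). What env_mpool does with these per-bucket values is internal to the env layer and not visible in this diff; the standalone sketch below, with renamed constants, only shows how such an initializer expands.

#include <stdio.h>
#include <stdint.h>

/* Mirrors enum ocf_mio_size: buckets for 1, 2, 4, ..., 64 requests. */
enum mio_size { mio_1 = 0, mio_2, mio_4, mio_8, mio_16, mio_32, mio_64, mio_max };

#define RPOOL_LIMIT	16
#define RPOOL_THRESHOLD	mio_16

int main(void)
{
	/* GNU range designators (GCC/Clang extension); -1 wraps to UINT32_MAX. */
	uint32_t limits[] = {
		[0 ... RPOOL_THRESHOLD - 1] = -1,
		[RPOOL_THRESHOLD ... mio_max - 1] = RPOOL_LIMIT
	};

	for (int i = 0; i < mio_max; i++)
		printf("bucket %d (%d reqs): limit %u\n", i, 1 << i, limits[i]);
	return 0;
}

The in-tree comment suggests the threshold marks the largest bucket whose allocation is still expected to fit within a single 4096 B page.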

File: src/metadata/metadata_io.h

@@ -54,16 +54,12 @@ struct metadata_io_request {
 	env_atomic finished;
 	uint32_t page;
 	uint32_t count;
-	int error;
 };
 
-#define METADATA_IO_REQS_LIMIT 33
-
 /*
  * Asynchronous IO request context
  */
 struct metadata_io_request_asynch {
-	struct metadata_io_request reqs[METADATA_IO_REQS_LIMIT];
 	void *context;
 	env_atomic req_remaining;
 	env_atomic req_active;
@@ -73,8 +69,10 @@ struct metadata_io_request_asynch {
 	ocf_metadata_io_end_t on_complete;
 	uint32_t page;
 	uint32_t count;
+	uint32_t alloc_req_count; /*< Number of allocated metadata_io_requests */
 	int flags;
 	int error;
+	struct metadata_io_request reqs[];
 };
 
 void metadata_io_req_complete(struct metadata_io_request *m_req);
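The header change above is the other half of the rework: the fixed reqs[METADATA_IO_REQS_LIMIT] array (33 entries) becomes a flexible array member sized per allocation, with alloc_req_count recording how many entries were actually allocated. A self-contained comparison of the two layouts, using stand-in types rather than the real OCF structs:

#include <stdio.h>

struct io_req { int error; unsigned page; unsigned count; };	/* stand-in request */

/* Old layout: every asynch context embedded 33 requests, used or not. */
#define OLD_REQS_LIMIT 33
struct io_asynch_old {
	int error;
	struct io_req reqs[OLD_REQS_LIMIT];
};

/* New layout: the flexible array member adds nothing to sizeof();
 * the trailing requests are sized per allocation. */
struct io_asynch_new {
	unsigned alloc_req_count;
	int error;
	struct io_req reqs[];
};

int main(void)
{
	unsigned n = 2;	/* a small metadata update needs only a couple of requests */

	printf("old fixed layout:     %zu bytes\n", sizeof(struct io_asynch_old));
	printf("new layout + %u reqs: %zu bytes\n", n,
			sizeof(struct io_asynch_new) + n * sizeof(struct io_req));
	return 0;
}

One constraint of this layout is that reqs[] must remain the last member, which is why the field moves from the top of the struct to the bottom in this patch.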
@@ -144,6 +142,16 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
 		ocf_metadata_io_event_t drain_hndl,
 		ocf_metadata_io_end_t compl_hndl);
 
+/**
+ * Initialize ocf_ctx-related structures of metadata_io (mpool).
+ */
+int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx);
+
+/**
+ * Deinitialize ocf_ctx-related structures of metadata_io.
+ */
+void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx);
+
 /**
  * Function for initializing metadata io.
  */

File: src/ocf_ctx.c

@@ -11,6 +11,7 @@
 #include "ocf_logger_priv.h"
 #include "ocf_core_priv.h"
 #include "mngt/ocf_mngt_core_pool_priv.h"
+#include "metadata/metadata_io.h"
 
 /*
  *
@@ -177,6 +178,10 @@ int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg)
 	if (ret)
 		goto err_logger;
 
+	ret = ocf_metadata_io_ctx_init(ocf_ctx);
+	if (ret)
+		goto err_mio;
+
 	ret = ocf_core_volume_type_init(ocf_ctx);
 	if (ret)
 		goto err_utils;
@@ -188,6 +193,8 @@ int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg)
 	return 0;
 
 err_utils:
+	ocf_metadata_io_ctx_deinit(ocf_ctx);
+err_mio:
 	ocf_req_allocator_deinit(ocf_ctx);
 err_logger:
 	ocf_logger_close(&ocf_ctx->logger);
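In ocf_ctx_create() the new pool is set up after the request allocator and before the core volume types, and the added err_mio label keeps the unwind order the exact mirror of initialization: a failure at any step frees only what was already created. A generic, self-contained sketch of this goto-unwinding pattern, with hypothetical step names standing in for the OCF calls:

#include <stdio.h>

/* Hypothetical init/deinit steps standing in for the OCF ones. */
static int step_a_init(void) { puts("A init"); return 0; }
static void step_a_deinit(void) { puts("A deinit"); }
static int step_b_init(void) { puts("B init"); return 0; }	/* like ocf_metadata_io_ctx_init() */
static void step_b_deinit(void) { puts("B deinit"); }		/* like ocf_metadata_io_ctx_deinit() */
static int step_c_init(void) { puts("C init"); return -1; }	/* a later step fails */

static int create(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		goto err_a;

	ret = step_b_init();
	if (ret)
		goto err_b;

	ret = step_c_init();
	if (ret)
		goto err_c;

	return 0;

	/* Each label tears down exactly the steps that succeeded, in reverse. */
err_c:
	step_b_deinit();
err_b:
	step_a_deinit();
err_a:
	return ret;
}

int main(void)
{
	return create() ? 1 : 0;
}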
@@ -234,7 +241,8 @@ void ocf_ctx_put(ocf_ctx_t ctx)
 	ocf_mngt_core_pool_deinit(ctx);
 	ocf_ctx_unregister_volume_types(ctx);
 	env_rmutex_destroy(&ctx->lock);
+	ocf_metadata_io_ctx_deinit(ctx);
 	ocf_req_allocator_deinit(ctx);
 	ocf_logger_close(&ctx->logger);
 	env_free(ctx);

File: src/ocf_ctx_priv.h

@@ -31,6 +31,7 @@ struct ocf_ctx {
 
 	struct {
 		struct env_mpool *req;
+		struct env_mpool *mio;
 	} resources;
 };
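With the new field, a context owns two shared pools (the request allocator and the metadata IO pool), and a cache reaches them through its owner back-pointer, which is how the earlier hunks spell cache->owner->resources.mio. A self-contained sketch of that ownership layout, again with stand-in types rather than the real OCF structs:

#include <stdio.h>

struct pool { const char *name; };	/* stand-in for struct env_mpool */

struct ctx {				/* stand-in for struct ocf_ctx */
	struct {
		struct pool *req;
		struct pool *mio;
	} resources;
};

struct cache {				/* stand-in for struct ocf_cache */
	struct ctx *owner;		/* back-pointer to the owning context */
};

int main(void)
{
	static struct pool req_pool = { "req" };
	static struct pool mio_pool = { "ocf_mio" };
	struct ctx ctx = { .resources = { .req = &req_pool, .mio = &mio_pool } };
	struct cache cache = { .owner = &ctx };

	/* Mirrors cache->owner->resources.mio in the patch. */
	printf("metadata IO allocations come from pool \"%s\"\n",
			cache.owner->resources.mio->name);
	return 0;
}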