replace metadata updater with metadata I/O concurrency

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski <adam.j.rutkowski@intel.com>, 2021-04-03 21:26:59 -05:00
Committed-by: Mateusz Kozlowski
Parent commit: 06f3c937c3
This commit: 953e0f25d7
8 changed files with 83 additions and 45 deletions

View File

@ -8,6 +8,8 @@
#include "../utils/utils_alock.h"
struct metadata_io_request;
int ocf_mio_async_lock(struct ocf_alock *alock,
struct metadata_io_request *m_req,
ocf_req_async_lock_cb cmpl);

View File

@ -1610,16 +1610,12 @@ int ocf_metadata_init(struct ocf_cache *cache,
OCF_DEBUG_TRACE(cache);
ret = ocf_metadata_init_fixed_size(cache, cache_line_size);
if (ret) {
ocf_metadata_io_deinit(cache);
if (ret)
return ret;
}
ret = ocf_metadata_concurrency_init(&cache->metadata.lock);
if (ret) {
ocf_metadata_deinit_fixed_size(cache);
ocf_metadata_io_deinit(cache);
return ret;
}
@ -1632,7 +1628,6 @@ void ocf_metadata_deinit(struct ocf_cache *cache)
ocf_metadata_deinit_fixed_size(cache);
ocf_metadata_concurrency_deinit(&cache->metadata.lock);
ocf_metadata_io_deinit(cache);
}
void ocf_metadata_error(struct ocf_cache *cache)

View File

@ -12,6 +12,7 @@
#include "../utils/utils_io.h"
#include "../ocf_request.h"
#include "../ocf_def_priv.h"
#include "../concurrency/ocf_mio_concurrency.h"
#define OCF_METADATA_IO_DEBUG 0
@ -264,7 +265,7 @@ static struct ocf_io_if metadata_io_restart_if = {
.write = metadata_io_restart_req,
};
static void metadata_io_req_advance(struct metadata_io_request *m_req);
static void metadata_io_req_advance(struct metadata_io_request *m_req);
/*
* Iterative asynchronous write callback
@ -272,7 +273,6 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req);
static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
{
struct metadata_io_request_asynch *a_req = m_req->asynch;
ocf_cache_t cache = m_req->cache;
OCF_CHECK_NULL(a_req);
OCF_CHECK_NULL(a_req->on_complete);
@ -286,16 +286,36 @@ static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
OCF_DEBUG_PARAM(cache, "Page = %u", m_req->page);
if (a_req->mio_conc)
ocf_mio_async_unlock(a_req->mio_conc, m_req);
metadata_io_req_advance(m_req);
env_atomic_set(&m_req->finished, 1);
ocf_metadata_updater_kick(cache);
metadata_io_req_complete(m_req);
}
/*
 * Completion callback invoked once the metadata page lock has been
 * acquired (either immediately or asynchronously by the mio
 * concurrency layer): pushes the request to the front of the engine
 * queue so the actual metadata I/O is (re)submitted from there.
 *
 * NOTE(review): "matadata" in the name is a typo for "metadata"; kept
 * as-is because metadata_io_req_submit() references this exact symbol.
 */
static void matadata_io_page_lock_acquired(struct ocf_request *req)
{
	ocf_engine_push_req_front(req, true);
}
/*
 * Submit a single metadata I/O request, taking the per-page alock
 * first when this asynch request has mio concurrency attached
 * (a_req->mio_conc != NULL).
 *
 * If the lock is granted synchronously (or no concurrency object is
 * in use at all), the request is pushed to the engine queue right
 * away; otherwise matadata_io_page_lock_acquired() will be called
 * later by the alock when the lock becomes available.
 *
 * Fix vs. previous version: `lock` used to be left uninitialized and
 * was assigned only under `if (a_req->mio_conc)`, so the final check
 * relied on short-circuit evaluation to avoid reading an
 * indeterminate value (and triggered -Wmaybe-uninitialized).
 * Initializing it to OCF_LOCK_ACQUIRED keeps behavior identical while
 * removing the conditionally-uninitialized read and simplifying the
 * condition.
 */
static void metadata_io_req_submit(struct metadata_io_request *m_req)
{
	struct metadata_io_request_asynch *a_req = m_req->asynch;
	/* Default covers the no-concurrency path: submit immediately. */
	int lock = OCF_LOCK_ACQUIRED;

	env_atomic_set(&m_req->finished, 0);

	if (a_req->mio_conc) {
		lock = ocf_mio_async_lock(a_req->mio_conc, m_req,
				matadata_io_page_lock_acquired);
		/* TODO: error handling for lock < 0 */
	}

	if (lock == OCF_LOCK_ACQUIRED)
		matadata_io_page_lock_acquired(&m_req->req);
}
void metadata_io_req_end(struct metadata_io_request *m_req)
@ -320,7 +340,12 @@ void metadata_io_req_finalize(struct metadata_io_request *m_req)
static uint32_t metadata_io_max_page(ocf_cache_t cache)
{
return ocf_volume_get_max_io_size(&cache->device->volume) / PAGE_SIZE;
uint32_t volume_max_io_pages = ocf_volume_get_max_io_size(
&cache->device->volume) / PAGE_SIZE;
struct metadata_io_request *m_req;
uint32_t request_map_capacity_pages = sizeof(m_req->map) * 8;
return OCF_MIN(volume_max_io_pages, request_map_capacity_pages);
}
static void metadata_io_req_advance(struct metadata_io_request *m_req)
@ -344,6 +369,7 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req)
m_req->page = a_req->page + curr * max_count;
m_req->count = OCF_MIN(a_req->count - curr * max_count, max_count);
m_req->req.core_line_count = m_req->count;
}
static void metadata_io_req_start(struct metadata_io_request *m_req)
@ -381,7 +407,8 @@ void metadata_io_req_complete(struct metadata_io_request *m_req)
static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
void *context, uint32_t page, uint32_t count, int flags,
ocf_metadata_io_event_t io_hndl,
ocf_metadata_io_end_t compl_hndl)
ocf_metadata_io_end_t compl_hndl,
struct ocf_alock *mio_conc)
{
struct metadata_io_request_asynch *a_req;
struct metadata_io_request *m_req;
@ -410,6 +437,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
a_req->flags = flags;
a_req->on_meta_fill = io_hndl;
a_req->on_meta_drain = io_hndl;
a_req->mio_conc = mio_conc;
/* IO Requests initialization */
for (i = 0; i < req_count; i++) {
@ -460,10 +488,11 @@ err:
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count, int flags,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_end_t compl_hndl)
ocf_metadata_io_end_t compl_hndl,
struct ocf_alock *mio_conc)
{
return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
page, count, flags, fill_hndl, compl_hndl);
page, count, flags, fill_hndl, compl_hndl, mio_conc);
}
int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
@ -472,7 +501,7 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
ocf_metadata_io_end_t compl_hndl)
{
return metadata_io_i_asynch(cache, queue, OCF_READ, context,
page, count, flags, drain_hndl, compl_hndl);
page, count, flags, drain_hndl, compl_hndl, NULL);
}
#define MIO_RPOOL_LIMIT 16
@ -505,13 +534,3 @@ void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx)
env_mpool_destroy(ocf_ctx->resources.mio);
ocf_ctx->resources.mio = NULL;
}
int ocf_metadata_io_init(ocf_cache_t cache)
{
return ocf_metadata_updater_init(cache);
}
void ocf_metadata_io_deinit(ocf_cache_t cache)
{
ocf_metadata_updater_stop(cache);
}

View File

@ -6,6 +6,8 @@
#ifndef __METADATA_IO_H__
#define __METADATA_IO_H__
#include "../concurrency/ocf_mio_concurrency.h"
/**
* @file metadata_io.h
* @brief Metadata IO utilities
@ -68,6 +70,7 @@ struct metadata_io_request_asynch {
ocf_metadata_io_event_t on_meta_fill;
ocf_metadata_io_event_t on_meta_drain;
ocf_metadata_io_end_t on_complete;
struct ocf_alock *mio_conc;
uint32_t page;
uint32_t count;
uint32_t alloc_req_count; /*< Number of allocated metadata_io_requests */
@ -123,7 +126,8 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count, int flags,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_end_t compl_hndl);
ocf_metadata_io_end_t compl_hndl,
struct ocf_alock *mio_conc);
/**
* @brief Iterative asynchronous pages read
@ -153,14 +157,4 @@ int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx);
*/
void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx);
/**
* Function for initializing metadata io.
*/
int ocf_metadata_io_init(ocf_cache_t cache);
/**
* Function for deinitializing metadata io.
*/
void ocf_metadata_io_deinit(ocf_cache_t cache);
#endif /* METADATA_IO_UTILS_H_ */

View File

@ -83,6 +83,8 @@ static int _raw_ram_deinit(ocf_cache_t cache,
raw->mem_pool = NULL;
}
ocf_mio_concurrency_deinit(&raw->mio_conc);
return 0;
}
@ -95,16 +97,27 @@ static int _raw_ram_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
size_t mem_pool_size;
int ret;
OCF_DEBUG_TRACE(cache);
/* TODO: caller should specify explicitly whether to init mio conc? */
if (lock_page_pfn) {
ret = ocf_mio_concurrency_init(&raw->mio_conc,
raw->ssd_pages_offset, raw->ssd_pages, cache);
if (ret)
return ret;
}
/* Allocate memory pool for entries */
mem_pool_size = raw->ssd_pages;
mem_pool_size *= PAGE_SIZE;
raw->mem_pool_limit = mem_pool_size;
raw->mem_pool = env_secure_alloc(mem_pool_size);
if (!raw->mem_pool)
if (!raw->mem_pool) {
ocf_mio_concurrency_deinit(&raw->mio_conc);
return -OCF_ERR_NO_MEM;
}
ENV_BUG_ON(env_memset(raw->mem_pool, mem_pool_size, 0));
raw->lock_page = lock_page_pfn;
@ -310,7 +323,8 @@ static void _raw_ram_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages, 0,
_raw_ram_flush_all_fill, _raw_ram_flush_all_complete);
_raw_ram_flush_all_fill, _raw_ram_flush_all_complete,
raw->mio_conc);
if (result)
_raw_ram_flush_all_complete(cache, context, result);
}
@ -516,7 +530,8 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
raw->ssd_pages_offset + start_page, count,
req->ioi.io.flags,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete);
_raw_ram_flush_do_asynch_io_complete,
raw->mio_conc);
if (result)
break;

View File

@ -7,6 +7,7 @@
#define __METADATA_RAW_H__
#include "metadata_segment_id.h"
#include "../concurrency/ocf_mio_concurrency.h"
/**
* @file metadata_raw.h
@ -88,6 +89,8 @@ struct ocf_metadata_raw {
ocf_flush_page_synch_t lock_page; /*!< Page lock callback */
ocf_flush_page_synch_t unlock_page; /*!< Page unlock callback */
struct ocf_alock *mio_conc;
};
/**

View File

@ -126,6 +126,8 @@ int raw_dynamic_deinit(ocf_cache_t cache,
OCF_DEBUG_TRACE(cache);
ocf_mio_concurrency_deinit(&raw->mio_conc);
for (i = 0; i < raw->ssd_pages; i++)
env_secure_free(ctrl->pages[i], PAGE_SIZE);
@ -147,19 +149,30 @@ int raw_dynamic_init(ocf_cache_t cache,
{
struct _raw_ctrl *ctrl;
size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
int ret;
OCF_DEBUG_TRACE(cache);
if (raw->entry_size > PAGE_SIZE)
return -1;
/* TODO: caller should specify explicitly whether to init mio conc? */
if (lock_page_pfn) {
ret = ocf_mio_concurrency_init(&raw->mio_conc,
raw->ssd_pages_offset, raw->ssd_pages, cache);
if (ret)
return ret;
}
ctrl = env_vmalloc(size);
if (!ctrl)
if (!ctrl) {
ocf_mio_concurrency_deinit(&raw->mio_conc);
return -1;
}
ENV_BUG_ON(env_memset(ctrl, size, 0));
if (env_mutex_init(&ctrl->lock)) {
ocf_mio_concurrency_deinit(&raw->mio_conc);
env_vfree(ctrl);
return -1;
}
@ -519,7 +532,8 @@ void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages, 0,
raw_dynamic_flush_all_fill,
raw_dynamic_flush_all_complete);
raw_dynamic_flush_all_complete,
raw->mio_conc);
if (result)
OCF_CMPL_RET(priv, result);
}

View File

@ -1231,10 +1231,6 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
params.flags.added_to_list = true;
env_rmutex_unlock(&ctx->lock);
result = ocf_metadata_io_init(tmp_cache);
if (result)
goto _cache_mngt_init_instance_ERROR;
ocf_cache_log(tmp_cache, log_debug, "Metadata initialized\n");
_ocf_mngt_cache_init(tmp_cache, &params);