Merge pull request #509 from Open-CAS/rm-metadata-updater

Remove metadata updater
Robert Baldyga authored 2021-06-17 09:34:18 +02:00, committed by GitHub
32 changed files with 1566 additions and 1666 deletions
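In summary: the dedicated metadata updater (a per-cache service with pending and in-progress request lists behind a single mutex, plus the ctx_metadata_updater_* environment hooks) is removed. Overlapping metadata flushes are instead serialized by per-page asynchronous locks ("mio concurrency", struct ocf_alock), threaded through the metadata IO path as a new trailing parameter; the read path passes NULL, presumably because reads need no overlap protection. A stand-alone sketch of the new caller contract follows; all types are stubs invented so the fragment compiles outside the OCF tree, and the callback shapes are assumptions based on metadata_io.h, not verbatim OCF code.

#include <stdint.h>

struct ocf_alock;                       /* opaque page-lock context */
typedef struct ocf_cache *ocf_cache_t;  /* stub */
typedef struct ocf_queue *ocf_queue_t;  /* stub */
typedef int (*ocf_metadata_io_event_t)(ocf_cache_t cache, void *data,
		uint32_t page, void *context);  /* shape assumed */
typedef void (*ocf_metadata_io_end_t)(ocf_cache_t cache, void *context,
		int error);                     /* shape assumed */

/* New in this commit: the trailing mio_conc argument. Writers pass the
 * RAW section's lock context (raw->mio_conc); readers pass NULL. */
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
		void *context, uint32_t page, uint32_t count, int flags,
		ocf_metadata_io_event_t fill_hndl,
		ocf_metadata_io_end_t compl_hndl,
		struct ocf_alock *mio_conc);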

src/metadata/metadata.c

@@ -1585,16 +1585,12 @@ int ocf_metadata_init(struct ocf_cache *cache,
 	OCF_DEBUG_TRACE(cache);
 
 	ret = ocf_metadata_init_fixed_size(cache, cache_line_size);
-	if (ret) {
-		ocf_metadata_io_deinit(cache);
+	if (ret)
 		return ret;
-	}
 
 	ret = ocf_metadata_concurrency_init(&cache->metadata.lock);
 	if (ret) {
 		ocf_metadata_deinit_fixed_size(cache);
-		ocf_metadata_io_deinit(cache);
 		return ret;
 	}
@@ -1607,7 +1603,6 @@ void ocf_metadata_deinit(struct ocf_cache *cache)
 	ocf_metadata_deinit_fixed_size(cache);
 	ocf_metadata_concurrency_deinit(&cache->metadata.lock);
-	ocf_metadata_io_deinit(cache);
 }
 
 void ocf_metadata_error(struct ocf_cache *cache)

src/metadata/metadata_io.c

@@ -12,6 +12,7 @@
 #include "../utils/utils_io.h"
 #include "../ocf_request.h"
 #include "../ocf_def_priv.h"
+#include "../concurrency/ocf_mio_concurrency.h"
 
 #define OCF_METADATA_IO_DEBUG 0
@@ -264,7 +265,7 @@ static struct ocf_io_if metadata_io_restart_if = {
 	.write = metadata_io_restart_req,
 };
 
 static void metadata_io_req_advance(struct metadata_io_request *m_req);
 
 /*
  * Iterative asynchronous write callback
@@ -272,7 +273,6 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req);
 static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
 {
 	struct metadata_io_request_asynch *a_req = m_req->asynch;
-	ocf_cache_t cache = m_req->cache;
 
 	OCF_CHECK_NULL(a_req);
 	OCF_CHECK_NULL(a_req->on_complete);
@@ -286,16 +286,50 @@ static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
 	OCF_DEBUG_PARAM(cache, "Page = %u", m_req->page);
 
-	metadata_io_req_advance(m_req);
-
-	env_atomic_set(&m_req->finished, 1);
-	ocf_metadata_updater_kick(cache);
+	if (a_req->mio_conc)
+		ocf_mio_async_unlock(a_req->mio_conc, m_req);
+
+	metadata_io_req_complete(m_req);
 }
 
+static void matadata_io_page_lock_acquired(struct ocf_request *req)
+{
+	ocf_engine_push_req_front(req, true);
+}
+
+void metadata_io_req_finalize(struct metadata_io_request *m_req)
+{
+	struct metadata_io_request_asynch *a_req = m_req->asynch;
+
+	if (env_atomic_dec_return(&a_req->req_active) == 0)
+		env_mpool_del(m_req->cache->owner->resources.mio, a_req,
+				a_req->alloc_req_count);
+}
+
 static void metadata_io_req_submit(struct metadata_io_request *m_req)
 {
+	struct metadata_io_request_asynch *a_req = m_req->asynch;
+	int lock;
+
 	env_atomic_set(&m_req->finished, 0);
 
-	metadata_updater_submit(m_req);
+	if (a_req->mio_conc) {
+		lock = ocf_mio_async_lock(a_req->mio_conc, m_req,
+				matadata_io_page_lock_acquired);
+		if (lock < 0) {
+			a_req->error = lock;
+			metadata_io_req_finalize(m_req);
+			return;
+		}
+	}
+
+	if (!a_req->mio_conc || lock == OCF_LOCK_ACQUIRED)
+		matadata_io_page_lock_acquired(&m_req->req);
 }
 
 void metadata_io_req_end(struct metadata_io_request *m_req)
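The submit path above follows a try-lock protocol: ocf_mio_async_lock() either grabs all of the request's page locks inline (OCF_LOCK_ACQUIRED), queues the request as a waiter and fires the callback once the pages are free, or returns a negative error. A toy stand-alone model of that control flow, with invented names; only the shape mirrors the diff, the real semantics live in ocf_mio_concurrency.c:

#include <stdio.h>

enum { TOY_LOCK_ERR = -1, TOY_LOCK_QUEUED = 0, TOY_LOCK_ACQUIRED = 1 };

typedef void (*toy_lock_cb)(void *req);

/* Stands in for ocf_mio_async_lock(): returns ACQUIRED when no pages
 * are contended, otherwise registers the waiter and fires cb later. */
static int toy_async_lock(int contended, void *req, toy_lock_cb cb)
{
	(void)req; (void)cb;	/* a real implementation stores the waiter */
	return contended ? TOY_LOCK_QUEUED : TOY_LOCK_ACQUIRED;
}

static void page_lock_acquired(void *req)
{
	/* In the diff: ocf_engine_push_req_front(req, true). */
	printf("dispatching request %p\n", req);
}

int main(void)
{
	int req;
	int lock = toy_async_lock(/* contended = */ 0, &req,
			page_lock_acquired);

	if (lock < 0)
		return 1;			/* a_req->error path in the diff */
	if (lock == TOY_LOCK_ACQUIRED)
		page_lock_acquired(&req);	/* inline dispatch */
	/* TOY_LOCK_QUEUED: the callback fires when the lock is granted */
	return 0;
}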
@@ -309,18 +343,14 @@ void metadata_io_req_end(struct metadata_io_request *m_req)
 	ctx_data_free(cache->owner, m_req->data);
 }
 
-void metadata_io_req_finalize(struct metadata_io_request *m_req)
-{
-	struct metadata_io_request_asynch *a_req = m_req->asynch;
-
-	if (env_atomic_dec_return(&a_req->req_active) == 0)
-		env_mpool_del(m_req->cache->owner->resources.mio, a_req,
-				a_req->alloc_req_count);
-}
-
 static uint32_t metadata_io_max_page(ocf_cache_t cache)
 {
-	return ocf_volume_get_max_io_size(&cache->device->volume) / PAGE_SIZE;
+	uint32_t volume_max_io_pages = ocf_volume_get_max_io_size(
+			&cache->device->volume) / PAGE_SIZE;
+	struct metadata_io_request *m_req;
+	uint32_t request_map_capacity_pages = sizeof(m_req->alock_status) * 8;
+
+	return OCF_MIN(volume_max_io_pages, request_map_capacity_pages);
 }
 
 static void metadata_io_req_advance(struct metadata_io_request *m_req)
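The volume's maximum transfer size is no longer the only cap here: each request now tracks its page locks in a single 64-bit map (the uint64_t alock_status field added to struct metadata_io_request in metadata_io.h below), so metadata_io_max_page() also clamps each request to 64 pages. A stand-alone check of that arithmetic, with a hypothetical volume limit:

#include <stdint.h>
#include <stdio.h>

#define OCF_MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* One lock bit per metadata page; the map is the uint64_t
	 * alock_status field, so capacity is 8 * sizeof(uint64_t) bits. */
	uint32_t request_map_capacity_pages = sizeof(uint64_t) * 8;

	/* Hypothetical volume: 256 KiB max IO, 4 KiB metadata pages. */
	uint32_t volume_max_io_pages = (256 * 1024) / 4096;

	printf("map capacity: %u pages\n", request_map_capacity_pages);
	printf("max pages per request: %u\n",
			OCF_MIN(volume_max_io_pages, request_map_capacity_pages));
	return 0;
}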
@@ -344,6 +374,7 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req)
 	m_req->page = a_req->page + curr * max_count;
 	m_req->count = OCF_MIN(a_req->count - curr * max_count, max_count);
+	m_req->req.core_line_count = m_req->count;
 }
 
 static void metadata_io_req_start(struct metadata_io_request *m_req)
@@ -381,7 +412,8 @@ void metadata_io_req_complete(struct metadata_io_request *m_req)
 static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 		void *context, uint32_t page, uint32_t count, int flags,
 		ocf_metadata_io_event_t io_hndl,
-		ocf_metadata_io_end_t compl_hndl)
+		ocf_metadata_io_end_t compl_hndl,
+		struct ocf_alock *mio_conc)
 {
 	struct metadata_io_request_asynch *a_req;
 	struct metadata_io_request *m_req;
@@ -410,6 +442,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 	a_req->flags = flags;
 	a_req->on_meta_fill = io_hndl;
 	a_req->on_meta_drain = io_hndl;
+	a_req->mio_conc = mio_conc;
 
 	/* IO Requests initialization */
 	for (i = 0; i < req_count; i++) {
@@ -425,6 +458,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 		m_req->req.info.internal = true;
 		m_req->req.rw = dir;
 		m_req->req.map = LIST_POISON1;
+		m_req->req.alock_status = (uint8_t*)&m_req->alock_status;
 
 		/* If req_count == io_count and count is not multiple of
 		 * max_count, for last we can allocate data smaller that
@@ -460,10 +494,11 @@ err:
 int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
 		void *context, uint32_t page, uint32_t count, int flags,
 		ocf_metadata_io_event_t fill_hndl,
-		ocf_metadata_io_end_t compl_hndl)
+		ocf_metadata_io_end_t compl_hndl,
+		struct ocf_alock *mio_conc)
 {
 	return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
-			page, count, flags, fill_hndl, compl_hndl);
+			page, count, flags, fill_hndl, compl_hndl, mio_conc);
 }
@@ -472,7 +507,7 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
 		ocf_metadata_io_end_t compl_hndl)
 {
 	return metadata_io_i_asynch(cache, queue, OCF_READ, context,
-			page, count, flags, drain_hndl, compl_hndl);
+			page, count, flags, drain_hndl, compl_hndl, NULL);
 }
 
 #define MIO_RPOOL_LIMIT 16
@@ -505,13 +540,3 @@ void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx)
 	env_mpool_destroy(ocf_ctx->resources.mio);
 	ocf_ctx->resources.mio = NULL;
 }
-
-int ocf_metadata_io_init(ocf_cache_t cache)
-{
-	return ocf_metadata_updater_init(cache);
-}
-
-void ocf_metadata_io_deinit(ocf_cache_t cache)
-{
-	ocf_metadata_updater_stop(cache);
-}

src/metadata/metadata_io.h

@@ -6,6 +6,8 @@
 #ifndef __METADATA_IO_H__
 #define __METADATA_IO_H__
 
+#include "../concurrency/ocf_mio_concurrency.h"
+
 /**
  * @file metadata_io.h
  * @brief Metadata IO utilities
@@ -54,6 +56,7 @@ struct metadata_io_request {
 	env_atomic finished;
 	uint32_t page;
 	uint32_t count;
+	uint64_t alock_status;
 };
 
 /*
@@ -67,6 +70,7 @@ struct metadata_io_request_asynch {
 	ocf_metadata_io_event_t on_meta_fill;
 	ocf_metadata_io_event_t on_meta_drain;
 	ocf_metadata_io_end_t on_complete;
+	struct ocf_alock *mio_conc;
 	uint32_t page;
 	uint32_t count;
 	uint32_t alloc_req_count; /*< Number of allocated metadata_io_requests */
@@ -122,7 +126,8 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
 int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
 		void *context, uint32_t page, uint32_t count, int flags,
 		ocf_metadata_io_event_t fill_hndl,
-		ocf_metadata_io_end_t compl_hndl);
+		ocf_metadata_io_end_t compl_hndl,
+		struct ocf_alock *mio_conc);
 
 /**
  * @brief Iterative asynchronous pages read
@@ -152,14 +157,4 @@ int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx);
  */
 void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx);
 
-/**
- * Function for initializing metadata io.
- */
-int ocf_metadata_io_init(ocf_cache_t cache);
-
-/**
- * Function for deinitializing metadata io.
- */
-void ocf_metadata_io_deinit(ocf_cache_t cache);
-
 #endif /* METADATA_IO_UTILS_H_ */

src/metadata/metadata_misc.c

@@ -51,7 +51,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
 	ocf_cache_line_t i, next_i;
 	uint64_t start_line, end_line;
 	int ret = 0;
-	struct ocf_cache_line_concurrency *c =
+	struct ocf_alock *c =
 			ocf_cache_line_concurrency(cache);
 
 	start_line = ocf_bytes_2_lines(cache, start_byte);

src/metadata/metadata_raw.c

@@ -83,6 +83,8 @@ static int _raw_ram_deinit(ocf_cache_t cache,
 		raw->mem_pool = NULL;
 	}
 
+	ocf_mio_concurrency_deinit(&raw->mio_conc);
+
 	return 0;
 }
@@ -95,16 +97,27 @@ static int _raw_ram_init(ocf_cache_t cache,
 		struct ocf_metadata_raw *raw)
 {
 	size_t mem_pool_size;
+	int ret;
 
 	OCF_DEBUG_TRACE(cache);
 
+	/* TODO: caller should specify explicitly whether to init mio conc? */
+	if (lock_page_pfn) {
+		ret = ocf_mio_concurrency_init(&raw->mio_conc,
+				raw->ssd_pages_offset, raw->ssd_pages, cache);
+		if (ret)
+			return ret;
+	}
+
 	/* Allocate memory pool for entries */
 	mem_pool_size = raw->ssd_pages;
 	mem_pool_size *= PAGE_SIZE;
 	raw->mem_pool_limit = mem_pool_size;
 	raw->mem_pool = env_secure_alloc(mem_pool_size);
-	if (!raw->mem_pool)
+	if (!raw->mem_pool) {
+		ocf_mio_concurrency_deinit(&raw->mio_conc);
 		return -OCF_ERR_NO_MEM;
+	}
 	ENV_BUG_ON(env_memset(raw->mem_pool, mem_pool_size, 0));
 
 	raw->lock_page = lock_page_pfn;
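Note that the error paths added here (and in raw_dynamic_init below) call ocf_mio_concurrency_deinit() unconditionally, even when lock_page_pfn was NULL and the matching init was skipped; this only works if deinit tolerates a zeroed, never-initialized handle. A toy sketch of such a guard, with invented names:

#include <stdlib.h>

struct toy_alock { int *state; };

/* Deinit that tolerates a handle whose init was skipped, mirroring how
 * the error paths above call ocf_mio_concurrency_deinit() without
 * checking whether the init ran. */
static void toy_conc_deinit(struct toy_alock **conc)
{
	if (!*conc)
		return;			/* init skipped or already torn down */
	free((*conc)->state);
	free(*conc);
	*conc = NULL;			/* repeated deinit stays harmless */
}

int main(void)
{
	struct toy_alock *conc = NULL;

	toy_conc_deinit(&conc);		/* safe: init was never performed */
	toy_conc_deinit(&conc);		/* safe: double deinit is a no-op */
	return 0;
}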
@@ -310,7 +323,8 @@ static void _raw_ram_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
 	result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
 			raw->ssd_pages_offset, raw->ssd_pages, 0,
-			_raw_ram_flush_all_fill, _raw_ram_flush_all_complete);
+			_raw_ram_flush_all_fill, _raw_ram_flush_all_complete,
+			raw->mio_conc);
 	if (result)
 		_raw_ram_flush_all_complete(cache, context, result);
 }
@@ -516,7 +530,8 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
 				raw->ssd_pages_offset + start_page, count,
 				req->ioi.io.flags,
 				_raw_ram_flush_do_asynch_fill,
-				_raw_ram_flush_do_asynch_io_complete);
+				_raw_ram_flush_do_asynch_io_complete,
+				raw->mio_conc);
 
 		if (result)
 			break;
src/metadata/metadata_raw.h

@@ -7,6 +7,7 @@
 #define __METADATA_RAW_H__
 
 #include "metadata_segment_id.h"
+#include "../concurrency/ocf_mio_concurrency.h"
 
 /**
  * @file metadata_raw.h
@@ -88,6 +89,8 @@ struct ocf_metadata_raw {
 	ocf_flush_page_synch_t lock_page; /*!< Page lock callback */
 	ocf_flush_page_synch_t unlock_page; /*!< Page unlock callback */
+
+	struct ocf_alock *mio_conc;
 };
 
 /**

src/metadata/metadata_raw_dynamic.c

@@ -126,6 +126,8 @@ int raw_dynamic_deinit(ocf_cache_t cache,
 	OCF_DEBUG_TRACE(cache);
 
+	ocf_mio_concurrency_deinit(&raw->mio_conc);
+
 	for (i = 0; i < raw->ssd_pages; i++)
 		env_secure_free(ctrl->pages[i], PAGE_SIZE);
@@ -147,19 +149,30 @@ int raw_dynamic_init(ocf_cache_t cache,
 {
 	struct _raw_ctrl *ctrl;
 	size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
+	int ret;
 
 	OCF_DEBUG_TRACE(cache);
 
 	if (raw->entry_size > PAGE_SIZE)
 		return -1;
 
+	/* TODO: caller should specify explicitly whether to init mio conc? */
+	if (lock_page_pfn) {
+		ret = ocf_mio_concurrency_init(&raw->mio_conc,
+				raw->ssd_pages_offset, raw->ssd_pages, cache);
+		if (ret)
+			return ret;
+	}
+
 	ctrl = env_vmalloc(size);
-	if (!ctrl)
+	if (!ctrl) {
+		ocf_mio_concurrency_deinit(&raw->mio_conc);
 		return -1;
+	}
 
 	ENV_BUG_ON(env_memset(ctrl, size, 0));
 
 	if (env_mutex_init(&ctrl->lock)) {
+		ocf_mio_concurrency_deinit(&raw->mio_conc);
 		env_vfree(ctrl);
 		return -1;
 	}
@@ -519,7 +532,8 @@ void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
 	result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
 			raw->ssd_pages_offset, raw->ssd_pages, 0,
 			raw_dynamic_flush_all_fill,
-			raw_dynamic_flush_all_complete);
+			raw_dynamic_flush_all_complete,
+			raw->mio_conc);
 	if (result)
 		OCF_CMPL_RET(priv, result);
 }

src/metadata/metadata_updater.c (deleted)

@@ -1,163 +0,0 @@
-/*
- * Copyright(c) 2012-2021 Intel Corporation
- * SPDX-License-Identifier: BSD-3-Clause-Clear
- */
-
-#include "metadata.h"
-#include "metadata_io.h"
-#include "metadata_updater_priv.h"
-#include "../ocf_priv.h"
-#include "../engine/engine_common.h"
-#include "../ocf_cache_priv.h"
-#include "../ocf_ctx_priv.h"
-#include "../utils/utils_io.h"
-
-int ocf_metadata_updater_init(ocf_cache_t cache)
-{
-	ocf_metadata_updater_t mu = &cache->metadata_updater;
-	struct ocf_metadata_io_syncher *syncher = &mu->syncher;
-
-	INIT_LIST_HEAD(&syncher->in_progress_head);
-	INIT_LIST_HEAD(&syncher->pending_head);
-	env_mutex_init(&syncher->lock);
-
-	return ctx_metadata_updater_init(cache->owner, mu);
-}
-
-void ocf_metadata_updater_kick(ocf_cache_t cache)
-{
-	ctx_metadata_updater_kick(cache->owner, &cache->metadata_updater);
-}
-
-void ocf_metadata_updater_stop(ocf_cache_t cache)
-{
-	ctx_metadata_updater_stop(cache->owner, &cache->metadata_updater);
-	env_mutex_destroy(&cache->metadata_updater.syncher.lock);
-}
-
-void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv)
-{
-	OCF_CHECK_NULL(mu);
-	mu->priv = priv;
-}
-
-void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu)
-{
-	OCF_CHECK_NULL(mu);
-	return mu->priv;
-}
-
-ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu)
-{
-	OCF_CHECK_NULL(mu);
-	return container_of(mu, struct ocf_cache, metadata_updater);
-}
-
-static int _metadata_updater_iterate_in_progress(ocf_cache_t cache,
-		struct list_head *finished, struct metadata_io_request *new_req)
-{
-	struct ocf_metadata_io_syncher *syncher =
-			&cache->metadata_updater.syncher;
-	struct metadata_io_request *curr, *temp;
-
-	list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) {
-		if (env_atomic_read(&curr->finished)) {
-			list_move_tail(&curr->list, finished);
-			continue;
-		}
-		if (new_req) {
-			/* If request specified, check if overlap occurs. */
-			if (ocf_io_overlaps(new_req->page, new_req->count,
-					curr->page, curr->count)) {
-				return 1;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void metadata_updater_process_finished(struct list_head *finished)
-{
-	struct metadata_io_request *curr, *temp;
-
-	list_for_each_entry_safe(curr, temp, finished, list) {
-		list_del(&curr->list);
-		metadata_io_req_complete(curr);
-	}
-}
-
-void metadata_updater_submit(struct metadata_io_request *m_req)
-{
-	ocf_cache_t cache = m_req->cache;
-	struct ocf_metadata_io_syncher *syncher =
-			&cache->metadata_updater.syncher;
-	struct list_head finished;
-	int ret;
-
-	INIT_LIST_HEAD(&finished);
-
-	env_mutex_lock(&syncher->lock);
-
-	ret = _metadata_updater_iterate_in_progress(cache, &finished, m_req);
-
-	/* Either add it to in-progress list or pending list for deferred
-	 * execution.
-	 */
-	if (ret == 0)
-		list_add_tail(&m_req->list, &syncher->in_progress_head);
-	else
-		list_add_tail(&m_req->list, &syncher->pending_head);
-
-	env_mutex_unlock(&syncher->lock);
-
-	if (ret == 0)
-		ocf_engine_push_req_front(&m_req->req, true);
-
-	metadata_updater_process_finished(&finished);
-}
-
-uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
-{
-	struct metadata_io_request *curr, *temp;
-	struct ocf_metadata_io_syncher *syncher;
-	struct list_head finished;
-	ocf_cache_t cache;
-	int ret;
-
-	OCF_CHECK_NULL(mu);
-
-	INIT_LIST_HEAD(&finished);
-
-	cache = ocf_metadata_updater_get_cache(mu);
-	syncher = &cache->metadata_updater.syncher;
-
-	env_mutex_lock(&syncher->lock);
-	if (list_empty(&syncher->pending_head)) {
-		/*
-		 * If pending list is empty, we iterate over in progress
-		 * list to free memory used by finished requests.
-		 */
-		_metadata_updater_iterate_in_progress(cache, &finished, NULL);
-		env_mutex_unlock(&syncher->lock);
-		metadata_updater_process_finished(&finished);
-		env_cond_resched();
-		return 0;
-	}
-	list_for_each_entry_safe(curr, temp, &syncher->pending_head, list) {
-		ret = _metadata_updater_iterate_in_progress(cache, &finished, curr);
-		if (ret == 0) {
-			/* Move to in-progress list and kick the workers */
-			list_move_tail(&curr->list, &syncher->in_progress_head);
-		}
-		env_mutex_unlock(&syncher->lock);
-		metadata_updater_process_finished(&finished);
-		if (ret == 0)
-			ocf_engine_push_req_front(&curr->req, true);
-		env_cond_resched();
-		env_mutex_lock(&syncher->lock);
-	}
-	env_mutex_unlock(&syncher->lock);
-
-	return 0;
-}
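For reference, the serialization rule the removed updater enforced: a new request was deferred to the pending list whenever its page range overlapped an in-flight one (see _metadata_updater_iterate_in_progress above). A stand-alone sketch of that predicate; the real ocf_io_overlaps() lives in utils_io.h and may differ in boundary conventions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Half-open page ranges [start, start + count) overlap iff each one
 * begins before the other one ends. */
static bool pages_overlap(uint32_t start1, uint32_t count1,
		uint32_t start2, uint32_t count2)
{
	return start1 < start2 + count2 && start2 < start1 + count1;
}

int main(void)
{
	printf("%d\n", pages_overlap(0, 4, 2, 4)); /* pages 0-3 vs 2-5: 1 */
	printf("%d\n", pages_overlap(0, 4, 4, 4)); /* pages 0-3 vs 4-7: 0 */
	return 0;
}

The per-page locks introduced by this commit give the same exclusion without a global list walk under one mutex.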

src/metadata/metadata_updater_priv.h (deleted)

@@ -1,32 +0,0 @@
-/*
- * Copyright(c) 2012-2021 Intel Corporation
- * SPDX-License-Identifier: BSD-3-Clause-Clear
- */
-
-#ifndef __METADATA_UPDATER_PRIV_H__
-#define __METADATA_UPDATER_PRIV_H__
-
-#include "../ocf_def_priv.h"
-#include "metadata_io.h"
-
-struct ocf_metadata_updater {
-	/* Metadata flush synchronizer context */
-	struct ocf_metadata_io_syncher {
-		struct list_head in_progress_head;
-		struct list_head pending_head;
-		env_mutex lock __attribute__((aligned(64)));
-	} syncher;
-
-	void *priv;
-};
-
-void metadata_updater_submit(struct metadata_io_request *m_req);
-
-int ocf_metadata_updater_init(struct ocf_cache *cache);
-
-void ocf_metadata_updater_kick(struct ocf_cache *cache);
-
-void ocf_metadata_updater_stop(struct ocf_cache *cache);
-
-#endif /* __METADATA_UPDATER_PRIV_H__ */
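One detail worth noting in the removed header: the syncher's mutex was annotated __attribute__((aligned(64))), pushing the lock word onto its own cache line so it does not false-share with the adjacent list heads. A minimal stand-alone illustration of the same attribute (GCC/Clang extension; the field types are simplified stand-ins, not OCF code):

#include <stddef.h>
#include <stdio.h>

struct syncher_like {
	void *in_progress_head[2];		/* list_head stand-in */
	void *pending_head[2];			/* list_head stand-in */
	int lock __attribute__((aligned(64)));	/* env_mutex stand-in */
};

int main(void)
{
	/* The lock lands at a 64-byte boundary past the list heads. */
	printf("lock offset = %zu\n", offsetof(struct syncher_like, lock));
	printf("struct size = %zu\n", sizeof(struct syncher_like));
	return 0;
}

With the updater gone, this globally contended lock disappears entirely; contention moves to the per-page alock entries.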