Replace submit with forward in standby

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
Author: Robert Baldyga <robert.baldyga@huawei.com>
Date: 2023-10-13 09:13:07 +02:00
Committed by: Michal Mielewczyk
Parent: 322ae2687d
Commit: 29ca8fbbe4
3 changed files with 103 additions and 146 deletions
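
Before this change, the standby cache volume serviced each I/O by allocating a second ocf_io ("vol_io") against the backing cache volume, copying the data and completion callback into it, and submitting it with ocf_volume_submit_io()/_flush()/_discard(). This commit switches those paths to the request-based forward interface: the original request is forwarded directly with ocf_req_forward_cache_io()/_flush()/_discard(), and completion is delivered through req->cache_forward_end once every outstanding sub-operation has finished. The sketch below models only that fan-in completion pattern; the types and names are simplified stand-ins, not the OCF API.

/* Minimal sketch of forward-style completion (illustrative stand-ins,
 * not OCF code): one request, several sub-operations, a single callback
 * fired by whichever completion drains the counter last. */
#include <stdatomic.h>
#include <stdio.h>

struct request;
typedef void (*forward_end_t)(struct request *req, int error);

struct request {
        atomic_int remaining;       /* outstanding sub-operations */
        int error;                  /* first error wins */
        forward_end_t forward_end;  /* counterpart of req->cache_forward_end */
};

static void request_complete(struct request *req, int error)
{
        if (error && !req->error)
                req->error = error;
        /* The last completion (counter drops from 1 to 0) fires the callback. */
        if (atomic_fetch_sub(&req->remaining, 1) == 1)
                req->forward_end(req, req->error);
}

static void on_forward_end(struct request *req, int error)
{
        printf("request finished, error=%d\n", error);
}

int main(void)
{
        struct request req = { .error = 0, .forward_end = on_forward_end };

        /* Mirrors env_atomic_set(&req->req_remaining, 3) in the diff below:
         * forwarded cache I/O + metadata update + the submitter's own path. */
        atomic_init(&req.remaining, 3);
        request_complete(&req, 0);  /* forwarded I/O completed */
        request_complete(&req, 0);  /* metadata update completed */
        request_complete(&req, 0);  /* submit path done; callback fires */
        return 0;
}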

File 1 of 3

@@ -27,14 +27,13 @@
 static int passive_io_resume(struct ocf_request *req)
 {
+        struct ocf_request *master = req->master_io_req;
+        struct ocf_io *io = &master->ioi.io;
         ocf_cache_t cache = req->cache;
         struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
-        struct ocf_io *io = (struct ocf_io*) req->data;
-        ctx_data_t *data = ocf_io_get_data(io);
         uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
         uint64_t io_pages_count = BYTES_TO_PAGES(io->bytes);
         uint64_t io_end_page = io_start_page + io_pages_count - 1;
-        ocf_end_io_t io_cmpl = req->master_io_req;
         enum ocf_metadata_segment_id update_segments[] = {
                 metadata_segment_sb_config,
                 metadata_segment_collision,
@@ -58,13 +57,14 @@ static int passive_io_resume(struct ocf_request *req)
                 overlap_page = overlap_start - raw_start_page;
                 overlap_count = overlap_end - overlap_start + 1;
-                ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
+                ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin,
                                 PAGES_TO_BYTES(overlap_start_data));
-                ocf_metadata_raw_update(cache, raw, data, overlap_page, overlap_count);
+                ocf_metadata_raw_update(cache, raw, req->data, overlap_page,
+                                overlap_count);
         }
         ocf_pio_async_unlock(req->cache->standby.concurrency, req);
-        io_cmpl(io, 0);
+        master->complete(master, 0);
         env_allocator_del(cache->standby.allocator, req);
         return 0;
 }
@@ -74,29 +74,30 @@ static void passive_io_page_lock_acquired(struct ocf_request *req)
         ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
 }
-int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
-                ocf_end_io_t io_cmpl)
+int ocf_metadata_passive_update(struct ocf_request *master)
 {
+        ocf_cache_t cache = master->cache;
         struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
-        struct ocf_request *req;
+        struct ocf_io *io = &master->ioi.io;
         uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
         uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(io->bytes);
+        struct ocf_request *req;
         int lock = 0;
         if (io->dir == OCF_READ) {
-                io_cmpl(io, 0);
+                master->complete(master, 0);
                 return 0;
         }
         if (io_start_page >= ctrl->count_pages) {
-                io_cmpl(io, 0);
+                master->complete(master, 0);
                 return 0;
         }
         if (io->addr % PAGE_SIZE || io->bytes % PAGE_SIZE) {
                 ocf_cache_log(cache, log_warn,
                                 "Metadata update not aligned to page size!\n");
-                io_cmpl(io, -OCF_ERR_INVAL);
+                master->complete(master, -OCF_ERR_INVAL);
                 return -OCF_ERR_INVAL;
         }
@@ -104,13 +105,13 @@ int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
                 //FIXME handle greater IOs
                 ocf_cache_log(cache, log_warn,
                                 "IO size exceedes max supported size!\n");
-                io_cmpl(io, -OCF_ERR_INVAL);
+                master->complete(master, -OCF_ERR_INVAL);
                 return -OCF_ERR_INVAL;
         }
         req = (struct ocf_request*)env_allocator_new(cache->standby.allocator);
         if (!req) {
-                io_cmpl(io, -OCF_ERR_NO_MEM);
+                master->complete(master, -OCF_ERR_NO_MEM);
                 return -OCF_ERR_NO_MEM;
         }
@@ -118,8 +119,8 @@ int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
         req->info.internal = true;
         req->engine_handler = passive_io_resume;
         req->rw = OCF_WRITE;
-        req->data = io;
-        req->master_io_req = io_cmpl;
+        req->master_io_req = master;
+        req->data = master->data;
         req->cache = cache;
         env_atomic_set(&req->lock_remaining, 0);
@@ -131,7 +132,7 @@ int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
                         req, passive_io_page_lock_acquired);
         if (lock < 0) {
                 env_allocator_del(cache->standby.allocator, req);
-                io_cmpl(io, lock);
+                master->complete(master, lock);
                 return lock;
         }
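
Note how the sub-request's master_io_req field changes meaning above: it used to hold a bare ocf_end_io_t completion callback and now holds the master request itself, so passive_io_resume can both borrow the master's data buffer (req->data = master->data) and finish the master through master->complete(). A minimal illustration of that parent/child pattern, with assumed names rather than the real OCF structures:

/* Parent/child completion sketch (assumed names, not the OCF API): the
 * child keeps a pointer to its master request instead of a raw callback. */
#include <stdio.h>

struct master_req {
        void *data;                                        /* user buffer */
        void (*complete)(struct master_req *m, int error); /* like master->complete */
};

struct sub_req {
        struct master_req *master; /* was: ocf_end_io_t io_cmpl */
        void *data;                /* borrowed: sub.data = master->data */
};

static void master_complete(struct master_req *m, int error)
{
        printf("master completed, error=%d\n", error);
}

int main(void)
{
        char buf[16];
        struct master_req master = { .data = buf, .complete = master_complete };
        struct sub_req sub = { .master = &master, .data = master.data };

        /* ... the sub-request updates metadata using sub.data ... */
        sub.master->complete(sub.master, 0); /* finish through the master */
        return 0;
}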

File 2 of 3

@@ -1,13 +1,13 @@
 /*
  * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #ifndef __OCF_METADATA_PASSIVE_IO_H__
 #define __OCF_METADATA_PASSIVE_IO_H__
-int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
-                ocf_end_io_t io_cmpl);
+int ocf_metadata_passive_update(struct ocf_request *master);
 int ocf_metadata_passive_io_ctx_init(ocf_cache_t cache);

File 3 of 3

@@ -281,13 +281,6 @@ void *ocf_cache_get_priv(ocf_cache_t cache)
         return cache->priv;
 }
-struct ocf_cache_volume_io_priv {
-        struct ocf_io *io;
-        struct ctx_data_t *data;
-        env_atomic remaining;
-        env_atomic error;
-};
 struct ocf_cache_volume {
         ocf_cache_t cache;
 };
@@ -299,84 +292,35 @@ static inline ocf_cache_t ocf_volume_to_cache(ocf_volume_t volume)
         return cache_volume->cache;
 }
-static void ocf_cache_volume_io_complete_generic(struct ocf_io *vol_io,
+static void ocf_cache_volume_io_complete_generic(struct ocf_request *req,
                 int error)
 {
-        struct ocf_cache_volume_io_priv *priv;
-        struct ocf_io *io = vol_io->priv1;
-        ocf_cache_t cache = ocf_volume_to_cache(ocf_io_get_volume(io));
+        ocf_cache_t cache = req->cache;
-        priv = ocf_io_get_priv(io);
-        if (env_atomic_dec_return(&priv->remaining))
-                return;
-        ocf_io_put(vol_io);
-        ocf_io_end(io, error);
         ocf_refcnt_dec(&cache->refcnt.metadata);
+        ocf_io_end(&req->ioi.io, error);
 }
-static void ocf_cache_io_complete(struct ocf_io *io, int error)
+static void ocf_cache_io_complete(struct ocf_request *req, int error)
 {
-        struct ocf_cache_volume_io_priv *priv;
-        ocf_cache_t cache;
-        struct ocf_request *req = ocf_io_to_req(io);
-        cache = ocf_volume_to_cache(ocf_io_get_volume(io));
+        ocf_cache_t cache = req->cache;
         if (error)
                 req->error = req->error ?: error;
-        priv = ocf_io_get_priv(io);
-        env_atomic_cmpxchg(&priv->error, 0, error);
-        if (env_atomic_dec_return(&priv->remaining))
+        if (env_atomic_dec_return(&req->req_remaining))
                 return;
         ocf_refcnt_dec(&cache->refcnt.metadata);
-        ocf_io_end(io, env_atomic_read(&priv->error));
-}
-static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
-{
-        struct ocf_io *io = vol_io->priv1;
-        ocf_io_put(vol_io);
-        ocf_cache_io_complete(io, error);
-}
-static int ocf_cache_volume_prepare_vol_io(struct ocf_io *io,
-                struct ocf_io **vol_io)
-{
-        ocf_cache_t cache;
-        struct ocf_io *tmp_io;
-        OCF_CHECK_NULL(io);
-        cache = ocf_volume_to_cache(ocf_io_get_volume(io));
-        tmp_io = ocf_volume_new_io(ocf_cache_get_volume(cache), io->io_queue,
-                        io->addr, io->bytes, io->dir, io->io_class, io->flags);
-        if (!tmp_io)
-                return -OCF_ERR_NO_MEM;
-        *vol_io = tmp_io;
-        return 0;
+        ocf_io_end(&req->ioi.io, error);
 }
 static void ocf_cache_volume_submit_io(struct ocf_io *io)
 {
-        struct ocf_cache_volume_io_priv *priv;
-        struct ocf_io *vol_io;
-        ocf_cache_t cache;
+        struct ocf_request *req = ocf_io_to_req(io);
+        ocf_cache_t cache = req->cache;
         int result;
-        cache = ocf_volume_to_cache(ocf_io_get_volume(io));
-        priv = ocf_io_get_priv(io);
         if (!ocf_refcnt_inc(&cache->refcnt.metadata)) {
                 ocf_io_end(io, -OCF_ERR_IO);
                 return;
@@ -386,44 +330,27 @@ static void ocf_cache_volume_submit_io(struct ocf_io *io)
                 return;
         }
-        env_atomic_set(&priv->remaining, 3);
-        env_atomic_set(&priv->error, 0);
+        env_atomic_set(&req->req_remaining, 3);
-        result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
-        if (result) {
-                ocf_io_end(io, result);
-                return;
-        }
+        req->cache_forward_end = ocf_cache_io_complete;
+        ocf_req_forward_cache_io(req, req->rw, req->byte_position,
+                        req->byte_length, req->offset);
-        result = ocf_io_set_data(vol_io, priv->data, 0);
-        if (result) {
-                ocf_io_put(vol_io);
-                ocf_io_end(io, result);
-                return;
-        }
-        ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete);
-        ocf_volume_submit_io(vol_io);
-        result = ocf_metadata_passive_update(cache, io, ocf_cache_io_complete);
+        req->complete = ocf_cache_io_complete;
+        result = ocf_metadata_passive_update(req);
         if (result) {
                 ocf_cache_log(cache, log_crit,
                                 "Metadata update error (error=%d)!\n", result);
         }
-        ocf_cache_io_complete(io, 0);
+        // TODO why the result is not passed to io_cmpl???
+        ocf_cache_io_complete(req, 0);
 }
 static void ocf_cache_volume_submit_flush(struct ocf_io *io)
 {
-        struct ocf_cache_volume_io_priv *priv;
-        struct ocf_io *vol_io;
-        ocf_cache_t cache;
-        int result;
-        cache = ocf_volume_to_cache(ocf_io_get_volume(io));
-        priv = ocf_io_get_priv(io);
+        struct ocf_request *req = ocf_io_to_req(io);
+        ocf_cache_t cache = req->cache;
         if (!ocf_refcnt_inc(&cache->refcnt.metadata)) {
                 ocf_io_end(io, -OCF_ERR_IO);
@@ -434,28 +361,15 @@ static void ocf_cache_volume_submit_flush(struct ocf_io *io)
                 return;
         }
-        env_atomic_set(&priv->remaining, 1);
-        result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
-        if (result) {
-                ocf_io_end(io, result);
-                return;
-        }
-        ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
-        ocf_volume_submit_flush(vol_io);
+        req->cache_forward_end = ocf_cache_volume_io_complete_generic;
+        ocf_req_forward_cache_flush(req);
 }
 static void ocf_cache_volume_submit_discard(struct ocf_io *io)
 {
-        struct ocf_cache_volume_io_priv *priv;
-        struct ocf_io *vol_io;
-        ocf_cache_t cache;
-        int result;
-        cache = ocf_volume_to_cache(ocf_io_get_volume(io));
-        priv = ocf_io_get_priv(io);
+        struct ocf_request *req = ocf_io_to_req(io);
+        ocf_cache_t cache = req->cache;
         if (!ocf_refcnt_inc(&cache->refcnt.metadata)) {
                 ocf_io_end(io, -OCF_ERR_IO);
@@ -466,16 +380,9 @@ static void ocf_cache_volume_submit_discard(struct ocf_io *io)
                 return;
         }
-        env_atomic_set(&priv->remaining, 1);
-        result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
-        if (result) {
-                ocf_io_end(io, result);
-                return;
-        }
-        ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
-        ocf_volume_submit_discard(vol_io);
+        req->cache_forward_end = ocf_cache_volume_io_complete_generic;
+        ocf_req_forward_cache_discard(req, req->byte_position,
+                        req->byte_length);
 }
 /* *** VOLUME OPS *** */
@@ -517,26 +424,30 @@ static uint64_t ocf_cache_volume_get_byte_length(ocf_volume_t volume)
 static int ocf_cache_io_set_data(struct ocf_io *io,
                 ctx_data_t *data, uint32_t offset)
 {
-        struct ocf_cache_volume_io_priv *priv = ocf_io_get_priv(io);
+        struct ocf_request *req;
         if (!data || offset)
                 return -OCF_ERR_INVAL;
-        priv->data = data;
+        OCF_CHECK_NULL(io);
+        req = ocf_io_to_req(io);
+        req->data = data;
+        req->offset = offset;
         return 0;
 }
 static ctx_data_t *ocf_cache_io_get_data(struct ocf_io *io)
 {
-        struct ocf_cache_volume_io_priv *priv = ocf_io_get_priv(io);
+        struct ocf_request *req;
-        return priv->data;
+        OCF_CHECK_NULL(io);
+        req = ocf_io_to_req(io);
+        return req->data;
 }
 const struct ocf_volume_properties ocf_cache_volume_properties = {
         .name = "OCF_Cache",
-        .io_priv_size = sizeof(struct ocf_cache_volume_io_priv),
+        .io_priv_size = 0,
         .volume_priv_size = sizeof(struct ocf_cache_volume),
         .caps = {
                 .atomic_writes = 0,
@@ -559,10 +470,55 @@ const struct ocf_volume_properties ocf_cache_volume_properties = {
         .deinit = NULL,
 };
+static int ocf_cache_io_allocator_init(ocf_io_allocator_t allocator,
+                uint32_t priv_size, const char *name)
+{
+        return 0;
+}
+static void ocf_cache_io_allocator_deinit(ocf_io_allocator_t allocator)
+{
+}
+static void *ocf_cache_io_allocator_new(ocf_io_allocator_t allocator,
+                ocf_volume_t volume, ocf_queue_t queue,
+                uint64_t addr, uint32_t bytes, uint32_t dir)
+{
+        struct ocf_request *req;
+        req = ocf_req_new(queue, NULL, addr, bytes, dir);
+        if (!req)
+                return NULL;
+        return &req->ioi;
+}
+static void ocf_cache_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
+{
+        struct ocf_request *req;
+        req = container_of(obj, struct ocf_request, ioi);
+        ocf_req_put(req);
+}
+const struct ocf_io_allocator_type ocf_cache_io_allocator_type = {
+        .ops = {
+                .allocator_init = ocf_cache_io_allocator_init,
+                .allocator_deinit = ocf_cache_io_allocator_deinit,
+                .allocator_new = ocf_cache_io_allocator_new,
+                .allocator_del = ocf_cache_io_allocator_del,
+        },
+};
+const struct ocf_volume_extended ocf_cache_volume_extended = {
+        .allocator_type = &ocf_cache_io_allocator_type,
+};
 int ocf_cache_volume_type_init(ocf_ctx_t ctx)
 {
         return ocf_ctx_register_volume_type_internal(ctx, OCF_VOLUME_TYPE_CACHE,
-                        &ocf_cache_volume_properties, NULL);
+                        &ocf_cache_volume_properties,
+                        &ocf_cache_volume_extended);
 }
 bool ocf_dbg_cache_is_settled(ocf_cache_t cache)
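
The allocator registered above is what makes ocf_io_to_req() in the earlier hunks work: the ocf_io no longer carries a separate private struct but is embedded in struct ocf_request (the ioi member), so allocator_new returns &req->ioi and allocator_del recovers the owning request with container_of. A self-contained sketch of that embedding idiom (generic C; only the ioi name is taken from the diff):

/* container_of embedding sketch: convert a pointer to an embedded member
 * back into a pointer to the structure that contains it. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct io {
        int dir;
};

struct request {
        int id;
        struct io ioi; /* embedded I/O object, as in req->ioi */
};

int main(void)
{
        struct request req = { .id = 42, .ioi = { .dir = 1 } };
        struct io *io = &req.ioi; /* what allocator_new hands out */

        /* Recover the owning request from the member pointer. */
        struct request *owner = container_of(io, struct request, ioi);
        printf("recovered request id=%d\n", owner->id); /* prints 42 */
        return 0;
}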