Replace submit with forward in metadata_io

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
Robert Baldyga 2023-10-13 10:41:34 +02:00 committed by Michal Mielewczyk
parent 29ca8fbbe4
commit af3b379bb8
2 changed files with 46 additions and 79 deletions
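In outline, the commit drops the intermediate struct ocf_io that metadata_io allocated for each cache IO and uses the request-forwarding interface instead: the data buffer moves into req->data, the completion is registered as req->cache_forward_end, and the IO is issued with ocf_req_forward_cache_io()/ocf_req_forward_cache_metadata() directly on the struct ocf_request. A condensed before/after sketch of the pattern, assembled from the hunks below (addr, bytes, dir, flags, data, priv, cmpl_fn and end_fn are placeholders, not code from the tree):

	/* Before: allocate, set up and submit a separate ocf_io. */
	io = ocf_new_cache_io(cache, req->io_queue, addr, bytes, dir, 0, flags);
	if (!io)
		return -OCF_ERR_NO_MEM;
	ocf_io_set_cmpl(io, priv, NULL, cmpl_fn);	/* cmpl_fn receives (io, error) */
	result = ocf_io_set_data(io, data, 0);
	if (result) {
		ocf_io_put(io);
		return result;
	}
	ocf_volume_submit_io(io);

	/* After: forward the request itself; no ocf_io, no extra get/put. */
	req->data = data;			/* buffer now lives on the request */
	req->cache_forward_end = end_fn;	/* end_fn receives (req, error) */
	ocf_req_forward_cache_io(req, dir, addr, bytes, flags);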

src/metadata/metadata_io.c

@@ -65,7 +65,7 @@ static void metadata_io_read_i_atomic_complete(
 {
 	context->compl_hndl(context->cache, context->priv, error);
 
-	ctx_data_free(context->cache->owner, context->data);
+	ctx_data_free(context->cache->owner, context->req->data);
 	ocf_req_put(context->req);
 	env_vfree(context);
 }
@@ -73,13 +73,12 @@ static void metadata_io_read_i_atomic_complete(
 /*
  * Iterative read end callback
  */
-static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
+static void metadata_io_read_i_atomic_step_end(struct ocf_request *req,
+		int error)
 {
-	struct metadata_io_read_i_atomic_context *context = io->priv1;
+	struct metadata_io_read_i_atomic_context *context = req->priv;
 
-	OCF_DEBUG_TRACE(ocf_volume_get_cache(ocf_io_get_volume(io)));
-	ocf_io_put(io);
+	OCF_DEBUG_TRACE(req->cache);
 
 	if (error) {
 		metadata_io_read_i_atomic_complete(context, error);
@@ -87,54 +86,37 @@ static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
 	}
 
 	context->drain_hndl(context->priv, context->curr_offset,
-			context->curr_count, context->data);
+			context->curr_count, req->data);
 
 	context->count -= context->curr_count;
 	context->curr_offset += context->curr_count;
 
-	if (context->count > 0)
-		ocf_queue_push_req(context->req,
+	if (context->count > 0) {
+		ocf_queue_push_req(req,
 				OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
-	else
+	} else {
 		metadata_io_read_i_atomic_complete(context, 0);
+	}
 }
 
 static int metadata_io_read_i_atomic_step(struct ocf_request *req)
 {
 	struct metadata_io_read_i_atomic_context *context = req->priv;
 	ocf_cache_t cache = context->cache;
 	uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
-	struct ocf_io *io;
-	int result = 0;
 
 	/* Get sectors count of this IO iteration */
 	context->curr_count = OCF_MIN(max_sectors_count, context->count);
 
 	/* Reset position in data buffer */
-	ctx_data_seek(cache->owner, context->data, ctx_data_seek_begin, 0);
+	ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin, 0);
 
-	/* Allocate new IO */
-	io = ocf_new_cache_io(cache, req->io_queue,
+	req->cache_forward_end = metadata_io_read_i_atomic_step_end;
+	ocf_req_forward_cache_metadata(req, OCF_READ,
 			cache->device->metadata_offset +
-			SECTORS_TO_BYTES(context->curr_offset),
-			SECTORS_TO_BYTES(context->curr_count), OCF_READ, 0, 0);
-	if (!io) {
-		metadata_io_read_i_atomic_complete(context, -OCF_ERR_NO_MEM);
-		return 0;
-	}
-
-	/* Setup IO */
-	ocf_io_set_cmpl(io, context, NULL, metadata_io_read_i_atomic_step_end);
-	result = ocf_io_set_data(io, context->data, 0);
-	if (result) {
-		ocf_io_put(io);
-		metadata_io_read_i_atomic_complete(context, result);
-		return 0;
-	}
-
-	/* Submit IO */
-	ocf_volume_submit_metadata(io);
+			PAGES_TO_BYTES(context->curr_offset),
+			PAGES_TO_BYTES(context->curr_count), 0);
 
 	return 0;
 }
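The atomic read keeps its iterative shape after the rewrite: a single request drives the whole scan, its engine handler issues one chunk per pass, and the forward-end callback either re-queues the request for the next chunk or completes. A minimal sketch of that loop, assuming hypothetical helpers step(), step_end(), finish() and no_more_work(), with addr/bytes standing in for the per-chunk offset and size:

	static void step_end(struct ocf_request *req, int error);

	/* engine handler: runs each time the request is popped from the queue */
	static int step(struct ocf_request *req)
	{
		req->cache_forward_end = step_end;
		ocf_req_forward_cache_metadata(req, OCF_READ, addr, bytes, 0);
		return 0;
	}

	static void step_end(struct ocf_request *req, int error)
	{
		if (error || no_more_work(req)) {
			finish(req, error);
			return;
		}
		/* re-queue the same request; the queue invokes
		 * req->engine_handler (step() above) for the next chunk */
		ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
	}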
@@ -149,6 +131,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
 	struct metadata_io_read_i_atomic_context *context;
 	uint64_t io_sectors_count = cache->device->collision_table_entries *
 				ocf_line_sectors(cache);
+	struct ocf_request *req;
 
 	OCF_DEBUG_TRACE(cache);
@@ -156,24 +139,26 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
 	if (!context)
 		return -OCF_ERR_NO_MEM;
 
-	context->req = ocf_req_new_mngt(cache, queue);
-	if (!context->req) {
+	req = ocf_req_new_mngt(cache, queue);
+	if (!req) {
 		env_vfree(context);
 		return -OCF_ERR_NO_MEM;
 	}
 
-	context->req->info.internal = true;
-	context->req->engine_handler = metadata_io_read_i_atomic_step;
-	context->req->priv = context;
-
 	/* Allocate one 4k page for metadata*/
-	context->data = ctx_data_alloc(cache->owner, 1);
-	if (!context->data) {
-		ocf_req_put(context->req);
+	req->data = ctx_data_alloc(cache->owner, 1);
+	if (!req->data) {
+		ocf_req_put(req);
 		env_vfree(context);
 		return -OCF_ERR_NO_MEM;
 	}
 
+	req->info.internal = true;
+	req->engine_handler = metadata_io_read_i_atomic_step;
+	req->priv = context;
+
+	context->req = req;
 	context->cache = cache;
 	context->count = io_sectors_count;
 	context->curr_offset = 0;
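Setup of the management request now happens on a local req before it is published via context->req. Condensed from the hunk above, with the error paths elided:

	req = ocf_req_new_mngt(cache, queue);		/* refcounted mngt request */
	req->data = ctx_data_alloc(cache->owner, 1);	/* one 4k page for metadata */
	req->info.internal = true;
	req->engine_handler = metadata_io_read_i_atomic_step;
	req->priv = context;
	context->req = req;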
@@ -195,7 +180,7 @@ static void metadata_io_req_fill(struct metadata_io_request *m_req)
 	int i;
 
 	for (i = 0; i < m_req->count; i++) {
-		a_req->on_meta_fill(cache, m_req->data,
+		a_req->on_meta_fill(cache, m_req->req.data,
 				m_req->page + i, m_req->context);
 	}
 }
@@ -207,27 +192,19 @@ static void metadata_io_req_drain(struct metadata_io_request *m_req)
 	int i;
 
 	for (i = 0; i < m_req->count; i++) {
-		a_req->on_meta_drain(cache, m_req->data,
+		a_req->on_meta_drain(cache, m_req->req.data,
 				m_req->page + i, m_req->context);
 	}
 }
 
-static void metadata_io_io_end(struct metadata_io_request *m_req, int error);
-
-static void metadata_io_io_cmpl(struct ocf_io *io, int error)
-{
-	metadata_io_io_end(io->priv1, error);
-	ocf_io_put(io);
-}
+static void metadata_io_end(struct ocf_request *req, int error);
 
 static int metadata_io_do(struct ocf_request *req)
 {
 	struct metadata_io_request *m_req = req->priv;
 	ocf_cache_t cache = req->cache;
-	struct ocf_io *io;
-	int ret;
 
-	ctx_data_seek(cache->owner, m_req->data, ctx_data_seek_begin, 0);
+	ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin, 0);
 
 	/* Fill with the latest metadata. */
 	if (m_req->req.rw == OCF_WRITE) {
@@ -238,25 +215,13 @@ static int metadata_io_do(struct ocf_request *req)
 				m_req->page % OCF_NUM_GLOBAL_META_LOCKS);
 	}
 
-	io = ocf_new_cache_io(cache, req->io_queue,
-			PAGES_TO_BYTES(m_req->page),
-			PAGES_TO_BYTES(m_req->count),
-			m_req->req.rw, 0, m_req->asynch->flags);
-	if (!io) {
-		metadata_io_io_end(m_req, -OCF_ERR_NO_MEM);
-		return 0;
-	}
+	ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin, 0);
 
-	/* Setup IO */
-	ocf_io_set_cmpl(io, m_req, NULL, metadata_io_io_cmpl);
-	ctx_data_seek(cache->owner, m_req->data, ctx_data_seek_begin, 0);
-	ret = ocf_io_set_data(io, m_req->data, 0);
-	if (ret) {
-		ocf_io_put(io);
-		metadata_io_io_end(m_req, ret);
-		return ret;
-	}
-
-	ocf_volume_submit_io(io);
+	req->cache_forward_end = metadata_io_end;
+	ocf_req_forward_cache_io(req, req->rw, PAGES_TO_BYTES(m_req->page),
+			PAGES_TO_BYTES(m_req->count), 0);
 
 	return 0;
 }
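The completion path loses a trampoline as well: previously a struct ocf_io completion unpacked the metadata request from io->priv1 and dropped the io reference, while now the forward-end callback receives the request directly. Side by side, condensed from the hunks above and below:

	/* before */
	static void metadata_io_io_cmpl(struct ocf_io *io, int error)
	{
		metadata_io_io_end(io->priv1, error);	/* recover m_req from the io */
		ocf_io_put(io);				/* drop the io reference */
	}

	/* after */
	static void metadata_io_end(struct ocf_request *req, int error)
	{
		struct metadata_io_request *m_req = req->priv;
		/* ... error accounting continues unchanged ... */
	}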
@@ -307,8 +272,9 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req);
 /*
  * Iterative asynchronous write callback
  */
-static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
+static void metadata_io_end(struct ocf_request *req, int error)
 {
+	struct metadata_io_request *m_req = req->priv;
 	struct metadata_io_request_asynch *a_req = m_req->asynch;
 
 	OCF_CHECK_NULL(a_req);
@@ -339,7 +305,7 @@ static void metadata_io_req_end(struct metadata_io_request *m_req)
 	if (env_atomic_dec_return(&a_req->req_remaining) == 0)
 		a_req->on_complete(cache, a_req->context, a_req->error);
 
-	ctx_data_free(cache->owner, m_req->data);
+	ctx_data_free(cache->owner, m_req->req.data);
 }
 
 static uint32_t metadata_io_max_page(ocf_cache_t cache)
@@ -460,14 +426,15 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 		m_req->req.rw = dir;
 		m_req->req.map = LIST_POISON1;
 		m_req->req.alock_status = (uint8_t*)&m_req->alock_status;
+		m_req->req.ioi.io.flags = flags;
 
 		/* If req_count == io_count and count is not multiple of
 		 * max_count, for last we can allocate data smaller that
 		 * max_count as we are sure it will never be resubmitted.
 		 */
-		m_req->data = ctx_data_alloc(cache->owner,
+		m_req->req.data = ctx_data_alloc(cache->owner,
 				OCF_MIN(max_count, count - i * max_count));
-		if (!m_req->data)
+		if (!m_req->req.data)
 			goto err;
 	}
@@ -485,7 +452,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 err:
 	while (i--)
-		ctx_data_free(cache->owner, a_req->reqs[i].data);
+		ctx_data_free(cache->owner, a_req->reqs[i].req.data);
 
 	env_mpool_del(mio_allocator, a_req, req_count);

src/metadata/metadata_io.h

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -51,7 +52,6 @@ struct metadata_io_request {
 	struct list_head list;
 	ocf_cache_t cache;
 	void *context;
-	ctx_data_t *data;
 	struct metadata_io_request_asynch *asynch;
 	uint32_t page;
 	uint32_t count;
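With ctx_data_t *data removed from struct metadata_io_request, callers reach the buffer through the embedded request instead, so accesses in the .c file change shape like this (one-line illustration, m_req as in the diff):

	ctx_data_free(cache->owner, m_req->req.data);	/* was: m_req->data */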