Merge pull request #828 from mmichal10/io_forward

Io forward pt.1
This commit is contained in:
Robert Baldyga 2024-09-19 19:42:49 +02:00 committed by GitHub
commit df280cf5ec
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
40 changed files with 1145 additions and 557 deletions

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2019-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -87,6 +88,41 @@ static void volume_submit_discard(struct ocf_io *io)
io->end(io, 0);
}
/*
 * Forward-path read/write handler for the example volume: transfers data
 * directly between the IO's data buffer and the volume's in-memory backing
 * store, then completes the forward token.
 *
 * @param volume Volume the IO is being forwarded to
 * @param token Forward token representing the original OCF IO
 * @param dir Direction (OCF_READ or OCF_WRITE)
 * @param addr Byte address within the volume
 * @param bytes Length of the transfer in bytes
 * @param offset Offset within the IO's data buffer
 */
void volume_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
		int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
	struct ocf_io *io = ocf_forward_get_io(token);
	struct myvolume *myvolume = ocf_volume_get_priv(volume);
	struct volume_data *data = ocf_io_get_data(io);

	if (dir == OCF_WRITE) {
		memcpy(myvolume->mem + addr,
				data->ptr + offset, bytes);
	} else {
		memcpy(data->ptr + offset,
				myvolume->mem + addr, bytes);
	}

	/* uint64_t is not guaranteed to be "long", so "%ld" is a format
	 * specifier mismatch (undefined behavior on e.g. 32-bit targets).
	 * Cast explicitly and use "%llu" instead. */
	printf("VOL FWD: (name: %s), IO: (dir: %s, addr: %llu, bytes: %llu)\n",
			myvolume->name, dir == OCF_READ ? "read" : "write",
			(unsigned long long)addr, (unsigned long long)bytes);

	ocf_forward_end(token, 0);
}
/*
 * Forward-path flush handler. The example volume stores data in plain RAM
 * (see the memcpy-based IO path), so there is nothing to persist - the
 * forwarded flush is completed immediately with success.
 */
void volume_forward_flush(ocf_volume_t volume, ocf_forward_token_t token)
{
ocf_forward_end(token, 0);
}
/*
 * Forward-path discard handler. The example volume does not track
 * discarded ranges, so the forwarded discard is completed immediately
 * with success; @addr and @bytes are intentionally ignored.
 */
void volume_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes)
{
ocf_forward_end(token, 0);
}
/*
* Let's set maximum io size to 128 KiB.
*/
@ -145,6 +181,9 @@ const struct ocf_volume_properties volume_properties = {
.submit_io = volume_submit_io,
.submit_flush = volume_submit_flush,
.submit_discard = volume_submit_discard,
.forward_io = volume_forward_io,
.forward_flush = volume_forward_flush,
.forward_discard = volume_forward_discard,
.get_max_io_size = volume_get_max_io_size,
.get_length = volume_get_length,
},

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -235,4 +236,68 @@ void ocf_io_handle(struct ocf_io *io, void *opaque);
*/
ocf_volume_t ocf_io_get_volume(struct ocf_io *io);
/**
* @brief Get the original OCF IO associated with forward token
*
* @param[in] token Forward token
*/
struct ocf_io *ocf_forward_get_io(ocf_forward_token_t token);
/**
* @brief Forward io to another subvolume
*
* Forwarding automatically increases forwarded io refcount, so at some
* point additional ocf_forward_end() needs to be called to balance it.
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] dir Direction OCF_READ/OCF_WRITE
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
* @param[in] offset Offset within the IO data
*/
void ocf_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset);
/**
* @brief Forward flush to another subvolume
*
* Forwarding automatically increases forwarded io refcount, so at some
* point additional ocf_forward_end() needs to be called to balance it.
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
*/
void ocf_forward_flush(ocf_volume_t volume, ocf_forward_token_t token);
/**
* @brief Forward discard to another subvolume
*
* Forwarding automatically increases forwarded io refcount, so at some
* point additional ocf_forward_end() needs to be called to balance it.
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
*/
void ocf_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
/**
* @brief Increment forwarded io refcount
*
* @param[in] token Forward token
*/
void ocf_forward_get(ocf_forward_token_t token);
/**
* @brief Complete the forwarded io
*
* @param[in] token Forward token to be completed
* @param[in] error Completion status code
*/
void ocf_forward_end(ocf_forward_token_t token, int error);
#endif /* __OCF_IO_H__ */

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -72,6 +73,17 @@ typedef struct ocf_volume_uuid *ocf_uuid_t;
*/
typedef void ctx_data_t;
/**
* @brief IO forward token
*
* The token is associated with IO that is being forwarded. It allows
* OCF to keep track of which IO has been forwarded where. It also has
* refcount which can be increased/decreased on each forward level, so
* that there is no need to introduce additional counters if at some
* level the forward needs to be split into several sub-forwards.
*/
typedef uint64_t ocf_forward_token_t;
/**
* @brief handle to I/O queue
*/

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -82,6 +83,39 @@ struct ocf_volume_ops {
*/
void (*submit_write_zeroes)(struct ocf_io *io);
/**
* @brief Forward the original io directly to the volume
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] dir Direction OCF_READ/OCF_WRITE
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
* @param[in] offset Offset within the IO data
*/
void (*forward_io)(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes,
uint64_t offset);
/**
* @brief Forward the original flush io directly to the volume
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
*/
void (*forward_flush)(ocf_volume_t volume, ocf_forward_token_t token);
/**
* @brief Forward the original discard io directly to the volume
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
*/
void (*forward_discard)(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
/**
* @brief Volume initialization callback, called when volume object
* is being initialized

View File

@ -43,6 +43,8 @@ enum ocf_io_if_type {
OCF_IO_FLUSH_IF,
OCF_IO_DISCARD_IF,
OCF_IO_D2C_IF,
OCF_IO_D2C_FLUSH_IF,
OCF_IO_D2C_DISCARD_IF,
OCF_IO_PRIV_MAX_IF,
};
@ -96,26 +98,40 @@ static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = {
},
.name = "Fast",
},
[OCF_IO_DISCARD_IF] = {
.cbs = {
[OCF_READ] = ocf_discard,
[OCF_WRITE] = ocf_discard,
},
.name = "Discard",
},
[OCF_IO_D2C_IF] = {
.cbs = {
[OCF_READ] = ocf_io_d2c,
[OCF_WRITE] = ocf_io_d2c,
},
.name = "Direct to core",
},
[OCF_IO_FLUSH_IF] = {
.cbs = {
[OCF_READ] = ocf_engine_flush,
[OCF_WRITE] = ocf_engine_flush,
},
.name = "Ops engine",
.name = "Flush",
},
[OCF_IO_DISCARD_IF] = {
.cbs = {
[OCF_READ] = ocf_engine_discard,
[OCF_WRITE] = ocf_engine_discard,
},
.name = "Discard",
},
[OCF_IO_D2C_IF] = {
.cbs = {
[OCF_READ] = ocf_d2c_io,
[OCF_WRITE] = ocf_d2c_io,
},
.name = "Direct to core io",
},
[OCF_IO_D2C_FLUSH_IF] = {
.cbs = {
[OCF_READ] = ocf_d2c_flush,
[OCF_WRITE] = ocf_d2c_flush,
},
.name = "Direct to core flush",
},
[OCF_IO_D2C_DISCARD_IF] = {
.cbs = {
[OCF_READ] = ocf_d2c_discard,
[OCF_WRITE] = ocf_d2c_discard,
},
.name = "Direct to core discard",
},
};
@ -138,17 +154,6 @@ const char *ocf_get_io_iface_name(ocf_req_cache_mode_t cache_mode)
return cache_mode_io_if_map[cache_mode]->name;
}
static ocf_req_cb ocf_io_if_type_to_engine_cb(
enum ocf_io_if_type io_if_type, int rw)
{
if (unlikely(io_if_type == OCF_IO_MAX_IF ||
io_if_type == OCF_IO_PRIV_MAX_IF)) {
return NULL;
}
return IO_IFS[io_if_type].cbs[rw];
}
static ocf_req_cb ocf_cache_mode_to_engine_cb(
ocf_req_cache_mode_t req_cache_mode, int rw)
{
@ -257,17 +262,12 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req)
return ret;
}
static void ocf_engine_hndl_2dc_req(struct ocf_request *req)
{
IO_IFS[OCF_IO_D2C_IF].cbs[req->rw](req);
}
void ocf_engine_hndl_discard_req(struct ocf_request *req)
{
ocf_req_get(req);
if (req->d2c) {
ocf_engine_hndl_2dc_req(req);
IO_IFS[OCF_IO_D2C_DISCARD_IF].cbs[req->rw](req);
return;
}
@ -279,8 +279,8 @@ void ocf_engine_hndl_flush_req(struct ocf_request *req)
ocf_req_get(req);
req->engine_handler = (req->d2c) ?
ocf_io_if_type_to_engine_cb(OCF_IO_D2C_IF, req->rw) :
ocf_io_if_type_to_engine_cb(OCF_IO_FLUSH_IF, req->rw);
IO_IFS[OCF_IO_D2C_FLUSH_IF].cbs[req->rw] :
IO_IFS[OCF_IO_FLUSH_IF].cbs[req->rw];
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC);
}

View File

@ -10,10 +10,11 @@
#include "engine_bf.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "engine_io.h"
#include "cache_engine.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#define OCF_ENGINE_DEBUG_IO_NAME "bf"
#include "engine_debug.h"
@ -43,19 +44,9 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
struct ocf_cache *cache = req->cache;
if (error) {
req->error = error;
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
}
if (req->error)
inc_fallback_pt_error_counter(req->cache);
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&req->req_remaining))
return;
}
backfill_queue_dec_unblock(req->cache);
@ -67,7 +58,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
req->data = NULL;
}
if (req->error) {
if (error) {
ocf_engine_invalidate(req);
} else {
ocf_req_unlock(ocf_cache_line_concurrency(cache), req);
@ -79,22 +70,13 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
static int _ocf_backfill_do(struct ocf_request *req)
{
unsigned int reqs_to_issue;
req->data = req->cp_data;
if (unlikely(req->data == NULL)) {
env_atomic_set(&req->req_remaining, 1);
_ocf_backfill_complete(req, -OCF_ERR_NO_MEM);
return 0;
}
reqs_to_issue = ocf_engine_io_count(req);
/* There will be #reqs_to_issue completions */
env_atomic_set(&req->req_remaining, reqs_to_issue);
ocf_submit_cache_reqs(req->cache, req, OCF_WRITE, 0, req->byte_length,
reqs_to_issue, _ocf_backfill_complete);
ocf_engine_forward_cache_io_req(req, OCF_WRITE, _ocf_backfill_complete);
return 0;
}

View File

@ -7,9 +7,9 @@
#include "../ocf_cache_priv.h"
#include "engine_d2c.h"
#include "engine_common.h"
#include "engine_io.h"
#include "cache_engine.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "d2c"
@ -17,31 +17,72 @@
static void _ocf_d2c_completion(struct ocf_request *req, int error)
{
req->error = error;
OCF_DEBUG_RQ(req, "Completion");
if (req->error)
if (error)
ocf_core_stats_core_error_update(req->core, req->rw);
/* Complete request */
req->complete(req, req->error);
req->complete(req, error);
/* Release OCF request */
ocf_req_put(req);
}
int ocf_io_d2c(struct ocf_request *req)
int ocf_d2c_io(struct ocf_request *req)
{
ocf_core_t core = req->core;
OCF_DEBUG_TRACE(req->cache);
/* Get OCF request - increase reference counter */
ocf_req_get(req);
ocf_submit_volume_req(&core->volume, req, _ocf_d2c_completion);
ocf_engine_forward_core_io_req(req, _ocf_d2c_completion);
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
return 0;
}
int ocf_d2c_flush(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
/* Get OCF request - increase reference counter */
ocf_req_get(req);
ocf_engine_forward_core_flush_req(req, _ocf_d2c_completion);
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
return 0;
}
int ocf_d2c_discard(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
/* Get OCF request - increase reference counter */
ocf_req_get(req);
ocf_engine_forward_core_discard_req(req, _ocf_d2c_completion);
ocf_engine_update_block_stats(req);
@ -55,5 +96,4 @@ int ocf_io_d2c(struct ocf_request *req)
ocf_req_put(req);
return 0;
}

View File

@ -1,11 +1,16 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ENGINE_2DC_H_
#define ENGINE_2DC_H_
int ocf_io_d2c(struct ocf_request *req);
int ocf_d2c_io(struct ocf_request *req);
int ocf_d2c_flush(struct ocf_request *req);
int ocf_d2c_discard(struct ocf_request *req);
#endif /* ENGINE_2DC_H_ */

View File

@ -8,10 +8,10 @@
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_discard.h"
#include "engine_io.h"
#include "../metadata/metadata.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG 0
@ -25,75 +25,33 @@ static void _ocf_discard_complete_req(struct ocf_request *req, int error)
ocf_req_put(req);
}
static void _ocf_discard_core_complete(struct ocf_io *io, int error)
{
struct ocf_request *req = io->priv1;
OCF_DEBUG_RQ(req, "Core DISCARD Completion");
_ocf_discard_complete_req(req, error);
ocf_io_put(io);
}
static int _ocf_discard_core(struct ocf_request *req)
{
struct ocf_io *io;
int err;
req->byte_position = SECTORS_TO_BYTES(req->discard.sector);
req->byte_length = SECTORS_TO_BYTES(req->discard.nr_sects);
io = ocf_volume_new_io(&req->core->volume, req->io_queue,
SECTORS_TO_BYTES(req->discard.sector),
SECTORS_TO_BYTES(req->discard.nr_sects),
OCF_WRITE, 0, 0);
if (!io) {
_ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
ocf_io_set_cmpl(io, req, NULL, _ocf_discard_core_complete);
err = ocf_io_set_data(io, req->data, req->offset);
if (err) {
_ocf_discard_core_complete(io, err);
return err;
}
ocf_volume_submit_discard(io);
ocf_engine_forward_core_discard_req(req, _ocf_discard_complete_req);
return 0;
}
static void _ocf_discard_cache_flush_complete(struct ocf_io *io, int error)
static void _ocf_discard_cache_flush_complete(struct ocf_request *req, int error)
{
struct ocf_request *req = io->priv1;
if (error) {
ocf_metadata_error(req->cache);
_ocf_discard_complete_req(req, error);
ocf_io_put(io);
return;
}
req->engine_handler = _ocf_discard_core;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
ocf_io_put(io);
}
static int _ocf_discard_flush_cache(struct ocf_request *req)
{
struct ocf_io *io;
io = ocf_volume_new_io(&req->cache->device->volume, req->io_queue,
0, 0, OCF_WRITE, 0, 0);
if (!io) {
ocf_metadata_error(req->cache);
_ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
ocf_io_set_cmpl(io, req, NULL, _ocf_discard_cache_flush_complete);
ocf_volume_submit_flush(io);
ocf_engine_forward_cache_flush_req(req,
_ocf_discard_cache_flush_complete);
return 0;
}
@ -116,9 +74,6 @@ static void _ocf_discard_finish_step(struct ocf_request *req)
static void _ocf_discard_step_complete(struct ocf_request *req, int error)
{
if (error)
req->error |= error;
if (env_atomic_dec_return(&req->req_remaining))
return;
@ -127,9 +82,9 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
/* Release WRITE lock of request */
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
if (req->error) {
if (error) {
ocf_metadata_error(req->cache);
_ocf_discard_complete_req(req, req->error);
_ocf_discard_complete_req(req, error);
return;
}
@ -242,7 +197,7 @@ static int _ocf_discard_step(struct ocf_request *req)
return 0;
}
int ocf_discard(struct ocf_request *req)
int ocf_engine_discard(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);

View File

@ -1,11 +1,12 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __ENGINE_DISCARD_H__
#define __ENGINE_DISCARD_H__
int ocf_discard(struct ocf_request *req);
int ocf_engine_discard(struct ocf_request *req);
#endif

View File

@ -8,11 +8,11 @@
#include "../ocf_cache_priv.h"
#include "engine_fast.h"
#include "engine_common.h"
#include "engine_io.h"
#include "engine_pt.h"
#include "engine_wb.h"
#include "../ocf_request.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#include "../metadata/metadata.h"
@ -31,19 +31,11 @@
static void _ocf_read_fast_complete(struct ocf_request *req, int error)
{
if (error) {
req->error |= error;
ocf_core_stats_cache_error_update(req->core, OCF_READ);
}
if (env_atomic_dec_return(&req->req_remaining)) {
/* Not all requests finished */
return;
}
OCF_DEBUG_RQ(req, "HIT completion");
if (req->error) {
if (error) {
ocf_core_stats_cache_error_update(req->core, OCF_READ);
OCF_DEBUG_RQ(req, "ERROR");
ocf_queue_push_req_pt(req);
@ -51,7 +43,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
/* Complete request */
req->complete(req, req->error);
req->complete(req, error);
/* Free the request at the last point of the completion path */
ocf_req_put(req);
@ -86,10 +78,7 @@ static int _ocf_read_fast_do(struct ocf_request *req)
/* Submit IO */
OCF_DEBUG_RQ(req, "Submit");
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
ocf_submit_cache_reqs(req->cache, req, OCF_READ, 0, req->byte_length,
ocf_engine_io_count(req), _ocf_read_fast_complete);
ocf_engine_forward_cache_io_req(req, OCF_READ, _ocf_read_fast_complete);
/* Update statistics */
ocf_engine_update_request_stats(req);

View File

@ -6,10 +6,10 @@
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_common.h"
#include "engine_io.h"
#include "cache_engine.h"
#include "engine_flush.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#define OCF_ENGINE_DEBUG_IO_NAME "flush"
#include "engine_debug.h"
@ -17,7 +17,7 @@
static void _ocf_engine_flush_complete(struct ocf_request *req, int error)
{
if (error)
req->error |= error;
req->error = req->error ?: error;
if (env_atomic_dec_return(&req->req_remaining))
return;
@ -45,17 +45,13 @@ int ocf_engine_flush(struct ocf_request *req)
env_atomic_set(&req->req_remaining, 2);
/* Submit operation into core device */
ocf_submit_volume_req(&req->core->volume, req,
_ocf_engine_flush_complete);
ocf_engine_forward_core_flush_req(req, _ocf_engine_flush_complete);
/* submit flush to cache device */
ocf_submit_cache_flush(req, _ocf_engine_flush_complete);
ocf_engine_forward_cache_flush_req(req, _ocf_engine_flush_complete);
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
return 0;
}

View File

@ -19,18 +19,12 @@
static void _ocf_invalidate_req(struct ocf_request *req, int error)
{
if (error) {
req->error = error;
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
}
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(req, "Completion");
if (req->error)
if (error) {
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
ocf_engine_error(req, true, "Failed to flush metadata to cache");
}
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
@ -48,16 +42,14 @@ static int _ocf_invalidate_do(struct ocf_request *req)
ocf_purge_map_info(req);
ocf_hb_req_prot_unlock_wr(req);
env_atomic_inc(&req->req_remaining);
if (ocf_volume_is_atomic(&cache->device->volume) &&
req->info.flush_metadata) {
/* Metadata flush IO */
ocf_metadata_flush_do_asynch(cache, req, _ocf_invalidate_req);
} else {
_ocf_invalidate_req(req, 0);
}
_ocf_invalidate_req(req, 0);
return 0;
}

162
src/engine/engine_io.c Normal file
View File

@ -0,0 +1,162 @@
/*
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
#include "engine_io.h"
#include "engine_common.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_volume_priv.h"
#include "../ocf_request.h"
#include "../utils/utils_cache_line.h"
/*
 * Forward a single contiguous fragment of the request's data to the cache
 * volume. The absolute cache-device address is derived from the collision
 * index of the cache line that covers @offset within the request.
 *
 * @param req Request whose data is being forwarded
 * @param dir Direction (OCF_READ or OCF_WRITE)
 * @param offset Offset of the fragment within the request data
 * @param size Fragment length in bytes
 * @param callback Completion callback stored as the forward end handler
 */
void ocf_engine_forward_cache_io(struct ocf_request *req, int dir,
		uint64_t offset, uint64_t size, ocf_req_end_t callback)
{
	ocf_cache_t cache = req->cache;
	uint64_t line_size = ocf_line_size(cache);
	/* Position of the fragment measured from the start of the first
	 * cache line spanned by the request. */
	uint64_t intra = (req->byte_position % line_size) + offset;
	uint32_t line_idx = ocf_bytes_2_lines(cache, intra);
	uint64_t cache_addr;

	req->cache_forward_end = callback;

	/* Translate the request-relative position into an absolute address
	 * on the cache device. */
	cache_addr = cache->device->metadata_offset
			+ req->map[line_idx].coll_idx * line_size
			+ intra % line_size;

	ocf_core_stats_cache_block_update(req->core, req->part_id, dir,
			req->byte_length);

	ocf_req_forward_cache_io(req, dir, cache_addr, size,
			req->offset + offset);
}
/*
 * Forward an entire request's data to the cache volume, splitting it into
 * the minimal number of contiguous IOs. When the mapped cache lines are
 * physically sequential, a single forward covers the whole request;
 * otherwise adjacent lines are coalesced into runs and one forward is
 * issued per run.
 *
 * @param req Request whose data is being forwarded
 * @param dir Direction (OCF_READ or OCF_WRITE)
 * @param callback Completion callback stored as the forward end handler
 */
void ocf_engine_forward_cache_io_req(struct ocf_request *req, int dir,
ocf_req_end_t callback)
{
ocf_cache_t cache = req->cache;
uint64_t addr, bytes, total_bytes = 0, addr_next = 0;
uint32_t i;
req->cache_forward_end = callback;
/* Fast path: the whole request maps to physically consecutive cache
 * lines, so it can be forwarded as one IO. */
if (ocf_engine_is_sequential(req)) {
addr = cache->device->metadata_offset;
addr += req->map[0].coll_idx * ocf_line_size(cache);
addr += req->byte_position % ocf_line_size(cache);
ocf_core_stats_cache_block_update(req->core, req->part_id,
dir, req->byte_length);
ocf_req_forward_cache_io(req, dir, addr, req->byte_length,
req->offset);
return;
}
/* Hold an extra forward reference so that completions of individual
 * fragments cannot finish the request before all of them are issued. */
ocf_req_forward_cache_get(req);
for (i = 0; i < req->core_line_count; i++) {
/* addr_next is carried over from the inner coalescing loop when
 * it found a non-adjacent line; 0 means "compute from the map".
 * NOTE(review): assumes a valid fragment address is never 0
 * (metadata_offset > 0) - confirm against device layout. */
if (addr_next) {
addr = addr_next;
} else {
addr = req->map[i].coll_idx;
addr *= ocf_line_size(cache);
addr += cache->device->metadata_offset;
}
bytes = ocf_line_size(cache);
/* First line: skip the leading bytes before the request start. */
if (i == 0) {
uint64_t seek = (req->byte_position) %
ocf_line_size(cache);
addr += seek;
bytes -= seek;
}
/* Coalesce physically adjacent lines into this fragment; stop at
 * the first line that is not contiguous with the current run. */
for (; i < (req->core_line_count - 1); i++) {
addr_next = req->map[i + 1].coll_idx;
addr_next *= ocf_line_size(cache);
addr_next += cache->device->metadata_offset;
if (addr_next != (addr + bytes))
break;
bytes += ocf_line_size(cache);
}
/* Last line: trim the trailing bytes past the request end. */
if (i == (req->core_line_count - 1)) {
uint64_t skip = (ocf_line_size(cache) -
((req->byte_position + req->byte_length) %
ocf_line_size(cache))) % ocf_line_size(cache);
bytes -= skip;
}
bytes = OCF_MIN(bytes, req->byte_length - total_bytes);
ENV_BUG_ON(bytes == 0);
ocf_core_stats_cache_block_update(req->core, req->part_id,
dir, bytes);
ocf_req_forward_cache_io(req, dir, addr, bytes,
req->offset + total_bytes);
total_bytes += bytes;
}
/* Every byte of the request must have been forwarded exactly once. */
ENV_BUG_ON(total_bytes != req->byte_length);
/* Drop the extra reference taken above. */
ocf_req_forward_cache_put(req);
}
/*
 * Forward the request as a flush to the cache volume, completing via
 * @callback.
 */
void ocf_engine_forward_cache_flush_req(struct ocf_request *req,
ocf_req_end_t callback)
{
req->cache_forward_end = callback;
ocf_req_forward_cache_flush(req);
}
/*
 * Forward the request as a discard of its whole byte range to the cache
 * volume, completing via @callback.
 */
void ocf_engine_forward_cache_discard_req(struct ocf_request *req,
ocf_req_end_t callback)
{
req->cache_forward_end = callback;
ocf_req_forward_cache_discard(req, req->byte_position,
req->byte_length);
}
/*
 * Forward the request's full byte range as an IO to the core volume,
 * updating core block statistics and completing via @callback.
 */
void ocf_engine_forward_core_io_req(struct ocf_request *req,
ocf_req_end_t callback)
{
ocf_core_stats_core_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->core_forward_end = callback;
ocf_req_forward_core_io(req, req->rw, req->byte_position,
req->byte_length, req->offset);
}
/*
 * Forward the request as a flush to the core volume, updating core block
 * statistics and completing via @callback.
 */
void ocf_engine_forward_core_flush_req(struct ocf_request *req,
ocf_req_end_t callback)
{
ocf_core_stats_core_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->core_forward_end = callback;
ocf_req_forward_core_flush(req);
}
/*
 * Forward the request as a discard of its whole byte range to the core
 * volume, updating core block statistics and completing via @callback.
 */
void ocf_engine_forward_core_discard_req(struct ocf_request *req,
ocf_req_end_t callback)
{
ocf_core_stats_core_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->core_forward_end = callback;
ocf_req_forward_core_discard(req, req->byte_position, req->byte_length);
}

32
src/engine/engine_io.h Normal file
View File

@ -0,0 +1,32 @@
/*
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ENGINE_IO_H_
#define ENGINE_IO_H_
#include "../ocf_request.h"
/* Forward a single fragment of the request data (at @offset, @size bytes)
 * to the cache volume. */
void ocf_engine_forward_cache_io(struct ocf_request *req, int dir,
uint64_t offset, uint64_t size, ocf_req_end_t callback);
/* Forward the whole request data to the cache volume, coalescing adjacent
 * cache lines into the minimal number of IOs. */
void ocf_engine_forward_cache_io_req(struct ocf_request *req, int dir,
ocf_req_end_t callback);
/* Forward the request as a flush to the cache volume. */
void ocf_engine_forward_cache_flush_req(struct ocf_request *req,
ocf_req_end_t callback);
/* Forward the request as a discard to the cache volume. */
void ocf_engine_forward_cache_discard_req(struct ocf_request *req,
ocf_req_end_t callback);
/* Forward the whole request as an IO to the core volume. */
void ocf_engine_forward_core_io_req(struct ocf_request *req,
ocf_req_end_t callback);
/* Forward the request as a flush to the core volume. */
void ocf_engine_forward_core_flush_req(struct ocf_request *req,
ocf_req_end_t callback);
/* Forward the request as a discard to the core volume. */
void ocf_engine_forward_core_discard_req(struct ocf_request *req,
ocf_req_end_t callback);
#endif /* ENGINE_IO_H_ */

View File

@ -8,9 +8,9 @@
#include "engine_pt.h"
#include "engine_rd.h"
#include "engine_common.h"
#include "engine_io.h"
#include "cache_engine.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
@ -20,19 +20,13 @@
static void _ocf_read_pt_complete(struct ocf_request *req, int error)
{
if (error)
req->error |= error;
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(req, "Completion");
if (req->error)
if (error)
ocf_core_stats_core_error_update(req->core, OCF_READ);
/* Complete request */
req->complete(req, req->error);
req->complete(req, error);
ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
@ -42,12 +36,10 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
static inline void _ocf_read_pt_submit(struct ocf_request *req)
{
env_atomic_set(&req->req_remaining, 1); /* Core device IO */
OCF_DEBUG_RQ(req, "Submit");
/* Core read */
ocf_submit_volume_req(&req->core->volume, req, _ocf_read_pt_complete);
ocf_engine_forward_core_io_req(req, _ocf_read_pt_complete);
}
int ocf_read_pt_do(struct ocf_request *req)

View File

@ -11,9 +11,9 @@
#include "engine_inv.h"
#include "engine_bf.h"
#include "engine_common.h"
#include "engine_io.h"
#include "cache_engine.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#include "../ocf_request.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_user_part.h"
@ -31,19 +31,12 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "HIT completion");
if (error) {
req->error |= error;
ocf_core_stats_cache_error_update(req->core, OCF_READ);
inc_fallback_pt_error_counter(req->cache);
}
if (env_atomic_dec_return(&req->req_remaining) > 0)
return;
if (req->error) {
ocf_queue_push_req_pt(req);
} else {
ocf_req_unlock(c, req);
req->complete(req, req->error);
req->complete(req, error);
ocf_req_put(req);
}
}
@ -54,14 +47,9 @@ static void _ocf_read_generic_miss_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "MISS completion");
if (error)
req->error = error;
if (env_atomic_dec_return(&req->req_remaining) > 0)
return;
if (req->error) {
req->complete(req, req->error);
if (error) {
/* --- Do not backfill --- */
req->complete(req, error);
ocf_core_stats_core_error_update(req->core, OCF_READ);
@ -80,18 +68,15 @@ static void _ocf_read_generic_miss_complete(struct ocf_request *req, int error)
req->byte_length);
}
/* Complete request */
req->complete(req, req->error);
req->complete(req, error);
ocf_engine_backfill(req);
}
void ocf_read_generic_submit_hit(struct ocf_request *req)
{
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
ocf_submit_cache_reqs(req->cache, req, OCF_READ, 0, req->byte_length,
ocf_engine_io_count(req), _ocf_read_generic_hit_complete);
ocf_engine_forward_cache_io_req(req, OCF_READ,
_ocf_read_generic_hit_complete);
}
static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
@ -99,8 +84,6 @@ static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
struct ocf_cache *cache = req->cache;
int ret;
env_atomic_set(&req->req_remaining, 1);
req->cp_data = ctx_data_alloc(cache->owner,
BYTES_TO_PAGES(req->byte_length));
if (!req->cp_data) {
@ -119,9 +102,7 @@ static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
}
err_alloc:
/* Submit read request to core device. */
ocf_submit_volume_req(&req->core->volume, req,
_ocf_read_generic_miss_complete);
ocf_engine_forward_core_io_req(req, _ocf_read_generic_miss_complete);
}
static int _ocf_read_generic_do(struct ocf_request *req)

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
@ -8,9 +9,9 @@
#include "engine_wt.h"
#include "engine_wi.h"
#include "engine_common.h"
#include "engine_io.h"
#include "cache_engine.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wa"

View File

@ -8,12 +8,12 @@
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_io.h"
#include "engine_wb.h"
#include "engine_wi.h"
#include "engine_inv.h"
#include "../metadata/metadata.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_request.h"
#include "../utils/utils_user_part.h"
@ -50,17 +50,11 @@ static void _ocf_write_wb_update_bits(struct ocf_request *req)
static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
{
if (error)
req->error = error;
if (env_atomic_dec_return(&req->req_remaining))
return;
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
req->complete(req, req->error);
req->complete(req, error);
ocf_req_put(req);
}
@ -69,37 +63,29 @@ static int ocf_write_wb_do_flush_metadata(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
env_atomic_set(&req->req_remaining, 1); /* One core IO */
_ocf_write_wb_update_bits(req);
if (req->info.flush_metadata) {
OCF_DEBUG_RQ(req, "Flush metadata");
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wb_io_flush_metadata);
} else {
_ocf_write_wb_io_flush_metadata(req, 0);
}
_ocf_write_wb_io_flush_metadata(req, 0);
return 0;
}
static void _ocf_write_wb_complete(struct ocf_request *req, int error)
{
if (error) {
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
req->error |= error;
}
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(req, "Completion");
if (req->error) {
if (error) {
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
ocf_engine_error(req, true, "Failed to write data to cache");
req->complete(req, req->error);
req->complete(req, error);
ocf_engine_invalidate(req);
} else {
@ -111,10 +97,6 @@ static void _ocf_write_wb_complete(struct ocf_request *req, int error)
static inline void _ocf_write_wb_submit(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
/*
* 1. Submit data
* 2. Wait for completion of data
@ -137,8 +119,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
OCF_DEBUG_RQ(req, "Submit Data");
/* Data IO */
ocf_submit_cache_reqs(cache, req, OCF_WRITE, 0, req->byte_length,
ocf_engine_io_count(req), _ocf_write_wb_complete);
ocf_engine_forward_cache_io_req(req, OCF_WRITE, _ocf_write_wb_complete);
}
int ocf_write_wb_do(struct ocf_request *req)

View File

@ -8,10 +8,10 @@
#include "../ocf_cache_priv.h"
#include "engine_wi.h"
#include "engine_common.h"
#include "engine_io.h"
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_request.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wi"
@ -22,7 +22,7 @@ static int _ocf_write_wi_next_pass(struct ocf_request *req)
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
if (req->wi_second_pass) {
req->complete(req, req->error);
req->complete(req, 0);
ocf_req_put(req);
return 0;
@ -48,25 +48,19 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
{
if (error) {
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
req->error |= error;
ocf_engine_error(req, true, "Failed to write data to cache");
}
if (env_atomic_dec_return(&req->req_remaining))
return;
if (!req->error && !req->wi_second_pass && ocf_engine_is_miss(req)) {
if (!error && !req->wi_second_pass && ocf_engine_is_miss(req)) {
/* need another pass */
ocf_queue_push_req_cb(req, _ocf_write_wi_next_pass,
OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
return;
}
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
req->complete(req, req->error);
req->complete(req, error);
ocf_req_put(req);
}
@ -105,20 +99,14 @@ static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
{
if (error) {
req->error = error;
ocf_core_stats_core_error_update(req->core, OCF_WRITE);
}
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(req, "Completion");
if (req->error) {
if (error) {
ocf_core_stats_core_error_update(req->core, OCF_WRITE);
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
req->complete(req, req->error);
req->complete(req, error);
ocf_req_put(req);
} else {
@ -146,13 +134,10 @@ static int _ocf_write_wi_core_write(struct ocf_request *req)
return 0;
}
env_atomic_set(&req->req_remaining, 1); /* One core IO */
OCF_DEBUG_RQ(req, "Submit");
/* Submit write IO to the core */
ocf_submit_volume_req(&req->core->volume, req,
_ocf_write_wi_core_complete);
ocf_engine_forward_core_io_req(req, _ocf_write_wi_core_complete);
/* Update statistics */
ocf_engine_update_block_stats(req);

View File

@ -9,10 +9,10 @@
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_io.h"
#include "engine_rd.h"
#include "engine_pt.h"
#include "../metadata/metadata.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_user_part.h"
#include "../concurrency/ocf_concurrency.h"
@ -23,13 +23,10 @@
static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
{
if (error) {
req->error = req->error ?: error;
ocf_core_stats_cache_error_update(req->core, OCF_READ);
req->error |= error;
}
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(req, "Completion");
if (req->error)
@ -48,8 +45,7 @@ static void ocf_read_wo_cache_io(struct ocf_request *req, uint64_t offset,
uint64_t size)
{
OCF_DEBUG_RQ(req, "Submit cache");
env_atomic_inc(&req->req_remaining);
ocf_submit_cache_reqs(req->cache, req, OCF_READ, offset, size, 1,
ocf_engine_forward_cache_io(req, OCF_READ, offset, size,
ocf_read_wo_cache_complete);
}
@ -66,7 +62,9 @@ static int ocf_read_wo_cache_do(struct ocf_request *req)
uint64_t offset = 0;
uint64_t increment = 0;
env_atomic_set(&req->req_remaining, 1);
req->cache_forward_end = ocf_read_wo_cache_complete;
ocf_req_forward_cache_get(req);
for (line = 0; line < req->core_line_count; ++line) {
entry = &req->map[line];
@ -147,23 +145,21 @@ static int ocf_read_wo_cache_do(struct ocf_request *req)
if (io)
ocf_read_wo_cache_io(req, io_start, offset - io_start);
ocf_read_wo_cache_complete(req, 0);
ocf_req_forward_cache_put(req);
return 0;
}
static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
{
if (error) {
req->error |= error;
if (error)
ocf_core_stats_core_error_update(req->core, OCF_READ);
}
/* if all mapped cachelines are clean, the data we've read from core
* is valid and we can complete the request */
if (!req->info.dirty_any || req->error) {
if (error || !req->info.dirty_any) {
OCF_DEBUG_RQ(req, "Completion");
req->complete(req, req->error);
req->complete(req, error);
ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
ocf_req_put(req);
return;
@ -191,8 +187,7 @@ static int ocf_read_wo_do(struct ocf_request *req)
} else {
OCF_DEBUG_RQ(req, "Submit core");
ocf_submit_volume_req(&req->core->volume, req,
_ocf_read_wo_core_complete);
ocf_engine_forward_core_io_req(req, _ocf_read_wo_core_complete);
}
ocf_engine_update_request_stats(req);

View File

@ -10,8 +10,8 @@
#include "engine_wi.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "engine_io.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
@ -58,12 +58,6 @@ static void _ocf_write_wt_do_flush_metadata_compl(struct ocf_request *req,
int error)
{
if (error)
req->error = error;
if (env_atomic_dec_return(&req->req_remaining))
return;
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
@ -77,8 +71,6 @@ static int ocf_write_wt_do_flush_metadata(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
env_atomic_set(&req->req_remaining, 1);
_ocf_write_wt_update_bits(req);
if (req->info.flush_metadata) {
@ -86,10 +78,10 @@ static int ocf_write_wt_do_flush_metadata(struct ocf_request *req)
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wt_do_flush_metadata_compl);
} else {
_ocf_write_wt_do_flush_metadata_compl(req, 0);
}
_ocf_write_wt_do_flush_metadata_compl(req, 0);
return 0;
}
@ -136,8 +128,8 @@ static void _ocf_write_wt_cache_complete(struct ocf_request *req, int error)
static void _ocf_write_wt_core_complete(struct ocf_request *req, int error)
{
if (error) {
req->error = error;
req->info.core_error = 1;
req->error = error;
ocf_core_stats_core_error_update(req->core, OCF_WRITE);
}
@ -146,22 +138,17 @@ static void _ocf_write_wt_core_complete(struct ocf_request *req, int error)
static inline void _ocf_write_wt_submit(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
/* Submit IOs */
OCF_DEBUG_RQ(req, "Submit");
/* Calculate how many IOs need to be submited */
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req)); /* Cache IO */
env_atomic_inc(&req->req_remaining); /* Core device IO */
env_atomic_set(&req->req_remaining, 2); /* cache IO + core IO */
/* To cache */
ocf_submit_cache_reqs(cache, req, OCF_WRITE, 0, req->byte_length,
ocf_engine_io_count(req), _ocf_write_wt_cache_complete);
ocf_engine_forward_cache_io_req(req, OCF_WRITE,
_ocf_write_wt_cache_complete);
/* To core */
ocf_submit_volume_req(&req->core->volume, req,
_ocf_write_wt_core_complete);
ocf_engine_forward_core_io_req(req, _ocf_write_wt_core_complete);
}
static int _ocf_write_wt_do(struct ocf_request *req)

View File

@ -19,23 +19,17 @@
static int ocf_zero_purge(struct ocf_request *req)
{
if (req->error) {
ocf_engine_error(req, true, "Failed to discard data on cache");
} else {
/* There are mapped cache line, need to remove them */
/* There are mapped cache line, need to remove them */
ocf_hb_req_prot_lock_wr(req); /*- Metadata WR access ---------------*/
ocf_hb_req_prot_lock_wr(req); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
}
ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
req->complete(req, req->error);
req->complete(req, 0);
ocf_req_put(req);
return 0;
@ -45,11 +39,13 @@ static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
{
if (error) {
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
req->error = error;
}
ocf_engine_error(req, true, "Failed to discard data on cache");
if (env_atomic_dec_return(&req->req_remaining))
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
req->complete(req, error);
ocf_req_put(req);
return;
}
ocf_queue_push_req_cb(req, ocf_zero_purge,
OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);

View File

@ -320,9 +320,13 @@ static void ocf_cache_io_complete(struct ocf_io *io, int error)
{
struct ocf_cache_volume_io_priv *priv;
ocf_cache_t cache;
struct ocf_request *req = ocf_io_to_req(io);
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
if (error)
req->error = req->error ?: error;
priv = ocf_io_get_priv(io);
env_atomic_cmpxchg(&priv->error, 0, error);

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -93,6 +94,111 @@ static void ocf_composite_volume_submit_discard(struct ocf_io *master_io)
ocf_composite_volume_handle_io(master_io, ocf_volume_submit_discard);
}
/*
 * Forward an IO to a composite volume, splitting it across the member
 * volumes that back consecutive address ranges.
 *
 * @cvolume composite volume the IO was forwarded to
 * @token   forward token identifying the original request
 * @dir     OCF_READ or OCF_WRITE
 * @addr    start address in the composite (concatenated) address space
 * @bytes   number of bytes to transfer
 * @offset  offset into the request's data buffer
 *
 * Each per-member ocf_forward_io() call takes its own reference on the
 * token; the trailing ocf_forward_end(token, 0) drops the reference held
 * for the original forward, so completion fires only after all members
 * finished.
 */
void ocf_composite_forward_io(ocf_volume_t cvolume,
		ocf_forward_token_t token, int dir, uint64_t addr,
		uint64_t bytes, uint64_t offset)
{
	struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
	uint64_t member_bytes, caddr;
	int i;

	/* The IO must lie entirely within the composite address space */
	ENV_BUG_ON(addr >= composite->length);
	ENV_BUG_ON(addr + bytes > composite->length);

	caddr = addr;

	/* Find the first member that contains addr and rebase addr to be
	 * relative to that member's start. end_addr[] holds cumulative
	 * member end offsets. */
	for (i = 0; i < composite->members_cnt; i++) {
		if (addr >= composite->end_addr[i])
			continue;

		if (unlikely(!composite->member[i].volume.opened)) {
			ocf_forward_end(token, -OCF_ERR_INVAL);
			return;
		}

		addr = addr - (i > 0 ? composite->end_addr[i-1] : 0);
		break;
	}

	/* Dispatch consecutive chunks to successive members until all bytes
	 * are forwarded. caddr tracks the composite-space position of the
	 * still-unsent data. */
	for (; i < composite->members_cnt && bytes; i++) {
		if (unlikely(!composite->member[i].volume.opened)) {
			ocf_forward_end(token, -OCF_ERR_INVAL);
			return;
		}

		member_bytes = OCF_MIN(bytes, composite->end_addr[i] - caddr);

		ocf_forward_io(&composite->member[i].volume, token, dir, addr,
				member_bytes, offset);

		/* Subsequent members are written from their beginning */
		addr = 0;
		caddr = composite->end_addr[i];
		bytes -= member_bytes;
		offset += member_bytes;
	}

	/* Put io forward counter to account for the original forward */
	ocf_forward_end(token, 0);
}
/*
 * Forward a flush to every member of a composite volume.
 *
 * Each per-member ocf_forward_flush() takes its own reference on @token;
 * the final ocf_forward_end() releases the reference held for the
 * original forward, so the flush completes once all members complete.
 */
void ocf_composite_forward_flush(ocf_volume_t cvolume,
		ocf_forward_token_t token)
{
	struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
	int idx;

	for (idx = 0; idx < composite->members_cnt; idx++) {
		ocf_forward_flush(&composite->member[idx].volume, token);
	}

	/* Drop the io forward counter held for the original forward */
	ocf_forward_end(token, 0);
}
/*
 * Forward a discard to a composite volume, splitting the range across
 * the member volumes that back it.
 *
 * @cvolume composite volume the discard was forwarded to
 * @token   forward token identifying the original request
 * @addr    start address in the composite address space
 * @bytes   number of bytes to discard
 *
 * Mirrors ocf_composite_forward_io() but without a data buffer offset.
 * Each per-member forward takes its own token reference; the trailing
 * ocf_forward_end(token, 0) drops the original forward's reference.
 */
void ocf_composite_forward_discard(ocf_volume_t cvolume,
		ocf_forward_token_t token, uint64_t addr, uint64_t bytes)
{
	struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
	uint64_t member_bytes, caddr;
	int i;

	caddr = addr;

	/* The range must lie entirely within the composite address space */
	ENV_BUG_ON(addr >= composite->length);
	ENV_BUG_ON(addr + bytes > composite->length);

	/* Find the first member containing addr; rebase addr to be
	 * member-relative */
	for (i = 0; i < composite->members_cnt; i++) {
		if (addr >= composite->end_addr[i])
			continue;

		if (unlikely(!composite->member[i].volume.opened)) {
			ocf_forward_end(token, -OCF_ERR_INVAL);
			return;
		}

		addr = addr - (i > 0 ? composite->end_addr[i-1] : 0);
		break;
	}

	/* Dispatch consecutive chunks to successive members */
	for (; i < composite->members_cnt && bytes; i++) {
		if (unlikely(!composite->member[i].volume.opened)) {
			ocf_forward_end(token, -OCF_ERR_INVAL);
			return;
		}

		member_bytes = OCF_MIN(bytes, composite->end_addr[i] - caddr);

		ocf_forward_discard(&composite->member[i].volume, token, addr,
				member_bytes);

		addr = 0;
		caddr = composite->end_addr[i];
		bytes -= member_bytes;
	}

	/* Put io forward counter to account for the original forward */
	ocf_forward_end(token, 0);
}
/* *** VOLUME OPS *** */
static int ocf_composite_volume_open(ocf_volume_t cvolume, void *volume_params)
@ -211,6 +317,9 @@ const struct ocf_volume_properties ocf_composite_volume_properties = {
.submit_flush = ocf_composite_volume_submit_flush,
.submit_discard = ocf_composite_volume_submit_discard,
.submit_metadata = NULL,
.forward_io = ocf_composite_forward_io,
.forward_flush = ocf_composite_forward_flush,
.forward_discard = ocf_composite_forward_discard,
.open = ocf_composite_volume_open,
.close = ocf_composite_volume_close,

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -56,4 +57,8 @@ ocf_rotate_right(unsigned long long bits, unsigned shift, unsigned width)
((1ULL << width) - 1);
}
struct ocf_request;
typedef void (*ocf_req_end_t)(struct ocf_request *req, int error);
#endif

View File

@ -431,3 +431,130 @@ void ocf_req_hash(struct ocf_request *req)
ocf_core_get_id(req->core));
}
}
/*
 * Forward a data IO from @req to the cache volume.
 *
 * Clears the per-request cache error accumulator, takes a cache-side
 * forward reference (released via ocf_forward_end()) and dispatches to
 * the cache volume's forward_io backend.
 */
void ocf_req_forward_cache_io(struct ocf_request *req, int dir, uint64_t addr,
		uint64_t bytes, uint64_t offset)
{
	ocf_forward_token_t tok;
	ocf_volume_t vol;

	req->cache_error = 0;

	vol = ocf_cache_get_volume(req->cache);
	tok = ocf_req_to_cache_forward_token(req);

	ocf_req_forward_cache_get(req);
	ocf_volume_forward_io(vol, tok, dir, addr, bytes, offset);
}
/*
 * Forward a flush from @req to the cache volume.
 *
 * Clears the cache error accumulator and holds a cache-side forward
 * reference for the duration of the dispatch.
 */
void ocf_req_forward_cache_flush(struct ocf_request *req)
{
	ocf_forward_token_t tok;
	ocf_volume_t vol;

	req->cache_error = 0;

	vol = ocf_cache_get_volume(req->cache);
	tok = ocf_req_to_cache_forward_token(req);

	ocf_req_forward_cache_get(req);
	ocf_volume_forward_flush(vol, tok);
}
/*
 * Forward a discard from @req to the cache volume.
 *
 * Clears the cache error accumulator and holds a cache-side forward
 * reference for the duration of the dispatch.
 */
void ocf_req_forward_cache_discard(struct ocf_request *req, uint64_t addr,
		uint64_t bytes)
{
	ocf_forward_token_t tok;
	ocf_volume_t vol;

	req->cache_error = 0;

	vol = ocf_cache_get_volume(req->cache);
	tok = ocf_req_to_cache_forward_token(req);

	ocf_req_forward_cache_get(req);
	ocf_volume_forward_discard(vol, tok, addr, bytes);
}
/*
 * Forward a data IO from @req to the core (backend) volume.
 *
 * Clears the per-request core error accumulator, takes a core-side
 * forward reference (released via ocf_forward_end()) and dispatches to
 * the core volume's forward_io backend.
 */
void ocf_req_forward_core_io(struct ocf_request *req, int dir, uint64_t addr,
		uint64_t bytes, uint64_t offset)
{
	ocf_forward_token_t tok;
	ocf_volume_t vol;

	req->core_error = 0;

	vol = ocf_core_get_volume(req->core);
	tok = ocf_req_to_core_forward_token(req);

	ocf_req_forward_core_get(req);
	ocf_volume_forward_io(vol, tok, dir, addr, bytes, offset);
}
/*
 * Forward a flush from @req to the core (backend) volume.
 *
 * Clears the core error accumulator and holds a core-side forward
 * reference for the duration of the dispatch.
 */
void ocf_req_forward_core_flush(struct ocf_request *req)
{
	ocf_forward_token_t tok;
	ocf_volume_t vol;

	req->core_error = 0;

	vol = ocf_core_get_volume(req->core);
	tok = ocf_req_to_core_forward_token(req);

	ocf_req_forward_core_get(req);
	ocf_volume_forward_flush(vol, tok);
}
/*
 * Forward a discard from @req to the core (backend) volume.
 *
 * Clears the core error accumulator and holds a core-side forward
 * reference for the duration of the dispatch.
 */
void ocf_req_forward_core_discard(struct ocf_request *req, uint64_t addr,
		uint64_t bytes)
{
	ocf_forward_token_t tok;
	ocf_volume_t vol;

	req->core_error = 0;

	vol = ocf_core_get_volume(req->core);
	tok = ocf_req_to_core_forward_token(req);

	ocf_req_forward_core_get(req);
	ocf_volume_forward_discard(vol, tok, addr, bytes);
}
/*
 * Recover the OCF IO object embedded in the request a forward token
 * points at. Bit 0 of the token is the cache/core tag, so it is masked
 * off to recover the request pointer.
 */
struct ocf_io *ocf_forward_get_io(ocf_forward_token_t token)
{
	struct ocf_request *req = (struct ocf_request *)(token & ~1);

	return &req->ioi.io;
}
/*
 * Take an additional reference on the forward represented by @token.
 * Bit 0 of the token selects the counter: 1 -> cache-side, 0 -> core-side.
 */
static inline void _ocf_forward_get(ocf_forward_token_t token)
{
	struct ocf_request *req = (struct ocf_request *)(token & ~1);

	if (token & 1)
		ocf_req_forward_cache_get(req);
	else
		ocf_req_forward_core_get(req);
}
/*
 * Public wrapper taking an extra reference on a forward token. Each
 * such get must be balanced by an ocf_forward_end() call.
 */
void ocf_forward_get(ocf_forward_token_t token)
{
	_ocf_forward_get(token);
}
/*
 * Forward a data IO further down the volume stack. Takes its own token
 * reference (released by the callee via ocf_forward_end()) so multiple
 * sub-forwards of one token can complete independently.
 */
void ocf_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
		int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
	_ocf_forward_get(token);
	ocf_volume_forward_io(volume, token, dir, addr, bytes, offset);
}
/*
 * Forward a flush further down the volume stack, taking an extra token
 * reference that the callee releases via ocf_forward_end().
 */
void ocf_forward_flush(ocf_volume_t volume, ocf_forward_token_t token)
{
	_ocf_forward_get(token);
	ocf_volume_forward_flush(volume, token);
}
/*
 * Forward a discard further down the volume stack, taking an extra token
 * reference that the callee releases via ocf_forward_end().
 */
void ocf_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
		uint64_t addr, uint64_t bytes)
{
	_ocf_forward_get(token);
	ocf_volume_forward_discard(volume, token, addr, bytes);
}
/*
 * Complete one leg of a forwarded IO, recording @error and dropping one
 * reference on the forward counter selected by bit 0 of @token
 * (1 -> cache side, 0 -> core side). The side's forward_end callback
 * fires when its counter reaches zero.
 *
 * NOTE(review): req->error is also accumulated here in addition to the
 * per-side cache_error/core_error fields — confirm both are intended.
 */
void ocf_forward_end(ocf_forward_token_t token, int error)
{
	struct ocf_request *req = ocf_req_forward_token_to_req(token);

	req->error |= error;

	if (token & 1) {
		/* GNU ?: keeps the first error reported for this side */
		req->cache_error = req->cache_error ?: error;
		ocf_req_forward_cache_put(req);
	} else {
		req->core_error = req->core_error ?: error;
		ocf_req_forward_core_put(req);
	}
}

View File

@ -9,6 +9,7 @@
#include "ocf_env.h"
#include "ocf_io_priv.h"
#include "ocf_def_priv.h"
#include "metadata/metadata_structs.h"
typedef enum {
@ -128,6 +129,11 @@ struct ocf_request {
struct ocf_io_internal ioi;
/*!< OCF IO associated with request */
ocf_req_end_t cache_forward_end;
ocf_req_end_t core_forward_end;
env_atomic cache_remaining;
env_atomic core_remaining;
env_atomic ref_count;
/*!< Reference usage count, once OCF request reaches zero it
* will be de-initialed. Get/Put method are intended to modify
@ -260,7 +266,7 @@ struct ocf_request {
struct ocf_req_info info;
/*!< Detailed request info */
void (*complete)(struct ocf_request *ocf_req, int error);
ocf_req_end_t complete;
/*!< Request completion function */
struct ocf_req_discard_info discard;
@ -276,8 +282,6 @@ struct ocf_request {
struct ocf_map_info __map[0];
};
typedef void (*ocf_req_end_t)(struct ocf_request *req, int error);
/**
* @brief Initialize OCF request allocation utility
*
@ -512,4 +516,57 @@ static inline bool ocf_req_is_4k(uint64_t addr, uint32_t bytes)
return !((addr % PAGE_SIZE) || (bytes % PAGE_SIZE));
}
/* Take one reference on the cache-side forward counter of @req. */
static inline void ocf_req_forward_cache_get(struct ocf_request *req)
{
	env_atomic_inc(&req->cache_remaining);
}
/* Drop one cache-side forward reference; invoke the completion callback
 * with the accumulated cache error when the last reference is dropped. */
static inline void ocf_req_forward_cache_put(struct ocf_request *req)
{
	if (env_atomic_dec_return(&req->cache_remaining) == 0)
		req->cache_forward_end(req, req->cache_error);
}
/* Take one reference on the core-side forward counter of @req. */
static inline void ocf_req_forward_core_get(struct ocf_request *req)
{
	env_atomic_inc(&req->core_remaining);
}
/* Drop one core-side forward reference; invoke the completion callback
 * with the accumulated core error when the last reference is dropped. */
static inline void ocf_req_forward_core_put(struct ocf_request *req)
{
	if (env_atomic_dec_return(&req->core_remaining) == 0)
		req->core_forward_end(req, req->core_error);
}
/* Encode @req as a cache-side forward token by tagging pointer bit 0.
 * Assumes struct ocf_request is at least 2-byte aligned so bit 0 is
 * free to carry the cache/core flag — TODO confirm alignment guarantee. */
static inline ocf_forward_token_t ocf_req_to_cache_forward_token(struct ocf_request *req)
{
	return (ocf_forward_token_t)req | 1;
}
/* Encode @req as a core-side forward token (tag bit 0 left clear). */
static inline ocf_forward_token_t ocf_req_to_core_forward_token(struct ocf_request *req)
{
	return (ocf_forward_token_t)req;
}
/* Recover the request pointer from a forward token by masking off the
 * cache/core tag in bit 0. */
static inline struct ocf_request *ocf_req_forward_token_to_req(ocf_forward_token_t token)
{
	return (struct ocf_request *)(token & ~1);
}
void ocf_req_forward_cache_io(struct ocf_request *req, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset);
void ocf_req_forward_cache_flush(struct ocf_request *req);
void ocf_req_forward_cache_discard(struct ocf_request *req, uint64_t addr,
uint64_t bytes);
void ocf_req_forward_core_io(struct ocf_request *req, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset);
void ocf_req_forward_core_flush(struct ocf_request *req);
void ocf_req_forward_core_discard(struct ocf_request *req, uint64_t addr,
uint64_t bytes);
#endif /* __OCF_REQUEST_H__ */

View File

@ -1,11 +1,13 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
#include "ocf_priv.h"
#include "ocf_volume_priv.h"
#include "ocf_request.h"
#include "ocf_io_priv.h"
#include "ocf_env.h"
@ -328,6 +330,46 @@ void ocf_volume_submit_discard(struct ocf_io *io)
volume->type->properties->ops.submit_discard(io);
}
/*
 * Dispatch a forwarded data IO to @volume's backend implementation.
 *
 * A volume type used on the forward path must implement forward_io.
 * When the volume is not opened, the forward is completed immediately
 * with -OCF_ERR_IO instead of being dispatched.
 */
void ocf_volume_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
		int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
	const struct ocf_volume_ops *ops = &volume->type->properties->ops;

	ENV_BUG_ON(!ops->forward_io);

	if (!volume->opened) {
		ocf_forward_end(token, -OCF_ERR_IO);
		return;
	}

	ops->forward_io(volume, token, dir, addr, bytes, offset);
}
/*
 * Dispatch a forwarded flush to @volume's backend implementation.
 *
 * Completes the forward with -OCF_ERR_IO when the volume is not opened.
 */
void ocf_volume_forward_flush(ocf_volume_t volume, ocf_forward_token_t token)
{
	const struct ocf_volume_ops *ops = &volume->type->properties->ops;

	ENV_BUG_ON(!ops->forward_flush);

	if (!volume->opened) {
		ocf_forward_end(token, -OCF_ERR_IO);
		return;
	}

	ops->forward_flush(volume, token);
}
/*
 * Dispatch a forwarded discard to @volume's backend implementation.
 *
 * Completes the forward with -OCF_ERR_IO when the volume is not opened.
 */
void ocf_volume_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
		uint64_t addr, uint64_t bytes)
{
	const struct ocf_volume_ops *ops = &volume->type->properties->ops;

	ENV_BUG_ON(!ops->forward_discard);

	if (!volume->opened) {
		ocf_forward_end(token, -OCF_ERR_IO);
		return;
	}

	ops->forward_discard(volume, token, addr, bytes);
}
int ocf_volume_open(ocf_volume_t volume, void *volume_params)
{
int ret;

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -46,6 +47,14 @@ void ocf_volume_move(ocf_volume_t volume, ocf_volume_t from);
void ocf_volume_set_uuid(ocf_volume_t volume,
const struct ocf_volume_uuid *uuid);
void ocf_volume_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset);
void ocf_volume_forward_flush(ocf_volume_t volume, ocf_forward_token_t token);
void ocf_volume_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
static inline void ocf_volume_submit_metadata(struct ocf_io *io)
{
ocf_volume_t volume = ocf_io_get_volume(io);

View File

@ -214,158 +214,3 @@ err_io:
env_vfree(context);
cmpl(priv, result);
}
static void ocf_submit_volume_req_cmpl(struct ocf_io *io, int error)
{
struct ocf_request *req = io->priv1;
ocf_req_end_t callback = io->priv2;
callback(req, error);
ocf_io_put(io);
}
void ocf_submit_cache_flush(struct ocf_request *req, ocf_req_end_t callback)
{
uint64_t flags = req->ioi.io.flags;
struct ocf_io *io;
io = ocf_new_cache_io(req->cache, req->io_queue, 0, 0, OCF_WRITE, 0,
flags);
if (!io) {
callback(req, -OCF_ERR_NO_MEM);
return;
}
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
ocf_volume_submit_flush(io);
}
void ocf_submit_cache_reqs(struct ocf_cache *cache,
struct ocf_request *req, int dir, uint64_t offset,
uint64_t size, unsigned int reqs, ocf_req_end_t callback)
{
uint64_t flags = req->ioi.io.flags;
uint32_t io_class = req->ioi.io.io_class;
uint64_t addr, bytes, total_bytes = 0;
struct ocf_io *io;
int err;
uint32_t i;
uint32_t first_cl = ocf_bytes_2_lines(cache, req->byte_position +
offset) - ocf_bytes_2_lines(cache, req->byte_position);
ENV_BUG_ON(req->byte_length < offset + size);
ENV_BUG_ON(first_cl + reqs > req->core_line_count);
if (reqs == 1) {
addr = req->map[first_cl].coll_idx;
addr *= ocf_line_size(cache);
addr += cache->device->metadata_offset;
addr += ((req->byte_position + offset) % ocf_line_size(cache));
bytes = size;
io = ocf_new_cache_io(cache, req->io_queue,
addr, bytes, dir, io_class, flags);
if (!io) {
callback(req, -OCF_ERR_NO_MEM);
return;
}
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
err = ocf_io_set_data(io, req->data, req->offset + offset);
if (err) {
ocf_io_put(io);
callback(req, err);
return;
}
ocf_core_stats_cache_block_update(req->core, io_class,
dir, bytes);
ocf_volume_submit_io(io);
return;
}
/* Issue requests to cache. */
for (i = 0; i < reqs; i++) {
addr = req->map[first_cl + i].coll_idx;
addr *= ocf_line_size(cache);
addr += cache->device->metadata_offset;
bytes = ocf_line_size(cache);
if (i == 0) {
uint64_t seek = ((req->byte_position + offset) %
ocf_line_size(cache));
addr += seek;
bytes -= seek;
} else if (i == (reqs - 1)) {
uint64_t skip = (ocf_line_size(cache) -
((req->byte_position + offset + size) %
ocf_line_size(cache))) % ocf_line_size(cache);
bytes -= skip;
}
bytes = OCF_MIN(bytes, size - total_bytes);
ENV_BUG_ON(bytes == 0);
io = ocf_new_cache_io(cache, req->io_queue,
addr, bytes, dir, io_class, flags);
if (!io) {
/* Finish all IOs which left with ERROR */
for (; i < reqs; i++)
callback(req, -OCF_ERR_NO_MEM);
return;
}
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
err = ocf_io_set_data(io, req->data,
req->offset + offset + total_bytes);
if (err) {
ocf_io_put(io);
/* Finish all IOs which left with ERROR */
for (; i < reqs; i++)
callback(req, err);
return;
}
ocf_core_stats_cache_block_update(req->core, io_class,
dir, bytes);
ocf_volume_submit_io(io);
total_bytes += bytes;
}
ENV_BUG_ON(total_bytes != size);
}
void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
ocf_req_end_t callback)
{
uint64_t flags = req->ioi.io.flags;
uint32_t io_class = req->ioi.io.io_class;
int dir = req->rw;
struct ocf_io *io;
int err;
ocf_core_stats_core_block_update(req->core, io_class, dir,
req->byte_length);
io = ocf_volume_new_io(volume, req->io_queue, req->byte_position,
req->byte_length, dir, io_class, flags);
if (!io) {
callback(req, -OCF_ERR_NO_MEM);
return;
}
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
err = ocf_io_set_data(io, req->data, req->offset);
if (err) {
ocf_io_put(io);
callback(req, err);
return;
}
ocf_volume_submit_io(io);
}

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -8,41 +9,6 @@
#include "../ocf_request.h"
/**
* Checks if 2 IOs are overlapping.
* @param start1 start of first range (inclusive)
* @param end1 end of first range (exclusive)
* @param start2 start of second range (inclusive)
* @param end2 end of second range (exclusive)
* @return 0 in case overlap is not detected, otherwise 1
*/
static inline int ocf_io_range_overlaps(uint32_t start1, uint32_t end1,
uint32_t start2, uint32_t end2)
{
if (start2 <= start1 && end2 >= start1)
return 1;
if (start2 >= start1 && end1 >= start2)
return 1;
return 0;
}
/**
* Checks if 2 IOs are overlapping.
* @param start1 start of first range (inclusive)
* @param count1 no of bytes, cachelines (etc) for first range
* @param start2 start of second range (inclusive)
* @param count2 no of bytes, cachelines (etc) for second range
* @return 0 in case overlap is not detected, otherwise 1
*/
static inline int ocf_io_overlaps(uint32_t start1, uint32_t count1,
uint32_t start2, uint32_t count2)
{
return ocf_io_range_overlaps(start1, start1 + count1 - 1, start2,
start2 + count2 - 1);
}
typedef void (*ocf_submit_end_t)(void *priv, int error);
void ocf_submit_volume_flush(ocf_volume_t volume,
@ -57,15 +23,6 @@ void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
void *buffer, ocf_submit_end_t cmpl, void *priv);
void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
ocf_req_end_t callback);
void ocf_submit_cache_reqs(struct ocf_cache *cache,
struct ocf_request *req, int dir, uint64_t offset,
uint64_t size, unsigned int reqs, ocf_req_end_t callback);
void ocf_submit_cache_flush(struct ocf_request *req, ocf_req_end_t callback);
static inline struct ocf_io *ocf_new_cache_io(ocf_cache_t cache,
ocf_queue_t queue, uint64_t addr, uint32_t bytes,
uint32_t dir, uint32_t io_class, uint64_t flags)

View File

@ -67,6 +67,18 @@ class Io(Structure):
def get_instance(cls, ref):
return cls._instances_[cast(ref, c_void_p).value]
@staticmethod
def get_by_forward_token(token):
    # Resolve a C-side forward token to its embedded ocf_io pointer.
    return OcfLib.getInstance().ocf_forward_get_io(token)
@staticmethod
def forward_get(token):
    # Take an extra reference on the forward token (must be balanced
    # by a matching forward_end call).
    OcfLib.getInstance().ocf_forward_get(token)
@staticmethod
def forward_end(token, error):
    # Complete one leg of a forwarded IO with the given error code
    # (0 on success), dropping one token reference.
    OcfLib.getInstance().ocf_forward_end(token, error)
def del_object(self):
del type(self)._instances_[cast(byref(self), c_void_p).value]
@ -149,6 +161,14 @@ IoOps.GET_DATA = CFUNCTYPE(c_void_p, POINTER(Io))
IoOps._fields_ = [("_set_data", IoOps.SET_DATA), ("_get_data", IoOps.GET_DATA)]
lib = OcfLib.getInstance()
lib.ocf_forward_get.argtypes = [c_uint64]
lib.ocf_forward_get_io.argtypes = [c_uint64]
lib.ocf_forward_get_io.restype = POINTER(Io)
lib.ocf_forward_end.argtypes = [c_uint64, c_int]
lib.ocf_io_set_cmpl_wrapper.argtypes = [POINTER(Io), c_void_p, c_void_p, Io.END]
lib.ocf_io_set_data.argtypes = [POINTER(Io), c_void_p, c_uint32]

View File

@ -51,6 +51,9 @@ class VolumeOps(Structure):
SUBMIT_METADATA = CFUNCTYPE(None, c_void_p)
SUBMIT_DISCARD = CFUNCTYPE(None, c_void_p)
SUBMIT_WRITE_ZEROES = CFUNCTYPE(None, c_void_p)
FORWARD_IO = CFUNCTYPE(None, c_void_p, c_uint64, c_int, c_uint64, c_uint64, c_uint64)
FORWARD_FLUSH = CFUNCTYPE(None, c_void_p, c_uint64)
FORWARD_DISCARD = CFUNCTYPE(None, c_void_p, c_uint64, c_uint64, c_uint64)
ON_INIT = CFUNCTYPE(c_int, c_void_p)
ON_DEINIT = CFUNCTYPE(None, c_void_p)
OPEN = CFUNCTYPE(c_int, c_void_p, c_void_p)
@ -64,6 +67,9 @@ class VolumeOps(Structure):
("_submit_metadata", SUBMIT_METADATA),
("_submit_discard", SUBMIT_DISCARD),
("_submit_write_zeroes", SUBMIT_WRITE_ZEROES),
("_forward_io", FORWARD_IO),
("_forward_flush", FORWARD_FLUSH),
("_forward_discard", FORWARD_DISCARD),
("_on_init", ON_INIT),
("_on_deinit", ON_DEINIT),
("_open", OPEN),
@ -132,6 +138,18 @@ class Volume:
def _submit_write_zeroes(write_zeroes):
raise NotImplementedError
@VolumeOps.FORWARD_IO
def _forward_io(volume, token, rw, addr, nbytes, offset):
Volume.get_instance(volume).forward_io(token, rw, addr, nbytes, offset)
@VolumeOps.FORWARD_FLUSH
def _forward_flush(volume, token):
Volume.get_instance(volume).forward_flush(token)
@VolumeOps.FORWARD_DISCARD
def _forward_discard(volume, token, addr, nbytes):
Volume.get_instance(volume).forward_discard(token, addr, nbytes)
@VolumeOps.ON_INIT
def _on_init(ref):
return 0
@ -181,6 +199,9 @@ class Volume:
_submit_metadata=_submit_metadata,
_submit_discard=_submit_discard,
_submit_write_zeroes=_submit_write_zeroes,
_forward_io=_forward_io,
_forward_flush=_forward_flush,
_forward_discard=_forward_discard,
_open=_open,
_close=_close,
_get_max_io_size=_get_max_io_size,
@ -329,6 +350,28 @@ class Volume:
else:
self._reject_io(io)
def _reject_forward(self, token):
    # Fail a forwarded request immediately with an IO error (used when
    # the volume is offline).
    Io.forward_end(token, -OcfErrorCode.OCF_ERR_IO)
def forward_io(self, token, rw, addr, nbytes, offset):
    """Entry point for forwarded data IO: account stats and dispatch to
    the volume-specific do_forward_io, or reject when offline."""
    if self.is_online:
        self.inc_stats(IoDir(rw))
        self.do_forward_io(token, rw, addr, nbytes, offset)
    else:
        self._reject_forward(token)
def forward_flush(self, token):
    """Entry point for forwarded flush: dispatch to do_forward_flush,
    or reject when offline."""
    if self.is_online:
        self.do_forward_flush(token)
    else:
        self._reject_forward(token)
def forward_discard(self, token, addr, nbytes):
    """Entry point for forwarded discard: dispatch to do_forward_discard,
    or reject when offline."""
    if self.is_online:
        self.do_forward_discard(token, addr, nbytes)
    else:
        self._reject_forward(token)
def new_io(
self, queue: Queue, addr: int, length: int, direction: IoDir, io_class: int, flags: int,
):
@ -470,6 +513,37 @@ class RamVolume(Volume):
except: # noqa E722
io.contents._end(io, -OcfErrorCode.OCF_ERR_IO)
def do_forward_io(self, token, rw, addr, nbytes, offset):
    """Serve a forwarded IO against the RAM backing buffer.

    Copies nbytes between the request's data buffer (at `offset`) and
    the volume memory (at `addr`), then completes the forward. Any
    failure completes the forward with an IO error.

    Fix: the exception handler bound an unused variable (`as e`) and
    carried a stale `noqa E722` comment that applies only to bare
    `except:` clauses.
    """
    try:
        io = Io.get_by_forward_token(token)
        if rw == IoDir.WRITE:
            src_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
            src = Data.get_instance(src_ptr.value).handle.value + offset
            dst = self.data_ptr + addr
        elif rw == IoDir.READ:
            dst_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
            dst = Data.get_instance(dst_ptr.value).handle.value + offset
            src = self.data_ptr + addr

        memmove(dst, src, nbytes)
        Io.forward_end(token, 0)
    except Exception:
        Io.forward_end(token, -OcfErrorCode.OCF_ERR_IO)
def do_forward_flush(self, token):
    # RAM storage has no volatile write-back state; a flush is a no-op
    # that completes immediately with success.
    Io.forward_end(token, 0)
def do_forward_discard(self, token, addr, nbytes):
    """Serve a forwarded discard by zero-filling the target range of the
    RAM backing buffer; failures complete with OCF_ERR_NOT_SUPP."""
    try:
        dst = self.data_ptr + addr
        memset(dst, 0, nbytes)
        Io.forward_end(token, 0)
    except:  # noqa E722
        Io.forward_end(token, -OcfErrorCode.OCF_ERR_NOT_SUPP)
def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
if size == 0:
size = int(self.size) - int(offset)
@ -517,44 +591,68 @@ class ErrorDevice(Volume):
super().close()
self.vol.close()
def should_forward_io(self, io):
def should_forward_io(self, rw, addr):
if not self.armed:
return True
direction = IoDir(io.contents._dir)
direction = IoDir(rw)
seq_no_match = (
self.error_seq_no[direction] >= 0
and self.error_seq_no[direction] <= self.io_seq_no[direction]
)
sector_match = io.contents._addr in self.error_sectors
sector_match = addr in self.error_sectors
self.io_seq_no[direction] += 1
return not seq_no_match and not sector_match
def complete_with_error(self, io):
def complete_submit_with_error(self, io):
self.error = True
direction = IoDir(io.contents._dir)
self.stats["errors"][direction] += 1
io.contents._end(io, -OcfErrorCode.OCF_ERR_IO)
def do_submit_io(self, io):
if self.should_forward_io(io):
if self.should_forward_io(io.contents._dir, io.contents._addr):
self.vol.do_submit_io(io)
else:
self.complete_with_error(io)
self.complete_submit_with_error(io)
def do_submit_flush(self, flush):
if self.data_only or self.should_forward_io(flush):
self.vol.do_submit_flush(flush)
def do_submit_flush(self, io):
if self.data_only or self.should_forward_io(io.contents._dir, io.contents._addr):
self.vol.do_submit_flush(io)
else:
self.complete_with_error(flush)
self.complete_submit_with_error(io)
def do_submit_discard(self, discard):
if self.data_only or self.should_forward_io(discard):
self.vol.do_submit_discard(discard)
def do_submit_discard(self, io):
if self.data_only or self.should_forward_io(io.contents._dir, io.contents._addr):
self.vol.do_submit_discard(io)
else:
self.complete_with_error(discard)
self.complete_submit_with_error(io)
def complete_forward_with_error(self, token, rw):
self.error = True
direction = IoDir(rw)
self.stats["errors"][direction] += 1
Io.forward_end(token, -OcfErrorCode.OCF_ERR_IO)
def do_forward_io(self, token, rw, addr, nbytes, offset):
if self.should_forward_io(rw, addr):
self.vol.do_forward_io(token, rw, addr, nbytes, offset)
else:
self.complete_forward_with_error(token, rw)
def do_forward_flush(self, token):
if self.data_only or self.should_forward_io(0, 0):
self.vol.do_forward_flush(token)
else:
self.complete_forward_with_error(token, rw)
def do_forward_discard(self, token, addr, nbytes):
if self.data_only or self.should_forward_io(0, addr):
self.vol.do_forward_discard(token, addr, nbytes)
else:
self.complete_forward_with_error(token, rw)
def arm(self):
self.armed = True
@ -611,32 +709,89 @@ class TraceDevice(Volume):
super().close()
self.vol.close()
def _trace(self, io, io_type):
def _trace(self, io_type, rw, addr, nbytes, flags):
submit = True
if self.trace_fcn:
submit = self.trace_fcn(self, io, io_type)
submit = self.trace_fcn(self, io_type, rw, addr, nbytes, flags)
return submit
def do_submit_io(self, io):
submit = self._trace(io, TraceDevice.IoType.Data)
submit = self._trace(
TraceDevice.IoType.Data,
io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._flags
)
if submit:
self.vol.do_submit_io(io)
def do_submit_flush(self, io):
submit = self._trace(io, TraceDevice.IoType.Flush)
submit = self._trace(
TraceDevice.IoType.Flush,
io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._flags
)
if submit:
self.vol.do_submit_flush(io)
def do_submit_discard(self, io):
submit = self._trace(io, TraceDevice.IoType.Discard)
submit = self._trace(
TraceDevice.IoType.Discard,
io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._flags
)
if submit:
self.vol.do_submit_discard(io)
def do_forward_io(self, token, rw, addr, nbytes, offset):
io = Io.get_by_forward_token(token)
submit = self._trace(
TraceDevice.IoType.Data,
rw,
addr,
nbytes,
io.contents._flags
)
if submit:
self.vol.do_forward_io(token, rw, addr, nbytes, offset)
def do_forward_flush(self, token):
io = Io.get_by_forward_token(token)
submit = self._trace(
TraceDevice.IoType.Flush,
IoDir.WRITE,
0,
0,
io.contents._flags
)
if submit:
self.vol.do_forward_flush(token)
def do_forward_discard(self, token, addr, nbytes):
io = Io.get_by_forward_token(token)
submit = self._trace(
TraceDevice.IoType.Discard,
IoDir.WRITE,
addr,
nbytes,
io.contents._flags
)
if submit:
self.vol.do_forward_discard(token, addr, nbytes)
def get_length(self):
return self.vol.get_length()

View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
@ -23,17 +24,17 @@ class OcfInternalVolume(Volume):
queue = self.parent.get_default_queue() # TODO multiple queues?
return self.new_io(queue, addr, _bytes, _dir, _class, _flags)
def _alloc_io(self, io):
def _alloc_io(self, io, rw=None, addr=None, nbytes=None, offset=0):
exp_obj_io = self.__alloc_io(
io.contents._addr,
io.contents._bytes,
io.contents._dir,
addr or io.contents._addr,
nbytes or io.contents._bytes,
rw or io.contents._dir,
io.contents._class,
io.contents._flags,
)
cdata = OcfLib.getInstance().ocf_io_get_data(io)
OcfLib.getInstance().ocf_io_set_data(byref(exp_obj_io), cdata, 0)
OcfLib.getInstance().ocf_io_set_data(byref(exp_obj_io), cdata, offset)
def cb(error):
nonlocal io
@ -62,6 +63,29 @@ class OcfInternalVolume(Volume):
io = self._alloc_io(discard)
io.submit_discard()
def do_forward_io(self, token, rw, addr, nbytes, offset):
orig_io = Io.get_by_forward_token(token)
io = self._alloc_io(orig_io, rw, addr, nbytes, offset)
def cb(error):
nonlocal io
Io.forward_end(io.token, error)
io.token = token
io.callback = cb
io.submit()
def do_forward_flush(self, token):
orig_io = Io.get_by_forward_token(token)
io = self._alloc_io(orig_io)
io.submit_flush()
def do_forward_discard(self, token, addr, nbytes):
orig_io = Io.get_by_forward_token(token)
io = self._alloc_io(orig_io, addr=addr, nbytes=nbytes)
io.submit_discard()
def _read(self, offset=0, size=0):
if size == 0:
size = self.get_length().B - offset

View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
@ -92,6 +93,22 @@ class ReplicatedVolume(Volume):
self.primary.submit_discard(discard)
self.secondary.submit_discard(discard)
def do_forward_io(self, token, rw, addr, nbytes, offset):
if rw == IoDir.WRITE:
Io.forward_get(token)
self.secondary.do_forward_io(token, rw, addr, nbytes, offset)
self.primary.do_forward_io(token, rw, addr, nbytes, offset)
def do_forward_flush(self, token):
Io.forward_get(token)
self.secondary.do_forward_flush(token)
self.primary.do_forward_flush(token)
def do_forward_discard(self, token, addr, nbytes):
Io.forward_get(token)
self.secondary.do_forward_discard(token, addr, nbytes)
self.primary.do_forward_discard(token, addr, nbytes)
def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
self.primary.dump()

View File

@ -20,13 +20,13 @@ def test_discard_propagation(pyocf_ctx):
pyocf_ctx.register_volume_type(TraceDevice)
def trace_discard(vol, io, io_type):
def trace_discard(vol, io_type, rw, addr, nbytes, flags):
nonlocal discards
if io_type == TraceDevice.IoType.Discard:
if vol.uuid not in discards:
discards[vol.uuid] = []
discards[vol.uuid].append((io.contents._addr, io.contents._bytes))
discards[vol.uuid].append((addr, nbytes))
return True

View File

@ -7,7 +7,7 @@
from pyocf.types.cache import Cache
from pyocf.types.data import Data
from pyocf.types.core import Core
from pyocf.types.io import IoDir, Sync
from pyocf.types.io import Io, IoDir, Sync
from pyocf.types.volume import RamVolume, IoFlags, TraceDevice
from pyocf.types.volume_core import CoreVolume
from pyocf.utils import Size
@ -18,22 +18,19 @@ def test_flush_propagation(pyocf_ctx):
pyocf_ctx.register_volume_type(TraceDevice)
def trace_flush(vol, io, io_type):
def trace_flush(vol, io_type, rw, addr, nbytes, flags):
nonlocal flushes
if io_type == TraceDevice.IoType.Flush or int(io.contents._flags) & IoFlags.FLUSH:
if io_type == TraceDevice.IoType.Flush:
if vol.uuid not in flushes:
flushes[vol.uuid] = []
flushes[vol.uuid].append((io.contents._addr, io.contents._bytes))
flushes[vol.uuid].append((addr, nbytes))
return True
cache_device = TraceDevice(RamVolume(Size.from_MiB(50)), trace_fcn=trace_flush)
core_device = TraceDevice(RamVolume(Size.from_MiB(100)), trace_fcn=trace_flush)
addr = Size.from_MiB(2).B
size = Size.from_MiB(1).B
cache = Cache.start_on_device(cache_device)
core = Core.using_device(core_device)
cache.add_core(core)
@ -44,9 +41,7 @@ def test_flush_propagation(pyocf_ctx):
flushes = {}
vol.open()
io = vol.new_io(queue, addr, size, IoDir.WRITE, 0, IoFlags.FLUSH)
data = Data(byte_count=0)
io.set_data(data, 0)
io = vol.new_io(queue, 0, 0, IoDir.WRITE, 0, 0)
completion = Sync(io).submit_flush()
vol.close()
@ -62,9 +57,4 @@ def test_flush_propagation(pyocf_ctx):
assert len(cache_flushes) == 1
assert len(core_flushes) == 1
assert core_flushes[0] == (addr, size)
# empty flush expected to be sent to cache device
assert cache_flushes[0] == (0, 0)
cache.stop()

View File

@ -48,6 +48,14 @@ class FlushValVolume(RamVolume):
self.flush_last = True
super().submit_flush(flush)
def forward_io(self, token, rw, addr, nbytes, offset):
self.flush_last = False
super().forward_io(token, rw, addr, nbytes, offset)
def forward_flush(self, token):
self.flush_last = True
super().forward_flush(token)
def test_flush_after_mngmt(pyocf_ctx):
"""

View File

@ -166,12 +166,9 @@ def setup_tracing(backends):
TraceDevice.IoType.Data: [],
}
def trace(vol, io, io_type):
if int(io.contents._flags) & IoFlags.FLUSH:
io_type = TraceDevice.IoType.Flush
def trace(vol, io_type, rw, addr, nbytes, flags):
io_trace[vol][io_type].append(
IoEvent(io.contents._dir, io.contents._addr, io.contents._bytes)
IoEvent(rw, addr, nbytes)
)
return True