Merge pull request #831 from mmichal10/io_forward_pt2

Io forward pt2
commit 6907abeba2
Robert Baldyga 2024-09-20 17:21:36 +02:00, committed by GitHub
No known key found for this signature in database; GPG Key ID: B5690EEEBB952194
53 changed files with 1432 additions and 1883 deletions

View File

@ -239,7 +239,7 @@ err_sem:
/*
* Callback function called when write completes.
*/
void complete_write(struct ocf_io *io, int error)
void complete_write(ocf_io_t io, void *priv1, void *priv2, int error)
{
struct volume_data *data = ocf_io_get_data(io);
@ -253,7 +253,7 @@ void complete_write(struct ocf_io *io, int error)
/*
* Callback function called when read completes.
*/
void complete_read(struct ocf_io *io, int error)
void complete_read(ocf_io_t io, void *priv1, void *priv2, int error)
{
struct volume_data *data = ocf_io_get_data(io);
@ -274,7 +274,7 @@ int submit_io(ocf_core_t core, struct volume_data *data,
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t core_vol = ocf_core_get_front_volume(core);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
struct ocf_io *io;
ocf_io_t io;
/* Allocate new io */
io = ocf_volume_new_io(core_vol, cache_priv->io_queue, addr, len, dir, 0, 0);
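For reference, the remainder of this example's submit path (unchanged by this hunk) pairs the opaque handle with the extended completion signature. A minimal sketch, assuming the example's semaphore-based completion context; `sem` and the error handling here are illustrative, not part of the diff:

if (!io)
	return -ENOMEM;

ocf_io_set_data(io, data, 0);

/* priv1/priv2 passed here arrive as priv1/priv2 in the callback */
ocf_io_set_cmpl(io, sem, NULL,
		dir == OCF_WRITE ? complete_write : complete_read);

ocf_core_submit_io(io);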

View File

@ -43,57 +43,11 @@ static void volume_close(ocf_volume_t volume)
free(myvolume->mem);
}
/*
* In submit_io() function we simulate read or write to backend storage device
* by doing memcpy() to or from previously allocated memory buffer.
*/
static void volume_submit_io(struct ocf_io *io)
{
struct myvolume_io *myvolume_io = ocf_io_get_priv(io);
struct volume_data *data;
struct myvolume *myvolume;
uint32_t offset = myvolume_io->offset;
data = ocf_io_get_data(io);
myvolume = ocf_volume_get_priv(ocf_io_get_volume(io));
if (io->dir == OCF_WRITE) {
memcpy(myvolume->mem + io->addr,
data->ptr + offset, io->bytes);
} else {
memcpy(data->ptr + offset,
myvolume->mem + io->addr, io->bytes);
}
printf("VOL: (name: %s), IO: (dir: %s, addr: %ld, bytes: %d)\n",
myvolume->name, io->dir == OCF_READ ? "read" : "write",
io->addr, io->bytes);
io->end(io, 0);
}
/*
* We don't need to implement submit_flush(). Just complete io with success.
*/
static void volume_submit_flush(struct ocf_io *io)
{
io->end(io, 0);
}
/*
* We don't need to implement submit_discard(). Just complete io with success.
*/
static void volume_submit_discard(struct ocf_io *io)
{
io->end(io, 0);
}
void volume_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
struct ocf_io *io = ocf_forward_get_io(token);
struct myvolume *myvolume = ocf_volume_get_priv(volume);
struct volume_data *data = ocf_io_get_data(io);
struct volume_data *data = ocf_forward_get_data(token);
if (dir == OCF_WRITE) {
memcpy(myvolume->mem + addr,
@ -139,30 +93,6 @@ static uint64_t volume_get_length(ocf_volume_t volume)
return VOL_SIZE;
}
/*
* In set_data() we just assign data and offset to io.
*/
static int myvolume_io_set_data(struct ocf_io *io, ctx_data_t *data,
uint32_t offset)
{
struct myvolume_io *myvolume_io = ocf_io_get_priv(io);
myvolume_io->data = data;
myvolume_io->offset = offset;
return 0;
}
/*
* In get_data() return data stored in io.
*/
static ctx_data_t *myvolume_io_get_data(struct ocf_io *io)
{
struct myvolume_io *myvolume_io = ocf_io_get_priv(io);
return myvolume_io->data;
}
/*
* This structure contains volume properties. It describes volume
* type, which can be later instantiated as backend storage for cache
@ -170,7 +100,6 @@ static ctx_data_t *myvolume_io_get_data(struct ocf_io *io)
*/
const struct ocf_volume_properties volume_properties = {
.name = "Example volume",
.io_priv_size = sizeof(struct myvolume_io),
.volume_priv_size = sizeof(struct myvolume),
.caps = {
.atomic_writes = 0,
@ -178,19 +107,12 @@ const struct ocf_volume_properties volume_properties = {
.ops = {
.open = volume_open,
.close = volume_close,
.submit_io = volume_submit_io,
.submit_flush = volume_submit_flush,
.submit_discard = volume_submit_discard,
.forward_io = volume_forward_io,
.forward_flush = volume_forward_flush,
.forward_discard = volume_forward_discard,
.get_max_io_size = volume_get_max_io_size,
.get_length = volume_get_length,
},
.io_ops = {
.set_data = myvolume_io_set_data,
.get_data = myvolume_io_get_data,
},
};
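The ops table above names volume_forward_flush() and volume_forward_discard() without showing their bodies. By analogy with the removed submit_flush()/submit_discard() no-ops, they presumably just complete the token; a sketch under that assumption:

/* Assumed bodies: complete the forwarded io immediately with success.
 * ocf_forward_end() plays the role the removed io->end() calls played. */
void volume_forward_flush(ocf_volume_t volume, ocf_forward_token_t token)
{
	ocf_forward_end(token, 0);
}

void volume_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
		uint64_t addr, uint64_t bytes)
{
	ocf_forward_end(token, 0);
}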
/*

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -11,11 +12,6 @@
#include "ctx.h"
#include "data.h"
struct myvolume_io {
struct volume_data *data;
uint32_t offset;
};
struct myvolume {
uint8_t *mem;
const char *name;

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -151,7 +152,7 @@ ocf_core_state_t ocf_core_get_state(ocf_core_t core);
*
* @param[in] io IO to be submitted
*/
static inline void ocf_core_submit_io(struct ocf_io *io)
static inline void ocf_core_submit_io(ocf_io_t io)
{
ocf_volume_submit_io(io);
}
@ -161,7 +162,7 @@ static inline void ocf_core_submit_io(struct ocf_io *io)
*
* @param[in] io IO to be submitted
*/
static inline void ocf_core_submit_flush(struct ocf_io *io)
static inline void ocf_core_submit_flush(ocf_io_t io)
{
ocf_volume_submit_flush(io);
}
@ -171,7 +172,7 @@ static inline void ocf_core_submit_flush(struct ocf_io *io)
*
* @param[in] io IO to be submitted
*/
static inline void ocf_core_submit_discard(struct ocf_io *io)
static inline void ocf_core_submit_discard(ocf_io_t io)
{
ocf_volume_submit_discard(io);
}

View File

@ -15,8 +15,6 @@
* @brief OCF IO definitions
*/
struct ocf_io;
/**
* @brief OCF IO start
*
@ -24,7 +22,7 @@ struct ocf_io;
*
* @param[in] io OCF IO being started
*/
typedef void (*ocf_start_io_t)(struct ocf_io *io);
typedef void (*ocf_start_io_t)(ocf_io_t io);
/**
* @brief OCF IO handle
@ -33,7 +31,7 @@ typedef void (*ocf_start_io_t)(struct ocf_io *io);
*
* @param[in] io OCF IO to handle
*/
typedef void (*ocf_handle_io_t)(struct ocf_io *io, void *opaque);
typedef void (*ocf_handle_io_t)(ocf_io_t io, void *opaque);
/**
* @brief OCF IO completion
@ -41,105 +39,11 @@ typedef void (*ocf_handle_io_t)(struct ocf_io *io, void *opaque);
* @note Completion function for OCF IO
*
* @param[in] io OCF IO being completed
* @param[in] priv1 Completion priv 1
* @param[in] priv2 Completion priv 2
* @param[in] error Completion status code
*/
typedef void (*ocf_end_io_t)(struct ocf_io *io, int error);
/**
* @brief OCF IO main structure
*/
struct ocf_io {
/**
* @brief OCF IO destination address
*/
uint64_t addr;
/**
* @brief OCF IO flags
*/
uint64_t flags;
/**
* @brief OCF IO size in bytes
*/
uint32_t bytes;
/**
* @brief OCF IO destination class
*/
uint32_t io_class;
/**
* @brief OCF IO direction
*/
uint32_t dir;
/**
* @brief Queue handle
*/
ocf_queue_t io_queue;
/**
* @brief OCF IO start function
*/
ocf_start_io_t start;
/**
* @brief OCF IO private 1
*/
void *priv1;
/**
* @brief OCF IO private 2
*/
void *priv2;
/**
* @brief OCF IO handle function
*/
ocf_handle_io_t handle;
/**
* @brief OCF IO completion function
*/
ocf_end_io_t end;
};
/**
* @brief OCF IO operations set structure
*/
struct ocf_io_ops {
/**
* @brief Set up data vector in OCF IO
*
* @param[in] io OCF IO to set up
* @param[in] data Source context data
* @param[in] offset Data offset in source context data
*
* @retval 0 Data set up successfully
* @retval Non-zero Data set up failure
*/
int (*set_data)(struct ocf_io *io, ctx_data_t *data,
uint32_t offset);
/**
* @brief Get context data from OCF IO
*
* @param[in] io OCF IO to get data
*
* @return Data vector from IO
*/
ctx_data_t *(*get_data)(struct ocf_io *io);
};
/**
* @brief Get IO private context structure
*
* @param[in] io OCF IO
*
* @return IO private context structure
*/
void *ocf_io_get_priv(struct ocf_io *io);
typedef void (*ocf_end_io_t)(ocf_io_t io, void *priv1, void *priv2, int error);
/**
* @brief Increase reference counter in OCF IO
@ -148,7 +52,7 @@ void *ocf_io_get_priv(struct ocf_io *io);
*
* @param[in] io OCF IO
*/
void ocf_io_get(struct ocf_io *io);
void ocf_io_get(ocf_io_t io);
/**
* @brief Decrease reference counter in OCF IO
@ -157,7 +61,7 @@ void ocf_io_get(struct ocf_io *io);
*
* @param[in] io OCF IO
*/
void ocf_io_put(struct ocf_io *io);
void ocf_io_put(ocf_io_t io);
/**
* @brief Set OCF IO completion function
@ -166,13 +70,8 @@ void ocf_io_put(struct ocf_io *io);
* @param[in] context Context for completion function
* @param[in] fn Completion function
*/
static inline void ocf_io_set_cmpl(struct ocf_io *io, void *context,
void *context2, ocf_end_io_t fn)
{
io->priv1 = context;
io->priv2 = context2;
io->end = fn;
}
void ocf_io_set_cmpl(ocf_io_t io, void *context,
void *context2, ocf_end_io_t fn);
/**
* @brief Set OCF IO start function
@ -180,10 +79,7 @@ static inline void ocf_io_set_cmpl(struct ocf_io *io, void *context,
* @param[in] io OCF IO
* @param[in] fn Start callback function
*/
static inline void ocf_io_set_start(struct ocf_io *io, ocf_start_io_t fn)
{
io->start = fn;
}
void ocf_io_set_start(ocf_io_t io, ocf_start_io_t fn);
/**
* @brief Set OCF IO handle function
@ -191,16 +87,11 @@ static inline void ocf_io_set_start(struct ocf_io *io, ocf_start_io_t fn)
* @param[in] io OCF IO
* @param[in] fn Handle callback function
*/
static inline void ocf_io_set_handle(struct ocf_io *io, ocf_handle_io_t fn)
{
io->handle = fn;
}
void ocf_io_set_handle(ocf_io_t io, ocf_handle_io_t fn);
/**
* @brief Set up data vector in OCF IO
*
* @note Wrapper for set up data vector function
*
* @param[in] io OCF IO to set up
* @param[in] data Source data vector
* @param[in] offset Data offset in source data vector
@ -208,18 +99,25 @@ static inline void ocf_io_set_handle(struct ocf_io *io, ocf_handle_io_t fn)
* @retval 0 Data set up successfully
* @retval Non-zero Data set up failure
*/
int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, uint32_t offset);
int ocf_io_set_data(ocf_io_t io, ctx_data_t *data, uint32_t offset);
/**
* @brief Get data vector from OCF IO
*
* @note Wrapper for get data vector function
*
* @param[in] io OCF IO to get data
*
* @return Data vector from IO
*/
ctx_data_t *ocf_io_get_data(struct ocf_io *io);
ctx_data_t *ocf_io_get_data(ocf_io_t io);
/**
* @brief Get offset within the data from OCF IO
*
* @param[in] io OCF IO to get data
*
* @return Offset within data
*/
uint32_t ocf_io_get_offset(ocf_io_t io);
/**
* @brief Handle IO in cache engine
@ -227,21 +125,42 @@ ctx_data_t *ocf_io_get_data(struct ocf_io *io);
* @param[in] io OCF IO to be handled
* @param[in] opaque OCF opaque
*/
void ocf_io_handle(struct ocf_io *io, void *opaque);
void ocf_io_handle(ocf_io_t io, void *opaque);
/**
* @brief Get volume associated with io
*
* @param[in] io OCF IO to be handled
*/
ocf_volume_t ocf_io_get_volume(struct ocf_io *io);
ocf_volume_t ocf_io_get_volume(ocf_io_t io);
/**
* @brief Get the original OCF IO associated with forward token
* @brief Get the data to be submitted
*
* @param[in] token Forward token
*/
struct ocf_io *ocf_forward_get_io(ocf_forward_token_t token);
ctx_data_t *ocf_forward_get_data(ocf_forward_token_t token);
/**
* @brief Get io queue of forwarded io
*
* @param[in] token Forward token
*/
ocf_queue_t ocf_forward_get_io_queue(ocf_forward_token_t token);
/**
* @brief Get io class of forwarded io
*
* @param[in] token Forward token
*/
uint8_t ocf_forward_get_io_class(ocf_forward_token_t token);
/**
* @brief Get flags of forwarded io
*
* @param[in] token Forward token
*/
uint64_t ocf_forward_get_flags(ocf_forward_token_t token);
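Together these getters expose everything a forward_io implementation previously read from struct ocf_io. A hypothetical sketch (my_forward_io is illustrative, not part of this patch):

static void my_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
		int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
	ctx_data_t *data = ocf_forward_get_data(token);
	uint64_t flags = ocf_forward_get_flags(token);

	/* ...transfer `bytes` at `addr` to/from `data`, starting at `offset`... */
	(void)data; (void)dir; (void)flags;

	/* completing the token balances the refcount taken by forwarding */
	ocf_forward_end(token, 0);
}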
/**
* @brief Forward io to another subvolume
@ -285,6 +204,53 @@ void ocf_forward_flush(ocf_volume_t volume, ocf_forward_token_t token);
void ocf_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
/**
* @brief Forward write_zeros to another subvolume
*
* Forwarding automatically increases forwarded io refcount, so at some
* point additional ocf_forward_end() needs to be called to balance it.
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
*/
void ocf_forward_write_zeros(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
/**
* @brief Forward metadata io to another subvolume
*
* Forwarding automatically increases forwarded io refcount, so at some
* point additional ocf_forward_end() needs to be called to balance it.
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] dir Direction OCF_READ/OCF_WRITE
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
* @param[in] offset Offset within the IO data
*/
void ocf_forward_metadata(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset);
/**
* @brief Forward io simple to another subvolume
*
* Forwarding automatically increases forwarded io refcount, so at some
* point additional ocf_forward_end() needs to be called to balance it.
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] dir Direction OCF_READ/OCF_WRITE
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
*/
void ocf_forward_io_simple(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes);
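Because ocf_forward_io_simple() carries no data offset, a volume can typically service it through its regular forward path at offset zero; a hypothetical sketch reusing my_forward_io from the sketch above:

static void my_forward_io_simple(ocf_volume_t volume, ocf_forward_token_t token,
		int dir, uint64_t addr, uint64_t bytes)
{
	/* assumption: nothing cache-specific is needed this early in init */
	my_forward_io(volume, token, dir, addr, bytes, 0);
}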
/**
* @brief Increment forwarded io refcount
*

View File

@ -73,6 +73,12 @@ typedef struct ocf_volume_uuid *ocf_uuid_t;
*/
typedef void ctx_data_t;
struct ocf_request;
/**
* @brief handle to io
*/
typedef struct ocf_request *ocf_io_t;
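With this typedef the opaque io handle and the internal request are one and the same object, which is why later hunks in this diff can convert between them with ocf_io_to_req(). Illustration only:

ocf_io_t io = ocf_volume_new_io(volume, queue, addr, bytes, dir, 0, 0);
struct ocf_request *req = ocf_io_to_req(io);	/* same underlying object */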
/**
* @brief IO forward token
*

View File

@ -17,8 +17,6 @@
#include "ocf/ocf_err.h"
#include "ocf/ocf_io.h"
struct ocf_io;
/**
* @brief OCF volume UUID maximum allowed size
*/
@ -52,28 +50,28 @@ struct ocf_volume_ops {
*
* @param[in] io IO to be submitted
*/
void (*submit_io)(struct ocf_io *io);
void (*submit_io)(ocf_io_t io);
/**
* @brief Submit IO with flush command
*
* @param[in] io IO to be submitted
*/
void (*submit_flush)(struct ocf_io *io);
void (*submit_flush)(ocf_io_t io);
/**
* @brief Submit IO with metadata
*
* @param[in] io IO to be submitted
*/
void (*submit_metadata)(struct ocf_io *io);
void (*submit_metadata)(ocf_io_t io);
/**
* @brief Submit IO with discard command
*
* @param[in] io IO to be submitted
*/
void (*submit_discard)(struct ocf_io *io);
void (*submit_discard)(ocf_io_t io);
/**
* @brief Submit operation to write zeroes to target address (including
@ -81,7 +79,7 @@ struct ocf_volume_ops {
*
* @param[in] io IO description (addr, size)
*/
void (*submit_write_zeroes)(struct ocf_io *io);
void (*submit_write_zeroes)(ocf_io_t io);
/**
* @brief Forward the original io directly to the volume
@ -116,6 +114,47 @@ struct ocf_volume_ops {
void (*forward_discard)(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
/**
* @brief Forward operation to write zeros to target address (including
* metadata extended LBAs in atomic mode)
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
*/
void (*forward_write_zeros)(ocf_volume_t volume,
ocf_forward_token_t token, uint64_t addr,
uint64_t bytes);
/**
* @brief Forward the metadata io directly to the volume
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] dir Direction OCF_READ/OCF_WRITE
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
* @param[in] offset Offset within the IO data
*/
void (*forward_metadata)(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes,
uint64_t offset);
/**
* @brief Forward the io directly to the volume in a context
* where the cache is not initialized yet
*
* @param[in] volume Volume to which IO is being submitted
* @param[in] token Token representing IO to be forwarded
* @param[in] dir Direction OCF_READ/OCF_WRITE
* @param[in] addr Address to which IO is being submitted
* @param[in] bytes Length of the IO
*/
void (*forward_io_simple)(ocf_volume_t volume,
ocf_forward_token_t token, int dir,
uint64_t addr, uint64_t bytes);
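For the forward_write_zeros op declared earlier in this ops table, a RAM-backed volume in the style of this repository's example could simply clear its buffer; a sketch under that assumption:

static void my_forward_write_zeros(ocf_volume_t volume,
		ocf_forward_token_t token, uint64_t addr, uint64_t bytes)
{
	struct myvolume *myvolume = ocf_volume_get_priv(volume);

	memset(myvolume->mem + addr, 0, bytes);
	ocf_forward_end(token, 0);
}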
/**
* @brief Volume initialization callback, called when volume object
* is being initialized
@ -180,18 +219,12 @@ struct ocf_volume_properties {
const char *name;
/*!< The name of volume operations */
uint32_t io_priv_size;
/*!< Size of io private context structure */
uint32_t volume_priv_size;
/*!< Size of volume private context structure */
struct ocf_volume_caps caps;
/*!< Volume capabilities */
struct ocf_io_ops io_ops;
/*!< IO operations */
void (*deinit)(void);
/*!< Deinitialize volume type */
@ -316,7 +349,7 @@ int ocf_volume_is_atomic(ocf_volume_t volume);
*
* @return ocf_io on success atomic, otherwise NULL
*/
struct ocf_io *ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
ocf_io_t ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags);
@ -326,21 +359,21 @@ struct ocf_io *ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
*
* @param[in] io IO
*/
void ocf_volume_submit_io(struct ocf_io *io);
void ocf_volume_submit_io(ocf_io_t io);
/**
* @brief Submit flush to volume
*
* @param[in] io IO
*/
void ocf_volume_submit_flush(struct ocf_io *io);
void ocf_volume_submit_flush(ocf_io_t io);
/**
* @brief Submit discard to volume
*
* @param[in] io IO
*/
void ocf_volume_submit_discard(struct ocf_io *io);
void ocf_volume_submit_discard(ocf_io_t io);
/**
* @brief Open volume

View File

@ -157,8 +157,7 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
return;
}
if (cache->pt_unaligned_io && !ocf_req_is_4k(req->byte_position,
req->byte_length)) {
if (cache->pt_unaligned_io && !ocf_req_is_4k(req->addr, req->bytes)) {
req->cache_mode = ocf_req_cache_mode_pt;
return;
}

View File

@ -31,8 +31,8 @@ void ocf_engine_error(struct ocf_request *req,
if (ocf_cache_log_rl(cache)) {
ocf_core_log(req->core, log_err,
"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
BYTES_TO_SECTORS(req->byte_position),
req->byte_length);
BYTES_TO_SECTORS(req->addr),
req->bytes);
}
}
@ -529,27 +529,23 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
static int _ocf_engine_clean_getter(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
struct ocf_cleaner_attribs *attribs = getter_context;
struct ocf_request *req = attribs->cmpl_context;
struct ocf_request *req = getter_context;
struct ocf_map_info *entry;
for (; attribs->getter_item < req->core_line_count;
attribs->getter_item++) {
if (unlikely(item >= req->core_line_count))
return -1;
struct ocf_map_info *entry = &req->map[attribs->getter_item];
entry = &req->map[item];
if (entry->status != LOOKUP_HIT)
continue;
if (entry->status != LOOKUP_HIT)
return -1;
if (!metadata_test_dirty(cache, entry->coll_idx))
continue;
if (!metadata_test_dirty(cache, entry->coll_idx))
return -1;
/* Line to be cleaned found, go to next item and return */
*line = entry->coll_idx;
attribs->getter_item++;
return 0;
}
return -1;
/* Line to be cleaned found, go to next item and return */
*line = entry->coll_idx;
return 0;
}
void ocf_engine_clean(struct ocf_request *req)
@ -562,10 +558,9 @@ void ocf_engine_clean(struct ocf_request *req)
.cmpl_fn = _ocf_engine_clean_end,
.getter = _ocf_engine_clean_getter,
.getter_context = &attribs,
.getter_item = 0,
.getter_context = req,
.count = req->info.dirty_any,
.count = req->core_line_count,
.io_queue = req->io_queue
};
@ -576,7 +571,7 @@ void ocf_engine_clean(struct ocf_request *req)
void ocf_engine_update_block_stats(struct ocf_request *req)
{
ocf_core_stats_vol_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
}
void ocf_engine_update_request_stats(struct ocf_request *req)

View File

@ -41,7 +41,7 @@ int ocf_d2c_io_fast(struct ocf_request *req)
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);
@ -61,7 +61,7 @@ int ocf_d2c_flush_fast(struct ocf_request *req)
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);
@ -81,7 +81,7 @@ int ocf_d2c_discard_fast(struct ocf_request *req)
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -33,8 +34,8 @@
#define OCF_DEBUG_RQ(req, format, ...) \
ocf_cache_log(req->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
OCF_READ == (req)->rw ? "RD" : "WR", req->byte_position, \
req->byte_length, __func__, ##__VA_ARGS__)
OCF_READ == (req)->rw ? "RD" : "WR", req->addr, \
req->bytes, __func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_PREFIX

View File

@ -28,8 +28,8 @@ static void _ocf_discard_complete_req(struct ocf_request *req, int error)
static int _ocf_discard_core(struct ocf_request *req)
{
req->byte_position = SECTORS_TO_BYTES(req->discard.sector);
req->byte_length = SECTORS_TO_BYTES(req->discard.nr_sects);
req->addr = SECTORS_TO_BYTES(req->discard.sector);
req->bytes = SECTORS_TO_BYTES(req->discard.nr_sects);
ocf_engine_forward_core_discard_req(req, _ocf_discard_complete_req);
@ -60,7 +60,7 @@ static int _ocf_discard_step(struct ocf_request *req);
static void _ocf_discard_finish_step(struct ocf_request *req)
{
req->discard.handled += BYTES_TO_SECTORS(req->byte_length);
req->discard.handled += BYTES_TO_SECTORS(req->bytes);
if (req->discard.handled < req->discard.nr_sects)
req->engine_handler = _ocf_discard_step;
@ -149,13 +149,13 @@ static int _ocf_discard_step(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
req->byte_position = SECTORS_TO_BYTES(req->discard.sector +
req->addr = SECTORS_TO_BYTES(req->discard.sector +
req->discard.handled);
req->byte_length = OCF_MIN(SECTORS_TO_BYTES(req->discard.nr_sects -
req->bytes = OCF_MIN(SECTORS_TO_BYTES(req->discard.nr_sects -
req->discard.handled), MAX_TRIM_RQ_SIZE);
req->core_line_first = ocf_bytes_2_lines(cache, req->byte_position);
req->core_line_first = ocf_bytes_2_lines(cache, req->addr);
req->core_line_last =
ocf_bytes_2_lines(cache, req->byte_position + req->byte_length - 1);
ocf_bytes_2_lines(cache, req->addr + req->bytes - 1);
req->core_line_count = req->core_line_last - req->core_line_first + 1;
req->engine_handler = _ocf_discard_step_do;

View File

@ -16,7 +16,7 @@ void ocf_engine_forward_cache_io(struct ocf_request *req, int dir,
uint64_t offset, uint64_t size, ocf_req_end_t callback)
{
ocf_cache_t cache = req->cache;
uint32_t seek = req->byte_position % ocf_line_size(cache);
uint32_t seek = req->addr % ocf_line_size(cache);
uint32_t first_cl = ocf_bytes_2_lines(cache, offset + seek);
uint64_t addr;
@ -27,7 +27,7 @@ void ocf_engine_forward_cache_io(struct ocf_request *req, int dir,
addr += (offset + seek) % ocf_line_size(cache);
ocf_core_stats_cache_block_update(req->core, req->part_id,
dir, req->byte_length);
dir, req->bytes);
ocf_req_forward_cache_io(req, dir, addr, size,
req->offset + offset);
@ -45,12 +45,12 @@ void ocf_engine_forward_cache_io_req(struct ocf_request *req, int dir,
if (ocf_engine_is_sequential(req)) {
addr = cache->device->metadata_offset;
addr += req->map[0].coll_idx * ocf_line_size(cache);
addr += req->byte_position % ocf_line_size(cache);
addr += req->addr % ocf_line_size(cache);
ocf_core_stats_cache_block_update(req->core, req->part_id,
dir, req->byte_length);
dir, req->bytes);
ocf_req_forward_cache_io(req, dir, addr, req->byte_length,
ocf_req_forward_cache_io(req, dir, addr, req->bytes,
req->offset);
return;
}
@ -67,7 +67,7 @@ void ocf_engine_forward_cache_io_req(struct ocf_request *req, int dir,
bytes = ocf_line_size(cache);
if (i == 0) {
uint64_t seek = (req->byte_position) %
uint64_t seek = (req->addr) %
ocf_line_size(cache);
addr += seek;
@ -87,13 +87,13 @@ void ocf_engine_forward_cache_io_req(struct ocf_request *req, int dir,
if (i == (req->core_line_count - 1)) {
uint64_t skip = (ocf_line_size(cache) -
((req->byte_position + req->byte_length) %
((req->addr + req->bytes) %
ocf_line_size(cache))) % ocf_line_size(cache);
bytes -= skip;
}
bytes = OCF_MIN(bytes, req->byte_length - total_bytes);
bytes = OCF_MIN(bytes, req->bytes - total_bytes);
ENV_BUG_ON(bytes == 0);
ocf_core_stats_cache_block_update(req->core, req->part_id,
@ -105,7 +105,7 @@ void ocf_engine_forward_cache_io_req(struct ocf_request *req, int dir,
total_bytes += bytes;
}
ENV_BUG_ON(total_bytes != req->byte_length);
ENV_BUG_ON(total_bytes != req->bytes);
ocf_req_forward_cache_put(req);
}
@ -123,27 +123,27 @@ void ocf_engine_forward_cache_discard_req(struct ocf_request *req,
{
req->cache_forward_end = callback;
ocf_req_forward_cache_discard(req, req->byte_position,
req->byte_length);
ocf_req_forward_cache_discard(req, req->addr,
req->bytes);
}
void ocf_engine_forward_core_io_req(struct ocf_request *req,
ocf_req_end_t callback)
{
ocf_core_stats_core_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
req->core_forward_end = callback;
ocf_req_forward_core_io(req, req->rw, req->byte_position,
req->byte_length, req->offset);
ocf_req_forward_core_io(req, req->rw, req->addr,
req->bytes, req->offset);
}
void ocf_engine_forward_core_flush_req(struct ocf_request *req,
ocf_req_end_t callback)
{
ocf_core_stats_core_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
req->core_forward_end = callback;
@ -154,9 +154,9 @@ void ocf_engine_forward_core_discard_req(struct ocf_request *req,
ocf_req_end_t callback)
{
ocf_core_stats_core_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
req->core_forward_end = callback;
ocf_req_forward_core_discard(req, req->byte_position, req->byte_length);
ocf_req_forward_core_discard(req, req->addr, req->bytes);
}
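The per-cacheline loop earlier in this file leans on the request-level forward refcount: a get taken before issuing the batch pins the request so cache_forward_end fires exactly once, after the last sub-io; the matching put appears after the loop above. Distilled pattern (per-line addr/bytes are placeholders):

ocf_req_forward_cache_get(req);		/* pin across the whole batch */
for (i = 0; i < req->core_line_count; i++) {
	/* each forward takes and drops its own reference internally */
	ocf_req_forward_cache_io(req, dir, addr, bytes, req->offset + total_bytes);
}
ocf_req_forward_cache_put(req);		/* unpin; completion may run here */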

View File

@ -79,7 +79,7 @@ int ocf_read_pt_do(struct ocf_request *req)
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);

View File

@ -65,7 +65,7 @@ static void _ocf_read_generic_miss_complete(struct ocf_request *req, int error)
/* Copy data to the backfill buffer */
if (req->cp_data) {
ctx_data_cpy(cache->owner, req->cp_data, req->data, 0, 0,
req->byte_length);
req->bytes);
}
req->complete(req, error);
@ -85,12 +85,12 @@ static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
int ret;
req->cp_data = ctx_data_alloc(cache->owner,
BYTES_TO_PAGES(req->byte_length));
BYTES_TO_PAGES(req->bytes));
if (!req->cp_data) {
/* If buffer allocation for backfill fails, ignore the error */
ocf_cache_log(cache, log_warn, "Backfill buffer allocation "
"error (size %u)\n",
req->byte_length);
req->bytes);
goto err_alloc;
}

View File

@ -143,7 +143,7 @@ static int _ocf_write_wi_core_write(struct ocf_request *req)
ocf_engine_update_block_stats(req);
ocf_core_stats_pt_block_update(req->core, req->part_id, req->rw,
req->byte_length);
req->bytes);
ocf_core_stats_request_pt_update(req->core, req->part_id, req->rw,
req->info.hit_no, req->core_line_count);

View File

@ -76,15 +76,15 @@ static inline void ocf_zero_map_info(struct ocf_request *req)
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
start_bit = (BYTES_TO_SECTORS(req->addr)
% ocf_line_sectors(cache));
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(req->byte_position +
req->byte_length - 1) %
ocf_line_sectors(cache);
end_bit = (BYTES_TO_SECTORS(req->addr +
req->bytes - 1) %
ocf_line_sectors(cache));
}
ocf_metadata_flush_mark(cache, req, map_idx, INVALID,

View File

@ -65,7 +65,7 @@ static void metadata_io_read_i_atomic_complete(
{
context->compl_hndl(context->cache, context->priv, error);
ctx_data_free(context->cache->owner, context->data);
ctx_data_free(context->cache->owner, context->req->data);
ocf_req_put(context->req);
env_vfree(context);
}
@ -73,13 +73,12 @@ static void metadata_io_read_i_atomic_complete(
/*
* Iterative read end callback
*/
static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
static void metadata_io_read_i_atomic_step_end(struct ocf_request *req,
int error)
{
struct metadata_io_read_i_atomic_context *context = io->priv1;
struct metadata_io_read_i_atomic_context *context = req->priv;
OCF_DEBUG_TRACE(ocf_volume_get_cache(ocf_io_get_volume(io)));
ocf_io_put(io);
OCF_DEBUG_TRACE(req->cache);
if (error) {
metadata_io_read_i_atomic_complete(context, error);
@ -87,16 +86,17 @@ static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
}
context->drain_hndl(context->priv, context->curr_offset,
context->curr_count, context->data);
context->curr_count, req->data);
context->count -= context->curr_count;
context->curr_offset += context->curr_count;
if (context->count > 0)
ocf_queue_push_req(context->req,
if (context->count > 0) {
ocf_queue_push_req(req,
OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
else
} else {
metadata_io_read_i_atomic_complete(context, 0);
}
}
static int metadata_io_read_i_atomic_step(struct ocf_request *req)
@ -104,37 +104,19 @@ static int metadata_io_read_i_atomic_step(struct ocf_request *req)
struct metadata_io_read_i_atomic_context *context = req->priv;
ocf_cache_t cache = context->cache;
uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
struct ocf_io *io;
int result = 0;
/* Get sectors count of this IO iteration */
context->curr_count = OCF_MIN(max_sectors_count, context->count);
/* Reset position in data buffer */
ctx_data_seek(cache->owner, context->data, ctx_data_seek_begin, 0);
ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin, 0);
/* Allocate new IO */
io = ocf_new_cache_io(cache, req->io_queue,
req->cache_forward_end = metadata_io_read_i_atomic_step_end;
ocf_req_forward_cache_metadata(req, OCF_READ,
cache->device->metadata_offset +
SECTORS_TO_BYTES(context->curr_offset),
SECTORS_TO_BYTES(context->curr_count), OCF_READ, 0, 0);
if (!io) {
metadata_io_read_i_atomic_complete(context, -OCF_ERR_NO_MEM);
return 0;
}
/* Setup IO */
ocf_io_set_cmpl(io, context, NULL, metadata_io_read_i_atomic_step_end);
result = ocf_io_set_data(io, context->data, 0);
if (result) {
ocf_io_put(io);
metadata_io_read_i_atomic_complete(context, result);
return 0;
}
/* Submit IO */
ocf_volume_submit_metadata(io);
PAGES_TO_BYTES(context->curr_offset),
PAGES_TO_BYTES(context->curr_count), 0);
return 0;
}
@ -149,6 +131,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
struct metadata_io_read_i_atomic_context *context;
uint64_t io_sectors_count = cache->device->collision_table_entries *
ocf_line_sectors(cache);
struct ocf_request *req;
OCF_DEBUG_TRACE(cache);
@ -156,24 +139,26 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
if (!context)
return -OCF_ERR_NO_MEM;
context->req = ocf_req_new_mngt(cache, queue);
if (!context->req) {
req = ocf_req_new_mngt(cache, queue);
if (!req) {
env_vfree(context);
return -OCF_ERR_NO_MEM;
}
context->req->info.internal = true;
context->req->engine_handler = metadata_io_read_i_atomic_step;
context->req->priv = context;
/* Allocate one 4k page for metadata */
context->data = ctx_data_alloc(cache->owner, 1);
if (!context->data) {
ocf_req_put(context->req);
req->data = ctx_data_alloc(cache->owner, 1);
if (!req->data) {
ocf_req_put(req);
env_vfree(context);
return -OCF_ERR_NO_MEM;
}
req->info.internal = true;
req->engine_handler = metadata_io_read_i_atomic_step;
req->priv = context;
context->req = req;
context->cache = cache;
context->count = io_sectors_count;
context->curr_offset = 0;
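The setup above is the new template for iterative management IO: one ocf_request owns the bounce buffer and re-queues itself through its engine_handler until the whole range is drained. Distilled from the lines in this hunk:

req = ocf_req_new_mngt(cache, queue);		/* one request drives every step */
req->data = ctx_data_alloc(cache->owner, 1);	/* shared 4k bounce buffer */
req->info.internal = true;
req->engine_handler = metadata_io_read_i_atomic_step;	/* runs on each push */
req->priv = context;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);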
@ -195,7 +180,7 @@ static void metadata_io_req_fill(struct metadata_io_request *m_req)
int i;
for (i = 0; i < m_req->count; i++) {
a_req->on_meta_fill(cache, m_req->data,
a_req->on_meta_fill(cache, m_req->req.data,
m_req->page + i, m_req->context);
}
}
@ -207,27 +192,19 @@ static void metadata_io_req_drain(struct metadata_io_request *m_req)
int i;
for (i = 0; i < m_req->count; i++) {
a_req->on_meta_drain(cache, m_req->data,
a_req->on_meta_drain(cache, m_req->req.data,
m_req->page + i, m_req->context);
}
}
static void metadata_io_io_end(struct metadata_io_request *m_req, int error);
static void metadata_io_io_cmpl(struct ocf_io *io, int error)
{
metadata_io_io_end(io->priv1, error);
ocf_io_put(io);
}
static void metadata_io_end(struct ocf_request *req, int error);
static int metadata_io_do(struct ocf_request *req)
{
struct metadata_io_request *m_req = req->priv;
ocf_cache_t cache = req->cache;
struct ocf_io *io;
int ret;
ctx_data_seek(cache->owner, m_req->data, ctx_data_seek_begin, 0);
ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin, 0);
/* Fill with the latest metadata. */
if (m_req->req.rw == OCF_WRITE) {
@ -238,25 +215,13 @@ static int metadata_io_do(struct ocf_request *req)
m_req->page % OCF_NUM_GLOBAL_META_LOCKS);
}
io = ocf_new_cache_io(cache, req->io_queue,
PAGES_TO_BYTES(m_req->page),
PAGES_TO_BYTES(m_req->count),
m_req->req.rw, 0, m_req->asynch->flags);
if (!io) {
metadata_io_io_end(m_req, -OCF_ERR_NO_MEM);
return 0;
}
ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin, 0);
req->cache_forward_end = metadata_io_end;
ocf_req_forward_cache_io(req, req->rw, PAGES_TO_BYTES(m_req->page),
PAGES_TO_BYTES(m_req->count), 0);
/* Setup IO */
ocf_io_set_cmpl(io, m_req, NULL, metadata_io_io_cmpl);
ctx_data_seek(cache->owner, m_req->data, ctx_data_seek_begin, 0);
ret = ocf_io_set_data(io, m_req->data, 0);
if (ret) {
ocf_io_put(io);
metadata_io_io_end(m_req, ret);
return ret;
}
ocf_volume_submit_io(io);
return 0;
}
@ -307,8 +272,9 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req);
/*
* Iterative asynchronous write callback
*/
static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
static void metadata_io_end(struct ocf_request *req, int error)
{
struct metadata_io_request *m_req = req->priv;
struct metadata_io_request_asynch *a_req = m_req->asynch;
OCF_CHECK_NULL(a_req);
@ -339,7 +305,7 @@ static void metadata_io_req_end(struct metadata_io_request *m_req)
if (env_atomic_dec_return(&a_req->req_remaining) == 0)
a_req->on_complete(cache, a_req->context, a_req->error);
ctx_data_free(cache->owner, m_req->data);
ctx_data_free(cache->owner, m_req->req.data);
}
static uint32_t metadata_io_max_page(ocf_cache_t cache)
@ -460,14 +426,15 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
m_req->req.rw = dir;
m_req->req.map = LIST_POISON1;
m_req->req.alock_status = (uint8_t*)&m_req->alock_status;
m_req->req.flags = flags;
/* If req_count == io_count and count is not multiple of
* max_count, for last we can allocate data smaller that
* max_count as we are sure it will never be resubmitted.
*/
m_req->data = ctx_data_alloc(cache->owner,
m_req->req.data = ctx_data_alloc(cache->owner,
OCF_MIN(max_count, count - i * max_count));
if (!m_req->data)
if (!m_req->req.data)
goto err;
}
@ -485,7 +452,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
err:
while (i--)
ctx_data_free(cache->owner, a_req->reqs[i].data);
ctx_data_free(cache->owner, a_req->reqs[i].req.data);
env_mpool_del(mio_allocator, a_req, req_count);

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -51,7 +52,6 @@ struct metadata_io_request {
struct list_head list;
ocf_cache_t cache;
void *context;
ctx_data_t *data;
struct metadata_io_request_asynch *asynch;
uint32_t page;
uint32_t count;

View File

@ -27,14 +27,12 @@
static int passive_io_resume(struct ocf_request *req)
{
struct ocf_request *master = req->master_io_req;
ocf_cache_t cache = req->cache;
struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
struct ocf_io *io = (struct ocf_io*) req->data;
ctx_data_t *data = ocf_io_get_data(io);
uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
uint64_t io_pages_count = BYTES_TO_PAGES(io->bytes);
uint64_t io_start_page = BYTES_TO_PAGES(master->addr);
uint64_t io_pages_count = BYTES_TO_PAGES(master->bytes);
uint64_t io_end_page = io_start_page + io_pages_count - 1;
ocf_end_io_t io_cmpl = req->master_io_req;
enum ocf_metadata_segment_id update_segments[] = {
metadata_segment_sb_config,
metadata_segment_collision,
@ -58,13 +56,14 @@ static int passive_io_resume(struct ocf_request *req)
overlap_page = overlap_start - raw_start_page;
overlap_count = overlap_end - overlap_start + 1;
ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
ctx_data_seek(cache->owner, req->data, ctx_data_seek_begin,
PAGES_TO_BYTES(overlap_start_data));
ocf_metadata_raw_update(cache, raw, data, overlap_page, overlap_count);
ocf_metadata_raw_update(cache, raw, req->data, overlap_page,
overlap_count);
}
ocf_pio_async_unlock(req->cache->standby.concurrency, req);
io_cmpl(io, 0);
master->complete(master, 0);
env_allocator_del(cache->standby.allocator, req);
return 0;
}
@ -74,52 +73,52 @@ static void passive_io_page_lock_acquired(struct ocf_request *req)
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
ocf_end_io_t io_cmpl)
int ocf_metadata_passive_update(struct ocf_request *master)
{
ocf_cache_t cache = master->cache;
struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
uint64_t io_start_page = BYTES_TO_PAGES(master->addr);
uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(master->bytes);
struct ocf_request *req;
uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(io->bytes);
int lock = 0;
if (io->dir == OCF_READ) {
io_cmpl(io, 0);
if (master->rw == OCF_READ) {
master->complete(master, 0);
return 0;
}
if (io_start_page >= ctrl->count_pages) {
io_cmpl(io, 0);
master->complete(master, 0);
return 0;
}
if (io->addr % PAGE_SIZE || io->bytes % PAGE_SIZE) {
if (master->addr % PAGE_SIZE || master->bytes % PAGE_SIZE) {
ocf_cache_log(cache, log_warn,
"Metadata update not aligned to page size!\n");
io_cmpl(io, -OCF_ERR_INVAL);
master->complete(master, -OCF_ERR_INVAL);
return -OCF_ERR_INVAL;
}
if (io->bytes > MAX_PASSIVE_IO_SIZE) {
if (master->bytes > MAX_PASSIVE_IO_SIZE) {
//FIXME handle greater IOs
ocf_cache_log(cache, log_warn,
"IO size exceedes max supported size!\n");
io_cmpl(io, -OCF_ERR_INVAL);
master->complete(master, -OCF_ERR_INVAL);
return -OCF_ERR_INVAL;
}
req = (struct ocf_request*)env_allocator_new(cache->standby.allocator);
if (!req) {
io_cmpl(io, -OCF_ERR_NO_MEM);
master->complete(master, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
req->io_queue = io->io_queue;;
req->io_queue = master->io_queue;
req->info.internal = true;
req->engine_handler = passive_io_resume;
req->rw = OCF_WRITE;
req->data = io;
req->master_io_req = io_cmpl;
req->master_io_req = master;
req->data = master->data;
req->cache = cache;
env_atomic_set(&req->lock_remaining, 0);
@ -131,7 +130,7 @@ int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
req, passive_io_page_lock_acquired);
if (lock < 0) {
env_allocator_del(cache->standby.allocator, req);
io_cmpl(io, lock);
master->complete(master, lock);
return lock;
}

View File

@ -1,13 +1,13 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __OCF_METADATA_PASSIVE_IO_H__
#define __OCF_METADATA_PASSIVE_IO_H__
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
ocf_end_io_t io_cmpl);
int ocf_metadata_passive_update(struct ocf_request *master);
int ocf_metadata_passive_io_ctx_init(ocf_cache_t cache);

View File

@ -621,7 +621,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
raw->ssd_pages_offset + start_page, count,
req->ioi.io.flags,
req->flags,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete,
raw->mio_conc);

View File

@ -32,66 +32,31 @@
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
struct _raw_atomic_flush_ctx {
struct ocf_request *req;
ocf_req_end_t complete;
env_atomic flush_req_cnt;
};
static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx,
int error)
static void _raw_atomic_io_discard_cmpl(struct ocf_request *req, int error)
{
ocf_req_end_t complete = req->priv;
if (error)
ctx->req->error = error;
if (env_atomic_dec_return(&ctx->flush_req_cnt))
return;
if (ctx->req->error)
ocf_metadata_error(ctx->req->cache);
ocf_metadata_error(req->cache);
/* Call metadata flush completed call back */
OCF_DEBUG_MSG(ctx->req->cache, "Asynchronous flushing complete");
ctx->complete(ctx->req, ctx->req->error);
env_free(ctx);
complete(req, error);
}
static void _raw_atomic_io_discard_end(struct ocf_io *io, int error)
static void _raw_atomic_io_discard_do(struct ocf_request *req,
uint64_t start_addr, uint32_t len)
{
struct _raw_atomic_flush_ctx *ctx = io->priv1;
ocf_io_put(io); /* Release IO */
_raw_atomic_io_discard_cmpl(ctx, error);
}
static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
{
struct ocf_request *req = context;
struct ocf_io *io;
io = ocf_new_cache_io(cache, NULL, start_addr, len, OCF_WRITE, 0, 0);
if (!io) {
req->error = -OCF_ERR_NO_MEM;
return req->error;
}
ocf_cache_t cache = req->cache;
OCF_DEBUG_PARAM(cache, "Page to flushing = %" ENV_PRIu64 ", count of pages = %u",
start_addr, len);
env_atomic_inc(&ctx->flush_req_cnt);
ocf_io_set_cmpl(io, ctx, NULL, _raw_atomic_io_discard_end);
if (cache->device->volume.features.discard_zeroes)
ocf_volume_submit_discard(io);
ocf_req_forward_cache_discard(req, start_addr, len);
else
ocf_volume_submit_write_zeroes(io);
return req->error;
ocf_req_forward_cache_write_zeros(req, start_addr, len);
}
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
@ -114,14 +79,12 @@ static inline void _raw_atomic_add_page(struct ocf_cache *cache,
(*idx)++;
}
static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
struct ocf_request *req, int map_idx,
struct _raw_atomic_flush_ctx *ctx)
static void _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
struct ocf_request *req, int map_idx)
{
struct ocf_map_info *map = &req->map[map_idx];
uint32_t len = 0;
uint64_t start_addr;
int result = 0;
start_addr = map->coll_idx;
start_addr *= ocf_line_size(cache);
@ -131,9 +94,7 @@ static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush);
len += SECTORS_TO_BYTES(1);
result = _raw_atomic_io_discard_do(cache, req, start_addr, len, ctx);
return result;
_raw_atomic_io_discard_do(req, start_addr, len);
}
int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
@ -147,7 +108,6 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
int line_no = req->core_line_count;
struct ocf_map_info *map;
uint64_t start_addr;
struct _raw_atomic_flush_ctx *ctx;
ENV_BUG_ON(!complete);
@ -157,24 +117,16 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
return 0;
}
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
complete(req, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
ctx->req = req;
ctx->complete = complete;
env_atomic_set(&ctx->flush_req_cnt, 1);
req->priv = complete;
req->cache_forward_end = _raw_atomic_io_discard_cmpl;
if (line_no == 1) {
map = &req->map[0];
if (map->flush && map->status != LOOKUP_MISS) {
result = _raw_atomic_flush_do_asynch_sec(cache, req,
0, ctx);
}
_raw_atomic_io_discard_cmpl(ctx, result);
return result;
if (map->flush && map->status != LOOKUP_MISS)
_raw_atomic_flush_do_asynch_sec(cache, req, 0);
else
_raw_atomic_io_discard_cmpl(req, 0);
return 0;
}
if (line_no <= MAX_STACK_TAB_SIZE) {
@ -184,11 +136,11 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
ENV_MEM_NOIO);
if (!clines_tab) {
complete(req, -OCF_ERR_NO_MEM);
env_free(ctx);
return -OCF_ERR_NO_MEM;
}
}
ocf_req_forward_cache_get(req);
for (i = 0; i < line_no; i++) {
map = &req->map[i];
@ -198,8 +150,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
if (i == 0) {
/* First */
if (map->start_flush) {
_raw_atomic_flush_do_asynch_sec(cache, req, i,
ctx);
_raw_atomic_flush_do_asynch_sec(cache, req, i);
} else {
_raw_atomic_add_page(cache, clines_tab,
map->coll_idx, &clines_to_flush);
@ -207,8 +158,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
} else if (i == (line_no - 1)) {
/* Last */
if (map->stop_flush != ocf_line_end_sector(cache)) {
_raw_atomic_flush_do_asynch_sec(cache, req,
i, ctx);
_raw_atomic_flush_do_asynch_sec(cache, req, i);
} else {
_raw_atomic_add_page(cache, clines_tab,
map->coll_idx, &clines_to_flush);
@ -242,16 +192,11 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
len += ocf_line_size(cache);
}
result |= _raw_atomic_io_discard_do(cache, req, start_addr,
len, ctx);
if (result)
break;
_raw_atomic_io_discard_do(req, start_addr, len);
i++;
}
_raw_atomic_io_discard_cmpl(ctx, result);
ocf_req_forward_cache_put(req);
if (line_no > MAX_STACK_TAB_SIZE)
env_free(clines_tab);

View File

@ -352,8 +352,6 @@ struct raw_dynamic_load_all_context {
unsigned flapping_idx;
struct ocf_request *req;
ocf_cache_t cache;
struct ocf_io *io;
ctx_data_t *data;
uint8_t *zpage;
uint8_t *page;
uint64_t i_page;
@ -371,17 +369,15 @@ static void raw_dynamic_load_all_complete(
ocf_req_put(context->req);
env_secure_free(context->page, PAGE_SIZE);
env_free(context->zpage);
ctx_data_free(context->cache->owner, context->data);
ctx_data_free(context->cache->owner, context->req->data);
env_vfree(context);
}
static int raw_dynamic_load_all_update(struct ocf_request *req);
static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
static void raw_dynamic_load_all_read_end(struct ocf_request *req, int error)
{
struct raw_dynamic_load_all_context *context = io->priv1;
ocf_io_put(io);
struct raw_dynamic_load_all_context *context = req->priv;
if (error) {
raw_dynamic_load_all_complete(context, error);
@ -389,7 +385,7 @@ static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
}
context->req->engine_handler = raw_dynamic_load_all_update;
ocf_queue_push_req(context->req,
ocf_queue_push_req(req,
OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
@ -399,7 +395,6 @@ static int raw_dynamic_load_all_read(struct ocf_request *req)
struct ocf_metadata_raw *raw = context->raw;
uint64_t ssd_pages_offset;
uint64_t count;
int result;
ssd_pages_offset = raw->ssd_pages_offset +
raw_dynamic_segment_size_on_ssd(raw) *
@ -407,28 +402,11 @@ static int raw_dynamic_load_all_read(struct ocf_request *req)
count = metadata_io_size(context->i_page, raw->ssd_pages);
/* Allocate IO */
context->io = ocf_new_cache_io(context->cache, req->io_queue,
PAGES_TO_BYTES(ssd_pages_offset + context->i_page),
PAGES_TO_BYTES(count), OCF_READ, 0, 0);
req->cache_forward_end = raw_dynamic_load_all_read_end;
if (!context->io) {
raw_dynamic_load_all_complete(context, -OCF_ERR_NO_MEM);
return 0;
}
/* Setup IO */
result = ocf_io_set_data(context->io, context->data, 0);
if (result) {
ocf_io_put(context->io);
raw_dynamic_load_all_complete(context, result);
return 0;
}
ocf_io_set_cmpl(context->io, context, NULL,
raw_dynamic_load_all_read_end);
/* Submit IO */
ocf_volume_submit_io(context->io);
ocf_req_forward_cache_io(req, OCF_READ,
PAGES_TO_BYTES(ssd_pages_offset + context->i_page),
PAGES_TO_BYTES(count), 0);
return 0;
}
@ -442,10 +420,10 @@ static int raw_dynamic_load_all_update(struct ocf_request *req)
int result = 0;
/* Reset head of data buffer */
ctx_data_seek_check(context->cache->owner, context->data,
ctx_data_seek_check(context->cache->owner, req->data,
ctx_data_seek_begin, 0);
result = raw_dynamic_update_pages(cache, raw, context->data,
result = raw_dynamic_update_pages(cache, raw, req->data,
context->i_page, count, &context->page, context->zpage);
context->i_page += count;
@ -466,6 +444,7 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
ocf_metadata_end_t cmpl, void *priv, unsigned flapping_idx)
{
struct raw_dynamic_load_all_context *context;
struct ocf_request *req;
int result;
ENV_BUG_ON(raw->flapping ? flapping_idx > 1 : flapping_idx != 0);
@ -481,37 +460,39 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
context->cmpl = cmpl;
context->priv = priv;
context->data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
if (!context->data) {
result = -OCF_ERR_NO_MEM;
goto err_data;
}
context->zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!context->zpage) {
result = -OCF_ERR_NO_MEM;
goto err_zpage;
}
context->req = ocf_req_new_mngt(cache, cache->mngt_queue);
if (!context->req) {
req = ocf_req_new_mngt(cache, cache->mngt_queue);
if (!req) {
result = -OCF_ERR_NO_MEM;
goto err_req;
}
context->req->info.internal = true;
context->req->priv = context;
context->req->engine_handler = raw_dynamic_load_all_read;
req->data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
if (!req->data) {
result = -OCF_ERR_NO_MEM;
goto err_data;
}
req->info.internal = true;
req->priv = context;
req->engine_handler = raw_dynamic_load_all_read;
context->req = req;
ocf_queue_push_req(context->req,
OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
return;
err_data:
ocf_req_put(req);
err_req:
env_free(context->zpage);
err_zpage:
ctx_data_free(cache->owner, context->data);
err_data:
env_vfree(context);
OCF_CMPL_RET(priv, result);
}

View File

@ -506,8 +506,7 @@ static void ocf_metadata_flush_disk(ocf_pipeline_t pipeline,
struct ocf_metadata_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_submit_volume_flush(ocf_cache_get_volume(cache),
ocf_metadata_flush_disk_end, context);
ocf_submit_cache_flush(cache, ocf_metadata_flush_disk_end, context);
}
struct ocf_pipeline_arg ocf_metadata_flush_sb_args[] = {
@ -652,10 +651,10 @@ unsigned ocf_metadata_superblock_get_next_flapping_idx(
return (sb->config->flapping_idx + 1) % 2;
}
static void ocf_metadata_read_sb_complete(struct ocf_io *io, int error)
static void ocf_metadata_read_sb_complete(struct ocf_request *req, int error)
{
struct ocf_metadata_read_sb_ctx *context = io->priv1;
ctx_data_t *data = ocf_io_get_data(io);
struct ocf_metadata_read_sb_ctx *context = req->priv;
ctx_data_t *data = req->data;
if (!error) {
/* Read data from data into super block buffer */
@ -664,12 +663,12 @@ static void ocf_metadata_read_sb_complete(struct ocf_io *io, int error)
}
ctx_data_free(context->ctx, data);
ocf_io_put(io);
context->error = error;
context->cmpl(context);
env_free(context);
env_free(req);
}
int ocf_metadata_read_sb(ocf_ctx_t ctx, ocf_volume_t volume,
@ -677,8 +676,7 @@ int ocf_metadata_read_sb(ocf_ctx_t ctx, ocf_volume_t volume,
{
struct ocf_metadata_read_sb_ctx *context;
size_t sb_pages = BYTES_TO_PAGES(sizeof(context->superblock));
ctx_data_t *data;
struct ocf_io *io;
struct ocf_request *req;
int result = 0;
/* Allocate memory for first page of super block */
@ -693,43 +691,31 @@ int ocf_metadata_read_sb(ocf_ctx_t ctx, ocf_volume_t volume,
context->priv1 = priv1;
context->priv2 = priv2;
/* Allocate resources for IO */
io = ocf_volume_new_io(volume, NULL, 0, sb_pages * PAGE_SIZE,
OCF_READ, 0, 0);
if (!io) {
req = env_zalloc(sizeof(*req), ENV_MEM_NORMAL);
if (!req) {
ocf_log(ctx, log_err, "Memory allocation error");
result = -OCF_ERR_NO_MEM;
goto err_io;
goto err_req;
}
data = ctx_data_alloc(ctx, sb_pages);
if (!data) {
req->data = ctx_data_alloc(ctx, sb_pages);
if (!req->data) {
ocf_log(ctx, log_err, "Memory allocation error");
result = -OCF_ERR_NO_MEM;
goto err_data;
}
/*
* Read first page of cache device in order to recover metadata
* properties
*/
result = ocf_io_set_data(io, data, 0);
if (result) {
ocf_log(ctx, log_err, "Metadata IO configuration error\n");
result = -OCF_ERR_IO;
goto err_set_data;
}
req->volume_forward_end = ocf_metadata_read_sb_complete;
req->priv = context;
ocf_io_set_cmpl(io, context, NULL, ocf_metadata_read_sb_complete);
ocf_volume_submit_io(io);
ocf_req_forward_volume_io_simple(req, volume, OCF_READ, 0,
sb_pages * PAGE_SIZE);
return 0;
err_set_data:
ctx_data_free(ctx, data);
err_data:
ocf_io_put(io);
err_io:
env_free(req);
err_req:
env_free(context);
return result;
}

View File

@ -1057,9 +1057,9 @@ static void _ocf_mngt_test_volume_discard(
* Submit discard request
*/
ocf_submit_volume_discard(&cache->device->volume,
context->test.reserved_lba_addr, PAGE_SIZE,
_ocf_mngt_test_volume_discard_complete, context);
ocf_submit_cache_discard(cache, context->test.reserved_lba_addr,
PAGE_SIZE, _ocf_mngt_test_volume_discard_complete,
context);
}
static void _ocf_mngt_test_volume_second_read_complete(void *priv, int error)
@ -1819,11 +1819,11 @@ static void _ocf_mngt_attach_discard(ocf_pipeline_t pipeline,
if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
/* discard doesn't zero data - need to explicitly write zeros */
ocf_submit_write_zeros(&cache->device->volume, addr, length,
ocf_submit_cache_write_zeros(cache, addr, length,
_ocf_mngt_attach_discard_complete, context);
} else {
/* Discard volume after metadata */
ocf_submit_volume_discard(&cache->device->volume, addr, length,
ocf_submit_cache_discard(cache, addr, length,
_ocf_mngt_attach_discard_complete, context);
}
}
@ -1843,8 +1843,8 @@ static void _ocf_mngt_attach_flush(ocf_pipeline_t pipeline,
bool discard = cache->device->volume.features.discard_zeroes;
if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
ocf_submit_volume_flush(&cache->device->volume,
_ocf_mngt_attach_flush_complete, context);
ocf_submit_cache_flush(cache, _ocf_mngt_attach_flush_complete,
context);
} else {
ocf_pipeline_next(pipeline);
}

View File

@ -281,13 +281,6 @@ void *ocf_cache_get_priv(ocf_cache_t cache)
return cache->priv;
}
struct ocf_cache_volume_io_priv {
struct ocf_io *io;
struct ctx_data_t *data;
env_atomic remaining;
env_atomic error;
};
struct ocf_cache_volume {
ocf_cache_t cache;
};
@ -299,183 +292,97 @@ static inline ocf_cache_t ocf_volume_to_cache(ocf_volume_t volume)
return cache_volume->cache;
}
static void ocf_cache_volume_io_complete_generic(struct ocf_io *vol_io,
static void ocf_cache_volume_io_complete_generic(struct ocf_request *req,
int error)
{
struct ocf_cache_volume_io_priv *priv;
struct ocf_io *io = vol_io->priv1;
ocf_cache_t cache = ocf_volume_to_cache(ocf_io_get_volume(io));
ocf_cache_t cache = req->cache;
priv = ocf_io_get_priv(io);
if (env_atomic_dec_return(&priv->remaining))
return;
ocf_io_put(vol_io);
ocf_io_end(io, error);
ocf_refcnt_dec(&cache->refcnt.metadata);
ocf_io_end_func(req, error);
}
static void ocf_cache_io_complete(struct ocf_io *io, int error)
static void ocf_cache_io_complete(struct ocf_request *req, int error)
{
struct ocf_cache_volume_io_priv *priv;
ocf_cache_t cache;
struct ocf_request *req = ocf_io_to_req(io);
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
ocf_cache_t cache = req->cache;
if (error)
req->error = req->error ?: error;
priv = ocf_io_get_priv(io);
env_atomic_cmpxchg(&priv->error, 0, error);
if (env_atomic_dec_return(&priv->remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
ocf_refcnt_dec(&cache->refcnt.metadata);
ocf_io_end(io, env_atomic_read(&priv->error));
ocf_io_end_func(req, req->error);
}
static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
static void ocf_cache_volume_submit_io(ocf_io_t io)
{
struct ocf_io *io = vol_io->priv1;
ocf_io_put(vol_io);
ocf_cache_io_complete(io, error);
}
static int ocf_cache_volume_prepare_vol_io(struct ocf_io *io,
struct ocf_io **vol_io)
{
ocf_cache_t cache;
struct ocf_io *tmp_io;
OCF_CHECK_NULL(io);
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
tmp_io = ocf_volume_new_io(ocf_cache_get_volume(cache), io->io_queue,
io->addr, io->bytes, io->dir, io->io_class, io->flags);
if (!tmp_io)
return -OCF_ERR_NO_MEM;
*vol_io = tmp_io;
return 0;
}
static void ocf_cache_volume_submit_io(struct ocf_io *io)
{
struct ocf_cache_volume_io_priv *priv;
struct ocf_io *vol_io;
ocf_cache_t cache;
struct ocf_request *req = ocf_io_to_req(io);
ocf_cache_t cache = req->cache;
int result;
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
priv = ocf_io_get_priv(io);
if (!ocf_refcnt_inc(&cache->refcnt.metadata)) {
ocf_io_end(io, -OCF_ERR_IO);
ocf_io_end_func(io, -OCF_ERR_IO);
return;
}
if (unlikely(!ocf_cache_is_standby(cache))) {
ocf_io_end(io, -OCF_ERR_CACHE_NOT_STANDBY);
ocf_io_end_func(io, -OCF_ERR_CACHE_NOT_STANDBY);
return;
}
env_atomic_set(&priv->remaining, 3);
env_atomic_set(&priv->error, 0);
env_atomic_set(&req->req_remaining, 3);
result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
if (result) {
ocf_io_end(io, result);
return;
}
req->cache_forward_end = ocf_cache_io_complete;
ocf_req_forward_cache_io(req, req->rw, req->addr,
req->bytes, req->offset);
result = ocf_io_set_data(vol_io, priv->data, 0);
if (result) {
ocf_io_put(vol_io);
ocf_io_end(io, result);
return;
}
ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete);
ocf_volume_submit_io(vol_io);
result = ocf_metadata_passive_update(cache, io, ocf_cache_io_complete);
req->complete = ocf_cache_io_complete;
result = ocf_metadata_passive_update(req);
if (result) {
ocf_cache_log(cache, log_crit,
"Metadata update error (error=%d)!\n", result);
}
ocf_cache_io_complete(io, 0);
	// TODO: why is the result not passed to io_cmpl?
ocf_cache_io_complete(req, 0);
}
static void ocf_cache_volume_submit_flush(struct ocf_io *io)
static void ocf_cache_volume_submit_flush(ocf_io_t io)
{
struct ocf_cache_volume_io_priv *priv;
struct ocf_io *vol_io;
ocf_cache_t cache;
int result;
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
priv = ocf_io_get_priv(io);
struct ocf_request *req = ocf_io_to_req(io);
ocf_cache_t cache = req->cache;
if (!ocf_refcnt_inc(&cache->refcnt.metadata)) {
ocf_io_end(io, -OCF_ERR_IO);
ocf_io_end_func(io, -OCF_ERR_IO);
return;
}
if (unlikely(!ocf_cache_is_standby(cache))) {
ocf_io_end(io, -OCF_ERR_CACHE_NOT_STANDBY);
ocf_io_end_func(io, -OCF_ERR_CACHE_NOT_STANDBY);
return;
}
env_atomic_set(&priv->remaining, 1);
result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
if (result) {
ocf_io_end(io, result);
return;
}
ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
ocf_volume_submit_flush(vol_io);
req->cache_forward_end = ocf_cache_volume_io_complete_generic;
ocf_req_forward_cache_flush(req);
}
static void ocf_cache_volume_submit_discard(struct ocf_io *io)
static void ocf_cache_volume_submit_discard(ocf_io_t io)
{
struct ocf_cache_volume_io_priv *priv;
struct ocf_io *vol_io;
ocf_cache_t cache;
int result;
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
priv = ocf_io_get_priv(io);
struct ocf_request *req = ocf_io_to_req(io);
ocf_cache_t cache = req->cache;
if (!ocf_refcnt_inc(&cache->refcnt.metadata)) {
ocf_io_end(io, -OCF_ERR_IO);
ocf_io_end_func(io, -OCF_ERR_IO);
return;
}
if (unlikely(!ocf_cache_is_standby(cache))) {
ocf_io_end(io, -OCF_ERR_CACHE_NOT_STANDBY);
ocf_io_end_func(io, -OCF_ERR_CACHE_NOT_STANDBY);
return;
}
env_atomic_set(&priv->remaining, 1);
result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
if (result) {
ocf_io_end(io, result);
return;
}
ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
ocf_volume_submit_discard(vol_io);
req->cache_forward_end = ocf_cache_volume_io_complete_generic;
ocf_req_forward_cache_discard(req, req->addr,
req->bytes);
}
/* *** VOLUME OPS *** */
@ -512,31 +419,8 @@ static uint64_t ocf_cache_volume_get_byte_length(ocf_volume_t volume)
return ocf_volume_get_length(ocf_cache_get_volume(cache));
}
/* *** IO OPS *** */
static int ocf_cache_io_set_data(struct ocf_io *io,
ctx_data_t *data, uint32_t offset)
{
struct ocf_cache_volume_io_priv *priv = ocf_io_get_priv(io);
if (!data || offset)
return -OCF_ERR_INVAL;
priv->data = data;
return 0;
}
static ctx_data_t *ocf_cache_io_get_data(struct ocf_io *io)
{
struct ocf_cache_volume_io_priv *priv = ocf_io_get_priv(io);
return priv->data;
}
const struct ocf_volume_properties ocf_cache_volume_properties = {
.name = "OCF_Cache",
.io_priv_size = sizeof(struct ocf_cache_volume_io_priv),
.volume_priv_size = sizeof(struct ocf_cache_volume),
.caps = {
.atomic_writes = 0,
@ -552,17 +436,53 @@ const struct ocf_volume_properties ocf_cache_volume_properties = {
.get_max_io_size = ocf_cache_volume_get_max_io_size,
.get_length = ocf_cache_volume_get_byte_length,
},
.io_ops = {
.set_data = ocf_cache_io_set_data,
.get_data = ocf_cache_io_get_data,
},
.deinit = NULL,
};
static int ocf_cache_io_allocator_init(ocf_io_allocator_t allocator,
const char *name)
{
return 0;
}
static void ocf_cache_io_allocator_deinit(ocf_io_allocator_t allocator)
{
}
static void *ocf_cache_io_allocator_new(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir)
{
ocf_cache_t cache = ocf_volume_to_cache(volume);
return ocf_req_new_cache(cache, queue, addr, bytes, dir);
}
static void ocf_cache_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
{
struct ocf_request *req = obj;
ocf_req_put(req);
}
const struct ocf_io_allocator_type ocf_cache_io_allocator_type = {
.ops = {
.allocator_init = ocf_cache_io_allocator_init,
.allocator_deinit = ocf_cache_io_allocator_deinit,
.allocator_new = ocf_cache_io_allocator_new,
.allocator_del = ocf_cache_io_allocator_del,
},
};
const struct ocf_volume_extended ocf_cache_volume_extended = {
.allocator_type = &ocf_cache_io_allocator_type,
};
int ocf_cache_volume_type_init(ocf_ctx_t ctx)
{
return ocf_ctx_register_volume_type_internal(ctx, OCF_VOLUME_TYPE_CACHE,
&ocf_cache_volume_properties, NULL);
&ocf_cache_volume_properties,
&ocf_cache_volume_extended);
}
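Note: passing ocf_cache_volume_extended instead of NULL plugs in the custom allocator above, so every ocf_io_t created on the cache front volume is a bare struct ocf_request obtained from ocf_req_new_cache(). A sketch of the assumed selection logic inside ocf_ctx_register_volume_type_internal() (not visible in this diff):

	/* Assumption: extended registration overrides the default allocator */
	allocator_type = (extended && extended->allocator_type) ?
			extended->allocator_type :
			ocf_io_allocator_get_type_default();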
bool ocf_dbg_cache_is_settled(ocf_cache_t cache)

View File

@ -27,73 +27,6 @@ struct ocf_composite_volume {
unsigned max_io_size;
};
struct ocf_composite_volume_io {
struct ocf_io *member_io[OCF_COMPOSITE_VOLUME_MEMBERS_MAX];
ctx_data_t *data;
uint8_t begin_member;
uint8_t end_member;
env_atomic remaining;
env_atomic error;
};
static void ocf_composite_volume_master_cmpl(struct ocf_io *master_io,
int error)
{
struct ocf_composite_volume_io *cio = ocf_io_get_priv(master_io);
env_atomic_cmpxchg(&cio->error, 0, error);
if (env_atomic_dec_return(&cio->remaining))
return;
ocf_io_end(master_io, env_atomic_read(&cio->error));
}
static void ocf_composite_volume_io_cmpl(struct ocf_io *io, int error)
{
struct ocf_io *master_io = io->priv1;
ocf_composite_volume_master_cmpl(master_io, error);
}
static void ocf_composite_volume_handle_io(struct ocf_io *master_io,
void (*hndl)(struct ocf_io *io))
{
struct ocf_composite_volume_io *cio = ocf_io_get_priv(master_io);
int i;
env_atomic_set(&cio->remaining,
cio->end_member - cio->begin_member + 1);
env_atomic_set(&cio->error, 0);
for (i = cio->begin_member; i < cio->end_member; i++) {
ocf_io_set_cmpl(cio->member_io[i], master_io, NULL,
ocf_composite_volume_io_cmpl);
cio->member_io[i]->io_class = master_io->io_class;
cio->member_io[i]->flags = master_io->flags;
hndl(cio->member_io[i]);
}
ocf_composite_volume_master_cmpl(master_io, 0);
}
static void ocf_composite_volume_submit_io(struct ocf_io *master_io)
{
ocf_composite_volume_handle_io(master_io, ocf_volume_submit_io);
}
static void ocf_composite_volume_submit_flush(struct ocf_io *master_io)
{
ocf_composite_volume_handle_io(master_io, ocf_volume_submit_flush);
}
static void ocf_composite_volume_submit_discard(struct ocf_io *master_io)
{
ocf_composite_volume_handle_io(master_io, ocf_volume_submit_discard);
}
void ocf_composite_forward_io(ocf_volume_t cvolume,
ocf_forward_token_t token, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset)
@ -199,6 +132,136 @@ void ocf_composite_forward_discard(ocf_volume_t cvolume,
ocf_forward_end(token, 0);
}
void ocf_composite_forward_write_zeros(ocf_volume_t cvolume,
ocf_forward_token_t token, uint64_t addr, uint64_t bytes)
{
struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
uint64_t member_bytes, caddr;
int i;
caddr = addr;
ENV_BUG_ON(addr >= composite->length);
ENV_BUG_ON(addr + bytes > composite->length);
for (i = 0; i < composite->members_cnt; i++) {
if (addr >= composite->end_addr[i])
continue;
if (unlikely(!composite->member[i].volume.opened)) {
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
addr = addr - (i > 0 ? composite->end_addr[i-1] : 0);
break;
}
for (; i < composite->members_cnt && bytes; i++) {
if (unlikely(!composite->member[i].volume.opened)) {
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
member_bytes = OCF_MIN(bytes, composite->end_addr[i] - caddr);
ocf_forward_write_zeros(&composite->member[i].volume, token,
addr, member_bytes);
addr = 0;
caddr = composite->end_addr[i];
bytes -= member_bytes;
}
/* Put io forward counter to account for the original forward */
ocf_forward_end(token, 0);
}
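Note: the split arithmetic deserves a concrete walkthrough. The first loop locates the member containing addr and rebases addr into that member's address space; the second loop slices the remaining bytes member by member, using caddr (the cumulative composite address) to bound each slice. A worked example with a hypothetical two-member layout:

	/*
	 * Hypothetical layout: end_addr = {4096, 12288}, so member 0
	 * spans [0, 4096) and member 1 spans [4096, 12288).
	 *
	 * forward_write_zeros(addr = 2048, bytes = 6144):
	 *   - first loop: addr < end_addr[0], rebased addr = 2048, i = 0
	 *   - iteration 0: member_bytes = min(6144, 4096 - 2048) = 2048
	 *     -> member 0 gets (addr = 2048, bytes = 2048);
	 *        then addr = 0, caddr = 4096, bytes = 4096
	 *   - iteration 1: member_bytes = min(4096, 12288 - 4096) = 4096
	 *     -> member 1 gets (addr = 0, bytes = 4096); bytes = 0, loop ends
	 *   - final ocf_forward_end(token, 0) drops the reference held
	 *     for the original forward call
	 */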
void ocf_composite_forward_metadata(ocf_volume_t cvolume,
ocf_forward_token_t token, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset)
{
struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
uint64_t member_bytes, caddr;
int i;
ENV_BUG_ON(addr >= composite->length);
ENV_BUG_ON(addr + bytes > composite->length);
caddr = addr;
for (i = 0; i < composite->members_cnt; i++) {
if (addr >= composite->end_addr[i])
continue;
if (unlikely(!composite->member[i].volume.opened)) {
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
addr = addr - (i > 0 ? composite->end_addr[i-1] : 0);
break;
}
for (; i < composite->members_cnt && bytes; i++) {
if (unlikely(!composite->member[i].volume.opened)) {
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
member_bytes = OCF_MIN(bytes, composite->end_addr[i] - caddr);
ocf_forward_metadata(&composite->member[i].volume, token, dir,
addr, member_bytes, offset);
addr = 0;
caddr = composite->end_addr[i];
bytes -= member_bytes;
offset += member_bytes;
}
/* Put io forward counter to account for the original forward */
ocf_forward_end(token, 0);
}
void ocf_composite_forward_io_simple(ocf_volume_t cvolume,
ocf_forward_token_t token, int dir,
uint64_t addr, uint64_t bytes)
{
struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
uint64_t caddr;
int i;
ENV_BUG_ON(addr >= composite->length);
ENV_BUG_ON(addr + bytes > composite->length);
caddr = addr;
for (i = 0; i < composite->members_cnt; i++) {
if (addr >= composite->end_addr[i])
continue;
if (unlikely(!composite->member[i].volume.opened)) {
ocf_forward_end(token, -OCF_ERR_IO);
return;
}
addr = addr - (i > 0 ? composite->end_addr[i-1] : 0);
break;
}
if (caddr + bytes > composite->end_addr[i]) {
ocf_forward_end(token, -OCF_ERR_IO);
return;
}
ocf_forward_io_simple(&composite->member[i].volume, token,
dir, addr, bytes);
/* Put io forward counter to account for the original forward */
ocf_forward_end(token, 0);
}
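Note: unlike the full forward_io path, forward_io_simple is never split across members; a range that crosses a member boundary fails with -OCF_ERR_IO. A caller that cannot guarantee a single-member range should use the regular forward path instead (sketch; range_may_span_members is an illustrative predicate):

	if (range_may_span_members)
		ocf_forward_io(volume, token, dir, addr, bytes, 0);
	else
		ocf_forward_io_simple(volume, token, dir, addr, bytes);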
/* *** VOLUME OPS *** */
static int ocf_composite_volume_open(ocf_volume_t cvolume, void *volume_params)
@ -262,64 +325,19 @@ static void ocf_composite_volume_on_deinit(ocf_volume_t cvolume)
ocf_volume_deinit(&composite->member[i].volume);
}
/* *** IO OPS *** */
static int ocf_composite_io_set_data(struct ocf_io *io,
ctx_data_t *data, uint32_t offset)
{
ocf_volume_t cvolume = ocf_io_get_volume(io);
struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
struct ocf_composite_volume_io *cio = ocf_io_get_priv(io);
uint64_t member_volume_start, member_data_offset;
int i, ret = 0;
cio->data = data;
for (i = cio->begin_member; i < cio->end_member; i++) {
/* Each member IO will have the same data set, but with
* different offset. First member will use bare offset set from
* caller, each subsequent member IO has to skip over parts
* "belonging" to previous members. */
if (i == cio->begin_member) {
member_data_offset = offset;
} else {
member_volume_start = composite->end_addr[i - 1];
member_data_offset = member_volume_start - io->addr;
member_data_offset += offset;
}
ret = ocf_io_set_data(cio->member_io[i], data,
member_data_offset);
if (ret)
break;
}
return ret;
}
static ctx_data_t *ocf_composite_io_get_data(struct ocf_io *io)
{
struct ocf_composite_volume_io *cio = ocf_io_get_priv(io);
return cio->data;
}
const struct ocf_volume_properties ocf_composite_volume_properties = {
.name = "OCF Composite",
.io_priv_size = sizeof(struct ocf_composite_volume_io),
.volume_priv_size = sizeof(struct ocf_composite_volume),
.caps = {
.atomic_writes = 0,
},
.ops = {
.submit_io = ocf_composite_volume_submit_io,
.submit_flush = ocf_composite_volume_submit_flush,
.submit_discard = ocf_composite_volume_submit_discard,
.submit_metadata = NULL,
.forward_io = ocf_composite_forward_io,
.forward_flush = ocf_composite_forward_flush,
.forward_discard = ocf_composite_forward_discard,
.forward_write_zeros = ocf_composite_forward_write_zeros,
.forward_metadata = ocf_composite_forward_metadata,
.forward_io_simple = ocf_composite_forward_io_simple,
.open = ocf_composite_volume_open,
.close = ocf_composite_volume_close,
@ -328,133 +346,14 @@ const struct ocf_volume_properties ocf_composite_volume_properties = {
.on_deinit = ocf_composite_volume_on_deinit,
},
.io_ops = {
.set_data = ocf_composite_io_set_data,
.get_data = ocf_composite_io_get_data,
},
.deinit = NULL,
};
static int ocf_composite_io_allocator_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name)
{
return ocf_io_allocator_default_init(allocator, priv_size, name);
}
static void ocf_composite_io_allocator_deinit(ocf_io_allocator_t allocator)
{
ocf_io_allocator_default_deinit(allocator);
}
static void *ocf_composite_io_allocator_new(ocf_io_allocator_t allocator,
ocf_volume_t cvolume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir)
{
struct ocf_composite_volume *composite = ocf_volume_get_priv(cvolume);
struct ocf_composite_volume_io *cio;
struct ocf_io_internal *ioi;
uint64_t member_addr, member_bytes, cur_addr, cur_bytes;
int i;
ioi = ocf_io_allocator_default_new(allocator, cvolume, queue,
addr, bytes, dir);
if (!ioi)
return NULL;
cio = ocf_io_get_priv(&ioi->io);
if (bytes == 0) {
/* Flush io - allocate io for each volume */
for (i = 0; i < composite->members_cnt; i++) {
cio->member_io[i] = ocf_io_new(&composite->member[i].volume,
queue, 0, 0, dir, 0, 0);
if (!cio->member_io[i])
goto err;
}
cio->begin_member = 0;
cio->end_member = composite->members_cnt;
return ioi;
}
for (i = 0; i < composite->members_cnt; i++) {
if (addr < composite->end_addr[i]) {
cio->begin_member = i;
break;
}
}
cur_addr = addr;
cur_bytes = bytes;
for (; i < composite->members_cnt; i++) {
member_addr = cur_addr - (i > 0 ? composite->end_addr[i-1] : 0);
member_bytes =
OCF_MIN(cur_addr + cur_bytes, composite->end_addr[i])
- cur_addr;
cio->member_io[i] = ocf_io_new(&composite->member[i].volume, queue,
member_addr, member_bytes, dir, 0, 0);
if (!cio->member_io[i])
goto err;
cur_addr += member_bytes;
cur_bytes -= member_bytes;
if (!cur_bytes) {
cio->end_member = i + 1;
break;
}
}
ENV_BUG_ON(cur_bytes != 0);
return ioi;
err:
for (i = 0; i < composite->members_cnt; i++) {
if (cio->member_io[i])
ocf_io_put(cio->member_io[i]);
}
ocf_io_allocator_default_del(allocator, ioi);
return NULL;
}
static void ocf_composite_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
{
struct ocf_io_internal *ioi = obj;
struct ocf_composite_volume_io *cio = ocf_io_get_priv(&ioi->io);
int i;
for (i = cio->begin_member; i < cio->end_member; i++) {
if (cio->member_io[i])
ocf_io_put(cio->member_io[i]);
}
ocf_io_allocator_default_del(allocator, ioi);
}
const struct ocf_io_allocator_type ocf_composite_io_allocator_type = {
.ops = {
.allocator_init = ocf_composite_io_allocator_init,
.allocator_deinit = ocf_composite_io_allocator_deinit,
.allocator_new = ocf_composite_io_allocator_new,
.allocator_del = ocf_composite_io_allocator_del,
},
};
const struct ocf_volume_extended ocf_composite_volume_extended = {
.allocator_type = &ocf_composite_io_allocator_type,
};
int ocf_composite_volume_type_init(ocf_ctx_t ctx)
{
return ocf_ctx_register_volume_type_internal(ctx,
OCF_VOLUME_TYPE_COMPOSITE,
&ocf_composite_volume_properties,
&ocf_composite_volume_extended);
&ocf_composite_volume_properties, NULL);
}
int ocf_composite_volume_create(ocf_composite_volume_t *volume, ocf_ctx_t ctx)

View File

@ -155,12 +155,9 @@ static uint64_t _calc_dirty_for(uint64_t dirty_since)
return dirty_since ? (current_time - dirty_since) : 0;
}
struct ocf_request *ocf_io_to_req(struct ocf_io *io)
struct ocf_request *ocf_io_to_req(ocf_io_t io)
{
struct ocf_io_internal *ioi;
ioi = container_of(io, struct ocf_io_internal, io);
return container_of(ioi, struct ocf_request, ioi);
return io;
}
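Note: with requests now allocated directly by the volume allocators, the handle/request conversion is an identity, and the two spellings used throughout this patch are interchangeable:

	/* same pointer either way */
	ocf_io_end_func(io, error);
	ocf_io_end_func(req, error);	/* as in ocf_cache_volume_io_complete_generic() */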
static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
@ -179,30 +176,31 @@ static inline void dec_counter_if_req_was_dirty(struct ocf_request *req)
ocf_refcnt_dec(&req->cache->refcnt.dirty);
}
static inline int ocf_core_validate_io(struct ocf_io *io)
static inline int ocf_core_validate_io(ocf_io_t io)
{
struct ocf_request *req = ocf_io_to_req(io);
ocf_volume_t volume = ocf_io_get_volume(io);
ocf_core_t core = ocf_volume_to_core(volume);
if (io->addr + io->bytes > ocf_volume_get_length(volume))
if (req->addr + req->bytes > ocf_volume_get_length(volume))
return -OCF_ERR_INVAL;
if (io->io_class >= OCF_USER_IO_CLASS_MAX)
if (req->io.io_class >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;
if (io->dir != OCF_READ && io->dir != OCF_WRITE)
if (req->rw != OCF_READ && req->rw != OCF_WRITE)
return -OCF_ERR_INVAL;
if (!io->io_queue)
if (!req->io_queue)
return -OCF_ERR_INVAL;
if (!io->end)
if (!req->io.end)
return -OCF_ERR_INVAL;
/* Core volume I/O must not be queued on management queue - this would
* break I/O accounting code, resulting in use-after-free type of errors
* after cache detach, core remove etc. */
if (io->io_queue == ocf_core_get_cache(core)->mngt_queue)
if (req->io_queue == ocf_core_get_cache(core)->mngt_queue)
return -OCF_ERR_INVAL;
return 0;
@ -211,12 +209,12 @@ static inline int ocf_core_validate_io(struct ocf_io *io)
static void ocf_req_complete(struct ocf_request *req, int error)
{
/* Complete IO */
ocf_io_end(&req->ioi.io, error);
ocf_io_end_func(req, error);
dec_counter_if_req_was_dirty(req);
/* Invalidate OCF IO, it is not valid after completion */
ocf_io_put(&req->ioi.io);
ocf_io_put(req);
}
static inline ocf_req_cache_mode_t _ocf_core_req_resolve_fast_mode(
@ -236,8 +234,7 @@ static inline ocf_req_cache_mode_t _ocf_core_req_resolve_fast_mode(
return ocf_req_cache_mode_fast;
}
static int ocf_core_submit_io_fast(struct ocf_io *io, struct ocf_request *req,
ocf_cache_t cache)
static int ocf_core_submit_io_fast(struct ocf_request *req, ocf_cache_t cache)
{
ocf_req_cache_mode_t original_mode, resolved_mode;
int ret;
@ -259,9 +256,9 @@ static int ocf_core_submit_io_fast(struct ocf_io *io, struct ocf_request *req,
return ret;
}
static void ocf_core_volume_submit_io(struct ocf_io *io)
static void ocf_core_volume_submit_io(ocf_io_t io)
{
struct ocf_request *req;
struct ocf_request *req = ocf_io_to_req(io);
ocf_core_t core;
ocf_cache_t cache;
int ret;
@ -270,20 +267,18 @@ static void ocf_core_volume_submit_io(struct ocf_io *io)
ret = ocf_core_validate_io(io);
if (ret < 0) {
ocf_io_end(io, ret);
ocf_io_end_func(io, ret);
return;
}
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
core = req->core;
cache = ocf_core_get_cache(core);
if (unlikely(ocf_cache_is_standby(cache))) {
ocf_io_end(io, -OCF_ERR_CACHE_STANDBY);
ocf_io_end_func(io, -OCF_ERR_CACHE_STANDBY);
return;
}
req->core = core;
req->complete = ocf_req_complete;
ocf_io_get(io);
@ -298,16 +293,17 @@ static void ocf_core_volume_submit_io(struct ocf_io *io)
if (ret)
goto err;
req->part_id = ocf_user_part_class2id(cache, io->io_class);
req->part_id = ocf_user_part_class2id(cache, req->io.io_class);
ocf_resolve_effective_cache_mode(cache, core, req);
ocf_core_update_stats(core, io);
/* Prevent race condition */
/* In case of fastpath prevent completing the request before updating
* sequential cutoff info */
ocf_req_get(req);
if (ocf_core_submit_io_fast(io, req, cache) == OCF_FAST_PATH_YES) {
if (ocf_core_submit_io_fast(req, cache) == OCF_FAST_PATH_YES) {
ocf_core_seq_cutoff_update(core, req);
ocf_req_put(req);
return;
@ -326,14 +322,13 @@ static void ocf_core_volume_submit_io(struct ocf_io *io)
return;
err:
ocf_io_end(io, ret);
ocf_io_put(io);
ocf_io_end_func(io, ret);
ocf_io_put(req);
}
static void ocf_core_volume_submit_flush(struct ocf_io *io)
static void ocf_core_volume_submit_flush(ocf_io_t io)
{
struct ocf_request *req;
ocf_core_t core;
struct ocf_request *req = ocf_io_to_req(io);
ocf_cache_t cache;
int ret;
@ -341,20 +336,17 @@ static void ocf_core_volume_submit_flush(struct ocf_io *io)
ret = ocf_core_validate_io(io);
if (ret < 0) {
ocf_io_end(io, ret);
ocf_io_end_func(io, ret);
return;
}
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
cache = ocf_core_get_cache(req->core);
if (unlikely(ocf_cache_is_standby(cache))) {
ocf_io_end(io, -OCF_ERR_CACHE_STANDBY);
ocf_io_end_func(io, -OCF_ERR_CACHE_STANDBY);
return;
}
req->core = core;
req->complete = ocf_req_complete;
ocf_io_get(io);
@ -367,36 +359,32 @@ static void ocf_core_volume_submit_flush(struct ocf_io *io)
ocf_engine_hndl_flush_req(req);
}
static void ocf_core_volume_submit_discard(struct ocf_io *io)
static void ocf_core_volume_submit_discard(ocf_io_t io)
{
struct ocf_request *req;
ocf_core_t core;
struct ocf_request *req = ocf_io_to_req(io);
ocf_cache_t cache;
int ret;
OCF_CHECK_NULL(io);
if (io->bytes == 0) {
ocf_io_end(io, -OCF_ERR_INVAL);
if (req->bytes == 0) {
ocf_io_end_func(io, -OCF_ERR_INVAL);
return;
}
ret = ocf_core_validate_io(io);
if (ret < 0) {
ocf_io_end(io, ret);
ocf_io_end_func(io, ret);
return;
}
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
cache = ocf_core_get_cache(req->core);
if (unlikely(ocf_cache_is_standby(cache))) {
ocf_io_end(io, -OCF_ERR_CACHE_STANDBY);
ocf_io_end_func(io, -OCF_ERR_CACHE_STANDBY);
return;
}
req->core = core;
req->complete = ocf_req_complete;
ocf_io_get(io);
@ -408,8 +396,7 @@ static void ocf_core_volume_submit_discard(struct ocf_io *io)
ret = ocf_req_alloc_map_discard(req);
if (ret) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
ocf_io_put(io);
ocf_io_end_func(io, -OCF_ERR_NO_MEM);
return;
}
@ -447,39 +434,8 @@ static uint64_t ocf_core_volume_get_byte_length(ocf_volume_t volume)
return ocf_volume_get_length(&core->volume);
}
/* *** IO OPS *** */
static int ocf_core_io_set_data(struct ocf_io *io,
ctx_data_t *data, uint32_t offset)
{
struct ocf_request *req;
OCF_CHECK_NULL(io);
if (!data)
return -OCF_ERR_INVAL;
req = ocf_io_to_req(io);
req->data = data;
req->offset = offset;
return 0;
}
static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
{
struct ocf_request *req;
OCF_CHECK_NULL(io);
req = ocf_io_to_req(io);
return req->data;
}
const struct ocf_volume_properties ocf_core_volume_properties = {
.name = "OCF_Core",
.io_priv_size = 0, /* Not used - custom allocator */
.volume_priv_size = sizeof(struct ocf_core_volume),
.caps = {
.atomic_writes = 0,
@ -495,15 +451,11 @@ const struct ocf_volume_properties ocf_core_volume_properties = {
.get_max_io_size = ocf_core_volume_get_max_io_size,
.get_length = ocf_core_volume_get_byte_length,
},
.io_ops = {
.set_data = ocf_core_io_set_data,
.get_data = ocf_core_io_get_data,
},
.deinit = NULL,
};
static int ocf_core_io_allocator_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name)
const char *name)
{
return 0;
}
@ -523,14 +475,15 @@ static void *ocf_core_io_allocator_new(ocf_io_allocator_t allocator,
if (!req)
return NULL;
return &req->ioi;
req->core = ocf_volume_to_core(volume);
return req;
}
static void ocf_core_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
{
struct ocf_request *req;
struct ocf_request *req = obj;
req = container_of(obj, struct ocf_request, ioi);
ocf_req_put(req);
}

View File

@ -106,6 +106,6 @@ ocf_core_id_t ocf_core_get_id(ocf_core_t core);
int ocf_core_volume_type_init(ocf_ctx_t ctx);
struct ocf_request *ocf_io_to_req(struct ocf_io *io);
struct ocf_request *ocf_io_to_req(ocf_io_t io);
#endif /* __OCF_CORE_PRIV_H__ */

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -7,34 +8,13 @@
#include "ocf_def_priv.h"
#include "ocf_io_priv.h"
#include "ocf_volume_priv.h"
#include "ocf_core_priv.h"
#include "utils/utils_io_allocator.h"
/*
* This is io allocator dedicated for bottom devices.
* Our IO structure looks like this:
* --------------> +-------------------------+
* | OCF is aware | |
* | of this part. | struct ocf_io_meta |
* | | |
* | +-------------------------+ <----------------
* | | | Bottom adapter |
* | | struct ocf_io | is aware of |
* | | | this part. |
* --------------> +-------------------------+ |
* | | |
* | Bottom adapter specific | |
* | context data structure. | |
* | | |
* +-------------------------+ <----------------
*/
#define OCF_IO_TOTAL(priv_size) \
(sizeof(struct ocf_io_internal) + priv_size)
int ocf_io_allocator_default_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name)
const char *name)
{
allocator->priv = env_allocator_create(OCF_IO_TOTAL(priv_size), name,
allocator->priv = env_allocator_create(sizeof(struct ocf_request), name,
true);
if (!allocator->priv)
return -OCF_ERR_NO_MEM;
@ -52,7 +32,18 @@ void *ocf_io_allocator_default_new(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir)
{
return env_allocator_new(allocator->priv);
struct ocf_request *req;
req = env_allocator_new(allocator->priv);
if (!req)
return NULL;
req->io_queue = queue;
req->addr = addr;
req->bytes = bytes;
req->rw = dir;
return req;
}
void ocf_io_allocator_default_del(ocf_io_allocator_t allocator, void *obj)
@ -78,11 +69,11 @@ ocf_io_allocator_type_t ocf_io_allocator_get_type_default(void)
* IO internal API
*/
struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
ocf_io_t ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags)
{
struct ocf_io_internal *ioi;
struct ocf_request *req;
uint32_t sector_size = SECTORS_TO_BYTES(1);
if ((addr % sector_size) || (bytes % sector_size))
@ -91,76 +82,98 @@ struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
if (!ocf_refcnt_inc(&volume->refcnt))
return NULL;
ioi = ocf_io_allocator_new(&volume->type->allocator, volume, queue,
req = ocf_io_allocator_new(&volume->type->allocator, volume, queue,
addr, bytes, dir);
if (!ioi) {
if (!req) {
ocf_refcnt_dec(&volume->refcnt);
return NULL;
}
ioi->meta.volume = volume;
ioi->meta.ops = &volume->type->properties->io_ops;
env_atomic_set(&ioi->meta.ref_count, 1);
env_atomic_set(&req->io.ref_count, 1);
req->io.volume = volume;
req->io.io_class = io_class;
req->flags = flags;
ioi->io.io_queue = queue;
ioi->io.addr = addr;
ioi->io.bytes = bytes;
ioi->io.dir = dir;
ioi->io.io_class = io_class;
ioi->io.flags = flags;
return &ioi->io;
return req;
}
/*
* IO external API
*/
void *ocf_io_get_priv(struct ocf_io* io)
int ocf_io_set_data(ocf_io_t io, ctx_data_t *data, uint32_t offset)
{
return (void *)io + sizeof(struct ocf_io);
struct ocf_request *req = ocf_io_to_req(io);
req->data = data;
req->offset = offset;
return 0;
}
int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, uint32_t offset)
ctx_data_t *ocf_io_get_data(ocf_io_t io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
struct ocf_request *req = ocf_io_to_req(io);
return ioi->meta.ops->set_data(io, data, offset);
return req->data;
}
ctx_data_t *ocf_io_get_data(struct ocf_io *io)
uint32_t ocf_io_get_offset(ocf_io_t io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
struct ocf_request *req = ocf_io_to_req(io);
return ioi->meta.ops->get_data(io);
return req->offset;
}
void ocf_io_get(struct ocf_io *io)
void ocf_io_get(ocf_io_t io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
struct ocf_request *req = ocf_io_to_req(io);
env_atomic_inc_return(&ioi->meta.ref_count);
env_atomic_inc_return(&req->io.ref_count);
}
void ocf_io_put(struct ocf_io *io)
void ocf_io_put(ocf_io_t io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
struct ocf_request *req = ocf_io_to_req(io);
struct ocf_volume *volume;
if (env_atomic_dec_return(&ioi->meta.ref_count))
if (env_atomic_dec_return(&req->io.ref_count))
return;
/* Hold volume reference to avoid use-after-free of the request */
volume = ioi->meta.volume;
volume = req->io.volume;
ocf_io_allocator_del(&ioi->meta.volume->type->allocator, (void *)ioi);
ocf_io_allocator_del(&volume->type->allocator, (void *)req);
ocf_refcnt_dec(&volume->refcnt);
}
ocf_volume_t ocf_io_get_volume(struct ocf_io *io)
ocf_volume_t ocf_io_get_volume(ocf_io_t io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
struct ocf_request *req = ocf_io_to_req(io);
return ioi->meta.volume;
return req->io.volume;
}
void ocf_io_set_cmpl(ocf_io_t io, void *context,
void *context2, ocf_end_io_t fn)
{
struct ocf_request *req = ocf_io_to_req(io);
req->io.priv1 = context;
req->io.priv2 = context2;
req->io.end = fn;
}
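Note: the completion callback now receives both private pointers explicitly rather than fetching context via ocf_io_get_priv(). Wiring it up looks like this (sketch; my_end and my_ctx are illustrative names):

	static void my_end(ocf_io_t io, void *priv1, void *priv2, int error)
	{
		/* priv1 == my_ctx, priv2 == NULL, as set below */
	}

	ocf_io_set_cmpl(io, my_ctx, NULL, my_end);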
void ocf_io_set_start(ocf_io_t io, ocf_start_io_t fn)
{
struct ocf_request *req = ocf_io_to_req(io);
req->io.start = fn;
}
void ocf_io_set_handle(ocf_io_t io, ocf_handle_io_t fn)
{
struct ocf_request *req = ocf_io_to_req(io);
req->io.handle = fn;
}

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -9,26 +10,8 @@
#include "ocf/ocf.h"
#include "utils/utils_io_allocator.h"
struct ocf_io_meta {
ocf_volume_t volume;
const struct ocf_io_ops *ops;
env_atomic ref_count;
struct ocf_request *req;
};
struct ocf_io_internal {
struct ocf_io_meta meta;
struct ocf_io io;
};
static inline struct ocf_io_internal *ocf_io_get_internal(struct ocf_io* io)
{
return container_of(io, struct ocf_io_internal, io);
}
int ocf_io_allocator_default_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name);
const char *name);
void ocf_io_allocator_default_deinit(ocf_io_allocator_t allocator);
@ -38,20 +21,10 @@ void *ocf_io_allocator_default_new(ocf_io_allocator_t allocator,
void ocf_io_allocator_default_del(ocf_io_allocator_t allocator, void *obj);
int ocf_io_allocator_init(ocf_io_allocator_t allocator, ocf_io_allocator_type_t type,
uint32_t priv_size, const char *name);
struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
ocf_io_t ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags);
static inline void ocf_io_end(struct ocf_io *io, int error)
{
if (io->end)
io->end(io, error);
}
void ocf_io_end_func(ocf_io_t io, int error);
#endif /* __OCF_IO_PRIV_H__ */

View File

@ -160,9 +160,10 @@ void ocf_queue_put(ocf_queue_t queue)
env_free(queue);
}
void ocf_io_handle(struct ocf_io *io, void *opaque)
/* TODO: Remove opaque. It is no longer needed. */
void ocf_io_handle(ocf_io_t io, void *opaque)
{
struct ocf_request *req = opaque;
struct ocf_request *req = ocf_io_to_req(io);
OCF_CHECK_NULL(req);
@ -215,10 +216,10 @@ void ocf_queue_run_single(ocf_queue_t q)
if (!io_req)
return;
if (io_req->ioi.io.handle)
io_req->ioi.io.handle(&io_req->ioi.io, io_req);
if (io_req->io.handle)
io_req->io.handle(io_req, io_req);
else
ocf_io_handle(&io_req->ioi.io, io_req);
ocf_io_handle(io_req, io_req);
}
void ocf_queue_run(ocf_queue_t q)

View File

@ -92,8 +92,8 @@ static inline void ocf_req_init(struct ocf_request *req, ocf_cache_t cache,
env_atomic_set(&req->ref_count, 1);
req->byte_position = addr;
req->byte_length = bytes;
req->addr = addr;
req->bytes = bytes;
req->rw = rw;
}
@ -326,7 +326,7 @@ int ocf_req_alloc_map_discard(struct ocf_request *req)
ENV_BUILD_BUG_ON(MAX_TRIM_RQ_SIZE / ocf_cache_line_size_4 *
sizeof(struct ocf_map_info) > 4 * KiB);
if (req->byte_length <= MAX_TRIM_RQ_SIZE)
if (req->bytes <= MAX_TRIM_RQ_SIZE)
return ocf_req_alloc_map(req);
/*
@ -334,9 +334,9 @@ int ocf_req_alloc_map_discard(struct ocf_request *req)
* can handle more than MAX_TRIM_RQ_SIZE, so for these cache line sizes
* discard request uses only part of the mapping array.
*/
req->byte_length = MAX_TRIM_RQ_SIZE;
req->bytes = MAX_TRIM_RQ_SIZE;
req->core_line_last = ocf_bytes_2_lines(req->cache,
req->byte_position + req->byte_length - 1);
req->addr + req->bytes - 1);
req->core_line_count = req->core_line_last - req->core_line_first + 1;
return ocf_req_alloc_map(req);
@ -432,6 +432,51 @@ void ocf_req_hash(struct ocf_request *req)
}
}
void ocf_io_end_func(ocf_io_t io, int error)
{
struct ocf_request *req = ocf_io_to_req(io);
if (req->io.end)
req->io.end(io, req->io.priv1, req->io.priv2, error);
}
void ocf_req_forward_volume_io(struct ocf_request *req, ocf_volume_t volume,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
ocf_forward_token_t token = ocf_req_to_cache_forward_token(req);
ocf_req_forward_cache_get(req);
ocf_volume_forward_io(volume, token, dir, addr, bytes, offset);
}
void ocf_req_forward_volume_flush(struct ocf_request *req, ocf_volume_t volume)
{
ocf_forward_token_t token = ocf_req_to_cache_forward_token(req);
ocf_req_forward_cache_get(req);
ocf_volume_forward_flush(volume, token);
}
void ocf_req_forward_volume_discard(struct ocf_request *req,
ocf_volume_t volume, uint64_t addr, uint64_t bytes)
{
ocf_forward_token_t token = ocf_req_to_cache_forward_token(req);
ocf_req_forward_cache_get(req);
ocf_volume_forward_discard(volume, token, addr, bytes);
}
void ocf_req_forward_volume_io_simple(struct ocf_request *req,
ocf_volume_t volume, int dir, uint64_t addr, uint64_t bytes)
{
ocf_forward_token_t token = ocf_req_to_cache_forward_token(req);
req->cache_error = 0;
ocf_req_forward_cache_get(req);
ocf_volume_forward_io_simple(volume, token, dir, addr, bytes);
}
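Note: each ocf_req_forward_volume_*() helper grabs a forward reference before handing the token down, and ocf_forward_end() releases it; since volume_forward_end shares a union with cache_forward_end, setting it routes the final completion back to the caller. The intended call pattern (handler name illustrative):

	req->volume_forward_end = my_forward_end;	/* ocf_req_end_t */
	ocf_req_forward_volume_io(req, volume, req->rw, req->addr,
			req->bytes, req->offset);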
void ocf_req_forward_cache_io(struct ocf_request *req, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset)
{
@ -467,6 +512,30 @@ void ocf_req_forward_cache_discard(struct ocf_request *req, uint64_t addr,
ocf_volume_forward_discard(volume, token, addr, bytes);
}
void ocf_req_forward_cache_write_zeros(struct ocf_request *req, uint64_t addr,
uint64_t bytes)
{
ocf_volume_t volume = ocf_cache_get_volume(req->cache);
ocf_forward_token_t token = ocf_req_to_cache_forward_token(req);
req->cache_error = 0;
ocf_req_forward_cache_get(req);
ocf_volume_forward_write_zeros(volume, token, addr, bytes);
}
void ocf_req_forward_cache_metadata(struct ocf_request *req, int dir,
uint64_t addr, uint64_t bytes, uint64_t offset)
{
ocf_volume_t volume = ocf_cache_get_volume(req->cache);
ocf_forward_token_t token = ocf_req_to_cache_forward_token(req);
req->cache_error = 0;
ocf_req_forward_cache_get(req);
ocf_volume_forward_metadata(volume, token, dir, addr, bytes, offset);
}
void ocf_req_forward_core_io(struct ocf_request *req, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset)
{
@ -502,11 +571,32 @@ void ocf_req_forward_core_discard(struct ocf_request *req, uint64_t addr,
ocf_volume_forward_discard(volume, token, addr, bytes);
}
struct ocf_io *ocf_forward_get_io(ocf_forward_token_t token)
ctx_data_t *ocf_forward_get_data(ocf_forward_token_t token)
{
struct ocf_request *req = (struct ocf_request *)(token & ~1);
return &req->ioi.io;
return req->data;
}
ocf_queue_t ocf_forward_get_io_queue(ocf_forward_token_t token)
{
struct ocf_request *req = (struct ocf_request *)(token & ~1);
return req->io_queue;
}
uint8_t ocf_forward_get_io_class(ocf_forward_token_t token)
{
struct ocf_request *req = (struct ocf_request *)(token & ~1);
return req->io.io_class;
}
uint64_t ocf_forward_get_flags(ocf_forward_token_t token)
{
struct ocf_request *req = (struct ocf_request *)(token & ~1);
return (token & 1) ? 0 : req->flags;
}
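Note: a forward token is just the request pointer with bit 0 reserved as a tag (request allocations are at least pointer-aligned, so the bit is free); every accessor masks it off before dereferencing, and ocf_forward_get_flags() reports no flags for tagged tokens. The encoding, sketched (which side sets the tag is inferred from the accessors and is an assumption):

	/* encode: pointer plus a one-bit tag */
	ocf_forward_token_t token = (ocf_forward_token_t)req | tag;

	/* decode, as done by the accessors above */
	struct ocf_request *req2 = (struct ocf_request *)(token & ~1);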
static inline void _ocf_forward_get(ocf_forward_token_t token)
@ -544,6 +634,27 @@ void ocf_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
ocf_volume_forward_discard(volume, token, addr, bytes);
}
void ocf_forward_write_zeros(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes)
{
_ocf_forward_get(token);
ocf_volume_forward_write_zeros(volume, token, addr, bytes);
}
void ocf_forward_metadata(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
_ocf_forward_get(token);
ocf_volume_forward_metadata(volume, token, dir, addr, bytes, offset);
}
void ocf_forward_io_simple(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes)
{
_ocf_forward_get(token);
ocf_volume_forward_io_simple(volume, token, dir, addr, bytes);
}
void ocf_forward_end(ocf_forward_token_t token, int error)
{
struct ocf_request *req = ocf_req_forward_token_to_req(token);

View File

@ -122,14 +122,60 @@ struct ocf_req_discard_info {
struct ocf_request;
typedef int (*ocf_req_cb)(struct ocf_request *req);
struct ocf_request_io {
/**
* @brief OCF IO destination class
*/
uint8_t io_class;
/**
* @brief OCF IO reference count
*/
env_atomic ref_count;
/**
* @brief Front volume handle
*/
ocf_volume_t volume;
/**
* @brief OCF IO start function
*/
ocf_start_io_t start;
/**
* @brief OCF IO private 1
*/
void *priv1;
/**
* @brief OCF IO private 2
*/
void *priv2;
/**
* @brief OCF IO handle function
*/
ocf_handle_io_t handle;
/**
* @brief OCF IO completion function
*/
ocf_end_io_t end;
};
/**
* @brief OCF IO request
*/
struct ocf_request {
struct ocf_io_internal ioi;
/*!< OCF IO associated with request */
/* This struct is temporary. It will be consolidated with ocf_request */
struct ocf_request_io io;
ocf_req_end_t cache_forward_end;
union {
ocf_req_end_t cache_forward_end;
ocf_req_end_t volume_forward_end;
};
ocf_req_end_t core_forward_end;
env_atomic cache_remaining;
env_atomic core_remaining;
@ -177,27 +223,30 @@ struct ocf_request {
ctx_data_t *cp_data;
/*!< Copy of request data */
uint64_t byte_position;
/*!< LBA byte position of request in core domain */
uint64_t core_line_first;
/*! First core line */
uint64_t core_line_last;
/*! Last core line */
uint32_t byte_length;
/*!< Byte length of OCF request */
uint32_t core_line_count;
/*! Core line count */
uint32_t alloc_core_line_count;
/*! Number of core lines at time of request allocation */
uint64_t addr;
/*!< LBA byte position of request in core domain */
uint32_t bytes;
/*!< Byte length of OCF request */
uint32_t offset;
/*!< Offset into request data */
uint64_t flags;
/*!< IO flags */
int error;
/*!< This field indicates an error for the OCF request */
@ -553,6 +602,17 @@ static inline struct ocf_request *ocf_req_forward_token_to_req(ocf_forward_token
return (struct ocf_request *)(token & ~1);
}
void ocf_req_forward_volume_io(struct ocf_request *req, ocf_volume_t volume,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset);
void ocf_req_forward_volume_flush(struct ocf_request *req, ocf_volume_t volume);
void ocf_req_forward_volume_discard(struct ocf_request *req,
ocf_volume_t volume, uint64_t addr, uint64_t bytes);
void ocf_req_forward_volume_io_simple(struct ocf_request *req,
ocf_volume_t volume, int dir, uint64_t addr, uint64_t bytes);
void ocf_req_forward_cache_io(struct ocf_request *req, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset);
@ -561,6 +621,12 @@ void ocf_req_forward_cache_flush(struct ocf_request *req);
void ocf_req_forward_cache_discard(struct ocf_request *req, uint64_t addr,
uint64_t bytes);
void ocf_req_forward_cache_write_zeros(struct ocf_request *req, uint64_t addr,
uint64_t bytes);
void ocf_req_forward_cache_metadata(struct ocf_request *req, int dir,
uint64_t addr, uint64_t bytes, uint64_t offset);
void ocf_req_forward_core_io(struct ocf_request *req, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset);

View File

@ -209,7 +209,7 @@ bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
env_rwlock_read_lock(&req->io_queue->seq_cutoff->lock);
result = ocf_core_seq_cutoff_base_check(req->io_queue->seq_cutoff,
req->byte_position, req->byte_length, req->rw,
req->addr, req->bytes, req->rw,
threshold, &queue_stream);
env_rwlock_read_unlock(&req->io_queue->seq_cutoff->lock);
if (queue_stream)
@ -217,7 +217,7 @@ bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
env_rwlock_read_lock(&core->seq_cutoff->lock);
result = ocf_core_seq_cutoff_base_check(core->seq_cutoff,
req->byte_position, req->byte_length, req->rw,
req->addr, req->bytes, req->rw,
threshold, &core_stream);
env_rwlock_read_unlock(&core->seq_cutoff->lock);
@ -309,7 +309,7 @@ void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
if (policy == ocf_seq_cutoff_policy_never)
return;
if (req->byte_length >= threshold && promote_on_threshold)
if (req->bytes >= threshold && promote_on_threshold)
promote = true;
if (promotion_count == 1)
@ -318,7 +318,7 @@ void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
if (req->seq_cutoff_core || promote) {
env_rwlock_write_lock(&core->seq_cutoff->lock);
stream = ocf_core_seq_cutoff_base_update(core->seq_cutoff,
req->byte_position, req->byte_length, req->rw,
req->addr, req->bytes, req->rw,
promote);
env_rwlock_write_unlock(&core->seq_cutoff->lock);
@ -328,7 +328,7 @@ void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
env_rwlock_write_lock(&req->io_queue->seq_cutoff->lock);
stream = ocf_core_seq_cutoff_base_update(req->io_queue->seq_cutoff,
req->byte_position, req->byte_length, req->rw, true);
req->addr, req->bytes, req->rw, true);
env_rwlock_write_unlock(&req->io_queue->seq_cutoff->lock);
if (stream->bytes >= threshold && promote_on_threshold)

View File

@ -420,8 +420,9 @@ static int to_packet_idx(uint32_t len)
return IO_PACKET_SIZE;
}
void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io)
void ocf_core_update_stats(ocf_core_t core, ocf_io_t io)
{
struct ocf_request *req = ocf_io_to_req(io);
struct ocf_counters_debug *stats;
int idx;
@ -430,14 +431,14 @@ void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io)
stats = &core->counters->debug_stats;
idx = to_packet_idx(io->bytes);
if (io->dir == OCF_WRITE)
idx = to_packet_idx(req->bytes);
if (req->rw == OCF_WRITE)
env_atomic64_inc(&stats->write_size[idx]);
else
env_atomic64_inc(&stats->read_size[idx]);
idx = to_align_idx(io->addr);
if (io->dir == OCF_WRITE)
idx = to_align_idx(req->addr);
if (req->rw == OCF_WRITE)
env_atomic64_inc(&stats->write_align[idx]);
else
env_atomic64_inc(&stats->read_align[idx]);
@ -445,6 +446,6 @@ void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io)
#else
void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io) {}
void ocf_core_update_stats(ocf_core_t core, ocf_io_t io) {}
#endif

View File

@ -247,6 +247,6 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats);
* @param[in] core to which request pertains
* @param[in] io request for which stats are being updated
*/
void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io);
void ocf_core_update_stats(ocf_core_t core, ocf_io_t io);
#endif

View File

@ -7,6 +7,7 @@
#include "ocf/ocf.h"
#include "ocf_priv.h"
#include "ocf_volume_priv.h"
#include "ocf_core_priv.h"
#include "ocf_request.h"
#include "ocf_io_priv.h"
#include "ocf_env.h"
@ -39,8 +40,8 @@ int ocf_volume_type_init(struct ocf_volume_type **type,
struct ocf_volume_type *new_type;
int ret;
if (!ops->submit_io || !ops->open || !ops->close ||
!ops->get_max_io_size || !ops->get_length) {
if (!ops->open || !ops->close || !ops->get_max_io_size ||
!ops->get_length) {
return -OCF_ERR_INVAL;
}
@ -57,7 +58,7 @@ int ocf_volume_type_init(struct ocf_volume_type **type,
allocator_type = ocf_io_allocator_get_type_default();
ret = ocf_io_allocator_init(&new_type->allocator, allocator_type,
properties->io_priv_size, properties->name);
properties->name);
if (ret)
goto err;
@ -273,61 +274,72 @@ int ocf_volume_is_atomic(ocf_volume_t volume)
return volume->type->properties->caps.atomic_writes;
}
struct ocf_io *ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
ocf_io_t ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags)
{
return ocf_io_new(volume, queue, addr, bytes, dir, io_class, flags);
}
void ocf_volume_submit_io(struct ocf_io *io)
static void ocf_volume_req_forward_complete(struct ocf_request *req, int error)
{
ocf_volume_t volume = ocf_io_get_volume(io);
ENV_BUG_ON(!volume->type->properties->ops.submit_io);
if (!volume->opened) {
io->end(io, -OCF_ERR_IO);
return;
}
volume->type->properties->ops.submit_io(io);
ocf_io_end_func(req, error);
}
void ocf_volume_submit_flush(struct ocf_io *io)
void ocf_volume_submit_io(ocf_io_t io)
{
struct ocf_request *req = ocf_io_to_req(io);
ocf_volume_t volume = ocf_io_get_volume(io);
ENV_BUG_ON(!volume->type->properties->ops.submit_flush);
if (!volume->opened) {
io->end(io, -OCF_ERR_IO);
ocf_io_end_func(io, -OCF_ERR_IO);
return;
}
if (!volume->type->properties->ops.submit_flush) {
ocf_io_end(io, 0);
return;
if (likely(volume->type->properties->ops.submit_io)) {
volume->type->properties->ops.submit_io(io);
} else {
req->volume_forward_end = ocf_volume_req_forward_complete;
ocf_req_forward_volume_io(req, volume, req->rw, req->addr,
req->bytes, req->offset);
}
volume->type->properties->ops.submit_flush(io);
}
void ocf_volume_submit_discard(struct ocf_io *io)
void ocf_volume_submit_flush(ocf_io_t io)
{
struct ocf_request *req = ocf_io_to_req(io);
ocf_volume_t volume = ocf_io_get_volume(io);
if (!volume->opened) {
io->end(io, -OCF_ERR_IO);
ocf_io_end_func(io, -OCF_ERR_IO);
return;
}
if (!volume->type->properties->ops.submit_discard) {
ocf_io_end(io, 0);
if (likely(volume->type->properties->ops.submit_flush)) {
volume->type->properties->ops.submit_flush(io);
} else {
req->volume_forward_end = ocf_volume_req_forward_complete;
ocf_req_forward_volume_flush(req, volume);
}
}
void ocf_volume_submit_discard(ocf_io_t io)
{
struct ocf_request *req = ocf_io_to_req(io);
ocf_volume_t volume = ocf_io_get_volume(io);
if (!volume->opened) {
ocf_io_end_func(io, -OCF_ERR_IO);
return;
}
volume->type->properties->ops.submit_discard(io);
if (likely(volume->type->properties->ops.submit_discard)) {
volume->type->properties->ops.submit_discard(io);
} else {
req->volume_forward_end = ocf_volume_req_forward_complete;
ocf_req_forward_volume_discard(req, volume,
req->addr, req->bytes);
}
}
void ocf_volume_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
@ -370,6 +382,53 @@ void ocf_volume_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
addr, bytes);
}
void ocf_volume_forward_write_zeros(ocf_volume_t volume,
ocf_forward_token_t token, uint64_t addr, uint64_t bytes)
{
ENV_BUG_ON(!volume->type->properties->ops.forward_write_zeros);
if (!volume->opened) {
ocf_forward_end(token, -OCF_ERR_IO);
return;
}
volume->type->properties->ops.forward_write_zeros(volume, token,
addr, bytes);
}
void ocf_volume_forward_metadata(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
ENV_BUG_ON(!volume->type->properties->ops.forward_metadata);
if (!volume->opened) {
ocf_forward_end(token, -OCF_ERR_IO);
return;
}
volume->type->properties->ops.forward_metadata(volume, token,
dir, addr, bytes, offset);
}
void ocf_volume_forward_io_simple(ocf_volume_t volume,
ocf_forward_token_t token, int dir,
uint64_t addr, uint64_t bytes)
{
if (!volume->type->properties->ops.forward_io_simple) {
ocf_volume_forward_io(volume, token, dir, addr, bytes, 0);
return;
}
if (!volume->opened) {
ocf_forward_end(token, -OCF_ERR_IO);
return;
}
volume->type->properties->ops.forward_io_simple(volume, token,
dir, addr, bytes);
}
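Note: a volume type may leave forward_io_simple unset; the range is then routed through the regular forward_io callback with a zero data offset, so only volumes with a cheaper single-range path need to implement it. A minimal ops table relying on that fallback (illustrative fragment):

	.ops = {
		.forward_io = myvolume_forward_io,
		/* .forward_io_simple omitted: falls back to forward_io, offset 0 */
	},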
int ocf_volume_open(ocf_volume_t volume, void *volume_params)
{
int ret;

View File

@ -55,7 +55,17 @@ void ocf_volume_forward_flush(ocf_volume_t volume, ocf_forward_token_t token);
void ocf_volume_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
uint64_t addr, uint64_t bytes);
static inline void ocf_volume_submit_metadata(struct ocf_io *io)
void ocf_volume_forward_write_zeros(ocf_volume_t volume,
ocf_forward_token_t token, uint64_t addr, uint64_t bytes);
void ocf_volume_forward_metadata(ocf_volume_t volume, ocf_forward_token_t token,
int dir, uint64_t addr, uint64_t bytes, uint64_t offset);
void ocf_volume_forward_io_simple(ocf_volume_t volume,
ocf_forward_token_t token, int dir,
uint64_t addr, uint64_t bytes);
static inline void ocf_volume_submit_metadata(ocf_io_t io)
{
ocf_volume_t volume = ocf_io_get_volume(io);
@ -64,7 +74,7 @@ static inline void ocf_volume_submit_metadata(struct ocf_io *io)
volume->type->properties->ops.submit_metadata(io);
}
static inline void ocf_volume_submit_write_zeroes(struct ocf_io *io)
static inline void ocf_volume_submit_write_zeroes(ocf_io_t io)
{
ocf_volume_t volume = ocf_io_get_volume(io);

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -192,17 +193,16 @@ static inline void ocf_purge_map_info(struct ocf_request *req)
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
start_bit = (BYTES_TO_SECTORS(req->addr)
% ocf_line_sectors(cache));
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(req->byte_position +
req->byte_length - 1) %
ocf_line_sectors(cache);
end_bit = (BYTES_TO_SECTORS(req->addr +
req->bytes - 1) %
ocf_line_sectors(cache));
}
ocf_metadata_start_collision_shared_access(cache, map[map_idx].
@ -218,8 +218,8 @@ static inline
uint8_t ocf_map_line_start_sector(struct ocf_request *req, uint32_t line)
{
if (line == 0) {
return BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(req->cache);
return (BYTES_TO_SECTORS(req->addr)
% ocf_line_sectors(req->cache));
}
return 0;
@ -229,9 +229,8 @@ static inline
uint8_t ocf_map_line_end_sector(struct ocf_request *req, uint32_t line)
{
if (line == req->core_line_count - 1) {
return BYTES_TO_SECTORS(req->byte_position +
req->byte_length - 1) %
ocf_line_sectors(req->cache);
return (BYTES_TO_SECTORS(req->addr + req->bytes - 1) %
ocf_line_sectors(req->cache));
}
return ocf_line_end_sector(req->cache);

View File

@ -139,11 +139,6 @@ static struct ocf_request *_ocf_cleaner_alloc_slave_req(
/* Slave request contains reference to master */
req->master_io_req = master;
/* One more additional slave request, increase global counter
* of requests count
*/
env_atomic_inc(&master->master_remaining);
OCF_DEBUG_PARAM(req->cache,
"New slave request, count = %u,all requests count = %d",
count, env_atomic_read(&master->master_remaining));
@ -281,38 +276,22 @@ static void _ocf_cleaner_finish_req(struct ocf_request *req)
_ocf_cleaner_dealloc_req(req);
}
static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
static void _ocf_cleaner_flush_cache_end(struct ocf_request *req, int error)
{
struct ocf_request *req = io->priv1;
if (error) {
if (error)
ocf_metadata_error(req->cache);
req->error = error;
}
OCF_DEBUG_MSG(req->cache, "Cache flush finished");
_ocf_cleaner_finish_req(req);
ocf_io_put(io);
}
static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
{
struct ocf_io *io;
OCF_DEBUG_TRACE(req->cache);
io = ocf_new_cache_io(req->cache, req->io_queue, 0, 0, OCF_WRITE, 0, 0);
if (!io) {
ocf_metadata_error(req->cache);
req->error = -OCF_ERR_NO_MEM;
return -OCF_ERR_NO_MEM;
}
ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);
ocf_volume_submit_flush(io);
req->cache_forward_end = _ocf_cleaner_flush_cache_end;
ocf_req_forward_cache_flush(req);
return 0;
}
@ -361,7 +340,6 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)
if (metadata_test_dirty(cache, cache_line)) {
ocf_metadata_get_core_and_part_id(cache, cache_line,
&core_id, &req->part_id);
req->core = &cache->core[core_id];
ocf_metadata_start_collision_shared_access(cache,
cache_line);
@ -384,30 +362,25 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)
return 0;
}
static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
struct ocf_request *req, int error)
static void _ocf_cleaner_flush_core_end(struct ocf_request *req, int error)
{
uint32_t i;
struct ocf_map_info *iter = req->map;
uint32_t i;
OCF_DEBUG_MSG(req->cache, "Core flush finished");
if (error) {
/* Flush error, set error for all cache line of this core */
/* Flush error, set error for all cleaned cache lines */
for (i = 0; i < req->core_line_count; i++, iter++) {
if (!iter->flush)
continue;
if (iter->core_id == map->core_id)
iter->invalid = true;
iter->invalid = true;
}
_ocf_cleaner_set_error(req);
}
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_MSG(req->cache, "Core flush finished");
/*
* All core writes done, switch to post cleaning activities
*/
@ -415,103 +388,45 @@ static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
static int _ocf_cleaner_fire_flush_core(struct ocf_request *req)
{
_ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error);
req->core_forward_end = _ocf_cleaner_flush_core_end;
ocf_io_put(io);
}
static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
{
uint32_t i;
ocf_core_id_t core_id = OCF_CORE_MAX;
struct ocf_cache *cache = req->cache;
struct ocf_map_info *iter = req->map;
ocf_core_t core;
struct ocf_io *io;
OCF_DEBUG_TRACE(req->cache);
/* Protect IO completion race */
env_atomic_set(&req->req_remaining, 1);
/* Submit flush requests */
for (i = 0; i < req->core_line_count; i++, iter++) {
if (iter->invalid) {
/* IO error, skip this item */
continue;
}
if (!iter->flush)
continue;
if (core_id == iter->core_id)
continue;
core_id = iter->core_id;
env_atomic_inc(&req->req_remaining);
core = ocf_cache_get_core(cache, core_id);
io = ocf_new_core_io(core, req->io_queue, 0, 0,
OCF_WRITE, 0, 0);
if (!io) {
_ocf_cleaner_flush_cores_io_end(iter, req, -OCF_ERR_NO_MEM);
continue;
}
ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);
ocf_volume_submit_flush(io);
}
/* Protect IO completion race */
_ocf_cleaner_flush_cores_io_end(NULL, req, 0);
/* Submit flush request */
ocf_req_forward_core_flush(req);
return 0;
}
static void _ocf_cleaner_core_io_end(struct ocf_request *req)
static void _ocf_cleaner_core_io_end(struct ocf_request *req, int error)
{
if (env_atomic_dec_return(&req->req_remaining))
return;
struct ocf_map_info *iter = req->map;
uint32_t i;
OCF_DEBUG_MSG(req->cache, "Core writes finished");
/*
* All cache read requests done, now we can submit writes to cores,
* Move processing to thread, where IO will be (and can be) submitted
*/
req->engine_handler = _ocf_cleaner_fire_flush_cores;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
{
struct ocf_map_info *map = io->priv1;
struct ocf_request *req = io->priv2;
ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
if (error) {
map->invalid |= 1;
for (i = 0; i < req->core_line_count; i++, iter++) {
if (!iter->flush)
continue;
iter->invalid = true;
ocf_core_stats_core_error_update(req->core, OCF_WRITE);
}
_ocf_cleaner_set_error(req);
ocf_core_stats_core_error_update(core, OCF_WRITE);
}
_ocf_cleaner_core_io_end(req);
ocf_io_put(io);
req->engine_handler = _ocf_cleaner_fire_flush_core;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
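Note: cleaner completion handlers no longer submit the next stage inline; they record it in engine_handler and push the request back to its queue, so all submission happens in queue context. The recurring shape (next_step is whichever fire function follows):

	/* in a *_end() handler: record the next step, defer to the queue */
	req->engine_handler = next_step;	/* e.g. _ocf_cleaner_fire_flush_core */
	ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);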
static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
struct ocf_map_info *iter, uint64_t begin, uint64_t end)
{
uint64_t addr, offset;
int err;
ocf_cache_t cache = req->cache;
struct ocf_io *io;
ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
iter->coll_idx);
@ -520,36 +435,15 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
offset = (ocf_line_size(cache) * iter->hash)
+ SECTORS_TO_BYTES(begin);
io = ocf_new_core_io(core, req->io_queue, addr,
SECTORS_TO_BYTES(end - begin), OCF_WRITE, part_id, 0);
if (!io)
goto error;
err = ocf_io_set_data(io, req->data, offset);
if (err) {
ocf_io_put(io);
goto error;
}
ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);
ocf_core_stats_core_block_update(core, part_id, OCF_WRITE,
ocf_core_stats_core_block_update(req->core, part_id, OCF_WRITE,
SECTORS_TO_BYTES(end - begin));
OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
"sector = %llu, count = %llu", iter->core_line, begin,
end - begin);
/* Increase IO counter to be processed */
env_atomic_inc(&req->req_remaining);
/* Send IO */
ocf_volume_submit_io(io);
return;
error:
iter->invalid = true;
_ocf_cleaner_set_error(req);
ocf_req_forward_core_io(req, OCF_WRITE, addr,
SECTORS_TO_BYTES(end - begin), offset);
}
static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
@ -600,10 +494,10 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
/* Protect IO completion race */
env_atomic_set(&req->req_remaining, 1);
req->core_forward_end = _ocf_cleaner_core_io_end;
/* Submits writes to the core */
ocf_req_forward_core_get(req);
for (i = 0; i < req->core_line_count; i++) {
iter = &(req->map[i]);
@ -625,43 +519,32 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req)
req->lock_idx, req->map[i].core_id,
req->map[i].core_line);
}
/* Protect IO completion race */
_ocf_cleaner_core_io_end(req);
ocf_req_forward_core_put(req);
return 0;
}
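
The get/put pair replaces the manual req_remaining accounting: the initial get holds the forward counter so completions racing with submission cannot fire core_forward_end early, and the final put releases it once the loop has issued everything. A minimal sketch of the bracket; addr_of(), bytes_of() and offset_of() are hypothetical helpers standing in for the per-line lookups above:

static int my_fire_core(struct ocf_request *req)
{
        uint32_t i;

        req->core_forward_end = _ocf_cleaner_core_io_end;

        ocf_req_forward_core_get(req);  /* hold completion until the loop ends */
        for (i = 0; i < req->core_line_count; i++) {
                ocf_req_forward_core_io(req, OCF_WRITE, addr_of(req, i),
                                bytes_of(req, i), offset_of(req, i));
        }
        ocf_req_forward_core_put(req);  /* drop the guard reference */

        return 0;
}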
static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
static void _ocf_cleaner_cache_io_end(struct ocf_request *req, int error)
{
if (env_atomic_dec_return(&req->req_remaining))
return;
/*
 * All cache read requests are done, now we can submit writes to the
 * cores. Move processing to a thread where IO can be submitted.
 */
req->engine_handler = _ocf_cleaner_fire_core;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
struct ocf_map_info *iter = req->map;
uint32_t i;
OCF_DEBUG_MSG(req->cache, "Cache reads finished");
}
static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
{
struct ocf_map_info *map = io->priv1;
struct ocf_request *req = io->priv2;
ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
if (error) {
map->invalid |= 1;
for (i = 0; i < req->core_line_count; i++, iter++) {
if (!iter->flush)
continue;
iter->invalid = true;
ocf_core_stats_cache_error_update(req->core, OCF_READ);
}
_ocf_cleaner_set_error(req);
ocf_core_stats_cache_error_update(core, OCF_READ);
}
_ocf_cleaner_cache_io_end(req);
ocf_io_put(io);
req->engine_handler = _ocf_cleaner_fire_core;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
/*
@ -671,21 +554,16 @@ static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
static int _ocf_cleaner_fire_cache(struct ocf_request *req)
{
ocf_cache_t cache = req->cache;
ocf_core_t core;
uint32_t i;
struct ocf_map_info *iter = req->map;
uint64_t addr, offset;
ocf_part_id_t part_id;
struct ocf_io *io;
int err;
/* Protect IO completion race */
env_atomic_set(&req->req_remaining, 1);
req->cache_forward_end = _ocf_cleaner_cache_io_end;
req->bytes = ocf_line_size(cache);
ocf_req_forward_cache_get(req);
for (i = 0; i < req->core_line_count; i++, iter++) {
core = ocf_cache_get_core(cache, iter->core_id);
if (!core)
continue;
if (!iter->flush)
continue;
@ -700,35 +578,15 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)
part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);
io = ocf_new_cache_io(cache, req->io_queue,
addr, ocf_line_size(cache),
OCF_READ, part_id, 0);
if (!io) {
/* Allocation error */
iter->invalid = true;
_ocf_cleaner_set_error(req);
continue;
}
ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
err = ocf_io_set_data(io, req->data, offset);
if (err) {
ocf_io_put(io);
iter->invalid = true;
_ocf_cleaner_set_error(req);
continue;
}
ocf_core_stats_cache_block_update(core, part_id, OCF_READ,
ocf_core_stats_cache_block_update(req->core, part_id, OCF_READ,
ocf_line_size(cache));
env_atomic_inc(&req->req_remaining);
req->addr = iter->core_line * ocf_line_size(cache);
ocf_volume_submit_io(io);
ocf_req_forward_cache_io(req, OCF_READ, addr,
ocf_line_size(cache), offset);
}
/* Protect IO completion race */
_ocf_cleaner_cache_io_end(req);
ocf_req_forward_cache_put(req);
return 0;
}
@ -760,17 +618,23 @@ static int _ocf_cleaner_check_map(struct ocf_request *req)
return 0;
}
static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t count)
static int _ocf_cleaner_do_fire(struct ocf_request *req)
{
struct ocf_request *master;
int result;
req->engine_handler = _ocf_cleaner_check_map;
req->core_line_count = count;
req->addr = req->core_line_count * ocf_line_size(req->cache);
master = (req->master_io_req_type == ocf_cleaner_req_type_master) ?
req : req->master_io_req;
/* Handle cache line locks */
result = _ocf_cleaner_cache_line_lock(req);
if (result >= 0) {
env_atomic_inc(&master->master_remaining);
if (result == OCF_LOCK_ACQUIRED) {
OCF_DEBUG_MSG(req->cache, "Lock acquired");
_ocf_cleaner_check_map(req);
@ -789,24 +653,62 @@ static void _ocf_cleaner_fire_error(struct ocf_request *master,
struct ocf_request *req, int err)
{
master->error = err;
_ocf_cleaner_complete_req(req);
_ocf_cleaner_dealloc_req(req);
}
uint32_t ocf_cleaner_populate_req(struct ocf_request *req, uint32_t curr,
const struct ocf_cleaner_attribs *attribs)
{
uint32_t count = attribs->count;
uint32_t map_max = req->core_line_count, map_curr;
ocf_cache_line_t cache_line;
uint64_t core_sector;
ocf_core_id_t core_id, last_core_id = OCF_CORE_ID_INVALID;
for (map_curr = 0; map_curr < map_max && curr < count; curr++) {
if (attribs->getter(req->cache, attribs->getter_context,
curr, &cache_line)) {
continue;
}
/* Get mapping info */
ocf_metadata_get_core_info(req->cache, cache_line,
&core_id, &core_sector);
if (last_core_id == OCF_CORE_ID_INVALID) {
last_core_id = core_id;
req->core = ocf_cache_get_core(req->cache, core_id);
}
if (core_id != last_core_id)
break;
req->map[map_curr].core_id = core_id;
req->map[map_curr].core_line = core_sector;
req->map[map_curr].coll_idx = cache_line;
req->map[map_curr].status = LOOKUP_HIT;
req->map[map_curr].hash = map_curr;
map_curr++;
}
req->core_line_count = map_curr;
return curr;
}
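
ocf_cleaner_populate_req() pulls cache lines from the attribs getter until either the map is full or the core id changes, so every fired request targets exactly one core; the returned index lets the caller resume where the previous request stopped. A toy getter under that contract; struct my_getter_ctx and my_getter are hypothetical:

struct my_getter_ctx {
        ocf_cache_line_t *lines;        /* cache lines scheduled for cleaning */
        uint32_t count;
};

static int my_getter(ocf_cache_t cache, void *getter_context,
                uint32_t item, ocf_cache_line_t *line)
{
        struct my_getter_ctx *ctx = getter_context;

        if (item >= ctx->count)
                return -1;      /* nonzero: skip this item */

        *line = ctx->lines[item];
        return 0;
}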
/*
* cleaner - Main function
*/
void ocf_cleaner_fire(struct ocf_cache *cache,
const struct ocf_cleaner_attribs *attribs)
{
uint32_t i, i_out = 0, count = attribs->count;
uint32_t count = attribs->count, curr = 0;
/* Max cache lines to be cleaned with one request: 1024 if more than 4k
 * lines are to be flushed, otherwise 128. For large cleaning operations
 * 1024 is the optimal number, but for smaller ones it is too large to
 * benefit from overlapping cleaning requests.
 */
uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
ocf_cache_line_t cache_line;
/* it is possible that more than one cleaning request will be generated
* for each cleaning order, thus multiple allocations. At the end of
* loop, req is set to zero and NOT deallocated, as deallocation is
@ -817,8 +719,6 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
*/
struct ocf_request *req = NULL, *master;
int err;
ocf_core_id_t core_id;
uint64_t core_sector;
/* Allocate master request */
master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
@ -827,70 +727,41 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
return;
}
req = master;
env_atomic_inc(&master->master_remaining);
for (i = 0; i < count; i++) {
/* when request hasn't yet been allocated or is just issued */
if (unlikely(!req)) {
if (max > count - i) {
/* less than max left */
max = count - i;
}
req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
if (unlikely(!req)) {
master->error = -OCF_ERR_NO_MEM;
break;
}
}
if (attribs->getter(cache, attribs->getter_context,
i, &cache_line)) {
OCF_DEBUG_MSG(cache, "Skip");
continue;
}
/* Get mapping info */
ocf_metadata_get_core_info(cache, cache_line, &core_id,
&core_sector);
if (unlikely(!cache->core[core_id].opened)) {
OCF_DEBUG_MSG(cache, "Core object inactive");
continue;
}
req->map[i_out].core_id = core_id;
req->map[i_out].core_line = core_sector;
req->map[i_out].coll_idx = cache_line;
req->map[i_out].status = LOOKUP_HIT;
req->map[i_out].hash = i_out;
i_out++;
if (max == i_out) {
err = _ocf_cleaner_do_fire(req, i_out);
if (err) {
_ocf_cleaner_fire_error(master, req, err);
req = NULL;
break;
}
i_out = 0;
req = NULL;
}
curr = ocf_cleaner_populate_req(master, curr, attribs);
if (unlikely(master->core_line_count == 0)) {
_ocf_cleaner_dealloc_req(master);
goto out;
}
if (req && i_out) {
err = _ocf_cleaner_do_fire(req, i_out);
if (err)
err = _ocf_cleaner_do_fire(master);
if (err) {
_ocf_cleaner_fire_error(master, master, err);
goto out;
}
while (curr < count) {
max = OCF_MIN(max, count - curr);
req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
if (!req) {
master->error = -OCF_ERR_NO_MEM;
break;
}
curr = ocf_cleaner_populate_req(req, curr, attribs);
if (unlikely(req->core_line_count == 0)) {
_ocf_cleaner_dealloc_req(req);
break;
}
err = _ocf_cleaner_do_fire(req);
if (err) {
_ocf_cleaner_fire_error(master, req, err);
req = NULL;
break;
}
}
out:
_ocf_cleaner_complete_req(master);
if (req && !i_out)
_ocf_cleaner_dealloc_req(req);
}
static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache,

View File

@ -41,10 +41,6 @@ struct ocf_cleaner_attribs {
/*!< Getter for collecting cache lines which will be cleaned */
void *getter_context;
/*!< Context for getting cache lines */
uint32_t getter_item;
/*!< Additional variable that can be used by cleaner call
* to iterate over items
*/
ocf_queue_t io_queue;
};

View File

@ -12,134 +12,116 @@
#include "utils_io.h"
#include "utils_cache_line.h"
struct ocf_submit_volume_context {
env_atomic req_remaining;
int error;
struct ocf_submit_cache_context {
ocf_submit_end_t cmpl;
void *priv;
};
static void _ocf_volume_flush_end(struct ocf_io *io, int error)
static void ocf_submit_cache_end(struct ocf_request *req, int error)
{
ocf_submit_end_t cmpl = io->priv1;
struct ocf_submit_cache_context *context = req->priv;
cmpl(io->priv2, error);
ocf_io_put(io);
context->cmpl(context->priv, error);
env_vfree(context);
ocf_req_put(req);
}
void ocf_submit_volume_flush(ocf_volume_t volume,
void ocf_submit_cache_flush(ocf_cache_t cache,
ocf_submit_end_t cmpl, void *priv)
{
struct ocf_io *io;
struct ocf_submit_cache_context *context;
struct ocf_request *req;
io = ocf_volume_new_io(volume, NULL, 0, 0, OCF_WRITE, 0, 0);
if (!io)
context = env_vzalloc(sizeof(*context));
if (!context)
OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
ocf_io_set_cmpl(io, cmpl, priv, _ocf_volume_flush_end);
context->cmpl = cmpl;
context->priv = priv;
ocf_volume_submit_flush(io);
}
static void ocf_submit_volume_end(struct ocf_io *io, int error)
{
struct ocf_submit_volume_context *context = io->priv1;
if (error)
context->error = error;
ocf_io_put(io);
if (env_atomic_dec_return(&context->req_remaining))
req = ocf_req_new_mngt(cache, cache->mngt_queue);
if (!req) {
cmpl(priv, -OCF_ERR_NO_MEM);
env_vfree(context);
return;
}
context->cmpl(context->priv, context->error);
env_vfree(context);
req->cache_forward_end = ocf_submit_cache_end;
req->priv = context;
ocf_req_forward_cache_flush(req);
}
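
The public contract of the helper is unchanged; only the transport underneath moved from a raw io to a management request forwarded to the cache volume. Caller-side usage therefore stays trivial. A sketch, with my_flush_done and my_flush_example as hypothetical names:

static void my_flush_done(void *priv, int error)
{
        /* 0 on success, -OCF_ERR_NO_MEM if the request allocation failed */
        (void)priv;
}

static void my_flush_example(ocf_cache_t cache)
{
        ocf_submit_cache_flush(cache, my_flush_done, NULL);
}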
void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
void ocf_submit_cache_discard(ocf_cache_t cache, uint64_t addr,
uint64_t length, ocf_submit_end_t cmpl, void *priv)
{
struct ocf_submit_volume_context *context;
struct ocf_submit_cache_context *context;
uint64_t bytes;
uint64_t sector_mask = (1 << ENV_SECTOR_SHIFT) - 1;
uint64_t max_length = (uint32_t)~0 & ~sector_mask;
struct ocf_io *io;
struct ocf_request *req;
context = env_vzalloc(sizeof(*context));
if (!context)
OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
env_atomic_set(&context->req_remaining, 1);
context->cmpl = cmpl;
context->priv = priv;
req = ocf_req_new_mngt(cache, cache->mngt_queue);
if (!req) {
cmpl(priv, -OCF_ERR_NO_MEM);
env_vfree(context);
return;
}
req->cache_forward_end = ocf_submit_cache_end;
req->priv = context;
ocf_req_forward_cache_get(req);
while (length) {
bytes = OCF_MIN(length, max_length);
io = ocf_volume_new_io(volume, NULL, addr, bytes,
OCF_WRITE, 0, 0);
if (!io) {
context->error = -OCF_ERR_NO_MEM;
break;
}
env_atomic_inc(&context->req_remaining);
ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
ocf_volume_submit_discard(io);
ocf_req_forward_cache_discard(req, addr, bytes);
addr += bytes;
length -= bytes;
}
if (env_atomic_dec_return(&context->req_remaining))
return;
cmpl(priv, context->error);
env_vfree(context);
ocf_req_forward_cache_put(req);
}
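
max_length here is the largest 32-bit value rounded down to a whole sector, so a discard longer than roughly 4 GiB is forwarded in several chunks hanging off the same request, with the get/put pair keeping the completion from firing mid-loop. A standalone sketch of the split arithmetic, assuming 512-byte sectors (i.e. ENV_SECTOR_SHIFT == 9):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sector_mask = (1 << 9) - 1;                /* assumed 512B sectors */
        uint64_t max_length = (uint32_t)~0 & ~sector_mask;  /* 0xfffffe00 */
        uint64_t addr = 0, length = 5ULL << 30;             /* a 5 GiB discard */

        while (length) {
                uint64_t bytes = length < max_length ? length : max_length;
                printf("chunk: addr=%llu bytes=%llu\n",
                                (unsigned long long)addr,
                                (unsigned long long)bytes);
                addr += bytes;
                length -= bytes;
        }
        return 0;
}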
void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
void ocf_submit_cache_write_zeros(ocf_cache_t cache, uint64_t addr,
uint64_t length, ocf_submit_end_t cmpl, void *priv)
{
struct ocf_submit_volume_context *context;
struct ocf_submit_cache_context *context;
uint32_t bytes;
uint32_t max_length = ~((uint32_t)PAGE_SIZE - 1);
struct ocf_io *io;
struct ocf_request *req;
context = env_vzalloc(sizeof(*context));
if (!context)
OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
env_atomic_set(&context->req_remaining, 1);
context->cmpl = cmpl;
context->priv = priv;
req = ocf_req_new_mngt(cache, cache->mngt_queue);
if (!req) {
cmpl(priv, -OCF_ERR_NO_MEM);
env_vfree(context);
return;
}
ocf_req_forward_cache_get(req);
while (length) {
bytes = OCF_MIN(length, max_length);
io = ocf_volume_new_io(volume, NULL, addr, bytes,
OCF_WRITE, 0, 0);
if (!io) {
context->error = -OCF_ERR_NO_MEM;
break;
}
env_atomic_inc(&context->req_remaining);
ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
ocf_volume_submit_write_zeroes(io);
ocf_req_forward_cache_write_zeros(req, addr, bytes);
addr += bytes;
length -= bytes;
}
if (env_atomic_dec_return(&context->req_remaining))
return;
cmpl(priv, context->error);
env_vfree(context);
ocf_req_forward_cache_put(req);
}
struct ocf_submit_cache_page_context {
@ -149,12 +131,12 @@ struct ocf_submit_cache_page_context {
void *priv;
};
static void ocf_submit_cache_page_end(struct ocf_io *io, int error)
static void ocf_submit_cache_page_end(struct ocf_request *req, int error)
{
struct ocf_submit_cache_page_context *context = io->priv1;
ctx_data_t *data = ocf_io_get_data(io);
struct ocf_submit_cache_page_context *context = req->priv;
ctx_data_t *data = req->data;
if (io->dir == OCF_READ) {
if (req->rw == OCF_READ) {
ctx_data_rd_check(context->cache->owner, context->buffer,
data, PAGE_SIZE);
}
@ -162,7 +144,7 @@ static void ocf_submit_cache_page_end(struct ocf_io *io, int error)
context->cmpl(context->priv, error);
ctx_data_free(context->cache->owner, data);
env_vfree(context);
ocf_io_put(io);
ocf_req_put(req);
}
void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
@ -170,7 +152,7 @@ void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
{
struct ocf_submit_cache_page_context *context;
ctx_data_t *data;
struct ocf_io *io;
struct ocf_request *req;
int result = 0;
context = env_vmalloc(sizeof(*context));
@ -182,10 +164,10 @@ void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
context->cmpl = cmpl;
context->priv = priv;
io = ocf_new_cache_io(cache, NULL, addr, PAGE_SIZE, dir, 0, 0);
if (!io) {
req = ocf_req_new_mngt(cache, cache->mngt_queue);
if (!req) {
result = -OCF_ERR_NO_MEM;
goto err_io;
goto err_req;
}
data = ctx_data_alloc(cache->owner, 1);
@ -197,20 +179,21 @@ void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
if (dir == OCF_WRITE)
ctx_data_wr_check(cache->owner, data, buffer, PAGE_SIZE);
result = ocf_io_set_data(io, data, 0);
if (result)
goto err_set_data;
req->data = data;
ocf_io_set_cmpl(io, context, NULL, ocf_submit_cache_page_end);
req->cache_forward_end = ocf_submit_cache_page_end;
req->priv = context;
req->rw = dir;
req->addr = addr;
req->bytes = PAGE_SIZE;
ocf_req_forward_cache_io(req, dir, addr, PAGE_SIZE, 0);
ocf_volume_submit_io(io);
return;
err_set_data:
ctx_data_free(cache->owner, data);
err_data:
ocf_io_put(io);
err_io:
ocf_req_put(req);
err_req:
env_vfree(context);
cmpl(priv, result);
}
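
ocf_submit_cache_page() keeps its signature; internally it now allocates a management request, attaches a bounce buffer and forwards a single-page io. A caller-side sketch reading one page of metadata; my_page_done, my_read_page and page_buf are hypothetical:

static void my_page_done(void *priv, int error)
{
        /* on success the caller's PAGE_SIZE buffer holds the page data */
        (void)priv;
}

static void my_read_page(ocf_cache_t cache, uint64_t addr, void *page_buf)
{
        ocf_submit_cache_page(cache, addr, OCF_READ, page_buf,
                        my_page_done, NULL);
}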

View File

@ -11,19 +11,19 @@
typedef void (*ocf_submit_end_t)(void *priv, int error);
void ocf_submit_volume_flush(ocf_volume_t volume,
void ocf_submit_cache_flush(ocf_cache_t cache,
ocf_submit_end_t cmpl, void *priv);
void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
void ocf_submit_cache_discard(ocf_cache_t cache, uint64_t addr,
uint64_t length, ocf_submit_end_t cmpl, void *priv);
void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
void ocf_submit_cache_write_zeros(ocf_cache_t cache, uint64_t addr,
uint64_t length, ocf_submit_end_t cmpl, void *priv);
void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
void *buffer, ocf_submit_end_t cmpl, void *priv);
static inline struct ocf_io *ocf_new_cache_io(ocf_cache_t cache,
static inline ocf_io_t ocf_new_cache_io(ocf_cache_t cache,
ocf_queue_t queue, uint64_t addr, uint32_t bytes,
uint32_t dir, uint32_t io_class, uint64_t flags)
@ -32,7 +32,7 @@ static inline struct ocf_io *ocf_new_cache_io(ocf_cache_t cache,
addr, bytes, dir, io_class, flags);
}
static inline struct ocf_io *ocf_new_core_io(ocf_core_t core,
static inline ocf_io_t ocf_new_core_io(ocf_core_t core,
ocf_queue_t queue, uint64_t addr, uint32_t bytes,
uint32_t dir, uint32_t io_class, uint64_t flags)
{

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -11,8 +12,7 @@
typedef struct ocf_io_allocator *ocf_io_allocator_t;
struct ocf_io_allocator_ops {
int (*allocator_init)(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name);
int (*allocator_init)(ocf_io_allocator_t allocator, const char *name);
void (*allocator_deinit)(ocf_io_allocator_t allocator);
void *(*allocator_new)(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
@ -45,11 +45,10 @@ static inline void ocf_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
}
static inline int ocf_io_allocator_init(ocf_io_allocator_t allocator,
ocf_io_allocator_type_t type, uint32_t size, const char *name)
ocf_io_allocator_type_t type, const char *name)
{
allocator->type = type;
return allocator->type->ops.allocator_init(allocator, size, name);
return allocator->type->ops.allocator_init(allocator, name);
}
static inline void ocf_io_allocator_deinit(ocf_io_allocator_t allocator)

View File

@ -47,7 +47,7 @@ static int _ocf_parallelize_hndl(struct ocf_request *req)
int error;
error = parallelize->handle(parallelize, parallelize->priv,
req->byte_position, parallelize->shards_cnt);
req->addr, parallelize->shards_cnt);
env_atomic_cmpxchg(&parallelize->error, 0, error);
@ -113,7 +113,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize,
tmp_parallelize->reqs[i]->info.internal = true;
tmp_parallelize->reqs[i]->engine_handler =
_ocf_parallelize_hndl;
tmp_parallelize->reqs[i]->byte_position = i;
tmp_parallelize->reqs[i]->addr = i;
tmp_parallelize->reqs[i]->priv = tmp_parallelize;
}

View File

@ -1,39 +1,40 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf_io.h"
#include "ocf/ocf_core.h"
void ocf_io_set_cmpl_wrapper(struct ocf_io *io, void *context,
void ocf_io_set_cmpl_wrapper(ocf_io_t io, void *context,
void *context2, ocf_end_io_t fn)
{
ocf_io_set_cmpl(io, context, context2, fn);
}
void ocf_io_set_start_wrapper(struct ocf_io *io, ocf_start_io_t fn)
void ocf_io_set_start_wrapper(ocf_io_t io, ocf_start_io_t fn)
{
ocf_io_set_start(io, fn);
}
void ocf_io_set_handle_wrapper(struct ocf_io *io, ocf_handle_io_t fn)
void ocf_io_set_handle_wrapper(ocf_io_t io, ocf_handle_io_t fn)
{
ocf_io_set_handle(io, fn);
}
void ocf_core_submit_io_wrapper(struct ocf_io *io)
void ocf_core_submit_io_wrapper(ocf_io_t io)
{
ocf_core_submit_io(io);
}
void ocf_core_submit_flush_wrapper(struct ocf_io *io)
void ocf_core_submit_flush_wrapper(ocf_io_t io)
{
ocf_core_submit_flush(io);
}
void ocf_core_submit_discard_wrapper(struct ocf_io *io)
void ocf_core_submit_discard_wrapper(ocf_io_t io)
{
ocf_core_submit_discard(io);
}

View File

@ -39,7 +39,7 @@ class IoOps(Structure):
class Io(Structure):
START = CFUNCTYPE(None, c_void_p)
HANDLE = CFUNCTYPE(None, c_void_p, c_void_p)
END = CFUNCTYPE(None, c_void_p, c_int)
END = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int)
_instances_ = {}
_fields_ = [
@ -67,10 +67,6 @@ class Io(Structure):
def get_instance(cls, ref):
return cls._instances_[cast(ref, c_void_p).value]
@staticmethod
def get_by_forward_token(token):
return OcfLib.getInstance().ocf_forward_get_io(token)
@staticmethod
def forward_get(token):
OcfLib.getInstance().ocf_forward_get(token)
@ -90,8 +86,8 @@ class Io(Structure):
@staticmethod
@END
def c_end(io, err):
Io.get_instance(io).end(err)
def c_end(io, priv1, priv2, err):
Io.get_instance(io).end(priv1, priv2, err)
@staticmethod
@START
@ -103,7 +99,7 @@ class Io(Structure):
def c_handle(io, opaque):
Io.get_instance(io).handle(opaque)
def end(self, err):
def end(self, priv1, priv2, err):
try:
self.callback(err)
except: # noqa E722
@ -164,9 +160,6 @@ lib = OcfLib.getInstance()
lib.ocf_forward_get.argtypes = [c_uint64]
lib.ocf_forward_get_io.argtypes = [c_uint64]
lib.ocf_forward_get_io.restype = POINTER(Io)
lib.ocf_forward_end.argtypes = [c_uint64, c_int]
lib.ocf_io_set_cmpl_wrapper.argtypes = [POINTER(Io), c_void_p, c_void_p, Io.END]

View File

@ -54,6 +54,9 @@ class VolumeOps(Structure):
FORWARD_IO = CFUNCTYPE(None, c_void_p, c_uint64, c_int, c_uint64, c_uint64, c_uint64)
FORWARD_FLUSH = CFUNCTYPE(None, c_void_p, c_uint64)
FORWARD_DISCARD = CFUNCTYPE(None, c_void_p, c_uint64, c_uint64, c_uint64)
FORWARD_WRITE_ZEROS = CFUNCTYPE(None, c_void_p, c_uint64, c_uint64, c_uint64)
FORWARD_METADATA = CFUNCTYPE(None, c_void_p, c_uint64, c_int, c_uint64, c_uint64, c_uint64)
FORWARD_IO_SIMPLE = CFUNCTYPE(None, c_void_p, c_uint64, c_int, c_uint64, c_uint64)
ON_INIT = CFUNCTYPE(c_int, c_void_p)
ON_DEINIT = CFUNCTYPE(None, c_void_p)
OPEN = CFUNCTYPE(c_int, c_void_p, c_void_p)
@ -70,6 +73,9 @@ class VolumeOps(Structure):
("_forward_io", FORWARD_IO),
("_forward_flush", FORWARD_FLUSH),
("_forward_discard", FORWARD_DISCARD),
("_forward_write_zeros", FORWARD_WRITE_ZEROS),
("_forward_metadata", FORWARD_METADATA),
("_forward_io_simple", FORWARD_IO_SIMPLE),
("_on_init", ON_INIT),
("_on_deinit", ON_DEINIT),
("_open", OPEN),
@ -82,19 +88,13 @@ class VolumeOps(Structure):
class VolumeProperties(Structure):
_fields_ = [
("_name", c_char_p),
("_io_priv_size", c_uint32),
("_volume_priv_size", c_uint32),
("_caps", VolumeCaps),
("_io_ops", IoOps),
("_deinit", c_char_p),
("_ops_", VolumeOps),
]
class VolumeIoPriv(Structure):
_fields_ = [("_data", c_void_p), ("_offset", c_uint64)]
VOLUME_POISON = 0x13
@ -109,35 +109,6 @@ class Volume:
if cls in Volume._ops_:
return Volume._ops_[cls]
@VolumeOps.SUBMIT_IO
def _submit_io(io):
io_structure = cast(io, POINTER(Io))
volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
volume.submit_io(io_structure)
@VolumeOps.SUBMIT_FLUSH
def _submit_flush(flush):
io_structure = cast(flush, POINTER(Io))
volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
volume.submit_flush(io_structure)
@VolumeOps.SUBMIT_METADATA
def _submit_metadata(meta):
raise NotImplementedError
@VolumeOps.SUBMIT_DISCARD
def _submit_discard(discard):
io_structure = cast(discard, POINTER(Io))
volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
volume.submit_discard(io_structure)
@VolumeOps.SUBMIT_WRITE_ZEROES
def _submit_write_zeroes(write_zeroes):
raise NotImplementedError
@VolumeOps.FORWARD_IO
def _forward_io(volume, token, rw, addr, nbytes, offset):
Volume.get_instance(volume).forward_io(token, rw, addr, nbytes, offset)
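
These bindings mirror the C volume interface, where the legacy submit_* entry points give way to forward_* callbacks that receive a token instead of an io. For a volume that needs no explicit flush or discard handling, the C handlers reduce to completing the token; a sketch with hypothetical function names, signatures taken from the CFUNCTYPE declarations above:

static void my_forward_flush(ocf_volume_t volume, ocf_forward_token_t token)
{
        ocf_forward_end(token, 0);      /* nothing to flush: complete with success */
}

static void my_forward_discard(ocf_volume_t volume, ocf_forward_token_t token,
                uint64_t addr, uint64_t bytes)
{
        ocf_forward_end(token, 0);      /* treat discard as a no-op */
}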
@ -160,7 +131,7 @@ class Volume:
@VolumeOps.OPEN
def _open(ref, params):
uuid_ptr = cast(OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid))
uuid_ptr = cast(lib.ocf_volume_get_uuid(ref), POINTER(Uuid))
uuid = str(uuid_ptr.contents._data, encoding="ascii")
try:
volume = Volume.get_by_uuid(uuid)
@ -194,11 +165,6 @@ class Volume:
return Volume.get_instance(ref).get_length()
Volume._ops_[cls] = VolumeOps(
_submit_io=_submit_io,
_submit_flush=_submit_flush,
_submit_metadata=_submit_metadata,
_submit_discard=_submit_discard,
_submit_write_zeroes=_submit_write_zeroes,
_forward_io=_forward_io,
_forward_flush=_forward_flush,
_forward_discard=_forward_discard,
@ -237,11 +203,9 @@ class Volume:
Volume._props_[cls] = VolumeProperties(
_name=str(cls.__name__).encode("ascii"),
_io_priv_size=sizeof(VolumeIoPriv),
_volume_priv_size=0,
_caps=VolumeCaps(_atomic_writes=0),
_ops_=cls.get_ops(),
_io_ops=cls.get_io_ops(),
_deinit=0,
)
return Volume._props_[cls]
@ -261,22 +225,6 @@ class Volume:
def get_by_uuid(cls, uuid):
return cls._uuid_[uuid]
@staticmethod
@IoOps.SET_DATA
def _io_set_data(io, data, offset):
io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
data = Data.get_instance(data)
io_priv.contents._offset = offset
io_priv.contents._data = data.handle
return 0
@staticmethod
@IoOps.GET_DATA
def _io_get_data(io):
io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
return io_priv.contents._data
def __init__(self, uuid=None):
if uuid:
if uuid in type(self)._uuid_:
@ -298,12 +246,6 @@ class Volume:
def get_max_io_size(self):
raise NotImplementedError
def do_submit_flush(self, flush):
raise NotImplementedError
def do_submit_discard(self, discard):
raise NotImplementedError
def get_stats(self):
return self.stats
@ -313,9 +255,6 @@ class Volume:
def inc_stats(self, _dir):
self.stats[_dir] += 1
def do_submit_io(self, io):
raise NotImplementedError
def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
raise NotImplementedError
@ -328,28 +267,6 @@ class Volume:
def online(self):
self.is_online = True
def _reject_io(self, io):
cast(io, POINTER(Io)).contents._end(io, -OcfErrorCode.OCF_ERR_IO)
def submit_flush(self, io):
if self.is_online:
self.do_submit_flush(io)
else:
self._reject_io(io)
def submit_io(self, io):
if self.is_online:
self.inc_stats(IoDir(io.contents._dir))
self.do_submit_io(io)
else:
self._reject_io(io)
def submit_discard(self, io):
if self.is_online:
self.do_submit_discard(io)
else:
self._reject_io(io)
def _reject_forward(self, token):
Io.forward_end(token, -OcfErrorCode.OCF_ERR_IO)
@ -375,7 +292,6 @@ class Volume:
def new_io(
self, queue: Queue, addr: int, length: int, direction: IoDir, io_class: int, flags: int,
):
lib = OcfLib.getInstance()
io = lib.ocf_volume_new_io(
self.handle,
queue.handle if queue else c_void_p(),
@ -475,54 +391,14 @@ class RamVolume(Volume):
def get_max_io_size(self):
return S.from_KiB(128)
def do_submit_flush(self, flush):
flush.contents._end(flush, 0)
def do_submit_discard(self, discard):
try:
dst = self.data_ptr + discard.contents._addr
memset(dst, 0, discard.contents._bytes)
discard.contents._end(discard, 0)
except: # noqa E722
discard.contents._end(discard, -OcfErrorCode.OCF_ERR_NOT_SUPP)
def do_submit_io(self, io):
flags = int(io.contents._flags)
if flags & IoFlags.FLUSH:
self.do_submit_flush(io)
return
try:
io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
offset = io_priv.contents._offset
if io.contents._dir == IoDir.WRITE:
src_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
src = Data.get_instance(src_ptr.value).handle.value + offset
dst = self.data_ptr + io.contents._addr
elif io.contents._dir == IoDir.READ:
dst_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
dst = Data.get_instance(dst_ptr.value).handle.value + offset
src = self.data_ptr + io.contents._addr
memmove(dst, src, io.contents._bytes)
io_priv.contents._offset += io.contents._bytes
io.contents._end(io, 0)
except: # noqa E722
io.contents._end(io, -OcfErrorCode.OCF_ERR_IO)
def do_forward_io(self, token, rw, addr, nbytes, offset):
try:
io = Io.get_by_forward_token(token)
if rw == IoDir.WRITE:
src_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
src_ptr = cast(lib.ocf_forward_get_data(token), c_void_p)
src = Data.get_instance(src_ptr.value).handle.value + offset
dst = self.data_ptr + addr
elif rw == IoDir.READ:
dst_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
dst_ptr = cast(lib.ocf_forward_get_data(token), c_void_p)
dst = Data.get_instance(dst_ptr.value).handle.value + offset
src = self.data_ptr + addr
@ -594,7 +470,6 @@ class ErrorDevice(Volume):
def should_forward_io(self, rw, addr):
if not self.armed:
return True
direction = IoDir(rw)
seq_no_match = (
self.error_seq_no[direction] >= 0
@ -606,31 +481,7 @@ class ErrorDevice(Volume):
return not seq_no_match and not sector_match
def complete_submit_with_error(self, io):
self.error = True
direction = IoDir(io.contents._dir)
self.stats["errors"][direction] += 1
io.contents._end(io, -OcfErrorCode.OCF_ERR_IO)
def do_submit_io(self, io):
if self.should_forward_io(io.contents._dir, io.contents._addr):
self.vol.do_submit_io(io)
else:
self.complete_submit_with_error(io)
def do_submit_flush(self, io):
if self.data_only or self.should_forward_io(io.contents._dir, io.contents._addr):
self.vol.do_submit_flush(io)
else:
self.complete_submit_with_error(io)
def do_submit_discard(self, io):
if self.data_only or self.should_forward_io(io.contents._dir, io.contents._addr):
self.vol.do_submit_discard(io)
else:
self.complete_submit_with_error(io)
def complete_forward_with_error(self, token, rw):
def complete_forward_with_error(self, token, rw=IoDir.WRITE):
self.error = True
direction = IoDir(rw)
self.stats["errors"][direction] += 1
@ -643,16 +494,16 @@ class ErrorDevice(Volume):
self.complete_forward_with_error(token, rw)
def do_forward_flush(self, token):
if self.data_only or self.should_forward_io(0, 0):
if self.data_only or self.should_forward_io(IoDir.WRITE, 0):
self.vol.do_forward_flush(token)
else:
self.complete_forward_with_error(token, rw)
self.complete_forward_with_error(token)
def do_forward_discard(self, token, addr, nbytes):
if self.data_only or self.should_forward_io(0, addr):
if self.data_only or self.should_forward_io(IoDir.WRITE, addr):
self.vol.do_forward_discard(token, addr, nbytes)
else:
self.complete_forward_with_error(token, rw)
self.complete_forward_with_error(token)
def arm(self):
self.armed = True
@ -717,76 +568,40 @@ class TraceDevice(Volume):
return submit
def do_submit_io(self, io):
submit = self._trace(
TraceDevice.IoType.Data,
io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._flags
)
if submit:
self.vol.do_submit_io(io)
def do_submit_flush(self, io):
submit = self._trace(
TraceDevice.IoType.Flush,
io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._flags
)
if submit:
self.vol.do_submit_flush(io)
def do_submit_discard(self, io):
submit = self._trace(
TraceDevice.IoType.Discard,
io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._flags
)
if submit:
self.vol.do_submit_discard(io)
def do_forward_io(self, token, rw, addr, nbytes, offset):
io = Io.get_by_forward_token(token)
flags = lib.ocf_forward_get_flags(token)
submit = self._trace(
TraceDevice.IoType.Data,
rw,
addr,
nbytes,
io.contents._flags
flags
)
if submit:
self.vol.do_forward_io(token, rw, addr, nbytes, offset)
def do_forward_flush(self, token):
io = Io.get_by_forward_token(token)
flags = lib.ocf_forward_get_flags(token)
submit = self._trace(
TraceDevice.IoType.Flush,
IoDir.WRITE,
0,
0,
io.contents._flags
flags
)
if submit:
self.vol.do_forward_flush(token)
def do_forward_discard(self, token, addr, nbytes):
io = Io.get_by_forward_token(token)
flags = lib.ocf_forward_get_flags(token)
submit = self._trace(
TraceDevice.IoType.Discard,
IoDir.WRITE,
addr,
nbytes,
io.contents._flags
flags
)
if submit:
@ -809,11 +624,15 @@ class TraceDevice(Volume):
lib = OcfLib.getInstance()
lib.ocf_io_get_priv.restype = POINTER(VolumeIoPriv)
lib.ocf_io_get_offset.restype = c_uint32
lib.ocf_io_get_volume.argtypes = [c_void_p]
lib.ocf_io_get_volume.restype = c_void_p
lib.ocf_io_get_data.argtypes = [c_void_p]
lib.ocf_io_get_data.restype = c_void_p
lib.ocf_forward_get_data.argtypes = [c_uint64]
lib.ocf_forward_get_data.restype = c_void_p
lib.ocf_forward_get_flags.argtypes = [c_uint64]
lib.ocf_forward_get_flags.restype = c_uint64
lib.ocf_volume_new_io.argtypes = [
c_void_p,
c_void_p,

View File

@ -5,7 +5,7 @@
#
import logging
from ctypes import c_int, c_void_p, CFUNCTYPE, byref, c_uint32, c_uint64, cast, POINTER
from ctypes import c_int, c_void_p, CFUNCTYPE, byref, c_uint8, c_uint32, c_uint64, cast, POINTER
from ..ocf import OcfLib
from .volume import Volume, VOLUME_POISON
@ -24,11 +24,11 @@ class OcfInternalVolume(Volume):
queue = self.parent.get_default_queue() # TODO multiple queues?
return self.new_io(queue, addr, _bytes, _dir, _class, _flags)
def _alloc_io(self, io, rw=None, addr=None, nbytes=None, offset=0):
def _alloc_io(self, io):
exp_obj_io = self.__alloc_io(
addr or io.contents._addr,
nbytes or io.contents._bytes,
rw or io.contents._dir,
io.contents._addr,
io.contents._bytes,
io.contents._dir,
io.contents._class,
io.contents._flags,
)
@ -46,10 +46,10 @@ class OcfInternalVolume(Volume):
return exp_obj_io
def get_length(self):
return Size.from_B(OcfLib.getInstance().ocf_volume_get_length(self.handle))
return Size.from_B(lib.ocf_volume_get_length(self.handle))
def get_max_io_size(self):
return Size.from_B(OcfLib.getInstance().ocf_volume_get_max_io_size(self.handle))
return Size.from_B(lib.ocf_volume_get_max_io_size(self.handle))
def do_submit_io(self, io):
io = self._alloc_io(io)
@ -64,8 +64,12 @@ class OcfInternalVolume(Volume):
io.submit_discard()
def do_forward_io(self, token, rw, addr, nbytes, offset):
orig_io = Io.get_by_forward_token(token)
io = self._alloc_io(orig_io, rw, addr, nbytes, offset)
flags = lib.ocf_forward_get_flags(token)
io_class = lib.ocf_forward_get_io_class(token)
cdata = lib.ocf_forward_get_data(token)
io = self.__alloc_io(addr, nbytes, rw, io_class, flags)
lib.ocf_io_set_data(byref(io), cdata, offset)
def cb(error):
nonlocal io
@ -77,15 +81,37 @@ class OcfInternalVolume(Volume):
io.submit()
def do_forward_flush(self, token):
orig_io = Io.get_by_forward_token(token)
io = self._alloc_io(orig_io)
flags = lib.ocf_forward_get_flags(token)
io_class = lib.ocf_forward_get_io_class(token)
io = self.__alloc_io(0, 0, 0, io_class, flags)
def cb(error):
nonlocal io
Io.forward_end(io.token, error)
io.token = token
io.callback = cb
io.submit_flush()
def do_forward_discard(self, token, addr, nbytes):
orig_io = Io.get_by_forward_token(token)
io = self._alloc_io(orig_io, addr=addr, nbytes=nbytes)
flags = lib.ocf_forward_get_flags(token)
io_class = lib.ocf_forward_get_io_class(token)
io = self.__alloc_io(addr, nbytes, 0, io_class, flags)
def cb(error):
nonlocal io
Io.forward_end(io.token, error)
io.token = token
io.callback = cb
io.submit_discard()
def _read(self, offset=0, size=0):
if size == 0:
size = self.get_length().B - offset
@ -160,3 +186,9 @@ lib.ocf_volume_get_length.argtypes = [c_void_p]
lib.ocf_volume_get_length.restype = c_uint64
lib.ocf_io_get_data.argtypes = [POINTER(Io)]
lib.ocf_io_get_data.restype = c_void_p
lib.ocf_forward_get_data.argtypes = [c_uint64]
lib.ocf_forward_get_data.restype = c_void_p
lib.ocf_forward_get_flags.argtypes = [c_uint64]
lib.ocf_forward_get_flags.restype = c_uint64
lib.ocf_forward_get_io_class.argtypes = [c_uint64]
lib.ocf_forward_get_io_class.restype = c_uint8
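
The accessors bound here are the C-side getters a forward handler may call on a token while it owns it. A minimal C sketch of a handler inspecting the forwarded io before completing it; my_forward_io is a hypothetical name and the body is a placeholder:

static void my_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
                int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
        ctx_data_t *data = ocf_forward_get_data(token);
        uint64_t flags = ocf_forward_get_flags(token);
        uint8_t io_class = ocf_forward_get_io_class(token);

        /* move `bytes` of `data` at `offset` to/from the backend,
         * honoring `flags` and accounting per `io_class` ... */

        ocf_forward_end(token, 0);
}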

View File

@ -4,10 +4,7 @@
# SPDX-License-Identifier: BSD-3-Clause
#
from ctypes import c_int, memmove, cast, c_void_p
from enum import IntEnum
from itertools import product
import random
from ctypes import memmove, cast, c_void_p, c_uint64
import pytest
@ -18,6 +15,7 @@ from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir, Sync
from pyocf.utils import Size
from pyocf.ocf import OcfLib
def __io(io, data):
@ -48,19 +46,19 @@ class FlagsValVolume(RamVolume):
self.fail = False
super().__init__(size)
def set_check(self, check):
self.check = check
def set_check(self):
self.check = True
self.fail = True
def submit_io(self, io):
def do_forward_io(self, token, rw, addr, nbytes, offset):
if self.check:
flags = io.contents._flags
if flags != self.flags:
self.fail = True
super().submit_io(io)
flags = lib.ocf_forward_get_flags(token)
if flags == self.flags:
self.fail = False
super().do_forward_io(token, rw, addr, nbytes, offset)
@pytest.mark.parametrize("cache_mode", CacheMode)
def test_io_flags(pyocf_ctx, cache_mode):
def test_io_flags(pyocf_ctx):
"""
Verify that I/O flags provided at the top volume interface
are propagated down to bottom volumes for all associated
@ -74,44 +72,54 @@ def test_io_flags(pyocf_ctx, cache_mode):
pyocf_ctx.register_volume_type(FlagsValVolume)
cache_device = FlagsValVolume(Size.from_MiB(50), flags)
cache_device = FlagsValVolume(Size.from_MiB(50), 0)
core_device = FlagsValVolume(Size.from_MiB(50), flags)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core)
cache_device.set_check(True)
core_device.set_check(True)
def set_check():
cache_device.set_check()
core_device.set_check()
# write miss
set_check()
io_to_exp_obj(vol, block_size * 0, block_size, data, 0, IoDir.WRITE, flags)
assert not cache_device.fail
assert not core_device.fail
# read miss
set_check()
io_to_exp_obj(vol, block_size * 1, block_size, data, 0, IoDir.READ, flags)
assert not cache_device.fail
assert not core_device.fail
# "dirty" read hit
set_check()
io_to_exp_obj(vol, block_size * 0, block_size, data, 0, IoDir.READ, flags)
assert not cache_device.fail
assert not core_device.fail
# "clean" read hit
set_check()
io_to_exp_obj(vol, block_size * 1, block_size, data, 0, IoDir.READ, flags)
assert not cache_device.fail
assert not core_device.fail
cache.change_cache_mode(CacheMode.WT)
# "dirty" write hit
set_check()
io_to_exp_obj(vol, block_size * 0, block_size, data, 0, IoDir.WRITE, flags)
assert not cache_device.fail
assert not core_device.fail
# "clean" write hit
set_check()
io_to_exp_obj(vol, block_size * 1, block_size, data, 0, IoDir.WRITE, flags)
assert not cache_device.fail
assert not core_device.fail
lib = OcfLib.getInstance()
lib.ocf_forward_get_flags.argtypes = [c_uint64]
lib.ocf_forward_get_flags.restype = c_uint64

View File

@ -12,7 +12,7 @@ from threading import Event
from collections import namedtuple
from pyocf.ocf import OcfLib
from pyocf.types.volume import RamVolume, ErrorDevice, TraceDevice, IoFlags, VolumeIoPriv
from pyocf.types.volume import RamVolume, ErrorDevice, TraceDevice, IoFlags
from pyocf.types.cvolume import CVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
@ -287,19 +287,25 @@ def test_io_propagation_basic(pyocf_ctx):
ret = cvol_submit_data_io(cvol, addr, io_size)
assert ret == 0
ret = cvol_submit_flush_io(cvol, addr, io_size, IoFlags.FLUSH)
ret = cvol_submit_flush_io(cvol, addr, io_size)
assert ret == 0
ret = cvol_submit_discard_io(cvol, addr, io_size)
assert ret == 0
for io_type in TraceDevice.IoType:
ios = io_trace[vol][io_type]
assert len(ios) == 1
io = ios[0]
assert io.dir == IoDir.WRITE
assert io.addr == addr.B - int(vol_begin[i])
assert io.bytes == io_size.B
ios = io_trace[vol][TraceDevice.IoType.Data]
assert len(ios) == 1
assert ios[0].dir == IoDir.WRITE
assert ios[0].addr == addr.B - int(vol_begin[i])
assert ios[0].bytes == io_size.B
ios = io_trace[vol][TraceDevice.IoType.Flush]
assert len(ios) == i + 1
ios = io_trace[vol][TraceDevice.IoType.Discard]
assert len(ios) == 1
assert ios[0].addr == addr.B - int(vol_begin[i])
assert ios[0].bytes == io_size.B
cvol.close()
cvol.destroy()
@ -355,27 +361,36 @@ def test_io_propagation_cross_boundary(pyocf_ctx):
ret = cvol_submit_data_io(cvol, addr, io_size)
assert ret == 0
ret = cvol_submit_flush_io(cvol, addr, io_size, IoFlags.FLUSH)
ret = cvol_submit_flush_io(cvol, addr, io_size)
assert ret == 0
ret = cvol_submit_discard_io(cvol, addr, io_size)
assert ret == 0
for io_type in TraceDevice.IoType:
ios1 = io_trace[vols[i]][io_type]
ios2 = io_trace[vols[i + 1]][io_type]
ios1 = io_trace[vols[i]][TraceDevice.IoType.Data]
ios2 = io_trace[vols[i + 1]][TraceDevice.IoType.Data]
assert len(ios1) == 1
assert ios1[0].dir == IoDir.WRITE
assert ios1[0].addr == int(vols[i].vol.size - (io_size / 2))
assert ios1[0].bytes == io_size.B / 2
assert len(ios2) == 1
assert ios2[0].dir == IoDir.WRITE
assert ios2[0].addr == 0
assert ios2[0].bytes == io_size.B / 2
assert len(ios1) == 1
io = ios1[0]
assert io.dir == IoDir.WRITE
assert io.addr == int(vols[i].vol.size - (io_size / 2))
assert io.bytes == io_size.B / 2
ios1 = io_trace[vols[i]][TraceDevice.IoType.Flush]
ios2 = io_trace[vols[i + 1]][TraceDevice.IoType.Flush]
assert len(ios1) == 1
assert len(ios2) == 1
assert len(ios2) == 1
io = ios2[0]
assert io.dir == IoDir.WRITE
assert io.addr == 0
assert io.bytes == io_size.B / 2
ios1 = io_trace[vols[i]][TraceDevice.IoType.Discard]
ios2 = io_trace[vols[i + 1]][TraceDevice.IoType.Discard]
assert len(ios1) == 1
assert ios1[0].addr == int(vols[i].vol.size - (io_size / 2))
assert ios1[0].bytes == io_size.B / 2
assert len(ios2) == 1
assert ios2[0].addr == 0
assert ios2[0].bytes == io_size.B / 2
cvol.close()
cvol.destroy()
@ -483,25 +498,51 @@ def test_io_propagation_multiple_subvolumes(pyocf_ctx, rand_seed):
ret = cvol_submit_data_io(cvol, addr, size)
assert ret == 0
ret = cvol_submit_flush_io(cvol, addr, size, IoFlags.FLUSH)
ret = cvol_submit_flush_io(cvol, addr, size)
assert ret == 0
ret = cvol_submit_discard_io(cvol, addr, size)
assert ret == 0
for vol in middle:
for io in io_trace[vol].values():
assert len(io) == 1
assert io[0].addr == 0
assert io[0].bytes == int(vol.vol.size)
ios = io_trace[vol][TraceDevice.IoType.Data]
assert len(ios) == 1
assert ios[0].addr == 0
assert ios[0].bytes == int(vol.vol.size)
for io in io_trace[first].values():
assert io[0].addr == int(start_offset)
assert io[0].bytes == int(vol_size - start_offset)
ios = io_trace[first][TraceDevice.IoType.Data]
assert len(ios) == 1
assert ios[0].addr == int(start_offset)
assert ios[0].bytes == int(vol_size - start_offset)
for io in io_trace[last].values():
assert io[0].addr == 0
assert io[0].bytes == int(end_offset)
ios = io_trace[last][TraceDevice.IoType.Data]
assert len(ios) == 1
assert ios[0].addr == 0
assert ios[0].bytes == int(end_offset)
ios = io_trace[vol][TraceDevice.IoType.Flush]
assert len(ios) == 1
ios = io_trace[first][TraceDevice.IoType.Flush]
assert len(ios) == 1
ios = io_trace[last][TraceDevice.IoType.Flush]
assert len(ios) == 1
ios = io_trace[vol][TraceDevice.IoType.Discard]
assert len(ios) == 1
assert ios[0].addr == 0
assert ios[0].bytes == int(vol.vol.size)
ios = io_trace[first][TraceDevice.IoType.Discard]
assert len(ios) == 1
assert ios[0].addr == int(start_offset)
assert ios[0].bytes == int(vol_size - start_offset)
ios = io_trace[last][TraceDevice.IoType.Discard]
assert len(ios) == 1
assert ios[0].addr == 0
assert ios[0].bytes == int(end_offset)
cvol.close()
cvol.destroy()
@ -538,16 +579,16 @@ def test_io_completion(pyocf_ctx, rand_seed):
self.pending_ios = []
self.io_submitted = Event()
def do_submit_io(self, io):
self.pending_ios.append(("io", io))
def do_forward_io(self, token, rw, addr, nbytes, offset):
self.pending_ios.append(("io", token, rw, addr, nbytes, offset))
self.io_submitted.set()
def do_submit_flush(self, flush):
self.pending_ios.append(("flush", flush))
def do_forward_flush(self, token):
self.pending_ios.append(("flush", token))
self.io_submitted.set()
def do_submit_discard(self, discard):
self.pending_ios.append(("discard", discard))
def do_forward_discard(self, token, addr, nbytes):
self.pending_ios.append(("discard", token, addr, nbytes))
self.io_submitted.set()
def wait_submitted(self):
@ -558,13 +599,13 @@ def test_io_completion(pyocf_ctx, rand_seed):
if not self.pending_ios:
return False
io_type, io = self.pending_ios.pop()
io_type, token, *params = self.pending_ios.pop()
if io_type == "io":
super().do_submit_io(io)
super().do_forward_io(token, *params)
elif io_type == "flush":
super().do_submit_flush(io)
super().do_forward_flush(token)
elif io_type == "discard":
super().do_submit_discard(io)
super().do_forward_discard(token, *params)
else:
assert False
@ -586,14 +627,18 @@ def test_io_completion(pyocf_ctx, rand_seed):
addr = vol_size / 2
size = (subvol_count - 1) * vol_size
for op, flags in [("submit", 0), ("submit_flush", IoFlags.FLUSH), ("submit_discard", 0)]:
for op, cnt in [
("submit", subvol_count),
("submit_flush", len(vols)),
("submit_discard", subvol_count)
]:
io = cvol.new_io(
queue=None,
addr=addr,
length=size,
direction=IoDir.WRITE,
io_class=0,
flags=flags,
flags=0,
)
completion = OcfCompletion([("err", c_int)])
io.callback = completion.callback
@ -604,7 +649,7 @@ def test_io_completion(pyocf_ctx, rand_seed):
submit_fn = getattr(io, op)
submit_fn()
pending_vols = vols[:subvol_count]
pending_vols = vols[:cnt]
for v in pending_vols:
v.wait_submitted()
@ -674,7 +719,7 @@ def test_io_error(pyocf_ctx, rand_seed):
assert ret == -OcfErrorCode.OCF_ERR_IO
# verify flush properly propagated
ret = cvol_submit_flush_io(cvol, addr, size, IoFlags.FLUSH)
ret = cvol_submit_flush_io(cvol, addr, size)
assert ret == -OcfErrorCode.OCF_ERR_IO
# verify discard properly propagated