Merge pull request #216 from robertbaldyga/io-and-req-in-single-allocation

Allocate io and req in single allocation
This commit is contained in:
Jan Musiał
2019-07-23 11:40:30 +02:00
committed by GitHub
50 changed files with 655 additions and 521 deletions

View File

@@ -254,6 +254,8 @@ int ocf_engine_hndl_req(struct ocf_request *req,
if (!req->io_if)
return -OCF_ERR_INVAL;
ocf_req_get(req);
/* Until the OCF engine is fully synchronous, the OCF request needs
 * to be pushed to the OCF workers
 */
@@ -273,6 +275,8 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req,
if (!io_if)
return -OCF_ERR_INVAL;
ocf_req_get(req);
switch (req->rw) {
case OCF_READ:
ret = io_if->read(req);
@@ -281,9 +285,12 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req,
ret = io_if->write(req);
break;
default:
return OCF_FAST_PATH_NO;
ret = OCF_FAST_PATH_NO;
}
if (ret == OCF_FAST_PATH_NO)
ocf_req_put(req);
return ret;
}
@@ -299,6 +306,8 @@ static void ocf_engine_hndl_2dc_req(struct ocf_request *req)
void ocf_engine_hndl_discard_req(struct ocf_request *req)
{
ocf_req_get(req);
if (req->d2c) {
ocf_engine_hndl_2dc_req(req);
return;
@@ -314,6 +323,8 @@ void ocf_engine_hndl_discard_req(struct ocf_request *req)
void ocf_engine_hndl_ops_req(struct ocf_request *req)
{
ocf_req_get(req);
if (req->d2c)
req->io_if = &IO_IFS[OCF_IO_D2C_IF];
else

View File

@@ -42,7 +42,7 @@ int ocf_io_d2c(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@@ -71,19 +71,17 @@ static int _ocf_discard_core(struct ocf_request *req)
{
struct ocf_io *io;
io = ocf_volume_new_io(&req->core->volume);
io = ocf_volume_new_io(&req->core->volume, req->io_queue,
SECTORS_TO_BYTES(req->discard.sector),
SECTORS_TO_BYTES(req->discard.nr_sects),
OCF_WRITE, 0, 0);
if (!io) {
_ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
ocf_io_configure(io, SECTORS_TO_BYTES(req->discard.sector),
SECTORS_TO_BYTES(req->discard.nr_sects),
OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, req, NULL, _ocf_discard_core_complete);
ocf_io_set_data(io, req->data, 0);
ocf_io_set_queue(io, req->io_queue);
ocf_volume_submit_discard(io);
@@ -111,16 +109,15 @@ static int _ocf_discard_flush_cache(struct ocf_request *req)
{
struct ocf_io *io;
io = ocf_volume_new_io(&req->cache->device->volume);
io = ocf_volume_new_io(&req->cache->device->volume, req->io_queue,
0, 0, OCF_WRITE, 0, 0);
if (!io) {
ocf_metadata_error(req->cache);
_ocf_discard_complete_req(req, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, req, NULL, _ocf_discard_cache_flush_complete);
ocf_io_set_queue(io, req->io_queue);
ocf_volume_submit_flush(io);
@@ -260,7 +257,7 @@ int ocf_discard(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
if (req->rw == OCF_READ) {
req->complete(req, -OCF_ERR_INVAL);

View File

@@ -128,7 +128,7 @@ int ocf_read_fast(struct ocf_request *req)
hit = ocf_engine_is_hit(req);
if (hit) {
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
lock = ocf_req_trylock_rd(req);
}
@@ -198,7 +198,7 @@ int ocf_write_fast(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
if (mapped) {
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
lock = ocf_req_trylock_wr(req);
}

View File

@@ -110,7 +110,7 @@ int ocf_read_pt(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@@ -217,7 +217,7 @@ int ocf_read_generic(struct ocf_request *req)
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
/* There are conditions to bypass IO */

View File

@@ -40,7 +40,7 @@ int ocf_write_wa(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@@ -172,7 +172,7 @@ int ocf_write_wb(struct ocf_request *req)
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Not sure if we need this. */
ocf_req_get(req);

View File

@@ -140,7 +140,7 @@ int ocf_write_wi(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@@ -207,7 +207,7 @@ int ocf_read_wo(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@@ -167,7 +167,7 @@ int ocf_write_wt(struct ocf_request *req)
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@@ -199,7 +199,8 @@ static int ocf_metadata_read_sb(ocf_ctx_t ctx, ocf_volume_t volume,
context->priv2 = priv2;
/* Allocate resources for IO */
io = ocf_volume_new_io(volume);
io = ocf_volume_new_io(volume, NULL, 0, sb_pages * PAGE_SIZE,
OCF_READ, 0, 0);
if (!io) {
ocf_log(ctx, log_err, "Memory allocation error");
result = -OCF_ERR_NO_MEM;
@@ -224,8 +225,6 @@ static int ocf_metadata_read_sb(ocf_ctx_t ctx, ocf_volume_t volume,
goto err_set_data;
}
ocf_io_configure(io, 0, sb_pages * PAGE_SIZE, OCF_READ, 0, 0);
ocf_io_set_cmpl(io, context, NULL, ocf_metadata_read_sb_complete);
ocf_volume_submit_io(io);

View File

@@ -711,18 +711,16 @@ static int ocf_metadata_query_cores_io(ocf_volume_t volume,
env_atomic_inc(&context->count);
/* Allocate new IO */
io = ocf_volume_new_io(volume);
io = ocf_volume_new_io(volume, NULL,
PAGES_TO_BYTES(page),
PAGES_TO_BYTES(num_pages),
OCF_READ, 0, 0);
if (!io) {
err = -OCF_ERR_NO_MEM;
goto exit_error;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(page),
PAGES_TO_BYTES(num_pages),
OCF_READ, 0, 0);
ocf_io_set_cmpl(io, context, NULL,
ocf_metadata_query_cores_end_io);
err = ocf_io_set_data(io, data, PAGES_TO_BYTES(offset));

View File

@@ -63,7 +63,7 @@ static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
{
struct metadata_io_read_i_atomic_context *context = io->priv1;
OCF_DEBUG_TRACE(ocf_volume_get_cache(io->volume));
OCF_DEBUG_TRACE(ocf_volume_get_cache(ocf_io_get_volume(io)));
ocf_io_put(io);
@@ -99,18 +99,17 @@ int metadata_io_read_i_atomic_step(struct ocf_request *req)
ctx_data_seek(cache->owner, context->data, ctx_data_seek_begin, 0);
/* Allocate new IO */
io = ocf_new_cache_io(cache);
io = ocf_new_cache_io(cache, req->io_queue,
cache->device->metadata_offset +
SECTORS_TO_BYTES(context->curr_offset),
SECTORS_TO_BYTES(context->curr_count), OCF_READ, 0, 0);
if (!io) {
metadata_io_read_i_atomic_complete(context, -OCF_ERR_NO_MEM);
return 0;
}
/* Setup IO */
ocf_io_configure(io, cache->device->metadata_offset +
SECTORS_TO_BYTES(context->curr_offset),
SECTORS_TO_BYTES(context->curr_count), OCF_READ, 0, 0);
ocf_io_set_queue(io, req->io_queue);
ocf_io_set_cmpl(io, context, NULL, metadata_io_read_i_atomic_step_end);
result = ocf_io_set_data(io, context->data, 0);
if (result) {
@@ -231,19 +230,16 @@ static int ocf_restart_meta_io(struct ocf_request *req)
metadata_io_req_fill(meta_io_req);
OCF_METADATA_UNLOCK_RD();
io = ocf_new_cache_io(cache);
io = ocf_new_cache_io(cache, req->io_queue,
PAGES_TO_BYTES(meta_io_req->page),
PAGES_TO_BYTES(meta_io_req->count),
OCF_WRITE, 0, 0);
if (!io) {
metadata_io_i_asynch_end(meta_io_req, -OCF_ERR_NO_MEM);
return 0;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(meta_io_req->page),
PAGES_TO_BYTES(meta_io_req->count),
OCF_WRITE, 0, 0);
ocf_io_set_queue(io, req->io_queue);
ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_i_asynch_cmpl);
ret = ocf_io_set_data(io, meta_io_req->data, 0);
if (ret) {
@@ -415,7 +411,10 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]);
if (ret == 0) {
/* Allocate new IO */
io = ocf_new_cache_io(cache);
io = ocf_new_cache_io(cache, queue,
PAGES_TO_BYTES(a_req->reqs[i].page),
PAGES_TO_BYTES(a_req->reqs[i].count),
dir, 0, 0);
if (!io) {
error = -OCF_ERR_NO_MEM;
metadata_io_req_error(cache, a_req, i, error);
@@ -426,12 +425,6 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
metadata_io_req_fill(&a_req->reqs[i]);
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(a_req->reqs[i].page),
PAGES_TO_BYTES(a_req->reqs[i].count),
dir, 0, 0);
ocf_io_set_queue(io, queue);
ocf_io_set_cmpl(io, &a_req->reqs[i], NULL,
metadata_io_i_asynch_cmpl);
error = ocf_io_set_data(io, a_req->reqs[i].data, 0);

View File

@@ -70,8 +70,9 @@ static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
{
struct ocf_request *req = context;
struct ocf_io *io = ocf_new_cache_io(cache);
struct ocf_io *io;
io = ocf_new_cache_io(cache, NULL, start_addr, len, OCF_WRITE, 0, 0);
if (!io) {
req->error = -OCF_ERR_NO_MEM;
return req->error;
@@ -82,7 +83,6 @@ static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
env_atomic_inc(&ctx->flush_req_cnt);
ocf_io_configure(io, start_addr, len, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, ctx, NULL, _raw_atomic_io_discard_end);
if (cache->device->volume.features.discard_zeroes)

View File

@@ -336,7 +336,10 @@ static int raw_dynamic_load_all_read(struct ocf_request *req)
count = OCF_MIN(RAW_DYNAMIC_LOAD_PAGES, raw->ssd_pages - context->i);
/* Allocate IO */
context->io = ocf_new_cache_io(context->cache);
context->io = ocf_new_cache_io(context->cache, req->io_queue,
PAGES_TO_BYTES(raw->ssd_pages_offset + context->i),
PAGES_TO_BYTES(count), OCF_READ, 0, 0);
if (!context->io) {
raw_dynamic_load_all_complete(context, -OCF_ERR_NO_MEM);
return 0;
@@ -349,11 +352,6 @@ static int raw_dynamic_load_all_read(struct ocf_request *req)
raw_dynamic_load_all_complete(context, result);
return 0;
}
ocf_io_configure(context->io,
PAGES_TO_BYTES(raw->ssd_pages_offset + context->i),
PAGES_TO_BYTES(count), OCF_READ, 0, 0);
ocf_io_set_queue(context->io, req->io_queue);
ocf_io_set_cmpl(context->io, context, NULL,
raw_dynamic_load_all_read_end);

View File

@@ -14,7 +14,7 @@
ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache)
{
return ocf_cache_is_device_attached(cache) ? &cache->device->volume : NULL;
return cache->device ? &cache->device->volume : NULL;
}
ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache)

View File

@@ -134,9 +134,12 @@ int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
/* *** HELPER FUNCTIONS *** */
static inline struct ocf_core_io *ocf_io_to_core_io(struct ocf_io *io)
static inline struct ocf_request *ocf_io_to_req(struct ocf_io *io)
{
return ocf_io_get_priv(io);
struct ocf_io_internal *ioi;
ioi = container_of(io, struct ocf_io_internal, io);
return container_of(ioi, struct ocf_request, ioi);
}
static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
@@ -146,37 +149,27 @@ static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
return core_volume->core;
}
static inline int ocf_io_set_dirty(ocf_cache_t cache,
struct ocf_core_io *core_io)
static inline int ocf_io_set_dirty(struct ocf_request *req)
{
core_io->dirty = !!ocf_refcnt_inc(&cache->refcnt.dirty);
return core_io->dirty ? 0 : -EBUSY;
req->dirty = !!ocf_refcnt_inc(&req->cache->refcnt.dirty);
return req->dirty ? 0 : -EBUSY;
}
static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
ocf_cache_t cache)
static inline void dec_counter_if_req_was_dirty(struct ocf_request *req)
{
if (!core_io->dirty)
if (!req->dirty)
return;
core_io->dirty = 0;
ocf_refcnt_dec(&cache->refcnt.dirty);
req->dirty = 0;
ocf_refcnt_dec(&req->cache->refcnt.dirty);
}
static inline int ocf_core_validate_io(struct ocf_io *io)
{
ocf_core_t core;
ocf_volume_t volume = ocf_io_get_volume(io);
ocf_core_t core = ocf_volume_to_core(volume);
if (!io->volume)
return -OCF_ERR_INVAL;
if (!io->ops)
return -OCF_ERR_INVAL;
if (io->addr >= ocf_volume_get_length(io->volume))
return -OCF_ERR_INVAL;
if (io->addr + io->bytes > ocf_volume_get_length(io->volume))
if (io->addr + io->bytes > ocf_volume_get_length(volume))
return -OCF_ERR_INVAL;
if (io->io_class >= OCF_IO_CLASS_MAX)
@@ -194,7 +187,6 @@ static inline int ocf_core_validate_io(struct ocf_io *io)
/* Core volume I/O must not be queued on management queue - this would
* break I/O accounting code, resulting in use-after-free type of errors
* after cache detach, core remove etc. */
core = ocf_volume_to_core(io->volume);
if (io->io_queue == ocf_core_get_cache(core)->mngt_queue)
return -OCF_ERR_INVAL;
@@ -204,21 +196,20 @@ static inline int ocf_core_validate_io(struct ocf_io *io)
static void ocf_req_complete(struct ocf_request *req, int error)
{
/* Log trace */
ocf_trace_io_cmpl(ocf_io_to_core_io(req->io), req->cache);
ocf_trace_io_cmpl(req);
/* Complete IO */
ocf_io_end(req->io, error);
ocf_io_end(&req->ioi.io, error);
dec_counter_if_req_was_dirty(ocf_io_to_core_io(req->io), req->cache);
dec_counter_if_req_was_dirty(req);
/* Invalidate OCF IO, it is not valid after completion */
ocf_io_put(req->io);
req->io = NULL;
ocf_io_put(&req->ioi.io);
}
void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_req_cache_mode_t req_cache_mode;
ocf_core_t core;
ocf_cache_t cache;
@@ -228,16 +219,15 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
ret = ocf_core_validate_io(io);
if (ret < 0) {
io->end(io, ret);
ocf_io_end(io, ret);
return;
}
core_io = ocf_io_to_core_io(io);
core = ocf_volume_to_core(io->volume);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
ocf_trace_init_io(core_io, cache);
ocf_trace_init_io(req);
if (unlikely(!env_bit_test(ocf_cache_state_running,
&cache->cache_state))) {
@@ -254,50 +244,40 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
if (io->dir == OCF_WRITE &&
ocf_req_cache_mode_has_lazy_write(req_cache_mode) &&
ocf_io_set_dirty(cache, core_io)) {
ocf_io_set_dirty(req)) {
req_cache_mode = ocf_req_cache_mode_wt;
}
core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
io->dir);
if (!core_io->req) {
dec_counter_if_req_was_dirty(core_io, cache);
io->end(io, -OCF_ERR_NO_MEM);
return;
}
if (core_io->req->d2c)
if (req->d2c)
req_cache_mode = ocf_req_cache_mode_d2c;
core_io->req->part_id = ocf_part_class2id(cache, io->io_class);
core_io->req->data = core_io->data;
core_io->req->complete = ocf_req_complete;
core_io->req->io = io;
req->core = core;
req->complete = ocf_req_complete;
req->part_id = ocf_part_class2id(cache, io->io_class);
ocf_seq_cutoff_update(core, core_io->req);
ocf_seq_cutoff_update(core, req);
ocf_core_update_stats(core, io);
if (io->dir == OCF_WRITE)
ocf_trace_io(core_io, ocf_event_operation_wr, cache);
ocf_trace_io(req, ocf_event_operation_wr);
else if (io->dir == OCF_READ)
ocf_trace_io(core_io, ocf_event_operation_rd, cache);
ocf_trace_io(req, ocf_event_operation_rd);
ocf_io_get(io);
ret = ocf_engine_hndl_req(core_io->req, req_cache_mode);
ret = ocf_engine_hndl_req(req, req_cache_mode);
if (ret) {
dec_counter_if_req_was_dirty(core_io, cache);
ocf_req_put(core_io->req);
io->end(io, ret);
dec_counter_if_req_was_dirty(req);
ocf_io_end(io, ret);
}
}
int ocf_core_submit_io_fast(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_req_cache_mode_t req_cache_mode;
struct ocf_event_io trace_event;
struct ocf_request *req;
ocf_core_t core;
ocf_cache_t cache;
int fast;
@@ -309,9 +289,8 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
if (ret < 0)
return ret;
core_io = ocf_io_to_core_io(io);
core = ocf_volume_to_core(io->volume);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
if (unlikely(!env_bit_test(ocf_cache_state_running,
@@ -320,11 +299,17 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
return 0;
}
ret = ocf_req_alloc_map(req);
if (ret) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
return 0;
}
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (io->dir == OCF_WRITE &&
ocf_req_cache_mode_has_lazy_write(req_cache_mode) &&
ocf_io_set_dirty(cache, core_io)) {
ocf_io_set_dirty(req)) {
req_cache_mode = ocf_req_cache_mode_wt;
}
@@ -344,35 +329,22 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
req_cache_mode = ocf_req_cache_mode_fast;
}
core_io->req = ocf_req_new_extended(io->io_queue, core,
io->addr, io->bytes, io->dir);
// We need additional pointer to req in case completion arrives before
// we leave this function and core_io is freed
req = core_io->req;
if (!req) {
dec_counter_if_req_was_dirty(core_io, cache);
io->end(io, -OCF_ERR_NO_MEM);
return 0;
}
if (req->d2c) {
dec_counter_if_req_was_dirty(core_io, cache);
ocf_req_put(req);
dec_counter_if_req_was_dirty(req);
return -OCF_ERR_IO;
}
req->part_id = ocf_part_class2id(cache, io->io_class);
req->data = core_io->data;
req->core = core;
req->complete = ocf_req_complete;
req->io = io;
req->part_id = ocf_part_class2id(cache, io->io_class);
ocf_core_update_stats(core, io);
if (cache->trace.trace_callback) {
if (io->dir == OCF_WRITE)
ocf_trace_prep_io_event(&trace_event, core_io, ocf_event_operation_wr);
ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_wr);
else if (io->dir == OCF_READ)
ocf_trace_prep_io_event(&trace_event, core_io, ocf_event_operation_rd);
ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_rd);
}
ocf_io_get(io);
@@ -384,10 +356,9 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
return 0;
}
dec_counter_if_req_was_dirty(core_io, cache);
dec_counter_if_req_was_dirty(req);
ocf_io_put(io);
ocf_req_put(req);
return -OCF_ERR_IO;
}
@@ -398,7 +369,7 @@ static void ocf_core_volume_submit_io(struct ocf_io *io)
static void ocf_core_volume_submit_flush(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_core_t core;
ocf_cache_t cache;
int ret;
@@ -411,9 +382,8 @@ static void ocf_core_volume_submit_flush(struct ocf_io *io)
return;
}
core_io = ocf_io_to_core_io(io);
core = ocf_volume_to_core(io->volume);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
if (unlikely(!env_bit_test(ocf_cache_state_running,
@@ -422,25 +392,18 @@ static void ocf_core_volume_submit_flush(struct ocf_io *io)
return;
}
core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
io->dir);
if (!core_io->req) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
return;
}
req->core = core;
req->complete = ocf_req_complete;
core_io->req->complete = ocf_req_complete;
core_io->req->io = io;
core_io->req->data = core_io->data;
ocf_trace_io(core_io, ocf_event_operation_flush, cache);
ocf_trace_io(req, ocf_event_operation_flush);
ocf_io_get(io);
ocf_engine_hndl_ops_req(core_io->req);
ocf_engine_hndl_ops_req(req);
}
static void ocf_core_volume_submit_discard(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_core_t core;
ocf_cache_t cache;
int ret;
@@ -453,9 +416,8 @@ static void ocf_core_volume_submit_discard(struct ocf_io *io)
return;
}
core_io = ocf_io_to_core_io(io);
core = ocf_volume_to_core(io->volume);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
if (unlikely(!env_bit_test(ocf_cache_state_running,
@@ -464,20 +426,19 @@ static void ocf_core_volume_submit_discard(struct ocf_io *io)
return;
}
core_io->req = ocf_req_new_discard(io->io_queue, core,
io->addr, io->bytes, OCF_WRITE);
if (!core_io->req) {
ret = ocf_req_alloc_map_discard(req);
if (ret) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
return;
}
core_io->req->complete = ocf_req_complete;
core_io->req->io = io;
core_io->req->data = core_io->data;
req->core = core;
req->complete = ocf_req_complete;
ocf_trace_io(core_io, ocf_event_operation_discard, cache);
ocf_trace_io(req, ocf_event_operation_discard);
ocf_io_get(io);
ocf_engine_hndl_discard_req(core_io->req);
ocf_engine_hndl_discard_req(req);
}
/* *** VOLUME OPS *** */
@@ -517,32 +478,32 @@ static uint64_t ocf_core_volume_get_byte_length(ocf_volume_t volume)
static int ocf_core_io_set_data(struct ocf_io *io,
ctx_data_t *data, uint32_t offset)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
OCF_CHECK_NULL(io);
if (!data || offset)
return -OCF_ERR_INVAL;
core_io = ocf_io_to_core_io(io);
core_io->data = data;
req = ocf_io_to_req(io);
req->data = data;
return 0;
}
static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
OCF_CHECK_NULL(io);
core_io = ocf_io_to_core_io(io);
return core_io->data;
req = ocf_io_to_req(io);
return req->data;
}
const struct ocf_volume_properties ocf_core_volume_properties = {
.name = "OCF Core",
.io_priv_size = sizeof(struct ocf_core_io),
.io_priv_size = 0, /* Not used - custom allocator */
.volume_priv_size = sizeof(struct ocf_core_volume),
.caps = {
.atomic_writes = 0,
@@ -564,10 +525,55 @@ const struct ocf_volume_properties ocf_core_volume_properties = {
},
};
/*
 * Core-volume IO allocator init. Core IOs are carved out of ocf_request
 * objects (single allocation for IO and request), so there is no pool
 * to create here; priv_size and name are unused. Always succeeds.
 */
static int ocf_core_io_allocator_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name)
{
return 0;
}
/* Core-volume IO allocator deinit - no pool was created, nothing to free. */
static void ocf_core_io_allocator_deinit(ocf_io_allocator_t allocator)
{
}
/*
 * Allocate a core IO. IO and request share a single allocation: a new
 * ocf_request is created and a pointer to its embedded ioi member is
 * returned. Returns NULL when the request allocation fails.
 *
 * The allocator and volume arguments are unused. The request's core
 * pointer is passed as NULL here and filled in later on the submit
 * path.
 */
static void *ocf_core_io_allocator_new(ocf_io_allocator_t allocator,
		ocf_volume_t volume, ocf_queue_t queue,
		uint64_t addr, uint32_t bytes, uint32_t dir)
{
	struct ocf_request *req = ocf_req_new(queue, NULL, addr, bytes, dir);

	return req ? &req->ioi : NULL;
}
/*
 * Release a core IO. The IO is embedded in an ocf_request, so freeing
 * the IO means dropping one reference on the owning request.
 */
static void ocf_core_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
{
	ocf_req_put(container_of(obj, struct ocf_request, ioi));
}
/*
 * Custom IO allocator for the core volume: IOs are allocated together
 * with their ocf_request instead of from a separate pool.
 */
const struct ocf_io_allocator_type ocf_core_io_allocator_type = {
.ops = {
.allocator_init = ocf_core_io_allocator_init,
.allocator_deinit = ocf_core_io_allocator_deinit,
.allocator_new = ocf_core_io_allocator_new,
.allocator_del = ocf_core_io_allocator_del,
},
};
/* Extended volume properties: plug the request-embedded IO allocator in. */
const struct ocf_volume_extended ocf_core_volume_extended = {
.allocator_type = &ocf_core_io_allocator_type,
};
int ocf_core_volume_type_init(ocf_ctx_t ctx)
{
return ocf_ctx_register_volume_type(ctx, 0,
&ocf_core_volume_properties);
return ocf_ctx_register_volume_type_extended(ctx, 0,
&ocf_core_volume_properties,
&ocf_core_volume_extended);
}
void ocf_core_volume_type_deinit(ocf_ctx_t ctx)

View File

@@ -18,20 +18,6 @@
#define ocf_core_log(core, lvl, fmt, ...) \
ocf_core_log_prefix(core, lvl, ": ", fmt, ##__VA_ARGS__)
struct ocf_core_io {
bool dirty;
/*!< Indicates if io leaves dirty data */
struct ocf_request *req;
ctx_data_t *data;
log_sid_t sid;
/*!< Sequence ID */
uint64_t timestamp;
/*!< Timestamp */
};
struct ocf_metadata_uuid {
uint32_t size;
uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];

View File

@@ -15,8 +15,9 @@
/*
*
*/
int ocf_ctx_register_volume_type(ocf_ctx_t ctx, uint8_t type_id,
const struct ocf_volume_properties *properties)
int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
const struct ocf_volume_properties *properties,
const struct ocf_volume_extended *extended)
{
int result = 0;
@@ -31,7 +32,7 @@ int ocf_ctx_register_volume_type(ocf_ctx_t ctx, uint8_t type_id,
goto err;
}
ocf_volume_type_init(&ctx->volume_type[type_id], properties);
ocf_volume_type_init(&ctx->volume_type[type_id], properties, extended);
if (!ctx->volume_type[type_id])
result = -EINVAL;
@@ -50,6 +51,13 @@ err:
return result;
}
/*
 * Backward-compatible registration entry point: forwards to the
 * extended variant with no extended properties, which selects the
 * default IO allocator for the volume type.
 */
int ocf_ctx_register_volume_type(ocf_ctx_t ctx, uint8_t type_id,
		const struct ocf_volume_properties *properties)
{
	return ocf_ctx_register_volume_type_extended(ctx, type_id, properties,
			NULL);
}
/*
*
*/

View File

@@ -9,6 +9,7 @@
#include "ocf_env.h"
#include "ocf/ocf_ctx.h"
#include "ocf_logger_priv.h"
#include "ocf_volume_priv.h"
#define OCF_VOLUME_TYPE_MAX 8
@@ -45,6 +46,10 @@ struct ocf_ctx {
#define ocf_log_stack_trace(ctx) \
ocf_log_stack_trace_raw(&ctx->logger)
int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
const struct ocf_volume_properties *properties,
const struct ocf_volume_extended *extended);
/**
* @name Environment data buffer operations wrappers
* @{

View File

@@ -6,6 +6,7 @@
#include "ocf/ocf.h"
#include "ocf_io_priv.h"
#include "ocf_volume_priv.h"
#include "utils/utils_io_allocator.h"
/*
* This is io allocator dedicated for bottom devices.
@@ -26,52 +27,88 @@
* +-------------------------+ <----------------
*/
#define OCF_IO_TOTAL_SIZE(priv_size) (sizeof(struct ocf_io_meta) + \
sizeof(struct ocf_io) + priv_size)
#define OCF_IO_TOTAL(priv_size) \
(sizeof(struct ocf_io_internal) + priv_size)
env_allocator *ocf_io_allocator_create(uint32_t size, const char *name)
static int ocf_io_allocator_default_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name)
{
return env_allocator_create(OCF_IO_TOTAL_SIZE(size), name);
allocator->priv = env_allocator_create(OCF_IO_TOTAL(priv_size), name);
if (!allocator->priv)
return -OCF_ERR_NO_MEM;
return 0;
}
void ocf_io_allocator_destroy(env_allocator *allocator)
static void ocf_io_allocator_default_deinit(ocf_io_allocator_t allocator)
{
env_allocator_destroy(allocator);
env_allocator_destroy(allocator->priv);
allocator->priv = NULL;
}
/*
 * Default IO allocation: take an object from the env allocator pool
 * stored in allocator->priv. The volume/queue/addr/bytes/dir arguments
 * are unused here - presumably they are applied to the IO by the caller
 * (ocf_io_new) - TODO confirm.
 */
static void *ocf_io_allocator_default_new(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir)
{
return env_allocator_new(allocator->priv);
}
/* Return an IO object to the env allocator pool held in allocator->priv. */
static void ocf_io_allocator_default_del(ocf_io_allocator_t allocator, void *obj)
{
env_allocator_del(allocator->priv, obj);
}
/*
 * Default allocator type: pool-backed allocation via env_allocator,
 * used by volume types that do not provide a custom allocator.
 */
const struct ocf_io_allocator_type type_default = {
.ops = {
.allocator_init = ocf_io_allocator_default_init,
.allocator_deinit = ocf_io_allocator_default_deinit,
.allocator_new = ocf_io_allocator_default_new,
.allocator_del = ocf_io_allocator_default_del,
},
};
/* Accessor for the built-in default IO allocator type. */
ocf_io_allocator_type_t ocf_io_allocator_get_type_default(void)
{
return &type_default;
}
/*
* IO internal API
*/
void *ocf_io_get_meta(struct ocf_io* io)
static struct ocf_io_internal *ocf_io_get_internal(struct ocf_io* io)
{
return (void *)io - sizeof(struct ocf_io_meta);
return container_of(io, struct ocf_io_internal, io);
}
struct ocf_io *ocf_io_new(ocf_volume_t volume)
struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags)
{
struct ocf_io *io;
struct ocf_io_meta *io_meta;
void *data;
struct ocf_io_internal *ioi;
if (!ocf_refcnt_inc(&volume->refcnt))
return NULL;
data = env_allocator_new(volume->type->allocator);
if (!data) {
ioi = ocf_io_allocator_new(&volume->type->allocator, volume, queue,
addr, bytes, dir);
if (!ioi) {
ocf_refcnt_dec(&volume->refcnt);
return NULL;
}
io = data + sizeof(struct ocf_io_meta);
ioi->meta.volume = volume;
ioi->meta.ops = &volume->type->properties->io_ops;
env_atomic_set(&ioi->meta.ref_count, 1);
io_meta = ocf_io_get_meta(io);
ioi->io.io_queue = queue;
ioi->io.addr = addr;
ioi->io.bytes = bytes;
ioi->io.dir = dir;
ioi->io.io_class = io_class;
ioi->io.flags = flags;
io->volume = volume;
io->ops = &volume->type->properties->io_ops;
env_atomic_set(&io_meta->ref_count, 1);
return io;
return &ioi->io;
}
/*
@@ -83,22 +120,42 @@ void *ocf_io_get_priv(struct ocf_io* io)
return (void *)io + sizeof(struct ocf_io);
}
int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, uint32_t offset)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
return ioi->meta.ops->set_data(io, data, offset);
}
ctx_data_t *ocf_io_get_data(struct ocf_io *io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
return ioi->meta.ops->get_data(io);
}
void ocf_io_get(struct ocf_io *io)
{
struct ocf_io_meta *io_meta = ocf_io_get_meta(io);
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
env_atomic_inc_return(&io_meta->ref_count);
env_atomic_inc_return(&ioi->meta.ref_count);
}
void ocf_io_put(struct ocf_io *io)
{
struct ocf_io_meta *io_meta = ocf_io_get_meta(io);
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
if (env_atomic_dec_return(&io_meta->ref_count))
if (env_atomic_dec_return(&ioi->meta.ref_count))
return;
ocf_refcnt_dec(&io->volume->refcnt);
ocf_refcnt_dec(&ioi->meta.volume->refcnt);
env_allocator_del(io->volume->type->allocator,
(void *)io - sizeof(struct ocf_io_meta));
ocf_io_allocator_del(&ioi->meta.volume->type->allocator, (void *)ioi);
}
ocf_volume_t ocf_io_get_volume(struct ocf_io *io)
{
struct ocf_io_internal *ioi = ocf_io_get_internal(io);
return ioi->meta.volume;
}

View File

@@ -7,18 +7,28 @@
#define __OCF_IO_PRIV_H__
#include "ocf/ocf.h"
#include "ocf_request.h"
#include "utils/utils_io_allocator.h"
struct ocf_io_meta {
ocf_volume_t volume;
const struct ocf_io_ops *ops;
env_atomic ref_count;
struct ocf_request *req;
};
env_allocator *ocf_io_allocator_create(uint32_t size, const char *name);
void ocf_io_allocator_destroy(env_allocator *allocator);
struct ocf_io_internal {
struct ocf_io_meta meta;
struct ocf_io io;
};
struct ocf_io *ocf_io_new(ocf_volume_t volume);
int ocf_io_allocator_init(ocf_io_allocator_t allocator, ocf_io_allocator_type_t type,
uint32_t priv_size, const char *name);
struct ocf_io *ocf_io_new(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags);
static inline void ocf_io_start(struct ocf_io *io)
{

View File

@@ -90,10 +90,10 @@ void ocf_queue_run_single(ocf_queue_t q)
if (!io_req)
return;
if (io_req->io && io_req->io->handle)
io_req->io->handle(io_req->io, io_req);
if (io_req->ioi.io.handle)
io_req->ioi.io.handle(&io_req->ioi.io, io_req);
else
ocf_io_handle(io_req->io, io_req);
ocf_io_handle(&io_req->ioi.io, io_req);
}
void ocf_queue_run(ocf_queue_t q)

View File

@@ -201,6 +201,10 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
req->rw = rw;
req->part_id = PARTITION_DEFAULT;
req->discard.sector = BYTES_TO_SECTORS(addr);
req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
req->discard.handled = 0;
return req;
}
@@ -218,6 +222,24 @@ int ocf_req_alloc_map(struct ocf_request *req)
return 0;
}
/*
 * Allocate the core-line mapping array for a discard request.
 *
 * Requests no larger than MAX_TRIM_RQ_SIZE are mapped directly. Larger
 * discards are clipped to MAX_TRIM_RQ_SIZE - byte_length and the
 * core-line range are recomputed accordingly - so the mapping array
 * stays bounded; the remainder of the discard is presumably handled by
 * the discard engine iterating in MAX_TRIM_RQ_SIZE chunks - TODO
 * confirm against the caller.
 *
 * Returns 0 on success, non-zero on allocation failure (propagated from
 * ocf_req_alloc_map).
 */
int ocf_req_alloc_map_discard(struct ocf_request *req)
{
if (req->byte_length <= MAX_TRIM_RQ_SIZE)
return ocf_req_alloc_map(req);
/*
 * NOTE: For cache line sizes bigger than 8k a single-allocation mapping
 * can handle more than MAX_TRIM_RQ_SIZE, so for these cache line sizes
 * the discard request uses only part of the mapping array.
 */
req->byte_length = MAX_TRIM_RQ_SIZE;
req->core_line_last = ocf_bytes_2_lines(req->cache,
req->byte_position + req->byte_length - 1);
req->core_line_count = req->core_line_last - req->core_line_first + 1;
return ocf_req_alloc_map(req);
}
struct ocf_request *ocf_req_new_extended(ocf_queue_t queue, ocf_core_t core,
uint64_t addr, uint32_t bytes, int rw)
{
@@ -243,10 +265,6 @@ struct ocf_request *ocf_req_new_discard(ocf_queue_t queue, ocf_core_t core,
if (!req)
return NULL;
req->discard.sector = BYTES_TO_SECTORS(addr);
req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
req->discard.handled = 0;
return req;
}

View File

@@ -7,6 +7,7 @@
#define __OCF_REQUEST_H__
#include "ocf_env.h"
#include "ocf_io_priv.h"
struct ocf_req_allocator;
@@ -103,6 +104,9 @@ struct ocf_req_discard_info {
* @brief OCF IO request
*/
struct ocf_request {
struct ocf_io_internal ioi;
/*!< OCF IO associated with request */
env_atomic ref_count;
/*!< Reference usage count, once OCF request reaches zero it
* will be de-initialed. Get/Put method are intended to modify
@@ -179,6 +183,12 @@ struct ocf_request {
uint8_t master_io_req_type : 2;
/*!< Core device request context type */
log_sid_t sid;
/*!< Tracing sequence ID */
uint64_t timestamp;
/*!< Tracing timestamp */
ocf_queue_t io_queue;
/*!< I/O queue handle for which request should be submitted */
@@ -191,9 +201,6 @@ struct ocf_request {
void (*complete)(struct ocf_request *ocf_req, int error);
/*!< Request completion function */
struct ocf_io *io;
/*!< OCF IO associated with request */
struct ocf_req_discard_info discard;
struct ocf_map_info *map;
@@ -242,6 +249,16 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
*/
int ocf_req_alloc_map(struct ocf_request *req);
/**
* @brief Allocate OCF request map for discard request
*
* @param req OCF request
*
* @retval 0 Allocation succeed
* @retval non-zero Allocation failed
*/
int ocf_req_alloc_map_discard(struct ocf_request *req);
/**
* @brief Allocate new OCF request with NOIO map allocation for huge request
*

View File

@@ -40,19 +40,17 @@ static inline uint64_t ocf_trace_seq_id(ocf_cache_t cache)
return env_atomic64_inc_return(&cache->trace.trace_seq_ref);
}
static inline void ocf_trace_init_io(struct ocf_core_io *io, ocf_cache_t cache)
static inline void ocf_trace_init_io(struct ocf_request *req)
{
io->timestamp = env_ticks_to_nsecs(env_get_tick_count());
io->sid = ocf_trace_seq_id(cache);
req->timestamp = env_ticks_to_nsecs(env_get_tick_count());
req->sid = ocf_trace_seq_id(req->cache);
}
static inline void ocf_trace_prep_io_event(struct ocf_event_io *ev,
struct ocf_core_io *io, ocf_event_operation_t op)
struct ocf_request *req, ocf_event_operation_t op)
{
struct ocf_request *req = io->req;
ocf_event_init_hdr(&ev->hdr, ocf_event_type_io, io->sid,
io->timestamp, sizeof(*ev));
ocf_event_init_hdr(&ev->hdr, ocf_event_type_io, req->sid,
req->timestamp, sizeof(*ev));
ev->addr = req->byte_position;
if (op == ocf_event_operation_discard)
@@ -63,7 +61,7 @@ static inline void ocf_trace_prep_io_event(struct ocf_event_io *ev,
ev->operation = op;
ev->core_id = ocf_core_get_id(req->core);
ev->io_class = req->io->io_class;
ev->io_class = req->ioi.io.io_class;
}
static inline void ocf_trace_push(ocf_queue_t queue, void *trace, uint32_t size)
@@ -103,34 +101,31 @@ static inline void ocf_trace_push(ocf_queue_t queue, void *trace, uint32_t size)
env_atomic64_dec(&queue->trace_ref_cntr);
}
static inline void ocf_trace_io(struct ocf_core_io *io, ocf_event_operation_t dir, ocf_cache_t cache)
static inline void ocf_trace_io(struct ocf_request *req,
ocf_event_operation_t dir)
{
struct ocf_event_io ev;
struct ocf_request *req;
if (!cache->trace.trace_callback)
if (!req->cache->trace.trace_callback)
return;
req = io->req;
ocf_trace_prep_io_event(&ev, io, dir);
ocf_trace_prep_io_event(&ev, req, dir);
ocf_trace_push(req->io_queue, &ev, sizeof(ev));
}
static inline void ocf_trace_io_cmpl(struct ocf_core_io *io, ocf_cache_t cache)
static inline void ocf_trace_io_cmpl(struct ocf_request *req)
{
struct ocf_event_io_cmpl ev;
struct ocf_request *req;
if (!cache->trace.trace_callback)
if (!req->cache->trace.trace_callback)
return;
req = io->req;
ocf_event_init_hdr(&ev.hdr, ocf_event_type_io_cmpl,
ocf_trace_seq_id(cache),
ocf_trace_seq_id(req->cache),
env_ticks_to_nsecs(env_get_tick_count()),
sizeof(ev));
ev.rsid = io->sid;
ev.rsid = req->sid;
ev.is_hit = ocf_engine_is_hit(req);
ocf_trace_push(req->io_queue, &ev, sizeof(ev));

View File

@@ -16,9 +16,11 @@
*/
int ocf_volume_type_init(struct ocf_volume_type **type,
const struct ocf_volume_properties *properties)
const struct ocf_volume_properties *properties,
const struct ocf_volume_extended *extended)
{
const struct ocf_volume_ops *ops = &properties->ops;
ocf_io_allocator_type_t allocator_type;
struct ocf_volume_type *new_type;
int ret;
@@ -34,12 +36,15 @@ int ocf_volume_type_init(struct ocf_volume_type **type,
if (!new_type)
return -OCF_ERR_NO_MEM;
new_type->allocator = ocf_io_allocator_create(
if (extended && extended->allocator_type)
allocator_type = extended->allocator_type;
else
allocator_type = ocf_io_allocator_get_type_default();
ret = ocf_io_allocator_init(&new_type->allocator, allocator_type,
properties->io_priv_size, properties->name);
if (!new_type->allocator) {
ret = -OCF_ERR_NO_MEM;
if (ret)
goto err;
}
new_type->properties = properties;
@@ -54,7 +59,7 @@ err:
void ocf_volume_type_deinit(struct ocf_volume_type *type)
{
ocf_io_allocator_destroy(type->allocator);
ocf_io_allocator_deinit(&type->allocator);
env_free(type);
}
@@ -227,47 +232,55 @@ int ocf_volume_is_atomic(ocf_volume_t volume)
return volume->type->properties->caps.atomic_writes;
}
struct ocf_io *ocf_volume_new_io(ocf_volume_t volume)
struct ocf_io *ocf_volume_new_io(ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir,
uint32_t io_class, uint64_t flags)
{
return ocf_io_new(volume);
return ocf_io_new(volume, queue, addr, bytes, dir, io_class, flags);
}
void ocf_volume_submit_io(struct ocf_io *io)
{
ENV_BUG_ON(!io->volume->type->properties->ops.submit_io);
ocf_volume_t volume = ocf_io_get_volume(io);
if (!io->volume->opened)
ENV_BUG_ON(!volume->type->properties->ops.submit_io);
if (!volume->opened)
io->end(io, -OCF_ERR_IO);
io->volume->type->properties->ops.submit_io(io);
volume->type->properties->ops.submit_io(io);
}
void ocf_volume_submit_flush(struct ocf_io *io)
{
ENV_BUG_ON(!io->volume->type->properties->ops.submit_flush);
ocf_volume_t volume = ocf_io_get_volume(io);
if (!io->volume->opened)
ENV_BUG_ON(!volume->type->properties->ops.submit_flush);
if (!volume->opened)
io->end(io, -OCF_ERR_IO);
if (!io->volume->type->properties->ops.submit_flush) {
if (!volume->type->properties->ops.submit_flush) {
ocf_io_end(io, 0);
return;
}
io->volume->type->properties->ops.submit_flush(io);
volume->type->properties->ops.submit_flush(io);
}
void ocf_volume_submit_discard(struct ocf_io *io)
{
if (!io->volume->opened)
ocf_volume_t volume = ocf_io_get_volume(io);
if (!volume->opened)
io->end(io, -OCF_ERR_IO);
if (!io->volume->type->properties->ops.submit_discard) {
if (!volume->type->properties->ops.submit_discard) {
ocf_io_end(io, 0);
return;
}
io->volume->type->properties->ops.submit_discard(io);
volume->type->properties->ops.submit_discard(io);
}
int ocf_volume_open(ocf_volume_t volume, void *volume_params)

View File

@@ -9,10 +9,15 @@
#include "ocf_env.h"
#include "ocf_io_priv.h"
#include "utils/utils_refcnt.h"
#include "utils/utils_io_allocator.h"
struct ocf_volume_extended {
ocf_io_allocator_type_t allocator_type;
};
struct ocf_volume_type {
const struct ocf_volume_properties *properties;
env_allocator *allocator;
struct ocf_io_allocator allocator;
};
struct ocf_volume {
@@ -31,7 +36,8 @@ struct ocf_volume {
};
int ocf_volume_type_init(struct ocf_volume_type **type,
const struct ocf_volume_properties *properties);
const struct ocf_volume_properties *properties,
const struct ocf_volume_extended *extended);
void ocf_volume_type_deinit(struct ocf_volume_type *type);
@@ -42,16 +48,20 @@ void ocf_volume_set_uuid(ocf_volume_t volume,
static inline void ocf_volume_submit_metadata(struct ocf_io *io)
{
ENV_BUG_ON(!io->volume->type->properties->ops.submit_metadata);
ocf_volume_t volume = ocf_io_get_volume(io);
io->volume->type->properties->ops.submit_metadata(io);
ENV_BUG_ON(!volume->type->properties->ops.submit_metadata);
volume->type->properties->ops.submit_metadata(io);
}
static inline void ocf_volume_submit_write_zeroes(struct ocf_io *io)
{
ENV_BUG_ON(!io->volume->type->properties->ops.submit_write_zeroes);
ocf_volume_t volume = ocf_io_get_volume(io);
io->volume->type->properties->ops.submit_write_zeroes(io);
ENV_BUG_ON(!volume->type->properties->ops.submit_write_zeroes);
volume->type->properties->ops.submit_write_zeroes(io);
}
#endif /*__OCF_VOLUME_PRIV_H__ */

View File

@@ -270,16 +270,14 @@ static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
io = ocf_volume_new_io(&req->cache->device->volume);
io = ocf_new_cache_io(req->cache, req->io_queue, 0, 0, OCF_WRITE, 0, 0);
if (!io) {
ocf_metadata_error(req->cache);
req->error = -OCF_ERR_NO_MEM;
return -OCF_ERR_NO_MEM;
}
ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);
ocf_io_set_queue(io, req->io_queue);
ocf_volume_submit_flush(io);
@@ -395,6 +393,7 @@ static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
ocf_core_id_t core_id = OCF_CORE_MAX;
struct ocf_cache *cache = req->cache;
struct ocf_map_info *iter = req->map;
ocf_core_t core;
struct ocf_io *io;
OCF_DEBUG_TRACE(req->cache);
@@ -419,15 +418,15 @@ static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
env_atomic_inc(&req->req_remaining);
io = ocf_new_core_io(cache, core_id);
core = ocf_cache_get_core(cache, core_id);
io = ocf_new_core_io(core, req->io_queue, 0, 0,
OCF_WRITE, 0, 0);
if (!io) {
_ocf_cleaner_flush_cores_io_end(iter, req, -OCF_ERR_NO_MEM);
continue;
}
ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);
ocf_io_set_queue(io, req->io_queue);
ocf_volume_submit_flush(io);
}
@@ -480,25 +479,24 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
{
uint64_t addr, offset;
int err;
struct ocf_cache *cache = req->cache;
ocf_cache_t cache = req->cache;
ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
struct ocf_io *io;
struct ocf_counters_block *core_stats =
&cache->core[iter->core_id].counters->core_blocks;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
iter->coll_idx);
io = ocf_new_core_io(cache, iter->core_id);
if (!io)
goto error;
addr = (ocf_line_size(cache) * iter->core_line)
+ SECTORS_TO_BYTES(begin);
offset = (ocf_line_size(cache) * iter->hash_key)
+ SECTORS_TO_BYTES(begin);
ocf_io_configure(io, addr, SECTORS_TO_BYTES(end - begin), OCF_WRITE,
part_id, 0);
ocf_io_set_queue(io, req->io_queue);
io = ocf_new_core_io(core, req->io_queue, addr,
SECTORS_TO_BYTES(end - begin), OCF_WRITE, part_id, 0);
if (!io)
goto error;
err = ocf_io_set_data(io, req->data, offset);
if (err) {
ocf_io_put(io);
@@ -660,14 +658,6 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)
cache_stats = &cache->core[iter->core_id].
counters->cache_blocks;
io = ocf_new_cache_io(cache);
if (!io) {
/* Allocation error */
iter->invalid = true;
_ocf_cleaner_set_error(req);
continue;
}
OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
iter->coll_idx);
@@ -680,10 +670,17 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)
part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);
io = ocf_new_cache_io(cache, req->io_queue,
addr, ocf_line_size(cache),
OCF_READ, part_id, 0);
if (!io) {
/* Allocation error */
iter->invalid = true;
_ocf_cleaner_set_error(req);
continue;
}
ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
ocf_io_configure(io, addr, ocf_line_size(cache), OCF_READ,
part_id, 0);
ocf_io_set_queue(io, req->io_queue);
err = ocf_io_set_data(io, req->data, offset);
if (err) {
ocf_io_put(io);

View File

@@ -31,11 +31,10 @@ void ocf_submit_volume_flush(ocf_volume_t volume,
{
struct ocf_io *io;
io = ocf_volume_new_io(volume);
io = ocf_volume_new_io(volume, NULL, 0, 0, OCF_WRITE, 0, 0);
if (!io)
OCF_CMPL_RET(priv, -OCF_ERR_NO_MEM);
ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, cmpl, priv, _ocf_volume_flush_end);
ocf_volume_submit_flush(io);
@@ -74,7 +73,10 @@ void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
context->priv = priv;
while (length) {
io = ocf_volume_new_io(volume);
bytes = OCF_MIN(length, max_length);
io = ocf_volume_new_io(volume, NULL, addr, bytes,
OCF_WRITE, 0, 0);
if (!io) {
context->error = -OCF_ERR_NO_MEM;
break;
@@ -82,9 +84,6 @@ void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
env_atomic_inc(&context->req_remaining);
bytes = OCF_MIN(length, max_length);
ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
ocf_volume_submit_discard(io);
@@ -116,7 +115,10 @@ void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
context->priv = priv;
while (length) {
io = ocf_volume_new_io(volume);
bytes = OCF_MIN(length, max_length);
io = ocf_volume_new_io(volume, NULL, addr, bytes,
OCF_WRITE, 0, 0);
if (!io) {
context->error = -OCF_ERR_NO_MEM;
break;
@@ -124,9 +126,6 @@ void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
env_atomic_inc(&context->req_remaining);
bytes = OCF_MIN(length, max_length);
ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
ocf_volume_submit_write_zeroes(io);
@@ -181,7 +180,7 @@ void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
context->cmpl = cmpl;
context->priv = priv;
io = ocf_volume_new_io(&cache->device->volume);
io = ocf_new_cache_io(cache, NULL, addr, PAGE_SIZE, dir, 0, 0);
if (!io) {
result = -OCF_ERR_NO_MEM;
goto err_io;
@@ -200,7 +199,6 @@ void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
if (result)
goto err_set_data;
ocf_io_configure(io, addr, PAGE_SIZE, dir, 0, 0);
ocf_io_set_cmpl(io, context, NULL, ocf_submit_cache_page_end);
ocf_volume_submit_io(io);
@@ -230,8 +228,8 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
uint64_t size, unsigned int reqs, ocf_req_end_t callback)
{
struct ocf_counters_block *cache_stats;
uint64_t flags = req->io ? req->io->flags : 0;
uint32_t class = req->io ? req->io->io_class : 0;
uint64_t flags = req->ioi.io.flags;
uint32_t class = req->ioi.io.io_class;
uint64_t addr, bytes, total_bytes = 0;
struct ocf_io *io;
int err;
@@ -245,12 +243,6 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
cache_stats = &req->core->counters->cache_blocks;
if (reqs == 1) {
io = ocf_new_cache_io(cache);
if (!io) {
callback(req, -OCF_ERR_NO_MEM);
goto update_stats;
}
addr = ocf_metadata_map_lg2phy(cache,
req->map[first_cl].coll_idx);
addr *= ocf_line_size(cache);
@@ -258,8 +250,13 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
addr += ((req->byte_position + offset) % ocf_line_size(cache));
bytes = size;
ocf_io_configure(io, addr, bytes, dir, class, flags);
ocf_io_set_queue(io, req->io_queue);
io = ocf_new_cache_io(cache, req->io_queue,
addr, bytes, dir, class, flags);
if (!io) {
callback(req, -OCF_ERR_NO_MEM);
goto update_stats;
}
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
err = ocf_io_set_data(io, req->data, offset);
@@ -277,15 +274,6 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
/* Issue requests to cache. */
for (i = 0; i < reqs; i++) {
io = ocf_new_cache_io(cache);
if (!io) {
/* Finish all IOs which left with ERROR */
for (; i < reqs; i++)
callback(req, -OCF_ERR_NO_MEM);
goto update_stats;
}
addr = ocf_metadata_map_lg2phy(cache,
req->map[first_cl + i].coll_idx);
addr *= ocf_line_size(cache);
@@ -309,8 +297,15 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
bytes = OCF_MIN(bytes, size - total_bytes);
ENV_BUG_ON(bytes == 0);
ocf_io_configure(io, addr, bytes, dir, class, flags);
ocf_io_set_queue(io, req->io_queue);
io = ocf_new_cache_io(cache, req->io_queue,
addr, bytes, dir, class, flags);
if (!io) {
/* Finish all IOs which left with ERROR */
for (; i < reqs; i++)
callback(req, -OCF_ERR_NO_MEM);
goto update_stats;
}
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
err = ocf_io_set_data(io, req->data, offset + total_bytes);
@@ -338,8 +333,8 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
ocf_req_end_t callback)
{
struct ocf_counters_block *core_stats;
uint64_t flags = req->io ? req->io->flags : 0;
uint32_t class = req->io ? req->io->io_class : 0;
uint64_t flags = req->ioi.io.flags;
uint32_t class = req->ioi.io.io_class;
int dir = req->rw;
struct ocf_io *io;
int err;
@@ -350,15 +345,13 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
else if (dir == OCF_READ)
env_atomic64_add(req->byte_length, &core_stats->read_bytes);
io = ocf_volume_new_io(volume);
io = ocf_volume_new_io(volume, req->io_queue, req->byte_position,
req->byte_length, dir, class, flags);
if (!io) {
callback(req, -OCF_ERR_NO_MEM);
return;
}
ocf_io_configure(io, req->byte_position, req->byte_length, dir,
class, flags);
ocf_io_set_queue(io, req->io_queue);
ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
err = ocf_io_set_data(io, req->data, 0);
if (err) {

View File

@@ -64,17 +64,21 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
struct ocf_request *req, int dir, uint64_t offset,
uint64_t size, unsigned int reqs, ocf_req_end_t callback);
static inline struct ocf_io *ocf_new_cache_io(struct ocf_cache *cache)
static inline struct ocf_io *ocf_new_cache_io(ocf_cache_t cache,
ocf_queue_t queue, uint64_t addr, uint32_t bytes,
uint32_t dir, uint32_t io_class, uint64_t flags)
{
return ocf_volume_new_io(&cache->device->volume);
return ocf_volume_new_io(ocf_cache_get_volume(cache), queue,
addr, bytes, dir, io_class, flags);
}
static inline struct ocf_io *ocf_new_core_io(struct ocf_cache *cache,
ocf_core_id_t core_id)
static inline struct ocf_io *ocf_new_core_io(ocf_core_t core,
ocf_queue_t queue, uint64_t addr, uint32_t bytes,
uint32_t dir, uint32_t io_class, uint64_t flags)
{
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
return ocf_volume_new_io(&cache->core[core_id].volume);
return ocf_volume_new_io(ocf_core_get_volume(core), queue,
addr, bytes, dir, io_class, flags);
}
#endif /* UTILS_IO_H_ */

View File

@@ -0,0 +1,62 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_IO_ALLOCATOR_H__
#define __UTILS_IO_ALLOCATOR_H__
#include "ocf/ocf_types.h"
typedef struct ocf_io_allocator *ocf_io_allocator_t;
struct ocf_io_allocator_ops {
int (*allocator_init)(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name);
void (*allocator_deinit)(ocf_io_allocator_t allocator);
void *(*allocator_new)(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir);
void (*allocator_del)(ocf_io_allocator_t allocator, void *obj);
};
struct ocf_io_allocator_type {
struct ocf_io_allocator_ops ops;
};
typedef const struct ocf_io_allocator_type *ocf_io_allocator_type_t;
struct ocf_io_allocator {
const struct ocf_io_allocator_type *type;
void *priv;
};
static inline void *ocf_io_allocator_new(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir)
{
return allocator->type->ops.allocator_new(allocator, volume, queue,
addr, bytes, dir);
}
static inline void ocf_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
{
allocator->type->ops.allocator_del(allocator, obj);
}
static inline int ocf_io_allocator_init(ocf_io_allocator_t allocator,
ocf_io_allocator_type_t type, uint32_t size, const char *name)
{
allocator->type = type;
return allocator->type->ops.allocator_init(allocator, size, name);
}
static inline void ocf_io_allocator_deinit(ocf_io_allocator_t allocator)
{
allocator->type->ops.allocator_deinit(allocator);
}
ocf_io_allocator_type_t ocf_io_allocator_get_type_default(void);
#endif /* __UTILS_IO_ALLOCATOR__ */