Allocate io and request in a single allocation

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
Robert Baldyga 2019-06-03 11:28:03 +02:00
parent 61414f889e
commit e64bb50a4b
20 changed files with 209 additions and 163 deletions

View File

@ -254,6 +254,8 @@ int ocf_engine_hndl_req(struct ocf_request *req,
if (!req->io_if)
return -OCF_ERR_INVAL;
ocf_req_get(req);
/* Till OCF engine is not synchronous fully need to push OCF request
* to into OCF workers
*/
@ -273,6 +275,8 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req,
if (!io_if)
return -OCF_ERR_INVAL;
ocf_req_get(req);
switch (req->rw) {
case OCF_READ:
ret = io_if->read(req);
@ -281,9 +285,12 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req,
ret = io_if->write(req);
break;
default:
return OCF_FAST_PATH_NO;
ret = OCF_FAST_PATH_NO;
}
if (ret == OCF_FAST_PATH_NO)
ocf_req_put(req);
return ret;
}
@ -299,6 +306,8 @@ static void ocf_engine_hndl_2dc_req(struct ocf_request *req)
void ocf_engine_hndl_discard_req(struct ocf_request *req)
{
ocf_req_get(req);
if (req->d2c) {
ocf_engine_hndl_2dc_req(req);
return;
@ -314,6 +323,8 @@ void ocf_engine_hndl_discard_req(struct ocf_request *req)
void ocf_engine_hndl_ops_req(struct ocf_request *req)
{
ocf_req_get(req);
if (req->d2c)
req->io_if = &IO_IFS[OCF_IO_D2C_IF];
else

View File

@ -42,7 +42,7 @@ int ocf_io_d2c(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@ -255,7 +255,7 @@ int ocf_discard(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
if (req->rw == OCF_READ) {
req->complete(req, -OCF_ERR_INVAL);

View File

@ -128,7 +128,7 @@ int ocf_read_fast(struct ocf_request *req)
hit = ocf_engine_is_hit(req);
if (hit) {
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
lock = ocf_req_trylock_rd(req);
}
@ -198,7 +198,7 @@ int ocf_write_fast(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
if (mapped) {
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
lock = ocf_req_trylock_wr(req);
}

View File

@ -110,7 +110,7 @@ int ocf_read_pt(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@ -217,7 +217,7 @@ int ocf_read_generic(struct ocf_request *req)
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
/* There are conditions to bypass IO */

View File

@ -40,7 +40,7 @@ int ocf_write_wa(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@ -172,7 +172,7 @@ int ocf_write_wb(struct ocf_request *req)
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Not sure if we need this. */
ocf_req_get(req);

View File

@ -140,7 +140,7 @@ int ocf_write_wi(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@ -207,7 +207,7 @@ int ocf_read_wo(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@ -167,7 +167,7 @@ int ocf_write_wt(struct ocf_request *req)
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
ocf_io_start(req->io);
ocf_io_start(&req->ioi.io);
/* Get OCF request - increase reference counter */
ocf_req_get(req);

View File

@ -134,9 +134,12 @@ int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
/* *** HELPER FUNCTIONS *** */
static inline struct ocf_core_io *ocf_io_to_core_io(struct ocf_io *io)
static inline struct ocf_request *ocf_io_to_req(struct ocf_io *io)
{
return ocf_io_get_priv(io);
struct ocf_io_internal *ioi;
ioi = container_of(io, struct ocf_io_internal, io);
return container_of(ioi, struct ocf_request, ioi);
}
static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
@ -146,21 +149,19 @@ static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
return core_volume->core;
}
static inline int ocf_io_set_dirty(ocf_cache_t cache,
struct ocf_core_io *core_io)
static inline int ocf_io_set_dirty(struct ocf_request *req)
{
core_io->dirty = !!ocf_refcnt_inc(&cache->refcnt.dirty);
return core_io->dirty ? 0 : -EBUSY;
req->dirty = !!ocf_refcnt_inc(&req->cache->refcnt.dirty);
return req->dirty ? 0 : -EBUSY;
}
static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
ocf_cache_t cache)
static inline void dec_counter_if_req_was_dirty(struct ocf_request *req)
{
if (!core_io->dirty)
if (!req->dirty)
return;
core_io->dirty = 0;
ocf_refcnt_dec(&cache->refcnt.dirty);
req->dirty = 0;
ocf_refcnt_dec(&req->cache->refcnt.dirty);
}
static inline int ocf_core_validate_io(struct ocf_io *io)
@ -195,21 +196,20 @@ static inline int ocf_core_validate_io(struct ocf_io *io)
static void ocf_req_complete(struct ocf_request *req, int error)
{
/* Log trace */
ocf_trace_io_cmpl(ocf_io_to_core_io(req->io), req->cache);
ocf_trace_io_cmpl(req);
/* Complete IO */
ocf_io_end(req->io, error);
ocf_io_end(&req->ioi.io, error);
dec_counter_if_req_was_dirty(ocf_io_to_core_io(req->io), req->cache);
dec_counter_if_req_was_dirty(req);
/* Invalidate OCF IO, it is not valid after completion */
ocf_io_put(req->io);
req->io = NULL;
ocf_io_put(&req->ioi.io);
}
void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_req_cache_mode_t req_cache_mode;
ocf_core_t core;
ocf_cache_t cache;
@ -219,16 +219,15 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
ret = ocf_core_validate_io(io);
if (ret < 0) {
io->end(io, ret);
ocf_io_end(io, ret);
return;
}
core_io = ocf_io_to_core_io(io);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
ocf_trace_init_io(core_io, cache);
ocf_trace_init_io(req);
if (unlikely(!env_bit_test(ocf_cache_state_running,
&cache->cache_state))) {
@ -245,50 +244,40 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
if (io->dir == OCF_WRITE &&
ocf_req_cache_mode_has_lazy_write(req_cache_mode) &&
ocf_io_set_dirty(cache, core_io)) {
ocf_io_set_dirty(req)) {
req_cache_mode = ocf_req_cache_mode_wt;
}
core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
io->dir);
if (!core_io->req) {
dec_counter_if_req_was_dirty(core_io, cache);
io->end(io, -OCF_ERR_NO_MEM);
return;
}
if (core_io->req->d2c)
if (req->d2c)
req_cache_mode = ocf_req_cache_mode_d2c;
core_io->req->part_id = ocf_part_class2id(cache, io->io_class);
core_io->req->data = core_io->data;
core_io->req->complete = ocf_req_complete;
core_io->req->io = io;
req->core = core;
req->complete = ocf_req_complete;
req->part_id = ocf_part_class2id(cache, io->io_class);
ocf_seq_cutoff_update(core, core_io->req);
ocf_seq_cutoff_update(core, req);
ocf_core_update_stats(core, io);
if (io->dir == OCF_WRITE)
ocf_trace_io(core_io, ocf_event_operation_wr, cache);
ocf_trace_io(req, ocf_event_operation_wr);
else if (io->dir == OCF_READ)
ocf_trace_io(core_io, ocf_event_operation_rd, cache);
ocf_trace_io(req, ocf_event_operation_rd);
ocf_io_get(io);
ret = ocf_engine_hndl_req(core_io->req, req_cache_mode);
ret = ocf_engine_hndl_req(req, req_cache_mode);
if (ret) {
dec_counter_if_req_was_dirty(core_io, cache);
ocf_req_put(core_io->req);
io->end(io, ret);
dec_counter_if_req_was_dirty(req);
ocf_io_end(io, ret);
}
}
int ocf_core_submit_io_fast(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_req_cache_mode_t req_cache_mode;
struct ocf_event_io trace_event;
struct ocf_request *req;
ocf_core_t core;
ocf_cache_t cache;
int fast;
@ -300,8 +289,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
if (ret < 0)
return ret;
core_io = ocf_io_to_core_io(io);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
@ -311,11 +299,17 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
return 0;
}
ret = ocf_req_alloc_map(req);
if (ret) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
return 0;
}
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (io->dir == OCF_WRITE &&
ocf_req_cache_mode_has_lazy_write(req_cache_mode) &&
ocf_io_set_dirty(cache, core_io)) {
ocf_io_set_dirty(req)) {
req_cache_mode = ocf_req_cache_mode_wt;
}
@ -335,35 +329,22 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
req_cache_mode = ocf_req_cache_mode_fast;
}
core_io->req = ocf_req_new_extended(io->io_queue, core,
io->addr, io->bytes, io->dir);
// We need additional pointer to req in case completion arrives before
// we leave this function and core_io is freed
req = core_io->req;
if (!req) {
dec_counter_if_req_was_dirty(core_io, cache);
io->end(io, -OCF_ERR_NO_MEM);
return 0;
}
if (req->d2c) {
dec_counter_if_req_was_dirty(core_io, cache);
ocf_req_put(req);
dec_counter_if_req_was_dirty(req);
return -OCF_ERR_IO;
}
req->part_id = ocf_part_class2id(cache, io->io_class);
req->data = core_io->data;
req->core = core;
req->complete = ocf_req_complete;
req->io = io;
req->part_id = ocf_part_class2id(cache, io->io_class);
ocf_core_update_stats(core, io);
if (cache->trace.trace_callback) {
if (io->dir == OCF_WRITE)
ocf_trace_prep_io_event(&trace_event, core_io, ocf_event_operation_wr);
ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_wr);
else if (io->dir == OCF_READ)
ocf_trace_prep_io_event(&trace_event, core_io, ocf_event_operation_rd);
ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_rd);
}
ocf_io_get(io);
@ -375,10 +356,9 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
return 0;
}
dec_counter_if_req_was_dirty(core_io, cache);
dec_counter_if_req_was_dirty(req);
ocf_io_put(io);
ocf_req_put(req);
return -OCF_ERR_IO;
}
@ -389,7 +369,7 @@ static void ocf_core_volume_submit_io(struct ocf_io *io)
static void ocf_core_volume_submit_flush(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_core_t core;
ocf_cache_t cache;
int ret;
@ -402,8 +382,7 @@ static void ocf_core_volume_submit_flush(struct ocf_io *io)
return;
}
core_io = ocf_io_to_core_io(io);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
@ -413,25 +392,18 @@ static void ocf_core_volume_submit_flush(struct ocf_io *io)
return;
}
core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
io->dir);
if (!core_io->req) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
return;
}
req->core = core;
req->complete = ocf_req_complete;
core_io->req->complete = ocf_req_complete;
core_io->req->io = io;
core_io->req->data = core_io->data;
ocf_trace_io(core_io, ocf_event_operation_flush, cache);
ocf_trace_io(req, ocf_event_operation_flush);
ocf_io_get(io);
ocf_engine_hndl_ops_req(core_io->req);
ocf_engine_hndl_ops_req(req);
}
static void ocf_core_volume_submit_discard(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
ocf_core_t core;
ocf_cache_t cache;
int ret;
@ -444,8 +416,7 @@ static void ocf_core_volume_submit_discard(struct ocf_io *io)
return;
}
core_io = ocf_io_to_core_io(io);
req = ocf_io_to_req(io);
core = ocf_volume_to_core(ocf_io_get_volume(io));
cache = ocf_core_get_cache(core);
@ -455,20 +426,19 @@ static void ocf_core_volume_submit_discard(struct ocf_io *io)
return;
}
core_io->req = ocf_req_new_discard(io->io_queue, core,
io->addr, io->bytes, OCF_WRITE);
if (!core_io->req) {
ret = ocf_req_alloc_map_discard(req);
if (ret) {
ocf_io_end(io, -OCF_ERR_NO_MEM);
return;
}
core_io->req->complete = ocf_req_complete;
core_io->req->io = io;
core_io->req->data = core_io->data;
req->core = core;
req->complete = ocf_req_complete;
ocf_trace_io(core_io, ocf_event_operation_discard, cache);
ocf_trace_io(req, ocf_event_operation_discard);
ocf_io_get(io);
ocf_engine_hndl_discard_req(core_io->req);
ocf_engine_hndl_discard_req(req);
}
/* *** VOLUME OPS *** */
@ -508,32 +478,32 @@ static uint64_t ocf_core_volume_get_byte_length(ocf_volume_t volume)
static int ocf_core_io_set_data(struct ocf_io *io,
ctx_data_t *data, uint32_t offset)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
OCF_CHECK_NULL(io);
if (!data || offset)
return -OCF_ERR_INVAL;
core_io = ocf_io_to_core_io(io);
core_io->data = data;
req = ocf_io_to_req(io);
req->data = data;
return 0;
}
static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
{
struct ocf_core_io *core_io;
struct ocf_request *req;
OCF_CHECK_NULL(io);
core_io = ocf_io_to_core_io(io);
return core_io->data;
req = ocf_io_to_req(io);
return req->data;
}
const struct ocf_volume_properties ocf_core_volume_properties = {
.name = "OCF Core",
.io_priv_size = sizeof(struct ocf_core_io),
.io_priv_size = 0, /* Not used - custom allocator */
.volume_priv_size = sizeof(struct ocf_core_volume),
.caps = {
.atomic_writes = 0,
@ -555,10 +525,55 @@ const struct ocf_volume_properties ocf_core_volume_properties = {
},
};
/* No-op init: this allocator keeps no per-allocator state, since each io
 * is carved out of its owning request in ocf_core_io_allocator_new(). */
static int ocf_core_io_allocator_init(ocf_io_allocator_t allocator,
uint32_t priv_size, const char *name)
{
return 0;
}
/* No-op counterpart of the init hook - nothing was set up, nothing to free. */
static void ocf_core_io_allocator_deinit(ocf_io_allocator_t allocator)
{
}
/* Allocate io and request in a single allocation: ocf_req_new() creates the
 * request, and the request's embedded ioi member is handed back as the io
 * object. The core is not known at this point (NULL); the submit paths
 * assign req->core later. Returns NULL on allocation failure. */
static void *ocf_core_io_allocator_new(ocf_io_allocator_t allocator,
ocf_volume_t volume, ocf_queue_t queue,
uint64_t addr, uint32_t bytes, uint32_t dir)
{
struct ocf_request *req;
req = ocf_req_new(queue, NULL, addr, bytes, dir);
if (!req)
return NULL;
/* The io lives inside the request - no separate io allocation. */
return &req->ioi;
}
/* Release an io: recover the owning request from the embedded ioi member
 * and drop one request reference; the request (and thus the io) is freed
 * once its reference count reaches zero. */
static void ocf_core_io_allocator_del(ocf_io_allocator_t allocator, void *obj)
{
struct ocf_request *req;
req = container_of(obj, struct ocf_request, ioi);
ocf_req_put(req);
}
/* Custom io allocator ops - replaces the default per-io allocation
 * (io_priv_size is 0 in ocf_core_volume_properties). */
const struct ocf_io_allocator_type ocf_core_io_allocator_type = {
.ops = {
.allocator_init = ocf_core_io_allocator_init,
.allocator_deinit = ocf_core_io_allocator_deinit,
.allocator_new = ocf_core_io_allocator_new,
.allocator_del = ocf_core_io_allocator_del,
},
};
/* Extended volume descriptor wiring the custom allocator into the core
 * volume type registration. */
const struct ocf_volume_extended ocf_core_volume_extended = {
.allocator_type = &ocf_core_io_allocator_type,
};
int ocf_core_volume_type_init(ocf_ctx_t ctx)
{
return ocf_ctx_register_volume_type(ctx, 0,
&ocf_core_volume_properties);
return ocf_ctx_register_volume_type_extended(ctx, 0,
&ocf_core_volume_properties,
&ocf_core_volume_extended);
}
void ocf_core_volume_type_deinit(ocf_ctx_t ctx)

View File

@ -18,20 +18,6 @@
#define ocf_core_log(core, lvl, fmt, ...) \
ocf_core_log_prefix(core, lvl, ": ", fmt, ##__VA_ARGS__)
struct ocf_core_io {
bool dirty;
/*!< Indicates if io leaves dirty data */
struct ocf_request *req;
ctx_data_t *data;
log_sid_t sid;
/*!< Sequence ID */
uint64_t timestamp;
/*!< Timestamp */
};
struct ocf_metadata_uuid {
uint32_t size;
uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];

View File

@ -9,6 +9,7 @@
#include "ocf_env.h"
#include "ocf/ocf_ctx.h"
#include "ocf_logger_priv.h"
#include "ocf_volume_priv.h"
#define OCF_VOLUME_TYPE_MAX 8
@ -45,6 +46,10 @@ struct ocf_ctx {
#define ocf_log_stack_trace(ctx) \
ocf_log_stack_trace_raw(&ctx->logger)
int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
const struct ocf_volume_properties *properties,
const struct ocf_volume_extended *extended);
/**
* @name Environment data buffer operations wrappers
* @{

View File

@ -7,7 +7,6 @@
#define __OCF_IO_PRIV_H__
#include "ocf/ocf.h"
#include "ocf_request.h"
#include "utils/utils_io_allocator.h"
struct ocf_io_meta {
@ -17,6 +16,7 @@ struct ocf_io_meta {
struct ocf_request *req;
};
struct ocf_io_internal {
struct ocf_io_meta meta;
struct ocf_io io;

View File

@ -90,10 +90,10 @@ void ocf_queue_run_single(ocf_queue_t q)
if (!io_req)
return;
if (io_req->io && io_req->io->handle)
io_req->io->handle(io_req->io, io_req);
if (io_req->ioi.io.handle)
io_req->ioi.io.handle(&io_req->ioi.io, io_req);
else
ocf_io_handle(io_req->io, io_req);
ocf_io_handle(&io_req->ioi.io, io_req);
}
void ocf_queue_run(ocf_queue_t q)

View File

@ -201,6 +201,10 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
req->rw = rw;
req->part_id = PARTITION_DEFAULT;
req->discard.sector = BYTES_TO_SECTORS(addr);
req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
req->discard.handled = 0;
return req;
}
@ -218,6 +222,24 @@ int ocf_req_alloc_map(struct ocf_request *req)
return 0;
}
/* Allocate the mapping array for a discard request. Requests larger than
 * MAX_TRIM_RQ_SIZE are clamped to that size and their core line range is
 * recomputed, so the map allocation stays bounded; the remainder of the
 * discard is presumably processed via resubmission (cf. the
 * req->discard.handled counter) - TODO confirm against the engine code.
 *
 * Returns 0 on success, non-zero if the map allocation fails. */
int ocf_req_alloc_map_discard(struct ocf_request *req)
{
if (req->byte_length <= MAX_TRIM_RQ_SIZE)
return ocf_req_alloc_map(req);
/*
 * NOTE: For cache line size bigger than 8k a single-allocation mapping
 * can handle more than MAX_TRIM_RQ_SIZE, so for these cache line sizes
 * discard request uses only part of the mapping array.
 */
req->byte_length = MAX_TRIM_RQ_SIZE;
req->core_line_last = ocf_bytes_2_lines(req->cache,
req->byte_position + req->byte_length - 1);
req->core_line_count = req->core_line_last - req->core_line_first + 1;
return ocf_req_alloc_map(req);
}
struct ocf_request *ocf_req_new_extended(ocf_queue_t queue, ocf_core_t core,
uint64_t addr, uint32_t bytes, int rw)
{
@ -243,10 +265,6 @@ struct ocf_request *ocf_req_new_discard(ocf_queue_t queue, ocf_core_t core,
if (!req)
return NULL;
req->discard.sector = BYTES_TO_SECTORS(addr);
req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
req->discard.handled = 0;
return req;
}

View File

@ -104,6 +104,9 @@ struct ocf_req_discard_info {
* @brief OCF IO request
*/
struct ocf_request {
struct ocf_io_internal ioi;
/*!< OCF IO associated with request */
env_atomic ref_count;
/*!< Reference usage count, once OCF request reaches zero it
* will be de-initialed. Get/Put method are intended to modify
@ -180,6 +183,12 @@ struct ocf_request {
uint8_t master_io_req_type : 2;
/*!< Core device request context type */
log_sid_t sid;
/*!< Tracing sequence ID */
uint64_t timestamp;
/*!< Tracing timestamp */
ocf_queue_t io_queue;
/*!< I/O queue handle for which request should be submitted */
@ -192,9 +201,6 @@ struct ocf_request {
void (*complete)(struct ocf_request *ocf_req, int error);
/*!< Request completion function */
struct ocf_io *io;
/*!< OCF IO associated with request */
struct ocf_req_discard_info discard;
struct ocf_map_info *map;
@ -243,6 +249,16 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
*/
int ocf_req_alloc_map(struct ocf_request *req);
/**
* @brief Allocate OCF request map for discard request
*
* @param req OCF request
*
* @retval 0 Allocation succeed
* @retval non-zero Allocation failed
*/
int ocf_req_alloc_map_discard(struct ocf_request *req);
/**
* @brief Allocate new OCF request with NOIO map allocation for huge request
*

View File

@ -40,19 +40,17 @@ static inline uint64_t ocf_trace_seq_id(ocf_cache_t cache)
return env_atomic64_inc_return(&cache->trace.trace_seq_ref);
}
static inline void ocf_trace_init_io(struct ocf_core_io *io, ocf_cache_t cache)
static inline void ocf_trace_init_io(struct ocf_request *req)
{
io->timestamp = env_ticks_to_nsecs(env_get_tick_count());
io->sid = ocf_trace_seq_id(cache);
req->timestamp = env_ticks_to_nsecs(env_get_tick_count());
req->sid = ocf_trace_seq_id(req->cache);
}
static inline void ocf_trace_prep_io_event(struct ocf_event_io *ev,
struct ocf_core_io *io, ocf_event_operation_t op)
struct ocf_request *req, ocf_event_operation_t op)
{
struct ocf_request *req = io->req;
ocf_event_init_hdr(&ev->hdr, ocf_event_type_io, io->sid,
io->timestamp, sizeof(*ev));
ocf_event_init_hdr(&ev->hdr, ocf_event_type_io, req->sid,
req->timestamp, sizeof(*ev));
ev->addr = req->byte_position;
if (op == ocf_event_operation_discard)
@ -63,7 +61,7 @@ static inline void ocf_trace_prep_io_event(struct ocf_event_io *ev,
ev->operation = op;
ev->core_id = ocf_core_get_id(req->core);
ev->io_class = req->io->io_class;
ev->io_class = req->ioi.io.io_class;
}
static inline void ocf_trace_push(ocf_queue_t queue, void *trace, uint32_t size)
@ -103,34 +101,31 @@ static inline void ocf_trace_push(ocf_queue_t queue, void *trace, uint32_t size)
env_atomic64_dec(&queue->trace_ref_cntr);
}
static inline void ocf_trace_io(struct ocf_core_io *io, ocf_event_operation_t dir, ocf_cache_t cache)
static inline void ocf_trace_io(struct ocf_request *req,
ocf_event_operation_t dir)
{
struct ocf_event_io ev;
struct ocf_request *req;
if (!cache->trace.trace_callback)
if (!req->cache->trace.trace_callback)
return;
req = io->req;
ocf_trace_prep_io_event(&ev, io, dir);
ocf_trace_prep_io_event(&ev, req, dir);
ocf_trace_push(req->io_queue, &ev, sizeof(ev));
}
static inline void ocf_trace_io_cmpl(struct ocf_core_io *io, ocf_cache_t cache)
static inline void ocf_trace_io_cmpl(struct ocf_request *req)
{
struct ocf_event_io_cmpl ev;
struct ocf_request *req;
if (!cache->trace.trace_callback)
if (!req->cache->trace.trace_callback)
return;
req = io->req;
ocf_event_init_hdr(&ev.hdr, ocf_event_type_io_cmpl,
ocf_trace_seq_id(cache),
ocf_trace_seq_id(req->cache),
env_ticks_to_nsecs(env_get_tick_count()),
sizeof(ev));
ev.rsid = io->sid;
ev.rsid = req->sid;
ev.is_hit = ocf_engine_is_hit(req);
ocf_trace_push(req->io_queue, &ev, sizeof(ev));

View File

@ -228,8 +228,8 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
uint64_t size, unsigned int reqs, ocf_req_end_t callback)
{
struct ocf_counters_block *cache_stats;
uint64_t flags = req->io ? req->io->flags : 0;
uint32_t class = req->io ? req->io->io_class : 0;
uint64_t flags = req->ioi.io.flags;
uint32_t class = req->ioi.io.io_class;
uint64_t addr, bytes, total_bytes = 0;
struct ocf_io *io;
int err;
@ -333,8 +333,8 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
ocf_req_end_t callback)
{
struct ocf_counters_block *core_stats;
uint64_t flags = req->io ? req->io->flags : 0;
uint32_t class = req->io ? req->io->io_class : 0;
uint64_t flags = req->ioi.io.flags;
uint32_t class = req->ioi.io.io_class;
int dir = req->rw;
struct ocf_io *io;
int err;