Merge pull request #540 from mmkayPL/add-back-fastpath

Add back fastpath
Robert Baldyga 2021-07-21 14:58:58 +02:00 committed by GitHub
commit 227021b416
3 changed files with 64 additions and 102 deletions


@@ -172,17 +172,6 @@ static inline void ocf_core_submit_io(struct ocf_io *io)
         ocf_volume_submit_io(io);
 }
 
-/**
- * @brief Fast path for submitting IO. If possible, request is processed
- * immediately without adding to internal request queue
- *
- * @param[in] io IO to be submitted
- *
- * @retval 0 IO has been submitted successfully
- * @retval Non-zero Fast submit failed. Try to submit IO with ocf_core_submit_io()
- */
-int ocf_core_submit_io_fast(struct ocf_io *io);
-
 /**
  * @brief Submit ocf_io with flush command
  *
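
The removed declaration documented a caller-driven pattern: attempt the fast submit and, on a non-zero return, fall back to ocf_core_submit_io(). A minimal sketch of how a hypothetical caller changes with this patch (illustration only, not part of the diff):

        /* Before: caller tries the fast path explicitly and falls back. */
        if (ocf_core_submit_io_fast(io) != 0)
                ocf_core_submit_io(io);

        /* After: the fast path is attempted internally by the core volume's
         * submit routine, so the regular submit is all a caller needs. */
        ocf_core_submit_io(io);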


@@ -144,11 +144,6 @@ struct ocf_request *ocf_engine_pop_req(ocf_queue_t q)
 
         OCF_CHECK_NULL(req);
 
-        if (ocf_req_alloc_map(req)) {
-                req->complete(req, req->error);
-                return NULL;
-        }
-
         return req;
 }
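
After this hunk, a request popped from the queue no longer allocates its mapping table on the fly. Allocation is assumed to happen up front on the submit side (see the ocf_core.c changes below), roughly in this order (sketch, error handling as in the diff):

        /* Submit side: allocate the map before any fast-path attempt. */
        ret = ocf_req_alloc_map(req);
        if (ret) {
                ocf_io_end(io, ret);
                return;
        }

        if (ocf_core_submit_io_fast(io, req, core, cache) == 0)
                return;                 /* handled without queueing */

        ocf_req_clear_map(req);         /* fast path refused: reset the map and queue */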


@@ -221,6 +221,53 @@ static void ocf_req_complete(struct ocf_request *req, int error)
         ocf_io_put(&req->ioi.io);
 }
 
+static int ocf_core_submit_io_fast(struct ocf_io *io, struct ocf_request *req,
+                ocf_core_t core, ocf_cache_t cache)
+{
+        struct ocf_event_io trace_event;
+        ocf_req_cache_mode_t original_cache_mode;
+        int fast;
+
+        if (req->d2c) {
+                return -OCF_ERR_IO;
+        }
+
+        original_cache_mode = req->cache_mode;
+
+        switch (req->cache_mode) {
+        case ocf_req_cache_mode_pt:
+                return -OCF_ERR_IO;
+        case ocf_req_cache_mode_wb:
+        case ocf_req_cache_mode_wo:
+                req->cache_mode = ocf_req_cache_mode_fast;
+                break;
+        default:
+                if (cache->use_submit_io_fast)
+                        break;
+                if (io->dir == OCF_WRITE)
+                        return -OCF_ERR_IO;
+
+                req->cache_mode = ocf_req_cache_mode_fast;
+        }
+
+        if (cache->trace.trace_callback) {
+                if (io->dir == OCF_WRITE)
+                        ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_wr);
+                else if (io->dir == OCF_READ)
+                        ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_rd);
+        }
+
+        fast = ocf_engine_hndl_fast_req(req);
+        if (fast != OCF_FAST_PATH_NO) {
+                ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
+                return 0;
+        }
+
+        req->cache_mode = original_cache_mode;
+
+        return -OCF_ERR_IO;
+}
+
 void ocf_core_volume_submit_io(struct ocf_io *io)
 {
         struct ocf_request *req;
@@ -248,112 +295,43 @@ void ocf_core_volume_submit_io(struct ocf_io *io)
                 return;
         }
 
+        ret = ocf_req_alloc_map(req);
+        if (ret) {
+                ocf_io_end(io, ret);
+                return;
+        }
+
         req->part_id = ocf_user_part_class2id(cache, io->io_class);
         req->core = core;
         req->complete = ocf_req_complete;
 
         ocf_resolve_effective_cache_mode(cache, core, req);
 
-        ocf_core_seq_cutoff_update(core, req);
-
         ocf_core_update_stats(core, io);
 
+        ocf_io_get(io);
+
+        if (!ocf_core_submit_io_fast(io, req, core, cache)) {
+                ocf_core_seq_cutoff_update(core, req);
+                return;
+        }
+
+        ocf_req_clear_map(req);
+        ocf_core_seq_cutoff_update(core, req);
+
         if (io->dir == OCF_WRITE)
                 ocf_trace_io(req, ocf_event_operation_wr);
         else if (io->dir == OCF_READ)
                 ocf_trace_io(req, ocf_event_operation_rd);
 
-        ocf_io_get(io);
-
         ret = ocf_engine_hndl_req(req);
         if (ret) {
                 dec_counter_if_req_was_dirty(req);
                 ocf_io_end(io, ret);
+                ocf_io_put(io);
         }
 }
 
-int ocf_core_submit_io_fast(struct ocf_io *io)
-{
-        struct ocf_request *req;
-        struct ocf_event_io trace_event;
-        ocf_core_t core;
-        ocf_cache_t cache;
-        int fast;
-        int ret;
-
-        OCF_CHECK_NULL(io);
-
-        ret = ocf_core_validate_io(io);
-        if (ret < 0)
-                return ret;
-
-        req = ocf_io_to_req(io);
-        core = ocf_volume_to_core(ocf_io_get_volume(io));
-        cache = ocf_core_get_cache(core);
-
-        if (unlikely(!env_bit_test(ocf_cache_state_running,
-                        &cache->cache_state))) {
-                ocf_io_end(io, -OCF_ERR_CACHE_NOT_AVAIL);
-                return 0;
-        }
-
-        if (req->d2c) {
-                dec_counter_if_req_was_dirty(req);
-                return -OCF_ERR_IO;
-        }
-
-        ret = ocf_req_alloc_map(req);
-        if (ret) {
-                ocf_io_end(io, -OCF_ERR_NO_MEM);
-                return 0;
-        }
-
-        req->core = core;
-        req->complete = ocf_req_complete;
-        req->part_id = ocf_user_part_class2id(cache, io->io_class);
-
-        ocf_resolve_effective_cache_mode(cache, core, req);
-
-        switch (req->cache_mode) {
-        case ocf_req_cache_mode_pt:
-                return -OCF_ERR_IO;
-        case ocf_req_cache_mode_wb:
-        case ocf_req_cache_mode_wo:
-                req->cache_mode = ocf_req_cache_mode_fast;
-                break;
-        default:
-                if (cache->use_submit_io_fast)
-                        break;
-                if (io->dir == OCF_WRITE)
-                        return -OCF_ERR_IO;
-
-                req->cache_mode = ocf_req_cache_mode_fast;
-        }
-
-        ocf_core_update_stats(core, io);
-
-        if (cache->trace.trace_callback) {
-                if (io->dir == OCF_WRITE)
-                        ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_wr);
-                else if (io->dir == OCF_READ)
-                        ocf_trace_prep_io_event(&trace_event, req, ocf_event_operation_rd);
-        }
-
-        ocf_io_get(io);
-
-        fast = ocf_engine_hndl_fast_req(req);
-        if (fast != OCF_FAST_PATH_NO) {
-                ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
-                ocf_core_seq_cutoff_update(core, req);
-                return 0;
-        }
-
-        dec_counter_if_req_was_dirty(req);
-        ocf_io_put(io);
-
-        return -OCF_ERR_IO;
-}
-
 static void ocf_core_volume_submit_flush(struct ocf_io *io)
 {
         struct ocf_request *req;
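
Taken together, the fast path is now an internal detail of ocf_core_volume_submit_io(): the new static helper returns 0 when ocf_engine_hndl_fast_req() accepted the request and -OCF_ERR_IO when the IO must go through the regular queued engine. A condensed sketch of the resulting control flow (tracing and some error handling trimmed; comments are mine, not from the diff):

        ocf_io_get(io);

        if (!ocf_core_submit_io_fast(io, req, core, cache)) {
                /* Handled immediately, without adding to the request queue. */
                ocf_core_seq_cutoff_update(core, req);
                return;
        }

        /* Fast path refused the request: reset the map and queue it. */
        ocf_req_clear_map(req);
        ocf_core_seq_cutoff_update(core, req);

        ret = ocf_engine_hndl_req(req);
        if (ret) {
                ocf_io_end(io, ret);
                ocf_io_put(io);         /* drop the reference taken above */
        }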