ocf_request: Store core handle instead of core_id
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
parent
eccd4a0163
commit
0490dd8bd4
@ -51,25 +51,23 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
|
||||
* request. Also, complete original request only if this is the last
|
||||
* sub-request to complete
|
||||
*/
|
||||
if (env_atomic_dec_return(&req->req_remaining) == 0) {
|
||||
/* We must free the pages we have allocated */
|
||||
ctx_data_secure_erase(cache->owner, req->data);
|
||||
ctx_data_munlock(cache->owner, req->data);
|
||||
ctx_data_free(cache->owner, req->data);
|
||||
req->data = NULL;
|
||||
if (env_atomic_dec_return(&req->req_remaining))
|
||||
return;
|
||||
|
||||
if (req->error) {
|
||||
env_atomic_inc(&cache->core[req->core_id].
|
||||
counters->cache_errors.write);
|
||||
ocf_engine_invalidate(req);
|
||||
} else {
|
||||
ocf_req_unlock(req);
|
||||
/* We must free the pages we have allocated */
|
||||
ctx_data_secure_erase(cache->owner, req->data);
|
||||
ctx_data_munlock(cache->owner, req->data);
|
||||
ctx_data_free(cache->owner, req->data);
|
||||
req->data = NULL;
|
||||
|
||||
/* always free the request at the last point
|
||||
* of the completion path
|
||||
*/
|
||||
ocf_req_put(req);
|
||||
}
|
||||
if (req->error) {
|
||||
env_atomic_inc(&req->core->counters->cache_errors.write);
|
||||
ocf_engine_invalidate(req);
|
||||
} else {
|
||||
ocf_req_unlock(req);
|
||||
|
||||
/* put the request at the last point of the completion path */
|
||||
ocf_req_put(req);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -24,7 +24,7 @@ void ocf_engine_error(struct ocf_request *req,
|
||||
if (stop_cache)
|
||||
env_bit_clear(ocf_cache_state_running, &cache->cache_state);
|
||||
|
||||
ocf_core_log(&cache->core[req->core_id], log_err,
|
||||
ocf_core_log(req->core, log_err,
|
||||
"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
|
||||
BYTES_TO_SECTORS(req->byte_position), req->byte_length);
|
||||
}
|
||||
@ -158,7 +158,7 @@ void ocf_engine_traverse(struct ocf_request *req)
|
||||
uint64_t core_line;
|
||||
|
||||
struct ocf_cache *cache = req->cache;
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
|
||||
OCF_DEBUG_TRACE(req->cache);
|
||||
|
||||
@ -201,6 +201,7 @@ int ocf_engine_check(struct ocf_request *req)
|
||||
uint64_t core_line;
|
||||
|
||||
struct ocf_cache *cache = req->cache;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
|
||||
OCF_DEBUG_TRACE(req->cache);
|
||||
|
||||
@ -217,7 +218,7 @@ int ocf_engine_check(struct ocf_request *req)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (_ocf_engine_check_map_entry(cache, entry, req->core_id)) {
|
||||
if (_ocf_engine_check_map_entry(cache, entry, core_id)) {
|
||||
/* Mapping is invalid */
|
||||
entry->invalid = true;
|
||||
req->info.seq_req = false;
|
||||
@ -247,6 +248,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,
|
||||
ocf_cache_line_t *cache_line)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
ocf_part_id_t part_id = req->part_id;
|
||||
ocf_cleaning_t clean_policy_type;
|
||||
|
||||
@ -266,7 +268,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,
|
||||
ocf_metadata_add_to_partition(cache, part_id, *cache_line);
|
||||
|
||||
/* Add the block to the corresponding collision list */
|
||||
ocf_metadata_add_to_collision(cache, req->core_id, core_line, hash_index,
|
||||
ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
|
||||
*cache_line);
|
||||
|
||||
ocf_eviction_init_cache_line(cache, *cache_line, part_id);
|
||||
@ -320,7 +322,7 @@ void ocf_engine_map(struct ocf_request *req)
|
||||
struct ocf_map_info *entry;
|
||||
uint64_t core_line;
|
||||
int status = LOOKUP_MAPPED;
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
|
||||
if (ocf_engine_unmapped_count(req))
|
||||
status = space_managment_evict_do(cache, req,
|
||||
@ -442,12 +444,10 @@ void ocf_engine_clean(struct ocf_request *req)
|
||||
|
||||
void ocf_engine_update_block_stats(struct ocf_request *req)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_part_id_t part_id = req->part_id;
|
||||
struct ocf_counters_block *blocks;
|
||||
|
||||
blocks = &cache->core[core_id].counters->
|
||||
blocks = &req->core->counters->
|
||||
part_counters[part_id].blocks;
|
||||
|
||||
if (req->rw == OCF_READ)
|
||||
@ -460,19 +460,15 @@ void ocf_engine_update_block_stats(struct ocf_request *req)
|
||||
|
||||
void ocf_engine_update_request_stats(struct ocf_request *req)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_part_id_t part_id = req->part_id;
|
||||
struct ocf_counters_req *reqs;
|
||||
|
||||
switch (req->rw) {
|
||||
case OCF_READ:
|
||||
reqs = &cache->core[core_id].counters->
|
||||
part_counters[part_id].read_reqs;
|
||||
reqs = &req->core->counters->part_counters[part_id].read_reqs;
|
||||
break;
|
||||
case OCF_WRITE:
|
||||
reqs = &cache->core[core_id].counters->
|
||||
part_counters[part_id].write_reqs;
|
||||
reqs = &req->core->counters->part_counters[part_id].write_reqs;
|
||||
break;
|
||||
default:
|
||||
ENV_BUG();
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
static void _ocf_d2c_completion(struct ocf_request *req, int error)
|
||||
{
|
||||
ocf_core_t core = &req->cache->core[req->core_id];
|
||||
ocf_core_t core = req->core;
|
||||
req->error = error;
|
||||
|
||||
OCF_DEBUG_RQ(req, "Completion");
|
||||
@ -38,8 +38,7 @@ static void _ocf_d2c_completion(struct ocf_request *req, int error)
|
||||
|
||||
int ocf_io_d2c(struct ocf_request *req)
|
||||
{
|
||||
ocf_cache_t cache = req->cache;
|
||||
ocf_core_t core = &cache->core[req->core_id];
|
||||
ocf_core_t core = req->core;
|
||||
|
||||
OCF_DEBUG_TRACE(req->cache);
|
||||
|
||||
|
@ -62,10 +62,9 @@ static void _ocf_discard_core_complete(struct ocf_io *io, int error)
|
||||
|
||||
static int _ocf_discard_core(struct ocf_request *req)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
struct ocf_io *io;
|
||||
|
||||
io = ocf_volume_new_io(&cache->core[req->core_id].volume);
|
||||
io = ocf_volume_new_io(&req->core->volume);
|
||||
if (!io) {
|
||||
_ocf_discard_complete_req(req, -ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
@ -43,8 +43,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
|
||||
if (req->error) {
|
||||
OCF_DEBUG_RQ(req, "ERROR");
|
||||
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
cache_errors.read);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.read);
|
||||
ocf_engine_push_req_front_pt(req);
|
||||
} else {
|
||||
ocf_req_unlock(req);
|
||||
|
@ -20,8 +20,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
|
||||
{
|
||||
if (error) {
|
||||
req->error = error;
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
cache_errors.write);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.write);
|
||||
}
|
||||
|
||||
if (env_atomic_dec_return(&req->req_remaining))
|
||||
|
@ -48,7 +48,7 @@ int ocf_engine_ops(struct ocf_request *req)
|
||||
env_atomic_set(&req->req_remaining, 2);
|
||||
|
||||
/* Submit operation into core device */
|
||||
ocf_submit_volume_req(&cache->core[req->core_id].volume, req,
|
||||
ocf_submit_volume_req(&req->core->volume, req,
|
||||
_ocf_engine_ops_complete);
|
||||
|
||||
ocf_submit_cache_reqs(cache, req->map, req, req->rw,
|
||||
|
@ -28,8 +28,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
|
||||
|
||||
if (req->error) {
|
||||
req->info.core_error = 1;
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
core_errors.read);
|
||||
env_atomic_inc(&req->core->counters->core_errors.read);
|
||||
}
|
||||
|
||||
/* Complete request */
|
||||
@ -43,15 +42,12 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
|
||||
|
||||
static inline void _ocf_read_pt_submit(struct ocf_request *req)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
|
||||
env_atomic_set(&req->req_remaining, 1); /* Core device IO */
|
||||
|
||||
OCF_DEBUG_RQ(req, "Submit");
|
||||
|
||||
/* Core read */
|
||||
ocf_submit_volume_req(&cache->core[req->core_id].volume, req,
|
||||
_ocf_read_pt_complete);
|
||||
ocf_submit_volume_req(&req->core->volume, req, _ocf_read_pt_complete);
|
||||
}
|
||||
|
||||
int ocf_read_pt_do(struct ocf_request *req)
|
||||
@ -91,7 +87,7 @@ int ocf_read_pt_do(struct ocf_request *req)
|
||||
|
||||
/* Update statistics */
|
||||
ocf_engine_update_block_stats(req);
|
||||
env_atomic64_inc(&cache->core[req->core_id].counters->
|
||||
env_atomic64_inc(&req->core->counters->
|
||||
part_counters[req->part_id].read_reqs.pass_through);
|
||||
|
||||
/* Put OCF request - decrease reference counter */
|
||||
|
@ -38,8 +38,7 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
|
||||
OCF_DEBUG_RQ(req, "HIT completion");
|
||||
|
||||
if (req->error) {
|
||||
env_atomic_inc(&req->cache->core[req->core_id].
|
||||
counters->cache_errors.read);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.read);
|
||||
ocf_engine_push_req_front_pt(req);
|
||||
} else {
|
||||
|
||||
@ -78,8 +77,7 @@ static void _ocf_read_generic_miss_complete(struct ocf_request *req, int error)
|
||||
req->complete(req, req->error);
|
||||
|
||||
req->info.core_error = 1;
|
||||
env_atomic_inc(&cache->core[req->core_id].
|
||||
counters->core_errors.read);
|
||||
env_atomic_inc(&req->core->counters->core_errors.read);
|
||||
|
||||
ctx_data_free(cache->owner, req->cp_data);
|
||||
req->cp_data = NULL;
|
||||
@ -128,7 +126,7 @@ static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
|
||||
goto err_alloc;
|
||||
|
||||
/* Submit read request to core device. */
|
||||
ocf_submit_volume_req(&cache->core[req->core_id].volume, req,
|
||||
ocf_submit_volume_req(&req->core->volume, req,
|
||||
_ocf_read_generic_miss_complete);
|
||||
|
||||
return;
|
||||
|
@ -24,8 +24,7 @@ static void _ocf_read_wa_complete(struct ocf_request *req, int error)
|
||||
|
||||
if (req->error) {
|
||||
req->info.core_error = 1;
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
core_errors.write);
|
||||
env_atomic_inc(&req->core->counters->core_errors.write);
|
||||
}
|
||||
|
||||
/* Complete request */
|
||||
@ -72,12 +71,12 @@ int ocf_write_wa(struct ocf_request *req)
|
||||
|
||||
/* Submit write IO to the core */
|
||||
env_atomic_set(&req->req_remaining, 1);
|
||||
ocf_submit_volume_req(&cache->core[req->core_id].volume, req,
|
||||
ocf_submit_volume_req(&req->core->volume, req,
|
||||
_ocf_read_wa_complete);
|
||||
|
||||
/* Update statistics */
|
||||
ocf_engine_update_block_stats(req);
|
||||
env_atomic64_inc(&cache->core[req->core_id].counters->
|
||||
env_atomic64_inc(&req->core->counters->
|
||||
part_counters[req->part_id].write_reqs.pass_through);
|
||||
}
|
||||
|
||||
|
@ -88,8 +88,7 @@ static const struct ocf_io_if _io_if_wb_flush_metadata = {
|
||||
static void _ocf_write_wb_complete(struct ocf_request *req, int error)
|
||||
{
|
||||
if (error) {
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
cache_errors.write);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.write);
|
||||
req->error |= error;
|
||||
}
|
||||
|
||||
|
@ -26,8 +26,7 @@ static const struct ocf_io_if _io_if_wi_flush_metadata = {
|
||||
static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
|
||||
{
|
||||
if (error) {
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
cache_errors.write);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.write);
|
||||
req->error |= error;
|
||||
}
|
||||
|
||||
@ -78,8 +77,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
|
||||
if (error) {
|
||||
req->error = error;
|
||||
req->info.core_error = 1;
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
core_errors.write);
|
||||
env_atomic_inc(&req->core->counters->core_errors.write);
|
||||
}
|
||||
|
||||
if (env_atomic_dec_return(&req->req_remaining))
|
||||
@ -101,8 +99,6 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
|
||||
|
||||
static int _ocf_write_wi_do(struct ocf_request *req)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
|
||||
/* Get OCF request - increase reference counter */
|
||||
ocf_req_get(req);
|
||||
|
||||
@ -111,12 +107,12 @@ static int _ocf_write_wi_do(struct ocf_request *req)
|
||||
OCF_DEBUG_RQ(req, "Submit");
|
||||
|
||||
/* Submit write IO to the core */
|
||||
ocf_submit_volume_req(&cache->core[req->core_id].volume, req,
|
||||
ocf_submit_volume_req(&req->core->volume, req,
|
||||
_ocf_write_wi_core_complete);
|
||||
|
||||
/* Update statistics */
|
||||
ocf_engine_update_block_stats(req);
|
||||
env_atomic64_inc(&cache->core[req->core_id].counters->
|
||||
env_atomic64_inc(&req->core->counters->
|
||||
part_counters[req->part_id].write_reqs.pass_through);
|
||||
|
||||
/* Put OCF request - decrease reference counter */
|
||||
|
@ -48,8 +48,7 @@ static void _ocf_write_wt_cache_complete(struct ocf_request *req, int error)
|
||||
{
|
||||
if (error) {
|
||||
req->error = req->error ?: error;
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
cache_errors.write);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.write);
|
||||
|
||||
if (req->error)
|
||||
inc_fallback_pt_error_counter(req->cache);
|
||||
@ -63,8 +62,7 @@ static void _ocf_write_wt_core_complete(struct ocf_request *req, int error)
|
||||
if (error) {
|
||||
req->error = error;
|
||||
req->info.core_error = 1;
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
core_errors.write);
|
||||
env_atomic_inc(&req->core->counters->core_errors.write);
|
||||
}
|
||||
|
||||
_ocf_write_wt_req_complete(req);
|
||||
@ -93,7 +91,7 @@ static inline void _ocf_write_wt_submit(struct ocf_request *req)
|
||||
ocf_engine_io_count(req), _ocf_write_wt_cache_complete);
|
||||
|
||||
/* To core */
|
||||
ocf_submit_volume_req(&cache->core[req->core_id].volume, req,
|
||||
ocf_submit_volume_req(&req->core->volume, req,
|
||||
_ocf_write_wt_core_complete);
|
||||
}
|
||||
|
||||
|
@ -50,8 +50,7 @@ static const struct ocf_io_if _io_if_zero_purge = {
|
||||
static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
|
||||
{
|
||||
if (error) {
|
||||
env_atomic_inc(&req->cache->core[req->core_id].counters->
|
||||
cache_errors.write);
|
||||
env_atomic_inc(&req->core->counters->cache_errors.write);
|
||||
req->error = error;
|
||||
}
|
||||
|
||||
|
@ -42,7 +42,7 @@ static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
|
||||
|
||||
static inline uint32_t ocf_evict_do(ocf_cache_t cache,
|
||||
ocf_queue_t io_queue, const uint32_t evict_cline_no,
|
||||
ocf_core_id_t core_id, ocf_part_id_t target_part_id)
|
||||
ocf_part_id_t target_part_id)
|
||||
{
|
||||
uint32_t to_evict = 0, evicted = 0;
|
||||
struct ocf_user_part *part;
|
||||
@ -84,7 +84,7 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
|
||||
}
|
||||
|
||||
evicted += ocf_eviction_need_space(cache, io_queue,
|
||||
part_id, to_evict, core_id);
|
||||
part_id, to_evict);
|
||||
}
|
||||
|
||||
if (!ocf_eviction_can_evict(cache))
|
||||
@ -95,7 +95,7 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
|
||||
to_evict = ocf_evict_calculate(target_part, evict_cline_no);
|
||||
if (to_evict) {
|
||||
evicted += ocf_eviction_need_space(cache, io_queue,
|
||||
target_part_id, to_evict, core_id);
|
||||
target_part_id, to_evict);
|
||||
}
|
||||
}
|
||||
|
||||
@ -111,9 +111,9 @@ int space_managment_evict_do(struct ocf_cache *cache,
|
||||
if (evict_cline_no <= cache->device->freelist_part->curr_size)
|
||||
return LOOKUP_MAPPED;
|
||||
|
||||
evict_cline_no = evict_cline_no - cache->device->freelist_part->curr_size;
|
||||
evict_cline_no -= cache->device->freelist_part->curr_size;
|
||||
evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no,
|
||||
req->core_id, req->part_id);
|
||||
req->part_id);
|
||||
|
||||
if (evict_cline_no <= evicted)
|
||||
return LOOKUP_MAPPED;
|
||||
|
@ -39,7 +39,7 @@ struct eviction_policy_ops {
|
||||
bool (*can_evict)(ocf_cache_t cache);
|
||||
uint32_t (*req_clines)(ocf_cache_t cache,
|
||||
ocf_queue_t io_queue, ocf_part_id_t part_id,
|
||||
uint32_t cline_no, ocf_core_id_t core_id);
|
||||
uint32_t cline_no);
|
||||
void (*hot_cline)(ocf_cache_t cache,
|
||||
ocf_cache_line_t cline);
|
||||
void (*init_evp)(ocf_cache_t cache,
|
||||
|
@ -391,7 +391,7 @@ bool evp_lru_can_evict(ocf_cache_t cache)
|
||||
|
||||
/* the caller must hold the metadata lock */
|
||||
uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
|
||||
ocf_part_id_t part_id, uint32_t cline_no, ocf_core_id_t core_id)
|
||||
ocf_part_id_t part_id, uint32_t cline_no)
|
||||
{
|
||||
uint32_t i;
|
||||
ocf_cache_line_t curr_cline, prev_cline;
|
||||
|
@ -13,8 +13,7 @@ void evp_lru_init_cline(struct ocf_cache *cache,
|
||||
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
|
||||
bool evp_lru_can_evict(struct ocf_cache *cache);
|
||||
uint32_t evp_lru_req_clines(struct ocf_cache *cache, ocf_queue_t io_queue,
|
||||
ocf_part_id_t part_id, uint32_t cline_no,
|
||||
ocf_core_id_t core_id);
|
||||
ocf_part_id_t part_id, uint32_t cline_no);
|
||||
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
|
||||
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id);
|
||||
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
|
||||
|
@ -53,14 +53,11 @@ static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
|
||||
}
|
||||
|
||||
static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
|
||||
ocf_queue_t io_queue, ocf_part_id_t part_id, uint32_t clines,
|
||||
ocf_core_id_t core_id)
|
||||
ocf_queue_t io_queue, ocf_part_id_t part_id, uint32_t clines)
|
||||
{
|
||||
uint8_t type;
|
||||
uint32_t result = 0;
|
||||
|
||||
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
|
||||
|
||||
type = cache->conf_meta->eviction_policy_type;
|
||||
|
||||
ENV_BUG_ON(type >= ocf_eviction_max);
|
||||
@ -71,7 +68,7 @@ static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
|
||||
* eviction lock.
|
||||
*/
|
||||
result = evict_policy_ops[type].req_clines(cache, io_queue,
|
||||
part_id, clines, core_id);
|
||||
part_id, clines);
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@ -123,15 +123,15 @@ struct ocf_request {
|
||||
ocf_cache_t cache;
|
||||
/*!< Handle to cache instance */
|
||||
|
||||
ocf_core_t core;
|
||||
/*!< Handle to core instance */
|
||||
|
||||
const struct ocf_io_if *io_if;
|
||||
/*!< IO interface */
|
||||
|
||||
void (*resume)(struct ocf_request *req);
|
||||
/*!< OCF request resume callback */
|
||||
|
||||
ocf_core_id_t core_id;
|
||||
/*!< This file indicates core id of request */
|
||||
|
||||
ocf_part_id_t part_id;
|
||||
/*!< Targeted partition of requests */
|
||||
|
||||
|
@ -61,7 +61,7 @@ static inline void ocf_trace_prep_io_event(struct ocf_event_io *ev,
|
||||
ev->len = rq->byte_length;
|
||||
|
||||
ev->operation = op;
|
||||
ev->core_id = rq->core_id;
|
||||
ev->core_id = ocf_core_get_id(rq->core);
|
||||
|
||||
ev->io_class = rq->io->io_class;
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
|
||||
ENV_BUG_ON(!req);
|
||||
|
||||
part_id = ocf_metadata_get_partition_id(cache, line);
|
||||
core_id = req->core_id;
|
||||
core_id = ocf_core_get_id(req->core);
|
||||
|
||||
__set_cache_line_invalid(cache, start_bit, end_bit, line, core_id,
|
||||
part_id);
|
||||
@ -81,7 +81,7 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
|
||||
void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
|
||||
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
|
||||
{
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
ocf_cache_line_t line = req->map[map_idx].coll_idx;
|
||||
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
|
||||
|
||||
@ -101,7 +101,7 @@ void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
|
||||
void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
|
||||
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
|
||||
{
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
ocf_cache_line_t line = req->map[map_idx].coll_idx;
|
||||
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
|
||||
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
|
||||
@ -141,7 +141,7 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
|
||||
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
|
||||
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
|
||||
{
|
||||
ocf_core_id_t core_id = req->core_id;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
ocf_cache_line_t line = req->map[map_idx].coll_idx;
|
||||
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
|
||||
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
|
||||
|
@ -312,6 +312,7 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)
|
||||
const struct ocf_map_info *iter = req->map;
|
||||
uint32_t i;
|
||||
ocf_cache_line_t cache_line;
|
||||
ocf_core_id_t core_id;
|
||||
|
||||
OCF_DEBUG_TRACE(req->cache);
|
||||
|
||||
@ -332,7 +333,8 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)
|
||||
continue;
|
||||
|
||||
ocf_metadata_get_core_and_part_id(cache, cache_line,
|
||||
&req->core_id, &req->part_id);
|
||||
&core_id, &req->part_id);
|
||||
req->core = &cache->core[core_id];
|
||||
|
||||
set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
|
||||
i);
|
||||
|
@ -237,8 +237,7 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
|
||||
uint32_t i;
|
||||
int err;
|
||||
|
||||
cache_stats = &cache->core[req->core_id].
|
||||
counters->cache_blocks;
|
||||
cache_stats = &req->core->counters->cache_blocks;
|
||||
|
||||
if (reqs == 1) {
|
||||
io = ocf_new_cache_io(cache);
|
||||
@ -328,7 +327,6 @@ update_stats:
|
||||
void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
|
||||
ocf_req_end_t callback)
|
||||
{
|
||||
struct ocf_cache *cache = req->cache;
|
||||
struct ocf_counters_block *core_stats;
|
||||
uint64_t flags = req->io ? req->io->flags : 0;
|
||||
uint32_t class = req->io ? req->io->io_class : 0;
|
||||
@ -336,8 +334,7 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
|
||||
struct ocf_io *io;
|
||||
int err;
|
||||
|
||||
core_stats = &cache->core[req->core_id].
|
||||
counters->core_blocks;
|
||||
core_stats = &req->core->counters->core_blocks;
|
||||
if (dir == OCF_WRITE)
|
||||
env_atomic64_add(req->byte_length, &core_stats->write_bytes);
|
||||
else if (dir == OCF_READ)
|
||||
|
@ -95,6 +95,7 @@ void ocf_part_move(struct ocf_request *req)
|
||||
ocf_part_id_t id_old, id_new;
|
||||
uint32_t i;
|
||||
ocf_cleaning_t type = cache->conf_meta->cleaning_policy_type;
|
||||
ocf_core_id_t core_id = ocf_core_get_id(req->core);
|
||||
|
||||
ENV_BUG_ON(type >= ocf_cleaning_max);
|
||||
|
||||
@ -157,15 +158,15 @@ void ocf_part_move(struct ocf_request *req)
|
||||
cleaning_policy_ops[type].
|
||||
set_hot_cache_line(cache, line);
|
||||
|
||||
env_atomic_inc(&cache->core_runtime_meta[req->core_id].
|
||||
env_atomic_inc(&cache->core_runtime_meta[core_id].
|
||||
part_counters[id_new].dirty_clines);
|
||||
env_atomic_dec(&cache->core_runtime_meta[req->core_id].
|
||||
env_atomic_dec(&cache->core_runtime_meta[core_id].
|
||||
part_counters[id_old].dirty_clines);
|
||||
}
|
||||
|
||||
env_atomic_inc(&cache->core_runtime_meta[req->core_id].
|
||||
env_atomic_inc(&cache->core_runtime_meta[core_id].
|
||||
part_counters[id_new].cached_clines);
|
||||
env_atomic_dec(&cache->core_runtime_meta[req->core_id].
|
||||
env_atomic_dec(&cache->core_runtime_meta[core_id].
|
||||
part_counters[id_old].cached_clines);
|
||||
|
||||
/* DONE */
|
||||
|
@ -187,8 +187,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
|
||||
ocf_queue_get(queue);
|
||||
req->io_queue = queue;
|
||||
|
||||
/* TODO: Store core pointer instead of id */
|
||||
req->core_id = core ? ocf_core_get_id(core) : 0;
|
||||
req->core = core;
|
||||
req->cache = cache;
|
||||
|
||||
req->d2c = (queue != cache->mngt_queue) && !ocf_refcnt_inc(
|
||||
|
Loading…
Reference in New Issue
Block a user