Add getter function for cache->device->concurrency.cache_line

The purpose of this change is to facilitate unit testing.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
commit 1411314678 (parent ce2ff14150)
Author: Adam Rutkowski <adam.j.rutkowski@intel.com>
Date:   2021-03-05 11:20:47 +01:00

21 changed files with 49 additions and 46 deletions
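
To illustrate the testing seam this getter creates, here is a minimal
sketch. It is not part of the change itself: the type definitions below
are simplified stand-ins for the real OCF structures, kept only detailed
enough to compile.

#include <assert.h>

/* Simplified stand-ins for the real OCF types (illustration only). */
struct ocf_cache_line_concurrency {
	int stub;
};

struct ocf_cache_device {
	struct {
		struct ocf_cache_line_concurrency *cache_line;
	} concurrency;
};

typedef struct ocf_cache {
	struct ocf_cache_device *device;
} *ocf_cache_t;

/* The getter introduced by this commit: the one point through which
 * every call site now reaches the cacheline concurrency context. */
static inline struct ocf_cache_line_concurrency *
ocf_cache_line_concurrency(ocf_cache_t cache)
{
	return cache->device->concurrency.cache_line;
}

/* A unit test can satisfy code that needs the concurrency context by
 * populating this one path (or by substituting the getter itself in a
 * test build), rather than stubbing every direct
 * cache->device->concurrency.cache_line dereference. */
int main(void)
{
	struct ocf_cache_line_concurrency stub_ctx = { 0 };
	struct ocf_cache_device dev = { .concurrency = { &stub_ctx } };
	struct ocf_cache cache = { .device = &dev };

	assert(ocf_cache_line_concurrency(&cache) == &stub_ctx);
	return 0;
}

Since the getter is static inline, an optimizing build reduces it to the
same direct field access as before, so the indirection costs nothing at
runtime.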


@@ -398,7 +398,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
 	if (info.status == LOOKUP_HIT &&
 			metadata_test_dirty(cache, info.coll_idx)) {
 		locked = ocf_cache_line_try_lock_rd(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				info.coll_idx);
 	}
 
@@ -474,7 +474,7 @@ static void _acp_flush_end(void *priv, int error)
 
 	for (i = 0; i < flush->size; i++) {
 		ocf_cache_line_unlock_rd(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				flush->data[i].cache_line);
 		ACP_DEBUG_END(acp, flush->data[i].cache_line);
 	}


@@ -682,8 +682,7 @@ static bool block_is_busy(struct ocf_cache *cache,
 	if (!cache->core[core_id].opened)
		return true;
 
-	if (ocf_cache_line_is_used(
-			cache->device->concurrency.cache_line,
+	if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 			cache_line)) {
 		return true;
 	}


@@ -1130,7 +1130,8 @@ bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
 		ocf_cache_line_t line)
 {
-	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c =
+			ocf_cache_line_concurrency(cache);
 	env_atomic *access = &c->access[line];
 	int val = env_atomic_read(access);
 


@@ -202,4 +202,16 @@ void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line);
 
+/**
+ * @brief Get cacheline concurrency context
+ *
+ * @param cache - cache instance
+ * @return cacheline concurrency context
+ */
+static inline struct ocf_cache_line_concurrency *
+ocf_cache_line_concurrency(ocf_cache_t cache)
+{
+	return cache->device->concurrency.cache_line;
+}
+
 #endif /* OCF_CONCURRENCY_H_ */


@@ -64,7 +64,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
 		ocf_engine_invalidate(req);
 	} else {
-		ocf_req_unlock(cache->device->concurrency.cache_line, req);
+		ocf_req_unlock(ocf_cache_line_concurrency(cache), req);
 
 		/* put the request at the last point of the completion path */
 		ocf_req_put(req);


@@ -434,7 +434,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 		req->error |= error;
 
 		/* End request and do not processing */
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
 				req);
 
 		/* Complete request */
@@ -451,8 +451,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 
 static int _lock_clines(struct ocf_request *req)
 {
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(req->cache);
 	enum ocf_engine_lock_type lock_type =
 			req->engine_cbs->get_lock_type(req);
 
@@ -742,8 +741,7 @@ static int _ocf_engine_refresh(struct ocf_request *req)
 		req->complete(req, req->error);
 
 		/* Release WRITE lock of request */
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Release OCF request */
 		ocf_req_put(req);


@@ -147,7 +147,7 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	/* Release WRITE lock of request */
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	if (req->error) {
 		ocf_metadata_error(req->cache);
@@ -236,7 +236,7 @@ static int _ocf_discard_step(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				req, _ocf_discard_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;


@@ -46,8 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_READ);
 		ocf_engine_push_req_front_pt(req);
 	} else {
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Complete request */
 		req->complete(req, req->error);
@@ -132,7 +131,7 @@ int ocf_read_fast(struct ocf_request *req)
 	if (hit && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -204,7 +203,7 @@ int ocf_write_fast(struct ocf_request *req)
 	if (mapped && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 


@@ -31,7 +31,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to flush metadata to cache");
 
-	ocf_req_unlock(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Put OCF request - decrease reference counter */
 	ocf_req_put(req);


@@ -34,7 +34,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
 	/* Complete request */
 	req->complete(req, req->error);
 
-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Release OCF request */
 	ocf_req_put(req);


@@ -24,8 +24,8 @@
 
 static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
 {
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(
+			req->cache);
 
 	if (error)
 		req->error |= error;


@@ -60,7 +60,7 @@ static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 


@@ -25,7 +25,7 @@ static const struct ocf_io_if _io_if_wi_update_metadata = {
 
 int _ocf_write_wi_next_pass(struct ocf_request *req)
 {
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	if (req->wi_second_pass) {
 		req->complete(req, req->error);
@@ -75,7 +75,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -128,8 +128,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	if (req->error) {
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 		req->complete(req, req->error);
 
@@ -200,7 +199,7 @@ int ocf_write_wi(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, _ocf_write_wi_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;


@@ -33,7 +33,7 @@ static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to read data from cache");
 
-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Complete request */
 	req->complete(req, req->error);
@@ -169,8 +169,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
 	if (!req->info.dirty_any || req->error) {
 		OCF_DEBUG_RQ(req, "Completion");
 		req->complete(req, req->error);
-		ocf_req_unlock_rd(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 		ocf_req_put(req);
 		return;
 	}
@@ -238,7 +237,7 @@ int ocf_read_wo(struct ocf_request *req)
 		 * lock request for READ access
 		 */
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 


@@ -34,8 +34,7 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
 		ocf_engine_invalidate(req);
 	} else {
 		/* Unlock reqest from WRITE access */
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Complete request */
 		req->complete(req, req->info.core_error ? req->error : 0);


@@ -31,7 +31,7 @@ static int ocf_zero_purge(struct ocf_request *req)
 		ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
 	}
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -153,7 +153,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
 
 	/* Some cache line are mapped, lock request for WRITE access */
 	lock = ocf_req_async_lock_wr(
-			req->cache->device->concurrency.cache_line,
+			ocf_cache_line_concurrency(req->cache),
 			req, ocf_engine_on_resume);
 
 	if (lock >= 0) {


@@ -357,8 +357,7 @@ static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
 			break;
 
 		/* Prevent evicting already locked items */
-		if (ocf_cache_line_is_used(
-				cache->device->concurrency.cache_line,
+		if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 				cline)) {
 			continue;
 		}
@@ -492,8 +491,7 @@ uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
 			break;
 
 		/* Prevent evicting already locked items */
-		if (ocf_cache_line_is_used(
-				cache->device->concurrency.cache_line,
+		if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 				cline)) {
 			continue;
 		}


@@ -52,7 +52,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
 	uint64_t start_line, end_line;
 	int ret = 0;
 	struct ocf_cache_line_concurrency *c =
-			cache->device->concurrency.cache_line;
+			ocf_cache_line_concurrency(cache);
 
 	start_line = ocf_bytes_2_lines(cache, start_byte);
 	end_line = ocf_bytes_2_lines(cache, end_byte);


@@ -81,7 +81,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
 		}
 
 		if (!ocf_cache_line_try_lock_wr(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				curr_cline)) {
 			break;
 		}
@@ -90,7 +90,8 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
 			ocf_purge_cleaning_policy(cache, curr_cline);
 		ocf_metadata_sparse_cache_line(cache, curr_cline);
 
-		ocf_cache_line_unlock_wr(cache->device->concurrency.cache_line,
+		ocf_cache_line_unlock_wr(
+				ocf_cache_line_concurrency(cache),
 				curr_cline);
 
 		if (prev_cline != cache->device->collision_table_entries)


@@ -44,8 +44,7 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
 	 * only valid bits
 	 */
 	if (!is_valid && !ocf_cache_line_are_waiters(
-			cache->device->concurrency.cache_line,
-			line)) {
+			ocf_cache_line_concurrency(cache), line)) {
 		ocf_purge_eviction_policy(cache, line);
 		ocf_metadata_remove_cache_line(cache, line);
 	}


@@ -213,8 +213,7 @@ static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
 {
 	OCF_DEBUG_TRACE(req->cache);
 
-	return ocf_req_async_lock_rd(
-			req->cache->device->concurrency.cache_line,
+	return ocf_req_async_lock_rd(ocf_cache_line_concurrency(req->cache),
 			req, _ocf_cleaner_on_resume);
 }