Add getter function for cache->device->concurrency.cache_line
The purpose of this change is to facilitate unit testing.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
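The getter itself is not visible in the hunks below. A minimal sketch of what it presumably looks like, assuming it is a thin static inline wrapper over the existing field (names and placement are inferred from the call sites, not shown in this diff):

	/* Assumed shape of the new getter - its actual definition is not
	 * part of the hunks shown here. */
	static inline struct ocf_cache_line_concurrency *
	ocf_cache_line_concurrency(ocf_cache_t cache)
	{
		return cache->device->concurrency.cache_line;
	}

With the field access hidden behind a single function, unit tests can substitute their own implementation (for example via a linker wrap or a test-local stub) instead of having to construct a fully populated cache->device object.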
@@ -64,7 +64,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
 		ocf_engine_invalidate(req);
 	} else {
-		ocf_req_unlock(cache->device->concurrency.cache_line, req);
+		ocf_req_unlock(ocf_cache_line_concurrency(cache), req);
 
 		/* put the request at the last point of the completion path */
 		ocf_req_put(req);
@@ -434,7 +434,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 		req->error |= error;
 
 		/* End request and do not processing */
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
 				req);
 
 		/* Complete request */
@@ -451,8 +451,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 
 static int _lock_clines(struct ocf_request *req)
 {
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(req->cache);
 	enum ocf_engine_lock_type lock_type =
 			req->engine_cbs->get_lock_type(req);
 
@@ -742,8 +741,7 @@ static int _ocf_engine_refresh(struct ocf_request *req)
 	req->complete(req, req->error);
 
 	/* Release WRITE lock of request */
-	ocf_req_unlock(req->cache->device->concurrency.cache_line,
-			req);
+	ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Release OCF request */
 	ocf_req_put(req);
@@ -147,7 +147,7 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	/* Release WRITE lock of request */
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	if (req->error) {
 		ocf_metadata_error(req->cache);
@@ -236,7 +236,7 @@ static int _ocf_discard_step(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				req, _ocf_discard_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
@@ -46,8 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_READ);
 		ocf_engine_push_req_front_pt(req);
 	} else {
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Complete request */
 		req->complete(req, req->error);
@@ -132,7 +131,7 @@ int ocf_read_fast(struct ocf_request *req)
 	if (hit && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -204,7 +203,7 @@ int ocf_write_fast(struct ocf_request *req)
 	if (mapped && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -31,7 +31,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to flush metadata to cache");
 
-	ocf_req_unlock(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Put OCF request - decrease reference counter */
 	ocf_req_put(req);
@@ -34,7 +34,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
 	/* Complete request */
 	req->complete(req, req->error);
 
-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Release OCF request */
 	ocf_req_put(req);
@@ -24,8 +24,8 @@
 
 static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
 {
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(
+			req->cache);
 
 	if (error)
 		req->error |= error;
@@ -60,7 +60,7 @@ static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -25,7 +25,7 @@ static const struct ocf_io_if _io_if_wi_update_metadata = {
 
 int _ocf_write_wi_next_pass(struct ocf_request *req)
 {
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	if (req->wi_second_pass) {
 		req->complete(req, req->error);
@@ -75,7 +75,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -128,8 +128,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	if (req->error) {
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 		req->complete(req, req->error);
 
@@ -200,7 +199,7 @@ int ocf_write_wi(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, _ocf_write_wi_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
@@ -33,7 +33,7 @@ static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to read data from cache");
 
-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Complete request */
 	req->complete(req, req->error);
@@ -169,8 +169,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
 	if (!req->info.dirty_any || req->error) {
 		OCF_DEBUG_RQ(req, "Completion");
 		req->complete(req, req->error);
-		ocf_req_unlock_rd(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 		ocf_req_put(req);
 		return;
 	}
@@ -238,7 +237,7 @@ int ocf_read_wo(struct ocf_request *req)
 		 * lock request for READ access
 		 */
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -34,8 +34,7 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
 		ocf_engine_invalidate(req);
 	} else {
 		/* Unlock reqest from WRITE access */
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Complete request */
 		req->complete(req, req->info.core_error ? req->error : 0);
@@ -31,7 +31,7 @@ static int ocf_zero_purge(struct ocf_request *req)
 		ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
 	}
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -153,7 +153,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
 
 	/* Some cache line are mapped, lock request for WRITE access */
 	lock = ocf_req_async_lock_wr(
-			req->cache->device->concurrency.cache_line,
+			ocf_cache_line_concurrency(req->cache),
 			req, ocf_engine_on_resume);
 
 	if (lock >= 0) {