Use cline concurrency ctx instead of cache

The cacheline concurrency functions have their interface changed
so that the private concurrency context is passed explicitly as a
parameter, rather than being taken from
cache->device->concurrency.cache_line.

The cache pointer is no longer provided as a parameter to these
functions. Instead, the cacheline concurrency context now holds a
pointer to the cache structure (for logging purposes only).

The purpose of this change is to facilitate unit testing.
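
With the context passed explicitly, a unit test can exercise the
locking paths against a standalone concurrency context instead of a
fully initialized cache->device. A minimal sketch (the init/deinit
helper names and the line count here are assumptions for
illustration, not necessarily the real API):

	/* Hypothetical test sketch -- helper names are assumed. */
	static void resume_cb(struct ocf_request *req)
	{
		/* called when a lock that was not granted immediately
		 * is finally acquired */
	}

	static void test_read_lock(ocf_cache_t cache, struct ocf_request *req)
	{
		struct ocf_cache_line_concurrency *c;
		int lock;

		/* standalone context; cache pointer is for logging only */
		ocf_cache_line_concurrency_init(&c, 1024, cache);

		lock = ocf_req_async_lock_rd(c, req, resume_cb);
		ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);

		ocf_req_unlock_rd(c, req);
		ocf_cache_line_concurrency_deinit(&c);
	}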

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date:   2021-02-24 17:29:39 -06:00
Parent: 0f34e46375
Commit: cf5f82b253
22 changed files with 200 additions and 151 deletions


@@ -64,7 +64,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
 		ocf_engine_invalidate(req);
 	} else {
-		ocf_req_unlock(req);
+		ocf_req_unlock(cache->device->concurrency.cache_line, req);
 
 		/* put the request at the last point of the completion path */
 		ocf_req_put(req);


@@ -396,7 +396,8 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 		req->error |= error;
 
 		/* End request and do not processing */
-		ocf_req_unlock(req);
+		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+				req);
 
 		/* Complete request */
 		req->complete(req, error);
@@ -414,12 +415,14 @@ static int lock_clines(struct ocf_request *req,
 		const struct ocf_engine_callbacks *engine_cbs)
 {
 	enum ocf_engine_lock_type lock_type = engine_cbs->get_lock_type(req);
+	struct ocf_cache_line_concurrency *c =
+			req->cache->device->concurrency.cache_line;
 
 	switch (lock_type) {
 	case ocf_engine_lock_write:
-		return ocf_req_async_lock_wr(req, engine_cbs->resume);
+		return ocf_req_async_lock_wr(c, req, engine_cbs->resume);
 	case ocf_engine_lock_read:
-		return ocf_req_async_lock_rd(req, engine_cbs->resume);
+		return ocf_req_async_lock_rd(c, req, engine_cbs->resume);
 	default:
 		return OCF_LOCK_ACQUIRED;
 	}
@@ -703,7 +706,8 @@ static int _ocf_engine_refresh(struct ocf_request *req)
 		req->complete(req, req->error);
 
 		/* Release WRITE lock of request */
-		ocf_req_unlock(req);
+		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+				req);
 
 		/* Release OCF request */
 		ocf_req_put(req);


@@ -147,7 +147,7 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	/* Release WRITE lock of request */
-	ocf_req_unlock_wr(req);
+	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
 
 	if (req->error) {
 		ocf_metadata_error(req->cache);
@@ -235,7 +235,9 @@ static int _ocf_discard_step(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
+		lock = ocf_req_async_lock_wr(
+				cache->device->concurrency.cache_line,
+				req, _ocf_discard_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
 	}


@@ -46,7 +46,8 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_READ);
 		ocf_engine_push_req_front_pt(req);
 	} else {
-		ocf_req_unlock(req);
+		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+				req);
 
 		/* Complete request */
 		req->complete(req, req->error);
@@ -130,7 +131,9 @@ int ocf_read_fast(struct ocf_request *req)
 	if (hit && part_has_space) {
 		ocf_io_start(&req->ioi.io);
-		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+		lock = ocf_req_async_lock_rd(
+				req->cache->device->concurrency.cache_line,
+				req, ocf_engine_on_resume);
 	}
 
 	ocf_hb_req_prot_unlock_rd(req);
@@ -200,7 +203,9 @@ int ocf_write_fast(struct ocf_request *req)
 	if (mapped && part_has_space) {
 		ocf_io_start(&req->ioi.io);
-		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
+		lock = ocf_req_async_lock_wr(
+				req->cache->device->concurrency.cache_line,
+				req, ocf_engine_on_resume);
 	}
 
 	ocf_hb_req_prot_unlock_rd(req);


@@ -31,7 +31,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to flush metadata to cache");
 
-	ocf_req_unlock(req);
+	ocf_req_unlock(req->cache->device->concurrency.cache_line, req);
 
 	/* Put OCF request - decrease reference counter */
 	ocf_req_put(req);


@@ -34,7 +34,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
 	/* Complete request */
 	req->complete(req, req->error);
 
-	ocf_req_unlock_rd(req);
+	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
 
 	/* Release OCF request */
 	ocf_req_put(req);
@@ -127,7 +127,10 @@ int ocf_read_pt(struct ocf_request *req)
 			/* There are mapped cache line,
 			 * lock request for READ access
 			 */
-			lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+			lock = ocf_req_async_lock_rd(
+					req->cache->device->concurrency.
+							cache_line,
+					req, ocf_engine_on_resume);
 		} else {
 			/* No mapped cache lines, no need to get lock */
 			lock = OCF_LOCK_ACQUIRED;


@@ -24,6 +24,9 @@
 static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
 {
+	struct ocf_cache_line_concurrency *c =
+			req->cache->device->concurrency.cache_line;
+
 	if (error)
 		req->error |= error;
@@ -41,8 +44,7 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_READ);
 		ocf_engine_push_req_front_pt(req);
 	} else {
-		ocf_req_unlock(req);
-
+		ocf_req_unlock(c, req);
 		/* Complete request */
 		req->complete(req, req->error);


@@ -60,7 +60,7 @@ static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req);
+	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
 
 	req->complete(req, req->error);


@@ -25,7 +25,7 @@ static const struct ocf_io_if _io_if_wi_update_metadata = {
 
 int _ocf_write_wi_next_pass(struct ocf_request *req)
 {
-	ocf_req_unlock_wr(req);
+	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
 
 	if (req->wi_second_pass) {
 		req->complete(req, req->error);
@@ -75,7 +75,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req);
+	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
 
 	req->complete(req, req->error);
@@ -128,7 +128,8 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	if (req->error) {
-		ocf_req_unlock_wr(req);
+		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
+				req);
 
 		req->complete(req, req->error);
@@ -198,7 +199,9 @@ int ocf_write_wi(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
+		lock = ocf_req_async_lock_wr(
+				req->cache->device->concurrency.cache_line,
+				req, _ocf_write_wi_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
 	}


@@ -33,7 +33,7 @@ static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to read data from cache");
 
-	ocf_req_unlock_rd(req);
+	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
 
 	/* Complete request */
 	req->complete(req, req->error);
@@ -169,7 +169,8 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
 	if (!req->info.dirty_any || req->error) {
 		OCF_DEBUG_RQ(req, "Completion");
 		req->complete(req, req->error);
-		ocf_req_unlock_rd(req);
+		ocf_req_unlock_rd(req->cache->device->concurrency.cache_line,
+				req);
 		ocf_req_put(req);
 		return;
 	}
@@ -236,7 +237,9 @@ int ocf_read_wo(struct ocf_request *req)
 		/* There are mapped cache lines,
 		 * lock request for READ access
 		 */
-		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+		lock = ocf_req_async_lock_rd(
+				req->cache->device->concurrency.cache_line,
+				req, ocf_engine_on_resume);
 	}
 
 	ocf_hb_req_prot_unlock_rd(req); /*- END Metadata RD access -----------------*/


@@ -34,7 +34,8 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
 		ocf_engine_invalidate(req);
 	} else {
 		/* Unlock reqest from WRITE access */
-		ocf_req_unlock_wr(req);
+		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
+				req);
 
 		/* Complete request */
 		req->complete(req, req->info.core_error ? req->error : 0);


@@ -31,7 +31,7 @@ static int ocf_zero_purge(struct ocf_request *req)
 		ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
 	}
 
-	ocf_req_unlock_wr(req);
+	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
 
 	req->complete(req, req->error);
@@ -152,7 +152,9 @@ void ocf_engine_zero_line(struct ocf_request *req)
 	req->io_if = &_io_if_ocf_zero_do;
 
 	/* Some cache line are mapped, lock request for WRITE access */
-	lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
+	lock = ocf_req_async_lock_wr(
+			req->cache->device->concurrency.cache_line,
+			req, ocf_engine_on_resume);
 
 	if (lock >= 0) {
 		ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);