Optimize cacheline locking in ocf_engine_prepare_clines

Hash bucket read/write lock is sufficient to safely attempt
cacheline trylock/lock. This change removes the global cacheline
lock RW semaphore and moves the cacheline trylock/lock under the
hash bucket read/write lock, respectively.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Adam Rutkowski
2019-07-31 15:13:32 -04:00
parent 5248093e1f
commit 30f22d4f47
8 changed files with 161 additions and 97 deletions
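Before the per-file hunks, here is a rough sketch of the locking pattern this change applies to each engine path. It is an illustration only, not code from the commit: the wrapper function name and the on_resume callback (assumed here to take a struct ocf_request pointer) are placeholders, while ocf_req_hash(), the ocf_req_hash_*lock*() calls, ocf_req_trylock_wr()/ocf_req_async_lock_wr() and OCF_LOCK_ACQUIRED are the identifiers used in the diffs below.

static int lock_req_write_under_hash_buckets(struct ocf_request *req,
		void (*on_resume)(struct ocf_request *req))
{
	int lock;

	/* Hash the request's core lines and read-lock the corresponding
	 * hash buckets so the cache mapping cannot change underneath us. */
	ocf_req_hash(req);
	ocf_req_hash_lock_rd(req);

	/* Fast path: try to take the cacheline locks without putting the
	 * request on a waiter list; safe under the hash bucket read lock. */
	lock = ocf_req_trylock_wr(req);
	if (lock == OCF_LOCK_ACQUIRED) {
		ocf_req_hash_unlock_rd(req);
		return lock;
	}

	/* Slow path: upgrade to the hash bucket write lock and take the
	 * lock asynchronously; the request may be queued and resumed via
	 * the callback once the cachelines become available. */
	ocf_req_hash_lock_upgrade(req);
	lock = ocf_req_async_lock_wr(req, on_resume);
	ocf_req_hash_unlock_wr(req);

	return lock;
}

The read-side engines (ocf_read_pt, ocf_read_wo) use the analogous ocf_req_trylock_rd()/ocf_req_async_lock_rd() pair.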

src/engine/engine_common.c

@@ -395,7 +395,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
}
}
static int ocf_engine_evict(struct ocf_request *req)
{
if (!ocf_engine_unmapped_count(req))
return 0;
@@ -417,6 +417,19 @@ static int lock_clines(struct ocf_request *req, enum ocf_engine_lock_type lock,
}
}
static int trylock_clines(struct ocf_request *req,
enum ocf_engine_lock_type lock)
{
switch (lock) {
case ocf_engine_lock_write:
return ocf_req_trylock_wr(req);
case ocf_engine_lock_read:
return ocf_req_trylock_rd(req);
default:
return OCF_LOCK_ACQUIRED;
}
}
int ocf_engine_prepare_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
@@ -426,48 +439,87 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
enum ocf_engine_lock_type lock_type;
struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
/* Calculate hashes for hash-bucket locking */
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
/* Travers to check if request is mapped fully */
/* Read-lock hash buckets associated with request target core & LBAs
* (core lines) to assure that cache mapping for these core lines does
* not change during traversation */
ocf_req_hash_lock_rd(req);
/* Traverse request to cache if there is hit */
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* We are holding hash buckets read lock, so we can attempt
* per-cacheline locking fast path, which would fail either if
* cachelines are already locked without putting request to a
* waiter list */
lock_type = engine_cbs->get_lock_type(req);
lock = lock_clines(req, lock_type, engine_cbs->resume);
lock = trylock_clines(req, lock_type);
if (lock == OCF_LOCK_ACQUIRED) {
/* Cachelines are mapped and locked, we don't need the
* hash bucket lock any more */
ocf_req_hash_unlock_rd(req);
} else {
/* Failed to acquire cachelines lock in fast path,
* acquire hash-buckets write lock and attempt the lock
* again, allowing slow path and async assignment of
* the lock. */
ocf_req_hash_lock_upgrade(req);
lock = lock_clines(req, lock_type, engine_cbs->resume);
ocf_req_hash_unlock_wr(req);
}
} else {
/* check if request should promote cachelines */
promote = ocf_promotion_req_should_promote(
req->cache->promotion_policy, req);
if (!promote)
if (!promote) {
req->info.mapping_error = 1;
ocf_req_hash_unlock_rd(req);
}
}
if (mapped || !promote) {
ocf_req_hash_unlock_rd(req);
} else {
/* need to attempt mapping / eviction */
ocf_req_hash_lock_upgrade(req); /*- Metadata WR access, eviction -----*/
if (!mapped && promote) {
/* Need to map (potentially evict) cachelines. Mapping must be
* performed holding (at least) hash-bucket write lock */
ocf_req_hash_lock_upgrade(req);
ocf_engine_map(req);
ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
if (!req->info.mapping_error) {
/* Lock cachelines, potentially putting the request on
* waiter list */
lock_type = engine_cbs->get_lock_type(req);
lock = trylock_clines(req, lock_type);
if (lock != OCF_LOCK_ACQUIRED) {
lock = lock_clines(req, lock_type,
engine_cbs->resume);
}
}
/* At this point the request is mapped or we need to evict,
* which is done under global metadata lock */
ocf_req_hash_unlock_wr(req);
if (req->info.mapping_error) {
/* Not mapped - evict cachelines */
ocf_metadata_start_exclusive_access(metadata_lock);
/* Now there is exclusive access for metadata. May
* traverse once again and evict cachelines if needed.
*/
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
if (!req->info.mapping_error) {
lock_type = engine_cbs->get_lock_type(req);
lock = trylock_clines(req, lock_type);
if (lock != OCF_LOCK_ACQUIRED) {
lock = lock_clines(req, lock_type,
engine_cbs->resume);
}
}
ocf_metadata_end_exclusive_access(metadata_lock);
}
if (!req->info.mapping_error) {
lock_type = engine_cbs->get_lock_type(req);
lock = lock_clines(req, lock_type, engine_cbs->resume);
}
}
return lock;
}
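
Because the hunk above interleaves removed and added lines, the resulting ocf_engine_prepare_clines flow may be easier to follow as a condensed paraphrase. This is not the literal post-commit code: declarations, the get_lock_type() calls and lock type handling are omitted, and trylock_or_lock_clines() is a placeholder for the repeated trylock-then-lock_clines sequence.

	ocf_req_hash(req);
	ocf_req_hash_lock_rd(req);
	ocf_engine_traverse(req);

	if (ocf_engine_is_mapped(req)) {
		/* Fully mapped: cacheline trylock fast path under the hash
		 * bucket read lock, otherwise upgrade and lock asynchronously. */
		lock = trylock_clines(req, lock_type);
		if (lock == OCF_LOCK_ACQUIRED) {
			ocf_req_hash_unlock_rd(req);
		} else {
			ocf_req_hash_lock_upgrade(req);
			lock = lock_clines(req, lock_type, engine_cbs->resume);
			ocf_req_hash_unlock_wr(req);
		}
	} else if (!ocf_promotion_req_should_promote(
			req->cache->promotion_policy, req)) {
		/* Not worth promoting: report mapping error, drop the lock. */
		req->info.mapping_error = 1;
		ocf_req_hash_unlock_rd(req);
	} else {
		/* Unmapped and worth promoting: map under the hash bucket
		 * write lock, locking cachelines on success. */
		ocf_req_hash_lock_upgrade(req);
		ocf_engine_map(req);
		if (!req->info.mapping_error)
			lock = trylock_or_lock_clines(req);
		ocf_req_hash_unlock_wr(req);

		if (req->info.mapping_error) {
			/* Mapping failed: evict and map again, this time
			 * under exclusive (global) metadata access. */
			ocf_metadata_start_exclusive_access(metadata_lock);
			if (ocf_engine_evict(req) == LOOKUP_MAPPED)
				ocf_engine_map(req);
			if (!req->info.mapping_error)
				lock = trylock_or_lock_clines(req);
			ocf_metadata_end_exclusive_access(metadata_lock);
		}
	}

	return lock;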

src/engine/engine_discard.c

@@ -235,12 +235,18 @@ static int _ocf_discard_step(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
lock = ocf_req_trylock_wr(req);
} else {
lock = OCF_LOCK_ACQUIRED;
}
ocf_req_hash_unlock_rd(req);
if (lock != OCF_LOCK_ACQUIRED) {
ocf_req_hash_lock_upgrade(req);
lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
ocf_req_hash_unlock_wr(req);
} else {
ocf_req_hash_unlock_rd(req);
}
if (lock >= 0) {
if (OCF_LOCK_ACQUIRED == lock) {

src/engine/engine_fast.c

@@ -195,11 +195,18 @@ int ocf_write_fast(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
if (mapped) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
lock = ocf_req_trylock_wr(req);
if (lock != OCF_LOCK_ACQUIRED) {
ocf_req_hash_lock_upgrade(req);
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
ocf_req_hash_unlock_wr(req);
} else {
ocf_req_hash_unlock_rd(req);
}
} else {
ocf_req_hash_unlock_rd(req);
}
ocf_req_hash_unlock_rd(req);
if (mapped) {
if (lock >= 0) {
OCF_DEBUG_RQ(req, "Fast path success");

src/engine/engine_pt.c

@@ -102,7 +102,7 @@ static const struct ocf_io_if _io_if_pt_resume = {
int ocf_read_pt(struct ocf_request *req)
{
bool use_cache = false;
int lock = OCF_LOCK_NOT_ACQUIRED;
int lock = OCF_LOCK_ACQUIRED;
OCF_DEBUG_TRACE(req->cache);
@@ -127,14 +127,17 @@ int ocf_read_pt(struct ocf_request *req)
/* There are mapped cache line,
* lock request for READ access
*/
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} else {
/* No mapped cache lines, no need to get lock */
lock = OCF_LOCK_ACQUIRED;
lock = ocf_req_trylock_rd(req);
}
}
ocf_req_hash_unlock_rd(req);
if (lock != OCF_LOCK_ACQUIRED) {
ocf_req_hash_lock_upgrade(req);
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
ocf_req_hash_unlock_wr(req);
} else {
ocf_req_hash_unlock_rd(req);
}
if (use_cache) {
/*

src/engine/engine_wi.c

@@ -154,12 +154,18 @@ int ocf_write_wi(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
lock = ocf_req_trylock_wr(req);
} else {
lock = OCF_LOCK_ACQUIRED;
}
ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/
if (lock != OCF_LOCK_ACQUIRED) {
ocf_req_hash_lock_upgrade(req);
lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
ocf_req_hash_unlock_wr(req);
} else {
ocf_req_hash_unlock_rd(req);
}
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {

src/engine/engine_wo.c

@@ -223,10 +223,16 @@ int ocf_read_wo(struct ocf_request *req)
/* There are mapped cache lines,
* lock request for READ access
*/
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
lock = ocf_req_trylock_rd(req);
}
ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
if (lock != OCF_LOCK_ACQUIRED) {
ocf_req_hash_lock_upgrade(req);
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
ocf_req_hash_unlock_wr(req);
} else {
ocf_req_hash_unlock_rd(req);
}
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {