Introduce ocf_cache_line_is_locked_exclusively

Function returns true if the cacheline is locked (for read or
write) by exactly one entity and there are no lock waiters.

This is useful for eviction. Assuming the caller holds the hash
bucket write lock, holding an exclusive cacheline lock (either read
or write) allows the holder to remap the cacheline safely. Typically
during eviction the hash bucket is unknown until it is resolved under
the cacheline lock, so locking the cacheline exclusively up front
(instead of locking it and then checking for exclusivity) is not
possible.

More specifically, this is the flow for synchronizing a cacheline
remap using ocf_cache_line_is_locked_exclusively (sketched in code
below the list):
1. acquire a cacheline (read or write) lock
2. resolve hash bucket
3. write-lock hash bucket
4. verify cacheline lock is exclusive
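
A minimal sketch of that flow from the eviction path's point of view;
every helper below except ocf_cache_line_is_locked_exclusively() is a
hypothetical placeholder, not actual OCF API:

    /* Hypothetical eviction-side sketch; "placeholder" helpers are
     * illustrative only. */
    static bool try_remap_cache_line(struct ocf_cache *cache,
                    ocf_cache_line_t line)
    {
            ocf_cache_line_t hash;

            /* 1. cacheline lock (read or write) is assumed held by caller */

            /* 2. resolve hash bucket from mapping read under cacheline lock */
            hash = resolve_hash_bucket(cache, line);        /* placeholder */

            /* 3. write-lock the hash bucket */
            hash_bucket_write_lock(cache, hash);            /* placeholder */

            /* 4. remap only if we are the sole holder and nobody waits */
            if (!ocf_cache_line_is_locked_exclusively(cache, line)) {
                    hash_bucket_write_unlock(cache, hash);  /* placeholder */
                    return false;
            }

            remap_cache_line(cache, line, hash);            /* placeholder */
            hash_bucket_write_unlock(cache, hash);          /* placeholder */
            return true;
    }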

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date:   2021-03-05 11:20:46 +01:00
Parent: ac9bd5b094
Commit: a09587f521

2 changed files with 30 additions and 4 deletions


@@ -969,14 +969,18 @@ void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c, struct ocf_request
 	for (i = 0; i < req->core_line_count; i++) {
-		ENV_BUG_ON(req->map[i].wr_locked);
 		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
+		if (!req->map[i].rd_locked)
+			continue;
 		line = req->map[i].coll_idx;
-		ENV_BUG_ON(!req->map[i].rd_locked);
 		ENV_BUG_ON(line >= c->num_clines);
 		__unlock_cache_line_rd(c, line);
@@ -995,15 +999,18 @@ void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c, struct ocf_request
 	OCF_DEBUG_RQ(req, "Unlock");
 	for (i = 0; i < req->core_line_count; i++) {
-		ENV_BUG_ON(req->map[i].rd_locked);
 		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
+		if (!req->map[i].wr_locked)
+			continue;
 		line = req->map[i].coll_idx;
-		ENV_BUG_ON(!req->map[i].wr_locked);
 		ENV_BUG_ON(line >= c->num_clines);
 		__unlock_cache_line_wr(c, line);
@@ -1039,8 +1046,6 @@ void ocf_req_unlock(struct ocf_cache_line_concurrency *c, struct ocf_request *re
 		} else if (req->map[i].wr_locked) {
 			__unlock_cache_line_wr(c, line);
 			req->map[i].wr_locked = false;
-		} else {
-			ENV_BUG();
 		}
 	}
 }
@@ -1104,6 +1109,24 @@ bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
 	return are;
 }
+
+/* NOTE: it is caller responsibility to assure that noone acquires
+ * a lock in background */
+bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
+		ocf_cache_line_t line)
+{
+	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+	env_atomic *access = &c->access[line];
+	int val = env_atomic_read(access);
+
+	ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE);
+
+	if (ocf_cache_line_are_waiters(c, line))
+		return false;
+
+	return val == OCF_CACHE_LINE_ACCESS_ONE_RD ||
+			val == OCF_CACHE_LINE_ACCESS_WR;
+}
+
 /*
  *
  */
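
For readability, here is a hedged sketch of the access counter
encoding the new helper relies on. The exact constant values are
assumptions inferred from the comparisons above, not something this
diff spells out:

    #include <limits.h>
    #include <stdbool.h>

    /* Assumed encoding of the per-cacheline access counter:
     *   0       - idle, no lock held (OCF_CACHE_LINE_ACCESS_IDLE)
     *   n >= 1  - n concurrent readers (OCF_CACHE_LINE_ACCESS_ONE_RD == 1)
     *   INT_MAX - single writer (OCF_CACHE_LINE_ACCESS_WR, assumed sentinel)
     */
    static bool is_exclusive(int access_counter, bool has_waiters)
    {
            /* exclusive == exactly one reader or the writer, nobody queued */
            return !has_waiters &&
                    (access_counter == 1 || access_counter == INT_MAX);
    }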


@@ -147,6 +147,9 @@ bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line);
+
+bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
+		ocf_cache_line_t line);
 /**
  * @brief un_lock request map info entry from from write or read access.
  *