From a09587f52140ae386a54cca1684f03bf439adad1 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Fri, 5 Mar 2021 11:20:46 +0100 Subject: [PATCH] Introduce ocf_cache_line_is_locked_exclusively Function returns true if cacheline is locked (read or write) by exactly one entity with no waiters. This is useful for eviction. Assuming caller holds hash bucket write lock, having exclusive cacheline lock (either read or write) allows holder to remap cacheline safely. Typically during eviction hash bucket is unknown until resolved under cacheline lock, so locking cacheline exclusively (instead of locking and checking for exclusive lock) is not possible. More specifically this is the flow for synchronizing cacheline remap using ocf_cache_line_is_locked_exclusively: 1. acquire a cacheline (read or write) lock 2. resolve hash bucket 3. write-lock hash bucket 4. verify cacheline lock is exclusive Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_cache_line_concurrency.c | 31 +++++++++++++++++--- src/concurrency/ocf_cache_line_concurrency.h | 3 ++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c index 62745fb..df4141c 100644 --- a/src/concurrency/ocf_cache_line_concurrency.c +++ b/src/concurrency/ocf_cache_line_concurrency.c @@ -969,14 +969,18 @@ void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c, struct ocf_request for (i = 0; i < req->core_line_count; i++) { + ENV_BUG_ON(req->map[i].wr_locked); + if (req->map[i].status == LOOKUP_MISS) { /* MISS nothing to lock */ continue; } + if (!req->map[i].rd_locked) + continue; + line = req->map[i].coll_idx; - ENV_BUG_ON(!req->map[i].rd_locked); ENV_BUG_ON(line >= c->num_clines); __unlock_cache_line_rd(c, line); @@ -995,15 +999,18 @@ void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c, struct ocf_request OCF_DEBUG_RQ(req, "Unlock"); for (i = 0; i < req->core_line_count; i++) { + ENV_BUG_ON(req->map[i].rd_locked); 
if (req->map[i].status == LOOKUP_MISS) { /* MISS nothing to lock */ continue; } + if (!req->map[i].wr_locked) + continue; + line = req->map[i].coll_idx; - ENV_BUG_ON(!req->map[i].wr_locked); ENV_BUG_ON(line >= c->num_clines); __unlock_cache_line_wr(c, line); @@ -1039,8 +1046,6 @@ void ocf_req_unlock(struct ocf_cache_line_concurrency *c, struct ocf_request *re } else if (req->map[i].wr_locked) { __unlock_cache_line_wr(c, line); req->map[i].wr_locked = false; - } else { - ENV_BUG(); } } } @@ -1104,6 +1109,24 @@ bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c, return are; } +/* NOTE: it is the caller's responsibility to ensure that no one acquires + * a lock in the background */ +bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line; + env_atomic *access = &c->access[line]; + int val = env_atomic_read(access); + + ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE); + + if (ocf_cache_line_are_waiters(c, line)) + return false; + + return val == OCF_CACHE_LINE_ACCESS_ONE_RD || + val == OCF_CACHE_LINE_ACCESS_WR; +} + /* * */ diff --git a/src/concurrency/ocf_cache_line_concurrency.h b/src/concurrency/ocf_cache_line_concurrency.h index 29895fe..dc2dd0d 100644 --- a/src/concurrency/ocf_cache_line_concurrency.h +++ b/src/concurrency/ocf_cache_line_concurrency.h @@ -147,6 +147,9 @@ bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c, bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c, ocf_cache_line_t line); +bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache, + ocf_cache_line_t line); + /** * @brief un_lock request map info entry from from write or read access. *