diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c
index c1cf1fe..36059ba 100644
--- a/src/cleaning/acp.c
+++ b/src/cleaning/acp.c
@@ -386,7 +386,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
struct ocf_map_info info;
bool locked = false;
- ocf_metadata_hash_lock_rd(&cache->metadata.lock, core_id, core_line);
+ ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, core_id, core_line);
ocf_engine_lookup_map_entry(cache, &info, core_id,
core_line);
@@ -397,7 +397,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
locked = true;
}
- ocf_metadata_hash_unlock_rd(&cache->metadata.lock, core_id, core_line);
+ ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock, core_id, core_line);
return locked ? info.coll_idx : cache->device->collision_table_entries;
}
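The hunk above switches the ACP cleaner from the old ocf_metadata_hash_(un)lock_rd names to the protected per-core-line variants. A minimal caller sketch of the same bracketing pattern follows; the helper name is hypothetical and not part of the patch, and it assumes ocf_engine_lookup_map_entry and LOOKUP_HIT are visible as they are in acp.c.

static bool example_core_line_is_mapped(struct ocf_cache *cache,
		uint32_t core_id, uint64_t core_line)
{
	struct ocf_map_info info;
	bool mapped;

	/* takes the global metadata shared lock plus the bucket's read lock */
	ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, core_id, core_line);

	ocf_engine_lookup_map_entry(cache, &info, core_id, core_line);
	mapped = (info.status == LOOKUP_HIT);

	/* releases the bucket's read lock, then the global shared lock */
	ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock, core_id, core_line);

	return mapped;
}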
diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c
index a3db313..7f4591e 100644
--- a/src/concurrency/ocf_metadata_concurrency.c
+++ b/src/concurrency/ocf_metadata_concurrency.c
@@ -155,14 +155,12 @@ void ocf_metadata_end_exclusive_access(
env_rwsem_up_write(&metadata_lock->global);
}
-void ocf_metadata_start_shared_access(
- struct ocf_metadata_lock *metadata_lock)
+void ocf_metadata_start_shared_access(struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_down_read(&metadata_lock->global);
}
-int ocf_metadata_try_start_shared_access(
- struct ocf_metadata_lock *metadata_lock)
+int ocf_metadata_try_start_shared_access(struct ocf_metadata_lock *metadata_lock)
{
return env_rwsem_down_read_trylock(&metadata_lock->global);
}
@@ -172,7 +170,15 @@ void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock)
env_rwsem_up_read(&metadata_lock->global);
}
-void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
+/* NOTE: Calling 'naked' lock/unlock requires the caller to hold the global
+   metadata shared (i.e. read) lock.
+   NOTE: Using 'naked' variants to lock multiple hash buckets is prone to
+   deadlocks unless the buckets are locked in order of increasing hash
+   bucket number. The preferred way to lock multiple hash buckets is to use
+   the request lock routines ocf_hb_req_prot_(un)lock_(rd/wr).
+*/
+static inline void ocf_hb_id_naked_lock(
+ struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
@@ -185,7 +191,8 @@ void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
ENV_BUG();
}
-void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock,
+static inline void ocf_hb_id_naked_unlock(
+ struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
@@ -198,7 +205,7 @@ void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock,
ENV_BUG();
}
-int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock,
+static int ocf_hb_id_naked_trylock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
int result = -1;
@@ -219,75 +226,116 @@ int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock,
return result;
}
-void ocf_metadata_lock_hash_rd(struct ocf_metadata_lock *metadata_lock,
- ocf_cache_line_t hash)
-{
- ocf_metadata_start_shared_access(metadata_lock);
- ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_RD);
-}
-
-void ocf_metadata_unlock_hash_rd(struct ocf_metadata_lock *metadata_lock,
- ocf_cache_line_t hash)
-{
- ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_RD);
- ocf_metadata_end_shared_access(metadata_lock);
-}
-
-void ocf_metadata_lock_hash_wr(struct ocf_metadata_lock *metadata_lock,
- ocf_cache_line_t hash)
-{
- ocf_metadata_start_shared_access(metadata_lock);
- ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_WR);
-}
-
-void ocf_metadata_unlock_hash_wr(struct ocf_metadata_lock *metadata_lock,
- ocf_cache_line_t hash)
-{
- ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_WR);
- ocf_metadata_end_shared_access(metadata_lock);
-}
-
-/* NOTE: attempt to acquire hash lock for multiple core lines may end up
- * in deadlock. In order to hash lock multiple core lines safely, use
- * ocf_req_hash_lock_* functions */
-void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock,
+bool ocf_hb_cline_naked_trylock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
- ocf_metadata_start_shared_access(metadata_lock);
- ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_RD);
+ return (0 == ocf_hb_id_naked_trylock(metadata_lock, hash,
+ OCF_METADATA_WR));
}
-void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+bool ocf_hb_cline_naked_trylock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
- ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_RD);
+ return (0 == ocf_hb_id_naked_trylock(metadata_lock, hash,
+ OCF_METADATA_RD));
+}
+
+void ocf_hb_cline_naked_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_hb_id_naked_unlock(metadata_lock, hash, OCF_METADATA_RD);
+}
+
+void ocf_hb_cline_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_hb_id_naked_unlock(metadata_lock, hash, OCF_METADATA_WR);
+}
+
+/* common part of protected hash bucket lock routines */
+static inline void ocf_hb_id_prot_lock_common(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw)
+{
+ ocf_metadata_start_shared_access(metadata_lock);
+ ocf_hb_id_naked_lock(metadata_lock, hash, rw);
+}
+
+/* common part of protected hash bucket unlock routines */
+static inline void ocf_hb_id_prot_unlock_common(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw)
+{
+ ocf_hb_id_naked_unlock(metadata_lock, hash, rw);
ocf_metadata_end_shared_access(metadata_lock);
}
-void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock,
+/* NOTE: a caller may lock at most one hash bucket at a time using the
+   protected variants of the lock routines. */
+void ocf_hb_cline_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
- ocf_metadata_start_shared_access(metadata_lock);
- ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_WR);
+ ocf_hb_id_prot_lock_common(metadata_lock, hash,
+ OCF_METADATA_WR);
}
-void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+void ocf_hb_cline_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
- ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_WR);
- ocf_metadata_end_shared_access(metadata_lock);
+ ocf_hb_id_prot_unlock_common(metadata_lock, hash,
+ OCF_METADATA_WR);
+}
+
+void ocf_hb_cline_prot_lock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_hb_id_prot_lock_common(metadata_lock, hash,
+ OCF_METADATA_RD);
+}
+
+void ocf_hb_cline_prot_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
+ core_line, core_id);
+
+ ocf_hb_id_prot_unlock_common(metadata_lock, hash,
+ OCF_METADATA_RD);
+}
+
+void ocf_hb_hash_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash)
+{
+ ocf_hb_id_prot_lock_common(metadata_lock, hash,
+ OCF_METADATA_WR);
+}
+
+void ocf_hb_hash_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash)
+{
+ ocf_hb_id_prot_unlock_common(metadata_lock, hash,
+ OCF_METADATA_WR);
}
/* number of hash entries */
@@ -340,59 +388,59 @@ void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
for (hash = _MIN_HASH(req); hash <= _MAX_HASH(req); \
hash = _HASH_NEXT(req, hash))
-void ocf_req_hash_lock_rd(struct ocf_request *req)
+void ocf_hb_req_prot_lock_rd(struct ocf_request *req)
{
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
for_each_req_hash_asc(req, hash) {
- ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
+ ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
}
-void ocf_req_hash_unlock_rd(struct ocf_request *req)
+void ocf_hb_req_prot_unlock_rd(struct ocf_request *req)
{
ocf_cache_line_t hash;
for_each_req_hash_asc(req, hash) {
- ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
+ ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
}
-void ocf_req_hash_lock_wr(struct ocf_request *req)
+void ocf_hb_req_prot_lock_wr(struct ocf_request *req)
{
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
for_each_req_hash_asc(req, hash) {
- ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
+ ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
}
-void ocf_req_hash_lock_upgrade(struct ocf_request *req)
+void ocf_hb_req_prot_lock_upgrade(struct ocf_request *req)
{
ocf_cache_line_t hash;
for_each_req_hash_asc(req, hash) {
- ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
+ ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
for_each_req_hash_asc(req, hash) {
- ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
+ ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
}
-void ocf_req_hash_unlock_wr(struct ocf_request *req)
+void ocf_hb_req_prot_unlock_wr(struct ocf_request *req)
{
ocf_cache_line_t hash;
for_each_req_hash_asc(req, hash) {
- ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
+ ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
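The request-level routines above take the global shared lock once and then lock every bucket of the request in ascending hash order (for_each_req_hash_asc), which is what makes them deadlock-free; note that ocf_hb_req_prot_lock_upgrade is not atomic, since it drops the read locks before taking the write locks. The sketch below applies the same ordering rule by hand for two arbitrary buckets; it is hypothetical, and would have to live in this compilation unit because the naked helpers are static.

static void example_wr_lock_two_buckets(struct ocf_metadata_lock *lock,
		ocf_cache_line_t hash_a, ocf_cache_line_t hash_b)
{
	ocf_cache_line_t lo = hash_a < hash_b ? hash_a : hash_b;
	ocf_cache_line_t hi = hash_a < hash_b ? hash_b : hash_a;

	/* global shared lock first, then buckets in increasing hash order */
	ocf_metadata_start_shared_access(lock);
	ocf_hb_id_naked_lock(lock, lo, OCF_METADATA_WR);
	if (hi != lo)
		ocf_hb_id_naked_lock(lock, hi, OCF_METADATA_WR);

	/* ... modify metadata covered by both buckets ... */

	if (hi != lo)
		ocf_hb_id_naked_unlock(lock, hi, OCF_METADATA_WR);
	ocf_hb_id_naked_unlock(lock, lo, OCF_METADATA_WR);
	ocf_metadata_end_shared_access(lock);
}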
diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h
index 92bc89a..ed647ae 100644
--- a/src/concurrency/ocf_metadata_concurrency.h
+++ b/src/concurrency/ocf_metadata_concurrency.h
@@ -136,32 +136,39 @@ static inline void ocf_metadata_status_bits_unlock(
ocf_metadata_status_bits_unlock(&cache->metadata.lock, \
OCF_METADATA_WR)
-/* lock/unlock single hash */
-void ocf_metadata_lock_hash_rd(struct ocf_metadata_lock *metadata_lock,
+void ocf_hb_cline_prot_lock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+void ocf_hb_cline_prot_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+
+void ocf_hb_id_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash);
-void ocf_metadata_unlock_hash_rd(struct ocf_metadata_lock *metadata_lock,
- ocf_cache_line_t hash);
-void ocf_metadata_lock_hash_wr(struct ocf_metadata_lock *metadata_lock,
- ocf_cache_line_t hash);
-void ocf_metadata_unlock_hash_wr(struct ocf_metadata_lock *metadata_lock,
+void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash);
-/* lock/unlock single hash provided core id and core line */
-void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock,
+/* caller must hold global metadata read lock */
+bool ocf_hb_cline_naked_trylock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
-void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock,
+void ocf_hb_cline_naked_unlock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
-void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock,
+
+void ocf_hb_cline_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
-void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+void ocf_hb_cline_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+
+/* caller must hold global metadata read lock */
+bool ocf_hb_cline_naked_trylock_wr(struct ocf_metadata_lock *metadata_lock,
+ uint32_t core_id, uint64_t core_line);
+void ocf_hb_cline_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
/* lock entire request in deadlock-free manner */
-void ocf_req_hash_lock_rd(struct ocf_request *req);
-void ocf_req_hash_unlock_rd(struct ocf_request *req);
-void ocf_req_hash_lock_wr(struct ocf_request *req);
-void ocf_req_hash_unlock_wr(struct ocf_request *req);
-void ocf_req_hash_lock_upgrade(struct ocf_request *req);
+void ocf_hb_req_prot_lock_rd(struct ocf_request *req);
+void ocf_hb_req_prot_unlock_rd(struct ocf_request *req);
+void ocf_hb_req_prot_lock_wr(struct ocf_request *req);
+void ocf_hb_req_prot_unlock_wr(struct ocf_request *req);
+void ocf_hb_req_prot_lock_upgrade(struct ocf_request *req);
/* collision table page lock interface */
void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock,
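For reference, the request-level API declared above is used by the engines in the pattern sketched below, mirroring the call sites renamed in the following hunks; the function name is illustrative only.

static void example_check_mapping(struct ocf_request *req)
{
	/* hash the request's core lines into bucket ids */
	ocf_req_hash(req);

	/* global shared lock plus all of the request's buckets, read mode */
	ocf_hb_req_prot_lock_rd(req);

	/* cache mapping for the request's core lines cannot change here */
	ocf_engine_traverse(req);

	ocf_hb_req_prot_unlock_rd(req);
}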
diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c
index 32835b6..db7569f 100644
--- a/src/engine/engine_common.c
+++ b/src/engine/engine_common.c
@@ -434,17 +434,17 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req,
/* requests to disabled partitions go in pass-through */
if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
ocf_req_set_mapping_error(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
return lock_status;
}
if (!ocf_part_has_space(req)) {
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
goto eviction;
}
/* Mapping must be performed holding (at least) hash-bucket write lock */
- ocf_req_hash_lock_upgrade(req);
+ ocf_hb_req_prot_lock_upgrade(req);
ocf_engine_map(req);
@@ -455,11 +455,11 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req,
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
return lock_status;
}
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
eviction:
ocf_metadata_start_exclusive_access(metadata_lock);
@@ -505,7 +505,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
/* Read-lock hash buckets associated with request target core & LBAs
* (core lines) to assure that cache mapping for these core lines does
* not change during traversation */
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Traverse to check if request is mapped fully */
ocf_engine_traverse(req);
@@ -513,7 +513,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
mapped = ocf_engine_is_mapped(req);
if (mapped) {
lock = lock_clines(req, engine_cbs);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
return lock;
}
@@ -522,7 +522,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
req->cache->promotion_policy, req);
if (!promote) {
ocf_req_set_mapping_error(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
return lock;
}
@@ -676,11 +676,11 @@ static int _ocf_engine_refresh(struct ocf_request *req)
int result;
/* Check under metadata RD lock */
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
result = ocf_engine_check(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
if (result == 0) {
diff --git a/src/engine/engine_discard.c b/src/engine/engine_discard.c
index af921cf..a2d97c6 100644
--- a/src/engine/engine_discard.c
+++ b/src/engine/engine_discard.c
@@ -170,7 +170,7 @@ int _ocf_discard_step_do(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) {
/* There are mapped cache line, need to remove them */
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
@@ -181,16 +181,16 @@ int _ocf_discard_step_do(struct ocf_request *req)
_ocf_discard_step_complete);
}
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Even if no cachelines are mapped they could be tracked in promotion
* policy. RD lock suffices. */
ocf_promotion_req_purge(req->cache->promotion_policy, req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
OCF_DEBUG_RQ(req, "Discard");
_ocf_discard_step_complete(req, 0);
@@ -228,7 +228,7 @@ static int _ocf_discard_step(struct ocf_request *req)
0));
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Travers to check if request is mapped fully */
ocf_engine_traverse(req);
@@ -240,7 +240,7 @@ static int _ocf_discard_step(struct ocf_request *req)
lock = OCF_LOCK_ACQUIRED;
}
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
if (lock >= 0) {
if (OCF_LOCK_ACQUIRED == lock) {
diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c
index bd2b5f6..abb9695 100644
--- a/src/engine/engine_fast.c
+++ b/src/engine/engine_fast.c
@@ -72,14 +72,14 @@ static int _ocf_read_fast_do(struct ocf_request *req)
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
/* Submit IO */
@@ -119,7 +119,7 @@ int ocf_read_fast(struct ocf_request *req)
/*- Metadata RD access -----------------------------------------------*/
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Traverse request to cache if there is hit */
ocf_engine_traverse(req);
@@ -133,7 +133,7 @@ int ocf_read_fast(struct ocf_request *req)
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
}
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
if (hit && part_has_space) {
OCF_DEBUG_RQ(req, "Fast path success");
@@ -189,7 +189,7 @@ int ocf_write_fast(struct ocf_request *req)
/*- Metadata RD access -----------------------------------------------*/
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Traverse request to cache if there is hit */
ocf_engine_traverse(req);
@@ -203,7 +203,7 @@ int ocf_write_fast(struct ocf_request *req)
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
if (mapped && part_has_space) {
if (lock >= 0) {
diff --git a/src/engine/engine_inv.c b/src/engine/engine_inv.c
index c5fdce6..0a15137 100644
--- a/src/engine/engine_inv.c
+++ b/src/engine/engine_inv.c
@@ -43,9 +43,9 @@ static int _ocf_invalidate_do(struct ocf_request *req)
ENV_BUG_ON(env_atomic_read(&req->req_remaining));
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
ocf_purge_map_info(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
env_atomic_inc(&req->req_remaining);
diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c
index ea28664..4ddb33b 100644
--- a/src/engine/engine_pt.c
+++ b/src/engine/engine_pt.c
@@ -56,10 +56,10 @@ int ocf_read_pt_do(struct ocf_request *req)
ocf_req_get(req);
if (req->info.dirty_any) {
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Need to clean, start it */
ocf_engine_clean(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
/* Do not processing, because first we need to clean request */
ocf_req_put(req);
@@ -70,14 +70,14 @@ int ocf_read_pt_do(struct ocf_request *req)
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
/* Submit read IO to the core */
@@ -115,7 +115,7 @@ int ocf_read_pt(struct ocf_request *req)
req->io_if = &_io_if_pt_resume;
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(req);
@@ -134,7 +134,7 @@ int ocf_read_pt(struct ocf_request *req)
}
}
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
if (use_cache) {
/*
diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c
index 7b4fcb0..3c8160e 100644
--- a/src/engine/engine_rd.c
+++ b/src/engine/engine_rd.c
@@ -151,12 +151,12 @@ static int _ocf_read_generic_do(struct ocf_request *req)
if (ocf_engine_is_miss(req)) {
if (req->info.dirty_any) {
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Request is dirty need to clean request */
ocf_engine_clean(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
/* We need to clean request before processing, return */
ocf_req_put(req);
@@ -164,25 +164,25 @@ static int _ocf_read_generic_do(struct ocf_request *req)
return 0;
}
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Set valid status bits map */
ocf_set_valid_map_info(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
}
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
OCF_DEBUG_RQ(req, "Submit");
diff --git a/src/engine/engine_wa.c b/src/engine/engine_wa.c
index ebbbf14..089e671 100644
--- a/src/engine/engine_wa.c
+++ b/src/engine/engine_wa.c
@@ -23,12 +23,12 @@ int ocf_write_wa(struct ocf_request *req)
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
+ ocf_hb_req_prot_lock_rd(req); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(req);
- ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
+ ocf_hb_req_prot_unlock_rd(req); /*- END Metadata RD access -----------------*/
if (ocf_engine_is_hit(req)) {
ocf_req_clear(req);
diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c
index a4e1c51..8c9c296 100644
--- a/src/engine/engine_wb.c
+++ b/src/engine/engine_wb.c
@@ -28,20 +28,20 @@ static const struct ocf_io_if _io_if_wb_resume = {
static void _ocf_write_wb_update_bits(struct ocf_request *req)
{
if (ocf_engine_is_miss(req)) {
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Update valid status bits */
ocf_set_valid_map_info(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
}
if (!ocf_engine_is_dirty_all(req)) {
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* set dirty bits, and mark if metadata flushing is required */
ocf_set_dirty_map_info(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
ocf_req_set_cleaning_hot(req);
@@ -127,14 +127,14 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
OCF_DEBUG_RQ(req, "Submit Data");
diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c
index 880f73b..52a9242 100644
--- a/src/engine/engine_wi.c
+++ b/src/engine/engine_wi.c
@@ -96,12 +96,12 @@ static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
env_atomic_set(&req->req_remaining, 1); /* One core IO */
- ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
+ ocf_hb_req_prot_lock_wr(req); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
- ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
+ ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
if (req->info.flush_metadata) {
/* Request was dirty and need to flush metadata */
@@ -191,7 +191,7 @@ int ocf_write_wi(struct ocf_request *req)
&_io_if_wi_core_write;
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
+ ocf_hb_req_prot_lock_rd(req); /*- Metadata READ access, No eviction --------*/
/* Travers to check if request is mapped fully */
ocf_engine_traverse(req);
@@ -203,7 +203,7 @@ int ocf_write_wi(struct ocf_request *req)
lock = OCF_LOCK_ACQUIRED;
}
- ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/
+ ocf_hb_req_prot_unlock_rd(req); /*- END Metadata READ access----------------*/
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {
diff --git a/src/engine/engine_wo.c b/src/engine/engine_wo.c
index 0875b92..777df45 100644
--- a/src/engine/engine_wo.c
+++ b/src/engine/engine_wo.c
@@ -213,7 +213,7 @@ int ocf_read_wo(struct ocf_request *req)
req->io_if = &_io_if_wo_resume;
ocf_req_hash(req);
- ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
+ ocf_hb_req_prot_lock_rd(req); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(req);
@@ -225,7 +225,7 @@ int ocf_read_wo(struct ocf_request *req)
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
}
- ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
+ ocf_hb_req_prot_unlock_rd(req); /*- END Metadata RD access -----------------*/
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c
index 79cf9c7..83b0176 100644
--- a/src/engine/engine_wt.c
+++ b/src/engine/engine_wt.c
@@ -98,16 +98,16 @@ static inline void _ocf_write_wt_submit(struct ocf_request *req)
static void _ocf_write_wt_update_bits(struct ocf_request *req)
{
if (ocf_engine_is_miss(req)) {
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
/* Update valid status bits */
ocf_set_valid_map_info(req);
- ocf_req_hash_unlock_rd(req);
+ ocf_hb_req_prot_unlock_rd(req);
}
if (req->info.dirty_any) {
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Writes goes to SDD and HDD, need to update status bits from
* dirty to clean
@@ -115,20 +115,20 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
ocf_set_clean_map_info(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
- ocf_req_hash_lock_wr(req);
+ ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
- ocf_req_hash_unlock_wr(req);
+ ocf_hb_req_prot_unlock_wr(req);
}
}
diff --git a/src/engine/engine_zero.c b/src/engine/engine_zero.c
index 8f9daff..2294064 100644
--- a/src/engine/engine_zero.c
+++ b/src/engine/engine_zero.c
@@ -23,12 +23,12 @@ static int ocf_zero_purge(struct ocf_request *req)
} else {
/* There are mapped cache line, need to remove them */
- ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
+ ocf_hb_req_prot_lock_wr(req); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
- ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
+ ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
}
ocf_req_unlock_wr(req);
diff --git a/src/metadata/metadata_collision.c b/src/metadata/metadata_collision.c
index 336ad09..e8e51d8 100644
--- a/src/metadata/metadata_collision.c
+++ b/src/metadata/metadata_collision.c
@@ -304,6 +304,7 @@ void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
OCF_CORE_MAX, ULLONG_MAX);
}
+/* must be called under the global metadata read (shared) lock */
void ocf_metadata_start_collision_shared_access(struct ocf_cache *cache,
ocf_cache_line_t line)
{
@@ -316,6 +317,7 @@ void ocf_metadata_start_collision_shared_access(struct ocf_cache *cache,
ocf_collision_start_shared_access(&cache->metadata.lock, page);
}
+/* must be called under the global metadata read (shared) lock */
void ocf_metadata_end_collision_shared_access(struct ocf_cache *cache,
ocf_cache_line_t line)
{
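The two comments added above state the precondition only; the sketch below spells out the implied ordering (global shared lock first, then the collision page for the line). The wrapper function is hypothetical.

static void example_touch_collision_entry(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	ocf_metadata_start_shared_access(&cache->metadata.lock);

	ocf_metadata_start_collision_shared_access(cache, line);
	/* ... read or update the collision entry for 'line' ... */
	ocf_metadata_end_collision_shared_access(cache, line);

	ocf_metadata_end_shared_access(&cache->metadata.lock);
}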
diff --git a/src/mngt/ocf_mngt_common.c b/src/mngt/ocf_mngt_common.c
index 2eb3398..f4dca35 100644
--- a/src/mngt/ocf_mngt_common.c
+++ b/src/mngt/ocf_mngt_common.c
@@ -62,7 +62,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
for (hash = 0; hash < num_hash;) {
prev_cline = cache->device->collision_table_entries;
- ocf_metadata_lock_hash_wr(&cache->metadata.lock, hash);
+ ocf_hb_id_prot_lock_wr(&cache->metadata.lock, hash);
curr_cline = ocf_metadata_get_hash(cache, hash);
while (curr_cline != cache->device->collision_table_entries) {
@@ -91,7 +91,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
else
curr_cline = ocf_metadata_get_hash(cache, hash);
}
- ocf_metadata_unlock_hash_wr(&cache->metadata.lock, hash);
+ ocf_hb_id_prot_unlock_wr(&cache->metadata.lock, hash);
/* Check whether all the cachelines from the hash bucket were sparsed */
if (curr_cline == cache->device->collision_table_entries)
diff --git a/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c b/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c
index 72fe4c6..34b4f30 100644
--- a/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c
+++ b/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c
@@ -1,6 +1,6 @@
/*
* src/concurrency/ocf_metadata_concurrency.c
- * ocf_req_hash_lock_rd
+ * ocf_hb_req_prot_lock_rd
*
* INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
* ONE FUNCTION PER LINE
@@ -23,7 +23,7 @@
#include "concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency_generated_wraps.c"
-void __wrap_ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
+void __wrap_ocf_hb_id_naked_lock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
check_expected(hash);
@@ -62,15 +62,15 @@ static void _test_lock_order(struct ocf_request* req,
req->map[i].hash = hash[i];
for (i = 0; i < expected_call_count; i++) {
- expect_function_call(__wrap_ocf_metadata_hash_lock);
- expect_value(__wrap_ocf_metadata_hash_lock, hash, expected_call[i]);
+ expect_function_call(__wrap_ocf_hb_id_naked_lock);
+ expect_value(__wrap_ocf_hb_id_naked_lock, hash, expected_call[i]);
}
- ocf_req_hash_lock_rd(req);
+ ocf_hb_req_prot_lock_rd(req);
}
-static void ocf_req_hash_lock_rd_test01(void **state)
+static void ocf_hb_req_prot_lock_rd_test01(void **state)
{
struct ocf_request *req = alloc_req();
struct {
@@ -126,10 +126,10 @@ static void ocf_req_hash_lock_rd_test01(void **state)
int main(void)
{
const struct CMUnitTest tests[] = {
- cmocka_unit_test(ocf_req_hash_lock_rd_test01)
+ cmocka_unit_test(ocf_hb_req_prot_lock_rd_test01)
};
- print_message("Unit test for ocf_req_hash_lock_rd\n");
+ print_message("Unit test for ocf_hb_req_prot_lock_rd\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}