Switch from global metadata locks to hash-bucket locks in engines

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Adam Rutkowski 2019-07-31 16:31:28 -04:00
parent b39bcf86d4
commit 3a70d68d38
12 changed files with 124 additions and 129 deletions
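Every engine below follows the same replacement pattern: instead of taking the cache-wide OCF_METADATA_LOCK_RD()/WR() macros around a lookup, the request first computes the hash buckets it touches and then locks only those buckets. A minimal sketch of that read-path pattern, assembled from the calls used in the hunks below (the wrapper function itself is illustrative and not part of this commit):

/* Sketch only: per-bucket locking as used by the rewritten engines.
 * ocf_req_hash(), ocf_req_hash_lock_rd(), ocf_req_hash_unlock_rd() and
 * ocf_engine_traverse() are the calls this commit switches to; the
 * function below is an illustration, not code from the patch. */
static int example_engine_lookup(struct ocf_request *req)
{
	/* Compute hash-bucket indices for all core lines of the request */
	ocf_req_hash(req);

	/* Lock only the buckets this request touches, for read */
	ocf_req_hash_lock_rd(req);

	/* Walk the request and fill in hit/miss mapping info */
	ocf_engine_traverse(req);

	/* Release the same buckets */
	ocf_req_hash_unlock_rd(req);

	return 0;
}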

View File

@@ -538,15 +538,14 @@ void inc_fallback_pt_error_counter(ocf_cache_t cache)
 static int _ocf_engine_refresh(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	int result;
-	OCF_METADATA_LOCK_RD();
 	/* Check under metadata RD lock */
+	ocf_req_hash_lock_rd(req);
 	result = ocf_engine_check(req);
-	OCF_METADATA_UNLOCK_RD();
+	ocf_req_hash_unlock_rd(req);
 	if (result == 0) {

View File

@@ -170,7 +170,7 @@ int _ocf_discard_step_do(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* There are mapped cache line, need to remove them */
-		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
+		ocf_req_hash_lock_wr(req);
 		/* Remove mapped cache lines from metadata */
 		ocf_purge_map_info(req);
@@ -181,16 +181,16 @@ int _ocf_discard_step_do(struct ocf_request *req)
 				_ocf_discard_step_complete);
 		}
-		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
+		ocf_req_hash_unlock_wr(req);
 	}
-	OCF_METADATA_LOCK_RD();
+	ocf_req_hash_lock_rd(req);
 	/* Even if no cachelines are mapped they could be tracked in promotion
 	 * policy. RD lock suffices. */
 	ocf_promotion_req_purge(req->cache->promotion_policy, req);
-	OCF_METADATA_UNLOCK_RD();
+	ocf_req_hash_unlock_rd(req);
 	OCF_DEBUG_RQ(req, "Discard");
 	_ocf_discard_step_complete(req, 0);
@@ -224,11 +224,12 @@ static int _ocf_discard_step(struct ocf_request *req)
 	req->core_line_count = req->core_line_last - req->core_line_first + 1;
 	req->io_if = &_io_if_discard_step_resume;
-	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
 	ENV_BUG_ON(env_memset(req->map, sizeof(*req->map) * req->core_line_count,
 			0));
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req);
 	/* Travers to check if request is mapped fully */
 	ocf_engine_traverse(req);
@@ -239,7 +240,7 @@ static int _ocf_discard_step(struct ocf_request *req)
 		lock = OCF_LOCK_ACQUIRED;
 	}
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
+	ocf_req_hash_unlock_rd(req);
 	if (lock >= 0) {
 		if (OCF_LOCK_ACQUIRED == lock) {

View File

@@ -58,8 +58,6 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
 static int _ocf_read_fast_do(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	if (ocf_engine_is_miss(req)) {
 		/* It seams that after resume, now request is MISS, do PT */
 		OCF_DEBUG_RQ(req, "Switching to read PT");
@@ -74,14 +72,14 @@ static int _ocf_read_fast_do(struct ocf_request *req)
 	if (req->info.re_part) {
 		OCF_DEBUG_RQ(req, "Re-Part");
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* Probably some cache lines are assigned into wrong
 		 * partition. Need to move it to new one
 		 */
 		ocf_part_move(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 	/* Submit IO */
@@ -110,7 +108,6 @@ int ocf_read_fast(struct ocf_request *req)
 {
 	bool hit;
 	int lock = OCF_LOCK_NOT_ACQUIRED;
-	struct ocf_cache *cache = req->cache;
 	/* Get OCF request - increase reference counter */
 	ocf_req_get(req);
@@ -120,7 +117,8 @@ int ocf_read_fast(struct ocf_request *req)
 	/*- Metadata RD access -----------------------------------------------*/
-	OCF_METADATA_LOCK_RD();
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req);
 	/* Traverse request to cache if there is hit */
 	ocf_engine_traverse(req);
@@ -131,7 +129,7 @@ int ocf_read_fast(struct ocf_request *req)
 		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
 	}
-	OCF_METADATA_UNLOCK_RD();
+	ocf_req_hash_unlock_rd(req);
 	if (hit) {
 		OCF_DEBUG_RQ(req, "Fast path success");
@@ -179,7 +177,6 @@ int ocf_write_fast(struct ocf_request *req)
 {
 	bool mapped;
 	int lock = OCF_LOCK_NOT_ACQUIRED;
-	struct ocf_cache *cache = req->cache;
 	/* Get OCF request - increase reference counter */
 	ocf_req_get(req);
@@ -189,7 +186,8 @@ int ocf_write_fast(struct ocf_request *req)
 	/*- Metadata RD access -----------------------------------------------*/
-	OCF_METADATA_LOCK_RD();
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req);
 	/* Traverse request to cache if there is hit */
 	ocf_engine_traverse(req);
@@ -200,7 +198,7 @@ int ocf_write_fast(struct ocf_request *req)
 		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 	}
-	OCF_METADATA_UNLOCK_RD();
+	ocf_req_hash_unlock_rd(req);
 	if (mapped) {
 		if (lock >= 0) {

View File

@@ -43,9 +43,9 @@ static int _ocf_invalidate_do(struct ocf_request *req)
 	ENV_BUG_ON(env_atomic_read(&req->req_remaining));
-	OCF_METADATA_LOCK_WR();
+	ocf_req_hash_lock_wr(req);
 	ocf_purge_map_info(req);
-	OCF_METADATA_UNLOCK_WR();
+	ocf_req_hash_unlock_wr(req);
 	env_atomic_inc(&req->req_remaining);

View File

@@ -52,16 +52,14 @@ static inline void _ocf_read_pt_submit(struct ocf_request *req)
 int ocf_read_pt_do(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	/* Get OCF request - increase reference counter */
 	ocf_req_get(req);
 	if (req->info.dirty_any) {
-		OCF_METADATA_LOCK_RD();
+		ocf_req_hash_lock_rd(req);
 		/* Need to clean, start it */
 		ocf_engine_clean(req);
-		OCF_METADATA_UNLOCK_RD();
+		ocf_req_hash_unlock_rd(req);
 		/* Do not processing, because first we need to clean request */
 		ocf_req_put(req);
@@ -72,14 +70,14 @@ int ocf_read_pt_do(struct ocf_request *req)
 	if (req->info.re_part) {
 		OCF_DEBUG_RQ(req, "Re-Part");
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* Probably some cache lines are assigned into wrong
 		 * partition. Need to move it to new one
 		 */
 		ocf_part_move(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 	/* Submit read IO to the core */
@@ -105,7 +103,6 @@ int ocf_read_pt(struct ocf_request *req)
 {
 	bool use_cache = false;
 	int lock = OCF_LOCK_NOT_ACQUIRED;
-	struct ocf_cache *cache = req->cache;
 	OCF_DEBUG_TRACE(req->cache);
@@ -117,7 +114,8 @@ int ocf_read_pt(struct ocf_request *req)
 	/* Set resume io_if */
 	req->io_if = &_io_if_pt_resume;
-	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req);
 	/* Traverse request to check if there are mapped cache lines */
 	ocf_engine_traverse(req);
@@ -136,7 +134,7 @@ int ocf_read_pt(struct ocf_request *req)
 		}
 	}
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
+	ocf_req_hash_unlock_rd(req);
 	if (use_cache) {
 		/*

View File

@@ -137,8 +137,6 @@ err_alloc:
 static int _ocf_read_generic_do(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	if (ocf_engine_is_miss(req) && req->map->rd_locked) {
 		/* Miss can be handled only on write locks.
 		 * Need to switch to PT
@@ -153,12 +151,12 @@ static int _ocf_read_generic_do(struct ocf_request *req)
 	if (ocf_engine_is_miss(req)) {
 		if (req->info.dirty_any) {
-			OCF_METADATA_LOCK_RD();
+			ocf_req_hash_lock_rd(req);
 			/* Request is dirty need to clean request */
 			ocf_engine_clean(req);
-			OCF_METADATA_UNLOCK_RD();
+			ocf_req_hash_unlock_rd(req);
 			/* We need to clean request before processing, return */
 			ocf_req_put(req);
@@ -166,25 +164,25 @@ static int _ocf_read_generic_do(struct ocf_request *req)
 			return 0;
 		}
-		OCF_METADATA_LOCK_RD();
+		ocf_req_hash_lock_rd(req);
 		/* Set valid status bits map */
 		ocf_set_valid_map_info(req);
-		OCF_METADATA_UNLOCK_RD();
+		ocf_req_hash_unlock_rd(req);
 	}
 	if (req->info.re_part) {
 		OCF_DEBUG_RQ(req, "Re-Part");
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* Probably some cache lines are assigned into wrong
 		 * partition. Need to move it to new one
 		 */
 		ocf_part_move(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 	OCF_DEBUG_RQ(req, "Submit");
@@ -216,6 +214,7 @@ int ocf_read_generic(struct ocf_request *req)
 	int lock = OCF_LOCK_NOT_ACQUIRED;
 	struct ocf_cache *cache = req->cache;
 	bool promote = true;
+	struct ocf_metadata_lock *metadata_lock = &cache->metadata.lock;
 	ocf_io_start(&req->ioi.io);
@@ -231,10 +230,11 @@ int ocf_read_generic(struct ocf_request *req)
 	/* Set resume call backs */
 	req->io_if = &_io_if_read_generic_resume;
+	/* calculate hashes for hash-bucket locking */
+	ocf_req_hash(req);
 	/*- Metadata RD access -----------------------------------------------*/
-	OCF_METADATA_LOCK_RD();
+	ocf_req_hash_lock_rd(req);
 	/* Traverse request to cache if there is hit */
 	ocf_engine_traverse(req);
@@ -251,27 +251,27 @@ int ocf_read_generic(struct ocf_request *req)
 			 */
 			lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 		}
-	}
-	if (!mapped) {
+	} else {
 		promote = ocf_promotion_req_should_promote(
 				cache->promotion_policy, req);
 	}
-	OCF_METADATA_UNLOCK_RD();
-	/*- END Metadata RD access -------------------------------------------*/
-	if (!mapped && promote) {
-		/*- Metadata WR access ---------------------------------------*/
-		OCF_METADATA_LOCK_WR();
-		/* Now there is exclusive access for metadata. May traverse once
-		 * again. If there are misses need to call eviction. This
-		 * process is called 'mapping'.
-		 */
-		if (ocf_engine_evict(req) == LOOKUP_MAPPED)
-			ocf_engine_map(req);
+	if (mapped || !promote) {
+		ocf_req_hash_unlock_rd(req);
+	} else {
+		/*- Metadata RD access ---------------------------------------*/
+		ocf_req_hash_lock_upgrade(req);
+		ocf_engine_map(req);
+		ocf_req_hash_unlock_wr(req);
+		if (req->info.mapping_error) {
+			/* Still not mapped - evict cachelines under global
+			 * metadata write lock */
+			ocf_metadata_start_exclusive_access(metadata_lock);
+			if (ocf_engine_evict(req) == LOOKUP_MAPPED)
+				ocf_engine_map(req);
+			ocf_metadata_end_exclusive_access(metadata_lock);
+		}
 		if (!req->info.mapping_error) {
 			if (ocf_engine_is_hit(req)) {
@@ -288,9 +288,6 @@ int ocf_read_generic(struct ocf_request *req)
 					ocf_engine_on_resume);
 			}
 		}
-		OCF_METADATA_UNLOCK_WR();
-		/*- END Metadata WR access -----------------------------------*/
 	}
 	if (promote && !req->info.mapping_error) {
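Condensed, the miss path this hunk introduces in ocf_read_generic() (and that ocf_write_wb() and ocf_write_wt() repeat below) first tries to map under the hash-bucket locks upgraded to write, and only falls back to the global exclusive metadata lock when eviction is needed. A rough sketch of that control flow, with the request cache-line locking and the hit branches omitted; mapped and promote are the booleans computed earlier in the function, and metadata_lock points at cache->metadata.lock as in the hunk:

/* Sketch of the mapping flow introduced above; not a verbatim copy of the patch. */
ocf_req_hash(req);
ocf_req_hash_lock_rd(req);
ocf_engine_traverse(req);

if (mapped || !promote) {
	/* Fully mapped (or promotion policy says skip) - the read lock was enough */
	ocf_req_hash_unlock_rd(req);
} else {
	/* Upgrade the same buckets to write and try to map without eviction */
	ocf_req_hash_lock_upgrade(req);
	ocf_engine_map(req);
	ocf_req_hash_unlock_wr(req);

	if (req->info.mapping_error) {
		/* Still not mapped - eviction needs the global exclusive lock */
		ocf_metadata_start_exclusive_access(metadata_lock);
		if (ocf_engine_evict(req) == LOOKUP_MAPPED)
			ocf_engine_map(req);
		ocf_metadata_end_exclusive_access(metadata_lock);
	}
}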

View File

@@ -38,19 +38,19 @@ static void _ocf_read_wa_complete(struct ocf_request *req, int error)
 int ocf_write_wa(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	ocf_io_start(&req->ioi.io);
 	/* Get OCF request - increase reference counter */
 	ocf_req_get(req);
-	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
 	/* Traverse request to check if there are mapped cache lines */
 	ocf_engine_traverse(req);
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
+	ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
 	if (ocf_engine_is_hit(req)) {
 		ocf_req_clear(req);

View File

@@ -25,23 +25,21 @@ static const struct ocf_io_if _io_if_wb_resume = {
 static void _ocf_write_wb_update_bits(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	if (ocf_engine_is_miss(req)) {
-		OCF_METADATA_LOCK_RD();
+		ocf_req_hash_lock_rd(req);
 		/* Update valid status bits */
 		ocf_set_valid_map_info(req);
-		OCF_METADATA_UNLOCK_RD();
+		ocf_req_hash_unlock_rd(req);
 	}
 	if (!ocf_engine_is_dirty_all(req)) {
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* set dirty bits, and mark if metadata flushing is required */
 		ocf_set_dirty_map_info(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 }
@@ -127,14 +125,14 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
 	if (req->info.re_part) {
 		OCF_DEBUG_RQ(req, "Re-Part");
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* Probably some cache lines are assigned into wrong
 		 * partition. Need to move it to new one
 		 */
 		ocf_part_move(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 	OCF_DEBUG_RQ(req, "Submit Data");
@@ -169,8 +167,8 @@ int ocf_write_wb(struct ocf_request *req)
 {
 	bool mapped;
 	int lock = OCF_LOCK_NOT_ACQUIRED;
-	struct ocf_cache *cache = req->cache;
 	bool promote = true;
+	struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
 	ocf_io_start(&req->ioi.io);
@@ -182,7 +180,8 @@ int ocf_write_wb(struct ocf_request *req)
 	/* TODO: Handle fits into dirty */
-	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
 	/* Travers to check if request is mapped fully */
 	ocf_engine_traverse(req);
@@ -191,31 +190,32 @@ int ocf_write_wb(struct ocf_request *req)
 	if (mapped) {
 		/* All cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
-	}
-	if (!mapped) {
+	} else {
 		promote = ocf_promotion_req_should_promote(
-				cache->promotion_policy, req);
+				req->cache->promotion_policy, req);
 	}
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
-	if (!mapped && promote) {
-		OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/
-		/* Now there is exclusive access for metadata. May traverse once
-		 * again. If there are misses need to call eviction. This
-		 * process is called 'mapping'.
-		 */
-		if (ocf_engine_evict(req) == LOOKUP_MAPPED)
-			ocf_engine_map(req);
+	if (mapped || !promote) {
+		ocf_req_hash_unlock_rd(req);
+	} else {
+		/*- Metadata RD access ---------------------------------------*/
+		ocf_req_hash_lock_upgrade(req);
+		ocf_engine_map(req);
+		ocf_req_hash_unlock_wr(req);
+		if (req->info.mapping_error) {
+			/* Still not mapped - evict cachelines under global
+			 * metadata write lock */
+			ocf_metadata_start_exclusive_access(metadata_lock);
+			if (ocf_engine_evict(req) == LOOKUP_MAPPED)
+				ocf_engine_map(req);
+			ocf_metadata_end_exclusive_access(metadata_lock);
+		}
 		if (!req->info.mapping_error) {
 			/* Lock request for WRITE access */
 			lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 		}
-		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
 	}
 	if (promote && !req->info.mapping_error) {

View File

@@ -52,12 +52,12 @@ static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* There are mapped cache line, need to remove them */
-		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
+		ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
 		/* Remove mapped cache lines from metadata */
 		ocf_purge_map_info(req);
-		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
+		ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
 		if (req->info.flush_metadata) {
 			/* Request was dirty and need to flush metadata */
@@ -135,7 +135,6 @@ static const struct ocf_io_if _io_if_wi_resume = {
 int ocf_write_wi(struct ocf_request *req)
 {
 	int lock = OCF_LOCK_NOT_ACQUIRED;
-	struct ocf_cache *cache = req->cache;
 	OCF_DEBUG_TRACE(req->cache);
@@ -147,7 +146,8 @@ int ocf_write_wi(struct ocf_request *req)
 	/* Set resume io_if */
 	req->io_if = &_io_if_wi_resume;
-	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
 	/* Travers to check if request is mapped fully */
 	ocf_engine_traverse(req);
@@ -159,7 +159,7 @@ int ocf_write_wi(struct ocf_request *req)
 		lock = OCF_LOCK_ACQUIRED;
 	}
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
+	ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/
 	if (lock >= 0) {
 		if (lock == OCF_LOCK_ACQUIRED) {

View File

@@ -201,7 +201,6 @@ static const struct ocf_io_if _io_if_wo_resume = {
 int ocf_read_wo(struct ocf_request *req)
 {
-	ocf_cache_t cache = req->cache;
 	int lock = OCF_LOCK_ACQUIRED;
 	OCF_DEBUG_TRACE(req->cache);
@@ -214,7 +213,8 @@ int ocf_read_wo(struct ocf_request *req)
 	/* Set resume call backs */
 	req->io_if = &_io_if_wo_resume;
-	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
 	/* Traverse request to check if there are mapped cache lines */
 	ocf_engine_traverse(req);
@@ -226,7 +226,7 @@ int ocf_read_wo(struct ocf_request *req)
 		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
 	}
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
+	ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
 	if (lock >= 0) {
 		if (lock != OCF_LOCK_ACQUIRED) {

View File

@@ -97,19 +97,17 @@ static inline void _ocf_write_wt_submit(struct ocf_request *req)
 static void _ocf_write_wt_update_bits(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	if (ocf_engine_is_miss(req)) {
-		OCF_METADATA_LOCK_RD();
+		ocf_req_hash_lock_rd(req);
 		/* Update valid status bits */
 		ocf_set_valid_map_info(req);
-		OCF_METADATA_UNLOCK_RD();
+		ocf_req_hash_unlock_rd(req);
 	}
 	if (req->info.dirty_any) {
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* Writes goes to SDD and HDD, need to update status bits from
 		 * dirty to clean
@@ -117,20 +115,20 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
 		ocf_set_clean_map_info(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 	if (req->info.re_part) {
 		OCF_DEBUG_RQ(req, "Re-Part");
-		OCF_METADATA_LOCK_WR();
+		ocf_req_hash_lock_wr(req);
 		/* Probably some cache lines are assigned into wrong
 		 * partition. Need to move it to new one
 		 */
 		ocf_part_move(req);
-		OCF_METADATA_UNLOCK_WR();
+		ocf_req_hash_unlock_wr(req);
 	}
 }
@@ -164,8 +162,8 @@ int ocf_write_wt(struct ocf_request *req)
 {
 	bool mapped;
 	int lock = OCF_LOCK_NOT_ACQUIRED;
-	struct ocf_cache *cache = req->cache;
 	bool promote = true;
+	struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
 	ocf_io_start(&req->ioi.io);
@@ -175,7 +173,8 @@ int ocf_write_wt(struct ocf_request *req)
 	/* Set resume io_if */
 	req->io_if = &_io_if_wt_resume;
-	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
+	ocf_req_hash(req);
+	ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
 	/* Travers to check if request is mapped fully */
 	ocf_engine_traverse(req);
@@ -184,31 +183,32 @@ int ocf_write_wt(struct ocf_request *req)
 	if (mapped) {
 		/* All cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
-	}
-	if (!mapped) {
+	} else {
 		promote = ocf_promotion_req_should_promote(
-				cache->promotion_policy, req);
+				req->cache->promotion_policy, req);
 	}
-	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
-	if (!mapped && promote) {
-		OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/
-		/* Now there is exclusive access for metadata. May traverse once
-		 * again. If there are misses need to call eviction. This
-		 * process is called 'mapping'.
-		 */
-		if (ocf_engine_evict(req) == LOOKUP_MAPPED)
-			ocf_engine_map(req);
+	if (mapped || !promote) {
+		ocf_req_hash_unlock_rd(req);
+	} else {
+		/*- Metadata RD access ---------------------------------------*/
+		ocf_req_hash_lock_upgrade(req);
+		ocf_engine_map(req);
+		ocf_req_hash_unlock_wr(req);
+		if (req->info.mapping_error) {
+			/* Still not mapped - evict cachelines under global
+			 * metadata write lock */
+			ocf_metadata_start_exclusive_access(metadata_lock);
+			if (ocf_engine_evict(req) == LOOKUP_MAPPED)
+				ocf_engine_map(req);
+			ocf_metadata_end_exclusive_access(metadata_lock);
+		}
 		if (!req->info.mapping_error) {
 			/* Lock request for WRITE access */
 			lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 		}
-		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
 	}
 	if (promote && !req->info.mapping_error) {

View File

@@ -18,19 +18,17 @@
 static int ocf_zero_purge(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
 	if (req->error) {
 		ocf_engine_error(req, true, "Failed to discard data on cache");
 	} else {
 		/* There are mapped cache line, need to remove them */
-		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
+		ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
 		/* Remove mapped cache lines from metadata */
 		ocf_purge_map_info(req);
-		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
+		ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
 	}
 	ocf_req_unlock_wr(req);
@@ -142,6 +140,10 @@ void ocf_engine_zero_line(struct ocf_request *req)
 	ENV_BUG_ON(req->core_line_count != 1);
+	/* No hash bucket locking here - ocf_engine_zero_line caller must hold
+	 * metadata global write lock, so we have exclusive access to all hash
+	 * buckets here. */
 	/* Traverse to check if request is mapped */
 	ocf_engine_traverse(req);
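
The comment added above states the one exception to per-bucket locking: ocf_engine_zero_line() takes no hash-bucket locks because its caller is required to already hold the global metadata write lock. An illustrative fragment of a caller honouring that contract (only the lock calls and ocf_engine_zero_line() come from this patch; the surrounding code is a sketch):

/* Sketch only - not part of the commit. Global exclusive access covers
 * every hash bucket, so the zero-line path needs no per-bucket locks. */
struct ocf_metadata_lock *metadata_lock = &cache->metadata.lock;

ocf_metadata_start_exclusive_access(metadata_lock);

ocf_engine_zero_line(req);

/* Exclusive access is kept until the zeroing request completes;
 * ocf_metadata_end_exclusive_access(metadata_lock) is called only then. */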