Move common mapping and locking logic to dedicated function

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
Adam Rutkowski
2019-08-12 17:36:30 -04:00
parent d2bd807e49
commit 5248093e1f
5 changed files with 140 additions and 154 deletions

View File

@@ -208,13 +208,24 @@ static const struct ocf_io_if _io_if_read_generic_resume = {
.write = _ocf_read_generic_do,
};
/*
 * Pick the cacheline lock mode for a generic read request.
 *
 * A full hit only reads mapped cachelines, so a read lock suffices;
 * otherwise data will be inserted into the cache and write access is
 * required.
 */
static enum ocf_engine_lock_type ocf_rd_get_lock_type(struct ocf_request *req)
{
	return ocf_engine_is_hit(req) ?
			ocf_engine_lock_read : ocf_engine_lock_write;
}
/* Callbacks plugged into the common engine lock/map path for reads:
 * how to pick the lock mode and what to run when the lock is granted
 * asynchronously. */
static const struct ocf_engine_callbacks _rd_engine_callbacks = {
	.resume = ocf_engine_on_resume,
	.get_lock_type = ocf_rd_get_lock_type,
};
int ocf_read_generic(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
bool promote = true;
struct ocf_metadata_lock *metadata_lock = &cache->metadata.lock;
ocf_io_start(&req->ioi.io);
@@ -230,67 +241,9 @@ int ocf_read_generic(struct ocf_request *req)
/* Set resume call backs */
req->io_if = &_io_if_read_generic_resume;
/* calculate hashes for hash-bucket locking */
ocf_req_hash(req);
lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);
/*- Metadata RD access -----------------------------------------------*/
ocf_req_hash_lock_rd(req);
/* Traverse request to cache if there is hit */
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* Request is fully mapped, no need to call eviction */
if (ocf_engine_is_hit(req)) {
/* There is a hit, lock request for READ access */
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} else {
/* All cache line mapped, but some sectors are not valid
* and cache insert will be performed - lock for
* WRITE is required
*/
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
} else {
promote = ocf_promotion_req_should_promote(
cache->promotion_policy, req);
}
if (mapped || !promote) {
ocf_req_hash_unlock_rd(req);
} else {
/*- Metadata RD access ---------------------------------------*/
ocf_req_hash_lock_upgrade(req);
ocf_engine_map(req);
ocf_req_hash_unlock_wr(req);
if (req->info.mapping_error) {
/* Still not mapped - evict cachelines under global
* metadata write lock */
ocf_metadata_start_exclusive_access(metadata_lock);
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
ocf_metadata_end_exclusive_access(metadata_lock);
}
if (!req->info.mapping_error) {
if (ocf_engine_is_hit(req)) {
/* After mapping turns out there is hit,
* so lock OCF request for read access
*/
lock = ocf_req_async_lock_rd(req,
ocf_engine_on_resume);
} else {
/* Miss, new cache lines were mapped,
* need to lock OCF request for write access
*/
lock = ocf_req_async_lock_wr(req,
ocf_engine_on_resume);
}
}
}
if (promote && !req->info.mapping_error) {
if (!req->info.mapping_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */