Move common mapping and locking logic to dedicated function

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski 2019-08-12 17:36:30 -04:00
parent d2bd807e49
commit 5248093e1f
5 changed files with 140 additions and 154 deletions
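
The change collapses the traverse/map/evict/lock sequence, previously duplicated across the read, write-back and write-through engines, into a single ocf_engine_prepare_clines() routine parameterized by per-engine callbacks. A minimal sketch of the new calling convention, modeled on the engine changes below (the ocf_foo engine and its helpers are hypothetical stand-ins, and the snippet compiles only inside the OCF source tree):

static enum ocf_engine_lock_type ocf_foo_get_lock_type(struct ocf_request *req)
{
	/* hypothetical engine: mapped cachelines are always written,
	 * so a write lock is required */
	return ocf_engine_lock_write;
}

static const struct ocf_engine_callbacks _foo_engine_callbacks =
{
	.get_lock_type = ocf_foo_get_lock_type,
	.resume = ocf_engine_on_resume,
};

int ocf_foo(struct ocf_request *req)
{
	/* map (evicting if needed) and lock cachelines in one call */
	int lock = ocf_engine_prepare_clines(req, &_foo_engine_callbacks);

	if (!req->info.mapping_error && lock == OCF_LOCK_ACQUIRED) {
		/* lock held synchronously - submit cache I/O here;
		 * otherwise .resume re-enters the engine once the
		 * asynchronous lock is granted */
	}

	return 0;
}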

src/engine/engine_common.c

@@ -16,6 +16,8 @@
#include "../utils/utils_cleaner.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../promotion/promotion.h"
#include "../concurrency/ocf_concurrency.h"
void ocf_engine_error(struct ocf_request *req,
bool stop_cache, const char *msg)
@@ -305,7 +307,7 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
}
}
void ocf_engine_map(struct ocf_request *req)
static void ocf_engine_map(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
uint32_t i;
@@ -393,7 +395,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
}
}
int ocf_engine_evict(struct ocf_request *req)
static int ocf_engine_evict(struct ocf_request *req)
{
if (!ocf_engine_unmapped_count(req))
return 0;
@@ -402,6 +404,73 @@ int ocf_engine_evict(struct ocf_request *req)
ocf_engine_unmapped_count(req));
}
static int lock_clines(struct ocf_request *req, enum ocf_engine_lock_type lock,
ocf_req_async_lock_cb cb)
{
switch (lock) {
case ocf_engine_lock_write:
return ocf_req_async_lock_wr(req, cb);
case ocf_engine_lock_read:
return ocf_req_async_lock_rd(req, cb);
default:
return OCF_LOCK_ACQUIRED;
}
}
int ocf_engine_prepare_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
bool mapped;
bool promote = true;
int lock = -ENOENT;
enum ocf_engine_lock_type lock_type;
struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
/* Traverse to check if request is fully mapped */
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
lock_type = engine_cbs->get_lock_type(req);
lock = lock_clines(req, lock_type, engine_cbs->resume);
} else {
promote = ocf_promotion_req_should_promote(
req->cache->promotion_policy, req);
if (!promote)
req->info.mapping_error = 1;
}
if (mapped || !promote) {
ocf_req_hash_unlock_rd(req);
} else {
/* need to attempt mapping / eviction */
ocf_req_hash_lock_upgrade(req); /*- Metadata WR access, eviction -----*/
ocf_engine_map(req);
ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
if (req->info.mapping_error) {
ocf_metadata_start_exclusive_access(metadata_lock);
/* Now there is exclusive access for metadata. May
* traverse once again and evict cachelines if needed.
*/
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
ocf_metadata_end_exclusive_access(metadata_lock);
}
if (!req->info.mapping_error) {
lock_type = engine_cbs->get_lock_type(req);
lock = lock_clines(req, lock_type, engine_cbs->resume);
}
}
return lock;
}
static int _ocf_engine_clean_getter(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
{

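The payoff of the consolidation is the single, auditable lock-escalation ladder above: a hash-bucket read lock for the traverse, an upgrade to a hash-bucket write lock only when new cachelines must be mapped, and the global exclusive metadata lock only when mapping fails and eviction is required. A standalone toy model of that control flow (plain C with no OCF types; try_map() is an illustrative stand-in for ocf_engine_map() that pretends the freelist is empty on the first pass):

#include <stdbool.h>
#include <stdio.h>

static bool try_map(const char *lock_held)
{
	static int attempt;
	printf("map attempt under %s\n", lock_held);
	return attempt++ > 0;	/* fail first, succeed after eviction */
}

int main(void)
{
	bool mapped = false;	/* ocf_engine_is_mapped() verdict */
	bool promote = true;	/* promotion policy verdict */
	bool ok;

	puts("hash bucket read lock");	/* ocf_req_hash_lock_rd() */
	if (mapped || !promote) {
		puts("read unlock - nothing to map");
		return 0;
	}

	puts("upgrade to hash bucket write lock"); /* ocf_req_hash_lock_upgrade() */
	ok = try_map("hash bucket write lock");
	puts("hash bucket write unlock");	/* ocf_req_hash_unlock_wr() */

	if (!ok) {
		/* freelist exhausted: escalate to the global lock,
		 * evict, then retry the mapping */
		puts("exclusive metadata lock");
		puts("evict cachelines");	/* ocf_engine_evict() */
		ok = try_map("exclusive metadata lock");
		puts("exclusive metadata unlock");
	}

	printf("mapping %s\n", ok ? "succeeded" : "failed");
	return 0;
}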
src/engine/engine_common.h

@@ -162,15 +162,34 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
uint64_t core_line);
/**
* @brief Traverse request in order to lookup cache lines. If there are misses,
* attempt to map free cache lines.
*
* @param req OCF request
* @brief Request cacheline lock type
*/
void ocf_engine_map(struct ocf_request *req);
enum ocf_engine_lock_type
{
/** No lock */
ocf_engine_lock_none = 0,
/** Write lock */
ocf_engine_lock_write,
/** Read lock */
ocf_engine_lock_read,
};
/**
* @brief Evict cachelines to populate freelist.
* @brief Engine-specific callbacks for common request handling routine
*
* TODO(arutk): expand this structure to fit all engines and all steps
*/
struct ocf_engine_callbacks
{
/** Specify locking requirements after request is mapped */
enum ocf_engine_lock_type (*get_lock_type)(struct ocf_request *req);
/** Resume handling after acquiring asynchronous lock */
ocf_req_async_lock_cb resume;
};
/**
* @brief Map and lock cachelines
*
* @param req OCF request
*
@@ -178,7 +197,8 @@ void ocf_engine_map(struct ocf_request *req);
* @retval LOOKUP_MAPPED successfully evicted required number of cachelines
* @retval LOOKUP_MISS eviction failure
*/
int ocf_engine_evict(struct ocf_request *req);
int ocf_engine_prepare_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs);
/**
* @brief Traverse OCF request (lookup cache)

src/engine/engine_rd.c

@@ -208,13 +208,24 @@ static const struct ocf_io_if _io_if_read_generic_resume = {
.write = _ocf_read_generic_do,
};
static enum ocf_engine_lock_type ocf_rd_get_lock_type(struct ocf_request *req)
{
if (ocf_engine_is_hit(req))
return ocf_engine_lock_read;
else
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _rd_engine_callbacks =
{
.get_lock_type = ocf_rd_get_lock_type,
.resume = ocf_engine_on_resume,
};
int ocf_read_generic(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = req->cache;
bool promote = true;
struct ocf_metadata_lock *metadata_lock = &cache->metadata.lock;
ocf_io_start(&req->ioi.io);
@@ -230,67 +241,9 @@ int ocf_read_generic(struct ocf_request *req)
/* Set resume callbacks */
req->io_if = &_io_if_read_generic_resume;
/* calculate hashes for hash-bucket locking */
ocf_req_hash(req);
lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);
/*- Metadata RD access -----------------------------------------------*/
ocf_req_hash_lock_rd(req);
/* Traverse request to cache if there is hit */
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* Request is fully mapped, no need to call eviction */
if (ocf_engine_is_hit(req)) {
/* There is a hit, lock request for READ access */
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} else {
/* All cache line mapped, but some sectors are not valid
* and cache insert will be performed - lock for
* WRITE is required
*/
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
} else {
promote = ocf_promotion_req_should_promote(
cache->promotion_policy, req);
}
if (mapped || !promote) {
ocf_req_hash_unlock_rd(req);
} else {
/*- Metadata RD access ---------------------------------------*/
ocf_req_hash_lock_upgrade(req);
ocf_engine_map(req);
ocf_req_hash_unlock_wr(req);
if (req->info.mapping_error) {
/* Still not mapped - evict cachelines under global
* metadata write lock */
ocf_metadata_start_exclusive_access(metadata_lock);
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
ocf_metadata_end_exclusive_access(metadata_lock);
}
if (!req->info.mapping_error) {
if (ocf_engine_is_hit(req)) {
/* After mapping turns out there is hit,
* so lock OCF request for read access
*/
lock = ocf_req_async_lock_rd(req,
ocf_engine_on_resume);
} else {
/* Miss, new cache lines were mapped,
* need to lock OCF request for write access
*/
lock = ocf_req_async_lock_wr(req,
ocf_engine_on_resume);
}
}
}
if (promote && !req->info.mapping_error) {
if (!req->info.mapping_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */

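All three engines interpret the result identically, as the tail of ocf_read_generic() above and of the write engines below shows. Spelled out as a hedged restatement (the comments are inferred from the code, not taken from OCF documentation):

	lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);

	if (!req->info.mapping_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* lock pending - the .resume callback
				 * restarts the request when granted */
			} else {
				/* lock held - continue inline */
			}
		} else {
			/* negative value - locking failed outright */
		}
	} else {
		/* mapping failed: promotion refused or eviction
		 * could not free enough cachelines */
	}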
src/engine/engine_wb.c

@@ -163,12 +163,20 @@ int ocf_write_wb_do(struct ocf_request *req)
return 0;
}
static enum ocf_engine_lock_type ocf_wb_get_lock_type(struct ocf_request *req)
{
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _wb_engine_callbacks =
{
.get_lock_type = ocf_wb_get_lock_type,
.resume = ocf_engine_on_resume,
};
int ocf_write_wb(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
bool promote = true;
struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
ocf_io_start(&req->ioi.io);
@@ -180,45 +188,9 @@ int ocf_write_wb(struct ocf_request *req)
/* TODO: Handle fits into dirty */
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks);
/* Traverse to check if request is fully mapped */
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* All cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} else {
promote = ocf_promotion_req_should_promote(
req->cache->promotion_policy, req);
}
if (mapped || !promote) {
ocf_req_hash_unlock_rd(req);
} else {
/*- Metadata RD access ---------------------------------------*/
ocf_req_hash_lock_upgrade(req);
ocf_engine_map(req);
ocf_req_hash_unlock_wr(req);
if (req->info.mapping_error) {
/* Still not mapped - evict cachelines under global
* metadata write lock */
ocf_metadata_start_exclusive_access(metadata_lock);
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
ocf_metadata_end_exclusive_access(metadata_lock);
}
if (!req->info.mapping_error) {
/* Lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
}
if (promote && !req->info.mapping_error) {
if (!req->info.mapping_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */

src/engine/engine_wt.c

@@ -158,12 +158,20 @@ static const struct ocf_io_if _io_if_wt_resume = {
.write = _ocf_write_wt_do,
};
static enum ocf_engine_lock_type ocf_wt_get_lock_type(struct ocf_request *req)
{
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _wt_engine_callbacks =
{
.get_lock_type = ocf_wt_get_lock_type,
.resume = ocf_engine_on_resume,
};
int ocf_write_wt(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
bool promote = true;
struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
ocf_io_start(&req->ioi.io);
@@ -173,45 +181,9 @@ int ocf_write_wt(struct ocf_request *req)
/* Set resume io_if */
req->io_if = &_io_if_wt_resume;
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks);
/* Traverse to check if request is fully mapped */
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* All cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} else {
promote = ocf_promotion_req_should_promote(
req->cache->promotion_policy, req);
}
if (mapped || !promote) {
ocf_req_hash_unlock_rd(req);
} else {
/*- Metadata RD access ---------------------------------------*/
ocf_req_hash_lock_upgrade(req);
ocf_engine_map(req);
ocf_req_hash_unlock_wr(req);
if (req->info.mapping_error) {
/* Still not mapped - evict cachelines under global
* metadata write lock */
ocf_metadata_start_exclusive_access(metadata_lock);
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
ocf_metadata_end_exclusive_access(metadata_lock);
}
if (!req->info.mapping_error) {
/* Lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
}
if (promote && !req->info.mapping_error) {
if (!req->info.mapping_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
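
One subtle simplification shows up in all three engines: the old guard if (promote && !req->info.mapping_error) shrinks to if (!req->info.mapping_error). This works because ocf_engine_prepare_clines() now folds the promotion verdict into the mapping status, per these two lines from engine_common.c above:

	if (!promote)
		req->info.mapping_error = 1;

A request rejected by the promotion policy is therefore indistinguishable, from the engines' point of view, from one that failed to map.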