Optimize set_hot calls in eviction path
Split traversal into two distinct phases: lookup() and LRU set_hot(). prepare_cachelines() now calls set_hot() only once, after lookup and insert are finished. lookup() is called explicitly only once in prepare_cachelines(), at the very beginning of the procedure. If the request is a miss, map() performs operations equivalent to lookup(), supplemented by an attempt to map cachelines. Both lookup() and set_hot() are called via traverse() from the engines which do not attempt mapping, and thus do not call prepare_clines().

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
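The call ordering the message describes can be sketched in C. This is a hypothetical illustration only, not the OCF implementation: struct request and its mapped flag are invented stand-ins for struct ocf_request and its mapping state, and the function bodies are reduced to the ordering that matters here.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for struct ocf_request. */
struct request {
	bool mapped; /* true once all cachelines are mapped */
};

static void lookup(struct request *req)
{
	(void)req;
	puts("lookup()"); /* hash lookup only; no LRU side effects */
}

static void set_hot(struct request *req)
{
	(void)req;
	puts("set_hot()"); /* LRU hot-marking, now a separate phase */
}

static void map(struct request *req)
{
	puts("map()"); /* lookup-equivalent work plus a mapping attempt */
	req->mapped = true;
}

/* One lookup() at the very beginning, one set_hot() at the very end. */
static void prepare_cachelines(struct request *req)
{
	lookup(req);
	if (!req->mapped)
		map(req);
	set_hot(req);
}

/* Engines that never map pair the two phases via traverse(). */
static void traverse(struct request *req)
{
	lookup(req);
	set_hot(req);
}

int main(void)
{
	struct request miss = { .mapped = false };
	struct request hit = { .mapped = true };

	prepare_cachelines(&miss); /* lookup, map, set_hot */
	traverse(&hit);            /* lookup, set_hot */
	return 0;
}

Per the message, the point of the split is that the LRU hot-marking side effect runs exactly once per request, after the lookup/insert work completes, rather than being interleaved with traversal.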
@@ -423,7 +423,12 @@ void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
 	for (hash = _MIN_HASH(req); hash <= _MAX_HASH(req); \
 			hash = _HASH_NEXT(req, hash))
 
-/* returns true if given core hash collides with request LBA range */
+/* Returns true if the given LBA (determined by core_id
+ * and core_line) resolves to a hash value that is within the
+ * set of hashes for the given request (i.e. after the request
+ * hash buckets are locked, the given core line is hash bucket
+ * locked as well).
+ */
 bool ocf_req_hash_in_range(struct ocf_request *req,
 		ocf_core_id_t core_id, uint64_t core_line)
 {
@@ -438,7 +443,6 @@ bool ocf_req_hash_in_range(struct ocf_request *req,
 	return (hash >= _MIN_HASH(req) && hash <= _GAP_START(req)) ||
 			(hash > _GAP_START(req) + _GAP_VAL(req) &&
 			hash <= _MAX_HASH(req));
-
 }
 
 void ocf_hb_req_prot_lock_rd(struct ocf_request *req)
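For readers unfamiliar with the _GAP_* macros, here is a small self-contained C illustration of what the return expression computes. The struct hash_range and hash_in_range names are hypothetical, introduced only for this sketch; the interpretation (a contiguous run of hash buckets that wraps modulo the table size, leaving an uncovered gap between _GAP_START and _GAP_START + _GAP_VAL) is inferred from the expression itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical description of the buckets a request covers:
 * [min..gap_start] plus (gap_start + gap_len .. max], with an
 * uncovered "gap" of gap_len buckets in between. */
struct hash_range {
	uint32_t min;        /* lowest covered hash value */
	uint32_t max;        /* highest covered hash value */
	uint32_t gap_start;  /* last covered hash before the gap */
	uint32_t gap_len;    /* number of uncovered hashes in the gap */
};

static bool hash_in_range(const struct hash_range *r, uint32_t hash)
{
	/* Same shape as the return expression in ocf_req_hash_in_range(). */
	return (hash >= r->min && hash <= r->gap_start) ||
			(hash > r->gap_start + r->gap_len && hash <= r->max);
}

int main(void)
{
	/* Buckets 0..2 and 7..9 covered, gap at 3..6. */
	struct hash_range r = { .min = 0, .max = 9, .gap_start = 2, .gap_len = 4 };

	printf("%d\n", hash_in_range(&r, 1)); /* 1: before the gap */
	printf("%d\n", hash_in_range(&r, 4)); /* 0: inside the gap */
	printf("%d\n", hash_in_range(&r, 8)); /* 1: after the gap */
	return 0;
}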