Merge pull request #471 from arutk/lru_fix_2
Prevent remapping cachelines within single request
commit 296e98e39c
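The change below threads the triggering request into the LRU eviction iterator and skips any candidate cacheline whose (core id, core line) falls inside that request's own target range, so a cacheline mapped by an earlier part of a request can no longer be evicted and remapped by a later part of the same request. A minimal standalone sketch of the added range check follows; the struct and function names are simplified stand-ins, not the actual OCF types used in the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the request fields the check relies on. */
struct request_range {
	uint16_t core_id;          /* core the request targets */
	uint64_t core_line_first;  /* first core line touched by the request */
	uint64_t core_line_last;   /* last core line touched by the request */
};

/* True when a candidate cacheline maps into the current request's own
 * target range and therefore must not be picked for eviction. */
static bool cline_within_request(const struct request_range *req,
		uint16_t core_id, uint64_t core_line)
{
	return core_id == req->core_id &&
			core_line >= req->core_line_first &&
			core_line <= req->core_line_last;
}

int main(void)
{
	struct request_range req = {
		.core_id = 1, .core_line_first = 100, .core_line_last = 107
	};

	printf("%d\n", cline_within_request(&req, 1, 103)); /* 1 - skip it */
	printf("%d\n", cline_within_request(&req, 1, 200)); /* 0 - may evict */
	printf("%d\n", cline_within_request(&req, 2, 103)); /* 0 - other core */
	return 0;
}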
@@ -263,7 +263,8 @@ void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 
 static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
 		struct ocf_user_part *part, uint32_t start_evp, bool clean,
-		bool cl_lock_write, _lru_hash_locked_pfn hash_locked, void *context)
+		bool cl_lock_write, _lru_hash_locked_pfn hash_locked,
+		struct ocf_request *req)
 {
 	uint32_t i;
 
@@ -280,7 +281,7 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
 	iter->clean = clean;
 	iter->cl_lock_write = cl_lock_write;
 	iter->hash_locked = hash_locked;
-	iter->context = context;
+	iter->req = req;
 
 	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
 		iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
@@ -295,14 +296,6 @@ static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
 			NULL, NULL);
 }
 
-static bool _evp_lru_evict_hash_locked(void *context,
-		ocf_core_id_t core_id, uint64_t core_line)
-{
-	struct ocf_request *req = context;
-
-	return ocf_req_hash_in_range(req, core_id, core_line);
-}
-
 static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
 		ocf_cache_t cache, struct ocf_user_part *part,
 		uint32_t start_evp, bool cl_lock_write,
@@ -314,7 +307,7 @@ static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
 	 * is already locked as part of request hash locking (to avoid attempt
 	 * to acquire the same hash bucket lock twice) */
 	lru_iter_init(iter, cache, part, start_evp, true, cl_lock_write,
-			_evp_lru_evict_hash_locked, req);
+			ocf_req_hash_in_range, req);
 }
 
 
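Because the _lru_hash_locked_pfn callback now takes struct ocf_request * instead of void * (see the header change further down), the _evp_lru_evict_hash_locked adapter that only unpacked the context pointer is dropped and ocf_req_hash_in_range is passed as the callback directly. A generic, self-contained sketch of this pattern, with hypothetical names rather than the OCF ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct req { uint64_t first, last; };

/* Typed callback: no void *context, so no unpacking adapter is required. */
typedef bool (*hash_locked_fn)(struct req *req, uint64_t line);

/* The function that previously needed a void* wrapper is usable as-is. */
static bool req_line_in_range(struct req *req, uint64_t line)
{
	return line >= req->first && line <= req->last;
}

static bool is_hash_locked(hash_locked_fn cb, struct req *req, uint64_t line)
{
	return cb != NULL && cb(req, line);
}

int main(void)
{
	struct req r = { .first = 10, .last = 20 };

	/* Passing req_line_in_range directly replaces the old adapter. */
	printf("%d\n", is_hash_locked(req_line_in_range, &r, 15)); /* 1 */
	printf("%d\n", is_hash_locked(req_line_in_range, &r, 30)); /* 0 */
	return 0;
}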
@@ -375,8 +368,7 @@ static bool inline _lru_trylock_hash(struct ocf_lru_iter *iter,
 		ocf_core_id_t core_id, uint64_t core_line)
 {
 	if (iter->hash_locked != NULL && iter->hash_locked(
-				iter->context,
-				core_id, core_line)) {
+				iter->req, core_id, core_line)) {
 		return true;
 	}
 
@@ -389,8 +381,7 @@ static void inline _lru_unlock_hash(struct ocf_lru_iter *iter,
 		ocf_core_id_t core_id, uint64_t core_line)
 {
 	if (iter->hash_locked != NULL && iter->hash_locked(
-				iter->context,
-				core_id, core_line)) {
+				iter->req, core_id, core_line)) {
 		return;
 	}
 
@@ -404,12 +395,22 @@ static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
 		ocf_core_id_t *core_id, uint64_t *core_line)
 
 {
+	struct ocf_request *req = iter->req;
+
 	if (!_lru_trylock_cacheline(iter, cache_line))
 		return false;
 
 	ocf_metadata_get_core_info(iter->cache, cache_line,
 		core_id, core_line);
 
+	/* avoid evicting current request target cachelines */
+	if (*core_id == ocf_core_get_id(req->core) &&
+			*core_line >= req->core_line_first &&
+			*core_line <= req->core_line_last) {
+		_lru_unlock_cacheline(iter, cache_line);
+		return false;
+	}
+
 	if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
 		_lru_unlock_cacheline(iter, cache_line);
 		return false;
@@ -33,7 +33,7 @@ struct ocf_user_part_runtime {
 	struct cleaning_policy cleaning;
 };
 
-typedef bool (*_lru_hash_locked_pfn)(void *context,
+typedef bool (*_lru_hash_locked_pfn)(struct ocf_request *req,
 		ocf_core_id_t core_id, uint64_t core_line);
 
 /* Iterator state, visiting all eviction lists within a partition
@@ -56,8 +56,8 @@ struct ocf_lru_iter
 	/* callback to determine whether given hash bucket is already
 	 * locked by the caller */
 	_lru_hash_locked_pfn hash_locked;
-	/* hash_locked private data */
-	void *context;
+	/* optional caller request */
+	struct ocf_request *req;
 	/* 1 if iterating over clean lists, 0 if over dirty */
 	bool clean : 1;
 	/* 1 if cacheline is to be locked for write, 0 if for read*/