remove stale references to eviction

Author: Adam Rutkowski <adam.j.rutkowski@intel.com>
Date: 2021-06-15 21:57:00 +02:00
Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
parent 33e2beac24
commit 07cbba32f6
9 changed files with 213 additions and 214 deletions


@@ -14,8 +14,8 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
 	unsigned part_iter;
 	unsigned global_iter;
 
-	for (evp_iter = 0; evp_iter < OCF_NUM_EVICTION_LISTS; evp_iter++)
-		env_rwlock_init(&metadata_lock->eviction[evp_iter]);
+	for (evp_iter = 0; evp_iter < OCF_NUM_LRU_LISTS; evp_iter++)
+		env_rwlock_init(&metadata_lock->lru[evp_iter]);
 
 	for (global_iter = 0; global_iter < OCF_NUM_GLOBAL_META_LOCKS;
 			global_iter++) {
@@ -41,7 +41,7 @@ global_err:
 		env_rwsem_destroy(&metadata_lock->global[global_iter].sem);
 
 	while (evp_iter--)
-		env_rwlock_destroy(&metadata_lock->eviction[evp_iter]);
+		env_rwlock_destroy(&metadata_lock->lru[evp_iter]);
 
 	return err;
 }
@@ -53,8 +53,8 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
 	for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
 		env_spinlock_destroy(&metadata_lock->partition[i]);
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
-		env_rwlock_destroy(&metadata_lock->eviction[i]);
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
+		env_rwlock_destroy(&metadata_lock->lru[i]);
 
 	for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++)
 		env_rwsem_destroy(&metadata_lock->global[i].sem);

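An aside on the pattern in the hunks above: initialization walks the lock array and, on failure, unwinds with while (iter--) so that only the locks that were actually created get destroyed. A minimal standalone sketch of that rollback idiom, assuming pthread rwlocks in place of env_rwlock (all names below are hypothetical, not OCF API):

/* lock_init_unwind.c - rollback-on-partial-init sketch */
#include <pthread.h>
#include <stdint.h>

#define NUM_LRU_LISTS 32u

static pthread_rwlock_t lru_lock[NUM_LRU_LISTS];

static int lru_locks_init(void)
{
	uint32_t i;
	int err = 0;

	for (i = 0; i < NUM_LRU_LISTS; i++) {
		err = pthread_rwlock_init(&lru_lock[i], NULL);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	/* destroy only the locks initialized so far; i indexes the
	 * first lock that failed, so post-decrement stops at 0 */
	while (i--)
		pthread_rwlock_destroy(&lru_lock[i]);
	return err;
}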

@@ -28,69 +28,69 @@ int ocf_metadata_concurrency_attached_init(
 void ocf_metadata_concurrency_attached_deinit(
 		struct ocf_metadata_lock *metadata_lock);
 
-static inline void ocf_metadata_eviction_wr_lock(
+static inline void ocf_metadata_lru_wr_lock(
 		struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
 {
-	env_rwlock_write_lock(&metadata_lock->eviction[ev_list]);
+	env_rwlock_write_lock(&metadata_lock->lru[ev_list]);
 }
 
-static inline void ocf_metadata_eviction_wr_unlock(
+static inline void ocf_metadata_lru_wr_unlock(
 		struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
 {
-	env_rwlock_write_unlock(&metadata_lock->eviction[ev_list]);
+	env_rwlock_write_unlock(&metadata_lock->lru[ev_list]);
 }
 
-static inline void ocf_metadata_eviction_rd_lock(
+static inline void ocf_metadata_lru_rd_lock(
 		struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
 {
-	env_rwlock_read_lock(&metadata_lock->eviction[ev_list]);
+	env_rwlock_read_lock(&metadata_lock->lru[ev_list]);
 }
 
-static inline void ocf_metadata_eviction_rd_unlock(
+static inline void ocf_metadata_lru_rd_unlock(
 		struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
 {
-	env_rwlock_read_unlock(&metadata_lock->eviction[ev_list]);
+	env_rwlock_read_unlock(&metadata_lock->lru[ev_list]);
 }
 
-static inline void ocf_metadata_eviction_wr_lock_all(
+static inline void ocf_metadata_lru_wr_lock_all(
 		struct ocf_metadata_lock *metadata_lock)
 {
 	uint32_t i;
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
-		ocf_metadata_eviction_wr_lock(metadata_lock, i);
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
+		ocf_metadata_lru_wr_lock(metadata_lock, i);
 }
 
-static inline void ocf_metadata_eviction_wr_unlock_all(
+static inline void ocf_metadata_lru_wr_unlock_all(
 		struct ocf_metadata_lock *metadata_lock)
 {
 	uint32_t i;
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
-		ocf_metadata_eviction_wr_unlock(metadata_lock, i);
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
+		ocf_metadata_lru_wr_unlock(metadata_lock, i);
 }
 
-#define OCF_METADATA_EVICTION_WR_LOCK(cline) \
-	ocf_metadata_eviction_wr_lock(&cache->metadata.lock, \
-			cline % OCF_NUM_EVICTION_LISTS)
+#define OCF_METADATA_LRU_WR_LOCK(cline) \
+	ocf_metadata_lru_wr_lock(&cache->metadata.lock, \
+			cline % OCF_NUM_LRU_LISTS)
 
-#define OCF_METADATA_EVICTION_WR_UNLOCK(cline) \
-	ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, \
-			cline % OCF_NUM_EVICTION_LISTS)
+#define OCF_METADATA_LRU_WR_UNLOCK(cline) \
+	ocf_metadata_lru_wr_unlock(&cache->metadata.lock, \
+			cline % OCF_NUM_LRU_LISTS)
 
-#define OCF_METADATA_EVICTION_RD_LOCK(cline) \
-	ocf_metadata_eviction_rd_lock(&cache->metadata.lock, \
-			cline % OCF_NUM_EVICTION_LISTS)
+#define OCF_METADATA_LRU_RD_LOCK(cline) \
+	ocf_metadata_lru_rd_lock(&cache->metadata.lock, \
+			cline % OCF_NUM_LRU_LISTS)
 
-#define OCF_METADATA_EVICTION_RD_UNLOCK(cline) \
-	ocf_metadata_eviction_rd_unlock(&cache->metadata.lock, \
-			cline % OCF_NUM_EVICTION_LISTS)
+#define OCF_METADATA_LRU_RD_UNLOCK(cline) \
+	ocf_metadata_lru_rd_unlock(&cache->metadata.lock, \
+			cline % OCF_NUM_LRU_LISTS)
 
-#define OCF_METADATA_EVICTION_WR_LOCK_ALL() \
-	ocf_metadata_eviction_wr_lock_all(&cache->metadata.lock)
+#define OCF_METADATA_LRU_WR_LOCK_ALL() \
+	ocf_metadata_lru_wr_lock_all(&cache->metadata.lock)
 
-#define OCF_METADATA_EVICTION_WR_UNLOCK_ALL() \
-	ocf_metadata_eviction_wr_unlock_all(&cache->metadata.lock)
+#define OCF_METADATA_LRU_WR_UNLOCK_ALL() \
+	ocf_metadata_lru_wr_unlock_all(&cache->metadata.lock)
 
 static inline void ocf_metadata_partition_lock(
 		struct ocf_metadata_lock *metadata_lock,

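The OCF_METADATA_LRU_* macros above are lock striping: a cacheline is pinned to one of OCF_NUM_LRU_LISTS lists by cline % OCF_NUM_LRU_LISTS, so protecting a line means taking exactly one of 32 rwlocks rather than a single global lock. A minimal sketch of the idea, again with pthread rwlocks standing in for env_rwlock and hypothetical names:

/* lock_striping.c */
#include <pthread.h>
#include <stdint.h>

#define NUM_LRU_LISTS 32u

static pthread_rwlock_t lru_lock[NUM_LRU_LISTS];

/* a cacheline belongs to exactly one list, so this one striped lock
 * serializes all updates to the list that owns it */
static void lru_list_wr_lock(uint32_t cline)
{
	pthread_rwlock_wrlock(&lru_lock[cline % NUM_LRU_LISTS]);
}

static void lru_list_wr_unlock(uint32_t cline)
{
	pthread_rwlock_unlock(&lru_lock[cline % NUM_LRU_LISTS]);
}

With 32 stripes, lines 5 and 37 contend (both map to stripe 5) while lines 5 and 6 proceed in parallel.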

@@ -30,31 +30,31 @@ struct ocf_user_part_config {
 
 struct ocf_part_runtime {
 	env_atomic curr_size;
-	struct ocf_lru_part_meta lru[OCF_NUM_EVICTION_LISTS];
+	struct ocf_lru_part_meta lru[OCF_NUM_LRU_LISTS];
 };
 
 typedef bool ( *_lru_hash_locked_pfn)(struct ocf_request *req,
 		ocf_core_id_t core_id, uint64_t core_line);
 
-/* Iterator state, visiting all eviction lists within a partition
+/* Iterator state, visiting all lru lists within a partition
    in round robin order */
 struct ocf_lru_iter
 {
 	/* per-partition cacheline iterator */
-	ocf_cache_line_t curr_cline[OCF_NUM_EVICTION_LISTS];
+	ocf_cache_line_t curr_cline[OCF_NUM_LRU_LISTS];
 	/* cache object */
 	ocf_cache_t cache;
 	/* cacheline concurrency */
 	struct ocf_alock *c;
 	/* target partition */
 	struct ocf_part *part;
-	/* available (non-empty) eviction list bitmap rotated so that current
-	   @evp is on the most significant bit */
-	unsigned long long next_avail_evp;
-	/* number of available eviction lists */
-	uint32_t num_avail_evps;
-	/* current eviction list index */
-	uint32_t evp;
+	/* available (non-empty) lru list bitmap rotated so that current
+	   @lru_idx is on the most significant bit */
+	unsigned long long next_avail_lru;
+	/* number of available lru lists */
+	uint32_t num_avail_lrus;
+	/* current lru list index */
+	uint32_t lru_idx;
 	/* callback to determine whether given hash bucket is already
 	 * locked by the caller */
 	_lru_hash_locked_pfn hash_locked;

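The renamed iterator fields implement a round-robin scan over the per-partition lists, driven by a 64-bit availability bitmap: the mask is kept rotated so that the current list's bit sits at the most significant position, and ffsll of the mask gives the distance to the next non-empty list. A self-contained sketch of that mechanism, shrunk to 8 lists; rotate_right mirrors what ocf_rotate_right is assumed to do, and every other name here is hypothetical:

/* rr_bitmap.c - build: cc rr_bitmap.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

#define NUM_LISTS 8u

/* rotate the low `width` bits of `bits` right by `shift` */
static uint64_t rotate_right(uint64_t bits, unsigned shift, unsigned width)
{
	return ((bits >> shift) | (bits << (width - shift))) &
			((1ULL << width) - 1);
}

struct rr_iter {
	uint64_t next_avail; /* non-empty-list bitmap, rotated so the
	                        current list is at bit NUM_LISTS - 1 */
	unsigned num_avail;  /* lists not yet marked empty */
	unsigned idx;        /* current list index */
};

static void rr_init(struct rr_iter *it, unsigned start)
{
	it->idx = (start + NUM_LISTS - 1) % NUM_LISTS; /* start - 1 mod N */
	it->next_avail = (1ULL << NUM_LISTS) - 1;      /* assume all non-empty */
	it->num_avail = NUM_LISTS;
}

static unsigned rr_next(struct rr_iter *it)
{
	/* distance to the next available list = lowest set bit, 1-based */
	unsigned inc = __builtin_ffsll(it->next_avail);

	it->next_avail = rotate_right(it->next_avail, inc, NUM_LISTS);
	it->idx = (it->idx + inc) % NUM_LISTS;
	return it->idx;
}

static void rr_set_empty(struct rr_iter *it)
{
	/* right after rr_next() the current list occupies the MSB */
	it->next_avail &= ~(1ULL << (NUM_LISTS - 1));
	it->num_avail--;
}

int main(void)
{
	unsigned remaining[NUM_LISTS] = { 2, 0, 1, 3, 0, 0, 1, 2 };
	struct rr_iter it;

	rr_init(&it, 0);
	while (it.num_avail) {
		unsigned l = rr_next(&it);

		if (remaining[l] == 0) {
			rr_set_empty(&it); /* never visit this list again */
			continue;
		}
		remaining[l]--;
		printf("took one element from list %u\n", l);
	}
	return 0;
}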

@@ -55,7 +55,7 @@ struct ocf_metadata_lock
 {
 	struct ocf_metadata_global_lock global[OCF_NUM_GLOBAL_META_LOCKS];
 		/*!< global metadata lock (GML) */
-	env_rwlock eviction[OCF_NUM_EVICTION_LISTS]; /*!< Fast lock for eviction policy */
+	env_rwlock lru[OCF_NUM_LRU_LISTS]; /*!< Fast locks for lru list */
 	env_spinlock partition[OCF_USER_IO_CLASS_MAX]; /* partition lock */
 	env_rwsem *hash; /*!< Hash bucket locks */
 	env_rwsem *collision_pages; /*!< Collision table page locks */


@@ -185,9 +185,9 @@ static void __init_parts_attached(ocf_cache_t cache)
 	ocf_part_id_t part_id;
 
 	for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++)
-		ocf_lru_init_evp(cache, &cache->user_parts[part_id].part);
+		ocf_lru_init(cache, &cache->user_parts[part_id].part);
 
-	ocf_lru_init_evp(cache, &cache->free);
+	ocf_lru_init(cache, &cache->free);
 }
 
 static void __init_free(ocf_cache_t cache)


@@ -14,8 +14,6 @@
 #include "../ocf_request.h"
 #include "../engine/engine_common.h"
 
-#define OCF_EVICTION_MAX_SCAN 1024
-
 static const ocf_cache_line_t end_marker = (ocf_cache_line_t)-1;
 
 /* Adds the given collision_index to the _head_ of the LRU list */
@@ -232,16 +230,16 @@ void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 }
 
 static struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
-		uint32_t evp, bool clean)
+		uint32_t lru_idx, bool clean)
 {
-	return clean ? &part->runtime->lru[evp].clean :
-			&part->runtime->lru[evp].dirty;
+	return clean ? &part->runtime->lru[lru_idx].clean :
+			&part->runtime->lru[lru_idx].dirty;
 }
 
-static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
+static inline struct ocf_lru_list *lru_get_cline_list(ocf_cache_t cache,
 		ocf_cache_line_t cline)
 {
-	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
+	uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
 	ocf_part_id_t part_id;
 	struct ocf_part *part;
@@ -250,7 +248,7 @@ static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
 	ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
 	part = &cache->user_parts[part_id].part;
 
-	return ocf_lru_get_list(part, ev_list,
+	return ocf_lru_get_list(part, lru_list,
 			!metadata_test_dirty(cache, cline));
 }
@@ -272,7 +270,7 @@ static void ocf_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
 
 void ocf_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 {
 	struct ocf_lru_list *list, *free;
-	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
+	uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
 	ocf_part_id_t part_id;
 	struct ocf_part *part;
@@ -280,25 +278,25 @@ void ocf_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 	ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
 	part = &cache->user_parts[part_id].part;
 
-	OCF_METADATA_EVICTION_WR_LOCK(cline);
+	OCF_METADATA_LRU_WR_LOCK(cline);
 
-	list = evp_get_cline_list(cache, cline);
-	free = ocf_lru_get_list(&cache->free, ev_list, true);
+	list = lru_get_cline_list(cache, cline);
+	free = ocf_lru_get_list(&cache->free, lru_list, true);
 	ocf_lru_move(cache, cline, part, list, &cache->free, free);
 
-	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
 }
 
 static void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
 		struct ocf_part *src_part, struct ocf_part *dst_part)
 {
-	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
+	uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
 	struct ocf_lru_list *src_list, *dst_list;
 	bool clean;
 
 	clean = !metadata_test_dirty(cache, cline);
-	src_list = ocf_lru_get_list(src_part, ev_list, clean);
-	dst_list = ocf_lru_get_list(dst_part, ev_list, clean);
+	src_list = ocf_lru_get_list(src_part, lru_list, clean);
+	dst_list = ocf_lru_get_list(dst_part, lru_list, clean);
 
 	ocf_lru_move(cache, cline, src_part, src_list, dst_part, dst_list);
 }
@@ -306,86 +304,86 @@ static void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
 void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
 		struct ocf_part *src_part, struct ocf_part *dst_part)
 {
-	OCF_METADATA_EVICTION_WR_LOCK(cline);
+	OCF_METADATA_LRU_WR_LOCK(cline);
 	ocf_lru_repart_locked(cache, cline, src_part, dst_part);
-	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
 }
 
 static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
-		struct ocf_part *part, uint32_t start_evp, bool clean,
+		struct ocf_part *part, uint32_t start_lru, bool clean,
 		_lru_hash_locked_pfn hash_locked, struct ocf_request *req)
 {
 	uint32_t i;
 
 	/* entire iterator implementation depends on gcc builtins for
 	   bit operations which works on 64 bit integers at most */
-	ENV_BUILD_BUG_ON(OCF_NUM_EVICTION_LISTS > sizeof(iter->evp) * 8);
+	ENV_BUILD_BUG_ON(OCF_NUM_LRU_LISTS > sizeof(iter->lru_idx) * 8);
 
 	iter->cache = cache;
 	iter->c = ocf_cache_line_concurrency(cache);
 	iter->part = part;
-	/* set iterator value to start_evp - 1 modulo OCF_NUM_EVICTION_LISTS */
-	iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) %
-			OCF_NUM_EVICTION_LISTS;
-	iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
-	iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
+	/* set iterator value to start_lru - 1 modulo OCF_NUM_LRU_LISTS */
+	iter->lru_idx = (start_lru + OCF_NUM_LRU_LISTS - 1) %
+			OCF_NUM_LRU_LISTS;
+	iter->num_avail_lrus = OCF_NUM_LRU_LISTS;
+	iter->next_avail_lru = ((1ULL << OCF_NUM_LRU_LISTS) - 1);
 	iter->clean = clean;
 	iter->hash_locked = hash_locked;
 	iter->req = req;
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
 		iter->curr_cline[i] = ocf_lru_get_list(part, i, clean)->tail;
 }
 
 static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
-		ocf_cache_t cache, struct ocf_part *part, uint32_t start_evp)
+		ocf_cache_t cache, struct ocf_part *part, uint32_t start_lru)
 {
 	/* Lock cachelines for read, non-exclusive access */
-	lru_iter_init(iter, cache, part, start_evp, false, NULL, NULL);
+	lru_iter_init(iter, cache, part, start_lru, false, NULL, NULL);
 }
 
 static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
 		ocf_cache_t cache, struct ocf_part *part,
-		uint32_t start_evp, struct ocf_request *req)
+		uint32_t start_lru, struct ocf_request *req)
 {
 	/* Lock hash buckets for write, cachelines according to user request,
 	 * however exclusive cacheline access is needed even in case of read
 	 * access. _ocf_lru_evict_hash_locked tells whether given hash bucket
 	 * is already locked as part of request hash locking (to avoid attempt
 	 * to acquire the same hash bucket lock twice) */
-	lru_iter_init(iter, cache, part, start_evp, true, ocf_req_hash_in_range,
+	lru_iter_init(iter, cache, part, start_lru, true, ocf_req_hash_in_range,
 			req);
 }
 
-static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
+static inline uint32_t _lru_next_lru(struct ocf_lru_iter *iter)
 {
 	unsigned increment;
 
-	increment = __builtin_ffsll(iter->next_avail_evp);
-	iter->next_avail_evp = ocf_rotate_right(iter->next_avail_evp,
-			increment, OCF_NUM_EVICTION_LISTS);
-	iter->evp = (iter->evp + increment) % OCF_NUM_EVICTION_LISTS;
+	increment = __builtin_ffsll(iter->next_avail_lru);
+	iter->next_avail_lru = ocf_rotate_right(iter->next_avail_lru,
+			increment, OCF_NUM_LRU_LISTS);
+	iter->lru_idx = (iter->lru_idx + increment) % OCF_NUM_LRU_LISTS;
 
-	return iter->evp;
+	return iter->lru_idx;
 }
 
-static inline bool _lru_evp_is_empty(struct ocf_lru_iter *iter)
+static inline bool _lru_lru_is_empty(struct ocf_lru_iter *iter)
 {
-	return !(iter->next_avail_evp & (1ULL << (OCF_NUM_EVICTION_LISTS - 1)));
+	return !(iter->next_avail_lru & (1ULL << (OCF_NUM_LRU_LISTS - 1)));
 }
 
-static inline void _lru_evp_set_empty(struct ocf_lru_iter *iter)
+static inline void _lru_lru_set_empty(struct ocf_lru_iter *iter)
 {
-	iter->next_avail_evp &= ~(1ULL << (OCF_NUM_EVICTION_LISTS - 1));
-	iter->num_avail_evps--;
+	iter->next_avail_lru &= ~(1ULL << (OCF_NUM_LRU_LISTS - 1));
+	iter->num_avail_lrus--;
}
 
-static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
+static inline bool _lru_lru_all_empty(struct ocf_lru_iter *iter)
 {
-	return iter->num_avail_evps == 0;
+	return iter->num_avail_lrus == 0;
 }
 
 static bool inline _lru_trylock_hash(struct ocf_lru_iter *iter,
@@ -450,7 +448,7 @@ static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
 }
 
 /* Get next clean cacheline from tail of lru lists. Caller must not hold any
- * eviction list lock.
+ * lru list lock.
  * - returned cacheline is write locked
  * - returned cacheline has the corresponding metadata hash bucket write locked
  * - cacheline is moved to the head of destination partition lru list before
@@ -462,18 +460,18 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
 		struct ocf_part *dst_part, ocf_core_id_t *core_id,
 		uint64_t *core_line)
 {
-	uint32_t curr_evp;
+	uint32_t curr_lru;
 	ocf_cache_line_t cline;
 	ocf_cache_t cache = iter->cache;
 	struct ocf_part *part = iter->part;
 	struct ocf_lru_list *list;
 
 	do {
-		curr_evp = _lru_next_evp(iter);
+		curr_lru = _lru_next_lru(iter);
 
-		ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);
+		ocf_metadata_lru_wr_lock(&cache->metadata.lock, curr_lru);
 
-		list = ocf_lru_get_list(part, curr_evp, iter->clean);
+		list = ocf_lru_get_list(part, curr_lru, iter->clean);
 
 		cline = list->tail;
 		while (cline != end_marker && !_lru_iter_evition_lock(iter,
@@ -492,20 +490,20 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
 			}
 		}
 
-		ocf_metadata_eviction_wr_unlock(&cache->metadata.lock,
-				curr_evp);
+		ocf_metadata_lru_wr_unlock(&cache->metadata.lock,
+				curr_lru);
 
-		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
+		if (cline == end_marker && !_lru_lru_is_empty(iter)) {
 			/* mark list as empty */
-			_lru_evp_set_empty(iter);
+			_lru_lru_set_empty(iter);
 		}
-	} while (cline == end_marker && !_lru_evp_all_empty(iter));
+	} while (cline == end_marker && !_lru_lru_all_empty(iter));
 
 	return cline;
 }
 
 /* Get next clean cacheline from tail of free lru lists. Caller must not hold any
- * eviction list lock.
+ * lru list lock.
  * - returned cacheline is write locked
 * - cacheline is moved to the head of destination partition lru list before
 *   being returned.
@@ -515,18 +513,18 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
 static inline ocf_cache_line_t lru_iter_free_next(struct ocf_lru_iter *iter,
 		struct ocf_part *dst_part)
 {
-	uint32_t curr_evp;
+	uint32_t curr_lru;
 	ocf_cache_line_t cline;
 	ocf_cache_t cache = iter->cache;
 	struct ocf_part *free = iter->part;
 	struct ocf_lru_list *list;
 
 	do {
-		curr_evp = _lru_next_evp(iter);
+		curr_lru = _lru_next_lru(iter);
 
-		ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);
+		ocf_metadata_lru_wr_lock(&cache->metadata.lock, curr_lru);
 
-		list = ocf_lru_get_list(free, curr_evp, true);
+		list = ocf_lru_get_list(free, curr_lru, true);
 
 		cline = list->tail;
 		while (cline != end_marker && !ocf_cache_line_try_lock_wr(
@@ -538,44 +536,44 @@ static inline ocf_cache_line_t lru_iter_free_next(struct ocf_lru_iter *iter,
 			ocf_lru_repart_locked(cache, cline, free, dst_part);
 		}
 
-		ocf_metadata_eviction_wr_unlock(&cache->metadata.lock,
-				curr_evp);
+		ocf_metadata_lru_wr_unlock(&cache->metadata.lock,
+				curr_lru);
 
-		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
+		if (cline == end_marker && !_lru_lru_is_empty(iter)) {
 			/* mark list as empty */
-			_lru_evp_set_empty(iter);
+			_lru_lru_set_empty(iter);
 		}
-	} while (cline == end_marker && !_lru_evp_all_empty(iter));
+	} while (cline == end_marker && !_lru_lru_all_empty(iter));
 
 	return cline;
 }
 
 /* Get next dirty cacheline from tail of lru lists. Caller must hold all
- * eviction list locks during entire iteration proces. Returned cacheline
+ * lru list locks during entire iteration proces. Returned cacheline
  * is read or write locked, depending on iter->write_lock */
 static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
 {
-	uint32_t curr_evp;
+	uint32_t curr_lru;
 	ocf_cache_line_t cline;
 
 	do {
-		curr_evp = _lru_next_evp(iter);
-		cline = iter->curr_cline[curr_evp];
+		curr_lru = _lru_next_lru(iter);
+		cline = iter->curr_cline[curr_lru];
 
 		while (cline != end_marker && ! ocf_cache_line_try_lock_rd(
 				iter->c, cline)) {
 			cline = ocf_metadata_get_lru(iter->cache, cline)->prev;
 		}
 		if (cline != end_marker) {
-			iter->curr_cline[curr_evp] =
+			iter->curr_cline[curr_lru] =
 				ocf_metadata_get_lru(iter->cache , cline)->prev;
 		}
 
-		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
+		if (cline == end_marker && !_lru_lru_is_empty(iter)) {
 			/* mark list as empty */
-			_lru_evp_set_empty(iter);
+			_lru_lru_set_empty(iter);
 		}
-	} while (cline == end_marker && !_lru_evp_all_empty(iter));
+	} while (cline == end_marker && !_lru_lru_all_empty(iter));
 
 	return cline;
 }
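All three *_next variants above share one trick: walk a list from the tail toward the head and skip entries whose cacheline lock cannot be taken without blocking, so a single busy line never stalls eviction or cleaning. The same idea in isolation, with hypothetical types and a pthread mutex standing in for the OCF cacheline lock:

/* trylock_tail_scan.c */
#include <pthread.h>
#include <stdint.h>

#define END_MARKER ((uint32_t)-1)

struct entry {
	uint32_t prev;        /* previous node index, END_MARKER at head */
	pthread_mutex_t lock; /* stands in for the per-cacheline lock */
};

/* return the tail-most entry that could be locked without blocking, or
 * END_MARKER if every entry is busy; on success the entry is returned
 * locked and the caller must unlock it when done */
static uint32_t scan_from_tail(struct entry *tab, uint32_t tail)
{
	uint32_t cur = tail;

	while (cur != END_MARKER && pthread_mutex_trylock(&tab[cur].lock) != 0)
		cur = tab[cur].prev;

	return cur;
}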
@@ -628,7 +626,7 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
 	};
 	ocf_cache_line_t *cline = ctx->cline;
 	struct ocf_lru_iter iter;
-	unsigned evp;
+	unsigned lru_idx;
 	int cnt;
 	unsigned i;
 	unsigned lock_idx;
@@ -648,14 +646,14 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
 	}
 
 	ctx->cache = cache;
-	evp = io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;
+	lru_idx = io_queue->eviction_idx++ % OCF_NUM_LRU_LISTS;
 
 	lock_idx = ocf_metadata_concurrency_next_idx(io_queue);
 	ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);
 
-	OCF_METADATA_EVICTION_WR_LOCK_ALL();
+	OCF_METADATA_LRU_WR_LOCK_ALL();
 
-	lru_iter_cleaning_init(&iter, cache, &user_part->part, evp);
+	lru_iter_cleaning_init(&iter, cache, &user_part->part, lru_idx);
 
 	i = 0;
 	while (i < OCF_EVICTION_CLEAN_SIZE) {
@@ -666,7 +664,7 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
 	while (i < OCF_EVICTION_CLEAN_SIZE)
 		cline[i++] = end_marker;
 
-	OCF_METADATA_EVICTION_WR_UNLOCK_ALL();
+	OCF_METADATA_LRU_WR_UNLOCK_ALL();
 
 	ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);
@@ -721,7 +719,7 @@ uint32_t ocf_lru_req_clines(struct ocf_request *req,
 	uint64_t core_line;
 	ocf_core_id_t core_id;
 	ocf_cache_t cache = req->cache;
-	unsigned evp;
+	unsigned lru_idx;
 	unsigned req_idx = 0;
 	struct ocf_part *dst_part;
@@ -740,9 +738,9 @@ uint32_t ocf_lru_req_clines(struct ocf_request *req,
 	ENV_BUG_ON(req->part_id == PARTITION_FREELIST);
 	dst_part = &cache->user_parts[req->part_id].part;
 
-	evp = req->io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;
+	lru_idx = req->io_queue->eviction_idx++ % OCF_NUM_LRU_LISTS;
 
-	lru_iter_eviction_init(&iter, cache, src_part, evp, req);
+	lru_iter_eviction_init(&iter, cache, src_part, lru_idx, req);
 
 	i = 0;
 	while (i < cline_no) {
@@ -805,16 +803,16 @@ void ocf_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 	node = ocf_metadata_get_lru(cache, cline);
 
-	OCF_METADATA_EVICTION_RD_LOCK(cline);
+	OCF_METADATA_LRU_RD_LOCK(cline);
 	hot = node->hot;
-	OCF_METADATA_EVICTION_RD_UNLOCK(cline);
+	OCF_METADATA_LRU_RD_UNLOCK(cline);
 
 	if (hot)
 		return;
 
-	list = evp_get_cline_list(cache, cline);
+	list = lru_get_cline_list(cache, cline);
 
-	OCF_METADATA_EVICTION_WR_LOCK(cline);
+	OCF_METADATA_LRU_WR_LOCK(cline);
 
 	if (node->next != end_marker ||
 			node->prev != end_marker ||
@@ -826,7 +824,7 @@ void ocf_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 		add_lru_head(cache, list, cline);
 	balance_lru_list(cache, list);
 
-	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
 }
 
 static inline void _lru_init(struct ocf_lru_list *list, bool track_hot)
@@ -839,13 +837,13 @@ static inline void _lru_init(struct ocf_lru_list *list, bool track_hot)
 	list->track_hot = track_hot;
 }
 
-void ocf_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
+void ocf_lru_init(ocf_cache_t cache, struct ocf_part *part)
 {
 	struct ocf_lru_list *clean_list;
 	struct ocf_lru_list *dirty_list;
 	uint32_t i;
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		clean_list = ocf_lru_get_list(part, i, true);
 		dirty_list = ocf_lru_get_list(part, i, false);
@@ -863,37 +861,37 @@ void ocf_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
 void ocf_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
 		ocf_cache_line_t cline)
 {
-	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
+	uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
 	struct ocf_lru_list *clean_list;
 	struct ocf_lru_list *dirty_list;
 
-	clean_list = ocf_lru_get_list(part, ev_list, true);
-	dirty_list = ocf_lru_get_list(part, ev_list, false);
+	clean_list = ocf_lru_get_list(part, lru_list, true);
+	dirty_list = ocf_lru_get_list(part, lru_list, false);
 
-	OCF_METADATA_EVICTION_WR_LOCK(cline);
+	OCF_METADATA_LRU_WR_LOCK(cline);
 	remove_lru_list(cache, dirty_list, cline);
 	balance_lru_list(cache, dirty_list);
 	add_lru_head(cache, clean_list, cline);
 	balance_lru_list(cache, clean_list);
-	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
 }
 
 void ocf_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
 		ocf_cache_line_t cline)
 {
-	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
+	uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
 	struct ocf_lru_list *clean_list;
 	struct ocf_lru_list *dirty_list;
 
-	clean_list = ocf_lru_get_list(part, ev_list, true);
-	dirty_list = ocf_lru_get_list(part, ev_list, false);
+	clean_list = ocf_lru_get_list(part, lru_list, true);
+	dirty_list = ocf_lru_get_list(part, lru_list, false);
 
-	OCF_METADATA_EVICTION_WR_LOCK(cline);
+	OCF_METADATA_LRU_WR_LOCK(cline);
 	remove_lru_list(cache, clean_list, cline);
 	balance_lru_list(cache, clean_list);
 	add_lru_head(cache, dirty_list, cline);
 	balance_lru_list(cache, dirty_list);
-	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
 }
 
 static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
@@ -927,7 +925,7 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
 	ocf_cache_line_t collision_table_entries =
 			ocf_metadata_collision_table_entries(cache);
 	struct ocf_lru_list *list;
-	unsigned ev_list;
+	unsigned lru_list;
 	unsigned i;
 
 	phys = 0;
@@ -940,8 +938,8 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
 		ocf_metadata_set_partition_id(cache, cline, PARTITION_FREELIST);
 
-		ev_list = (cline % OCF_NUM_EVICTION_LISTS);
-		list = ocf_lru_get_list(&cache->free, ev_list, true);
+		lru_list = (cline % OCF_NUM_LRU_LISTS);
+		list = ocf_lru_get_list(&cache->free, lru_list, true);
 
 		add_lru_head(cache, list, cline);
 		balance_lru_list(cache, list);
@@ -1024,7 +1022,8 @@ int ocf_metadata_actor(struct ocf_cache *cache,
 	ENV_BUG_ON(part_id == PARTITION_FREELIST);
 	part = &cache->user_parts[part_id].part;
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		for (clean = 0; clean <= 1; clean++) {
 			list = ocf_lru_get_list(part, i, clean);

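ocf_lru_clean_cline() and ocf_lru_dirty_cline() above move a line between the clean and dirty list of its slot while holding that slot's striped write lock; since the slot index is always cline % OCF_NUM_LRU_LISTS, the move never crosses a stripe. A structural sketch of that pairing (intrusive index-based lists; layout assumed, names hypothetical):

/* clean_dirty_move.c */
#include <stdbool.h>
#include <stdint.h>

#define NUM_LRU_LISTS 32u
#define END_MARKER ((uint32_t)-1)

struct node { uint32_t prev, next; };
struct lru_list { uint32_t head, tail, num_nodes; };
struct slot { struct lru_list clean, dirty; };

static struct node nodes[1024]; /* per-line link storage */
static struct slot lru[NUM_LRU_LISTS];

static void list_unlink(struct lru_list *l, uint32_t i)
{
	uint32_t p = nodes[i].prev, n = nodes[i].next;

	if (p != END_MARKER) nodes[p].next = n; else l->head = n;
	if (n != END_MARKER) nodes[n].prev = p; else l->tail = p;
	nodes[i].prev = nodes[i].next = END_MARKER;
	l->num_nodes--;
}

static void list_push_head(struct lru_list *l, uint32_t i)
{
	nodes[i].prev = END_MARKER;
	nodes[i].next = l->head;
	if (l->head != END_MARKER) nodes[l->head].prev = i; else l->tail = i;
	l->head = i;
	l->num_nodes++;
}

/* assumes cline is currently linked on its slot's clean list and that
 * the slot's striped write lock is held, as in the hunks above */
static void mark_dirty(uint32_t cline)
{
	struct slot *s = &lru[cline % NUM_LRU_LISTS];

	list_unlink(&s->clean, cline);
	list_push_head(&s->dirty, cline);
}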

@@ -20,7 +20,7 @@ bool ocf_lru_can_evict(struct ocf_cache *cache);
 uint32_t ocf_lru_req_clines(struct ocf_request *req,
 		struct ocf_part *src_part, uint32_t cline_no);
 void ocf_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
-void ocf_lru_init_evp(struct ocf_cache *cache, struct ocf_part *part);
+void ocf_lru_init(struct ocf_cache *cache, struct ocf_part *part);
 void ocf_lru_dirty_cline(struct ocf_cache *cache, struct ocf_part *part,
 		ocf_cache_line_t cline);
 void ocf_lru_clean_cline(struct ocf_cache *cache, struct ocf_part *part,


@@ -12,7 +12,7 @@
 
 #define OCF_PENDING_EVICTION_LIMIT 512UL
 
-#define OCF_NUM_EVICTION_LISTS 32
+#define OCF_NUM_LRU_LISTS 32
 
 struct ocf_part;
 struct ocf_user_part;


@@ -5,10 +5,10 @@
  * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
  * ONE FUNCTION PER LINE
  * lru_iter_init
- * _lru_next_evp
- * _lru_evp_is_empty
- * _lru_evp_set_empty
- * _lru_evp_all_empty
+ * _lru_next_lru
+ * _lru_lru_is_empty
+ * _lru_lru_set_empty
+ * _lru_lru_all_empty
  * ocf_rotate_right
  * lru_iter_eviction_next
 * lru_iter_cleaning_next
@@ -44,7 +44,7 @@ struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t
 	return NULL;
 }
 
-ocf_cache_line_t test_cases[10 * OCF_NUM_EVICTION_LISTS][OCF_NUM_EVICTION_LISTS][20];
+ocf_cache_line_t test_cases[10 * OCF_NUM_LRU_LISTS][OCF_NUM_LRU_LISTS][20];
 unsigned num_cases = 20;
 
 void write_test_case_description(void)
@@ -53,21 +53,21 @@ void write_test_case_description(void)
 	unsigned test_case = 0;
 
 	// case 0 - all lists empty
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		test_cases[0][i][test_case] = -1;
 	}
 
 	// case 1 - all lists with single element
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		test_cases[0][i][test_case] = 10 * i;
 		test_cases[1][i][test_case] = -1;
 	}
 
 	// case 2 - all lists have between 1 and 5 elements, increasingly
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
-		unsigned num_elements = 1 + i / (OCF_NUM_EVICTION_LISTS / 4);
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
+		unsigned num_elements = 1 + i / (OCF_NUM_LRU_LISTS / 4);
 
 		for (j = 0; j < num_elements; j++)
 			test_cases[j][i][test_case] = 10 * i + j;
@@ -76,7 +76,7 @@ void write_test_case_description(void)
 
 	// case 3 - all lists have between 1 and 5 elements, modulo index
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		unsigned num_elements = 1 + (i % 5);
 
 		for (j = 0; j < num_elements; j++)
@@ -86,8 +86,8 @@ void write_test_case_description(void)
 
 	// case 4 - all lists have between 0 and 4 elements, increasingly
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
-		unsigned num_elements = i / (OCF_NUM_EVICTION_LISTS / 4);
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
+		unsigned num_elements = i / (OCF_NUM_LRU_LISTS / 4);
 
 		for (j = 0; j < num_elements; j++)
 			test_cases[j][i][test_case] = 10 * i + j;
@@ -96,7 +96,7 @@ void write_test_case_description(void)
 
 	// case 5 - all lists have between 0 and 4 elements, modulo index
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		unsigned num_elements = (i % 5);
 
 		for (j = 0; j < num_elements; j++)
@@ -106,41 +106,41 @@ void write_test_case_description(void)
 
 	// case 6 - list length increasing by 1 from 0
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		unsigned num_elements = i;
 
 		for (j = 0; j < num_elements; j++)
-			test_cases[j][i][test_case] = OCF_NUM_EVICTION_LISTS * i + j;
+			test_cases[j][i][test_case] = OCF_NUM_LRU_LISTS * i + j;
 
 		test_cases[j][i][test_case] = -1;
 	}
 
 	// case 7 - list length increasing by 1 from 1
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		unsigned num_elements = i + 1;
 
 		for (j = 0; j < num_elements; j++)
-			test_cases[j][i][test_case] = 2 * OCF_NUM_EVICTION_LISTS * i + j;
+			test_cases[j][i][test_case] = 2 * OCF_NUM_LRU_LISTS * i + j;
 
 		test_cases[j][i][test_case] = -1;
 	}
 
 	// case 8 - list length increasing by 4 from 0
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		unsigned num_elements = 4 * i;
 
 		for (j = 0; j < num_elements; j++)
-			test_cases[j][i][test_case] = 4 * OCF_NUM_EVICTION_LISTS * i + j;
+			test_cases[j][i][test_case] = 4 * OCF_NUM_LRU_LISTS * i + j;
 
 		test_cases[j][i][test_case] = -1;
 	}
 
 	// case 9 - list length increasing by 4 from 1
 	test_case++;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 		unsigned num_elements = 4 * i + 1;
 
 		for (j = 0; j < num_elements; j++)
-			test_cases[j][i][test_case] = 5 * OCF_NUM_EVICTION_LISTS * i + j;
+			test_cases[j][i][test_case] = 5 * OCF_NUM_LRU_LISTS * i + j;
 
 		test_cases[j][i][test_case] = -1;
 	}
@@ -150,8 +150,8 @@ void write_test_case_description(void)
 	while(test_case < 2 * (l + 1)) {
 		unsigned matching_case = test_case - l - 1;
 
-		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
-			unsigned curr_list = (i + 4) % OCF_NUM_EVICTION_LISTS;
+		for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
+			unsigned curr_list = (i + 4) % OCF_NUM_LRU_LISTS;
 			j = 0;
 
 			while(test_cases[j][i][matching_case] != -1) {
 				test_cases[j][curr_list][test_case] =
@@ -164,13 +164,13 @@ void write_test_case_description(void)
 	}
 
 	/* transform cacheline numbers so that they remain unique but have
-	 * assignment to list modulo OCF_NUM_EVICTION_LISTS */
+	 * assignment to list modulo OCF_NUM_LRU_LISTS */
 	for (test_case = 0; test_case < num_cases; test_case++) {
-		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+		for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 			j = 0;
 			while (test_cases[j][i][test_case] != -1) {
 				test_cases[j][i][test_case] = test_cases[j][i][test_case] *
-					OCF_NUM_EVICTION_LISTS + i;
+					OCF_NUM_LRU_LISTS + i;
 				j++;
 			}
 		}
@@ -185,7 +185,7 @@ void write_test_case_description(void)
 	for (test_case = 0; test_case < num_cases; test_case++) {
 		print_message("test case no %d\n", test_case);
-		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
+		for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
 			print_message("list %02u: ", i);
 			j = 0;
 			while (test_cases[j][i][test_case] != -1) {
@@ -204,11 +204,11 @@ unsigned current_case;
 struct ocf_lru_list list;
 
 struct ocf_lru_list *__wrap_ocf_lru_get_list(struct ocf_user_part *user_part,
-		uint32_t evp, bool clean)
+		uint32_t lru, bool clean)
 {
 	unsigned i = 0;
 
-	while (test_cases[i][evp][current_case] != -1)
+	while (test_cases[i][lru][current_case] != -1)
 		i++;
 
 	if (i == 0) {
@@ -216,53 +216,53 @@ struct ocf_lru_list *__wrap_ocf_lru_get_list(struct ocf_user_part *user_part,
 		list.tail = -1;
 		list.num_nodes = 0;
 	} else {
-		list.head = test_cases[0][evp][current_case];
-		list.tail = test_cases[i - 1][evp][current_case];
+		list.head = test_cases[0][lru][current_case];
+		list.tail = test_cases[i - 1][lru][current_case];
 		list.num_nodes = i;
 	}
 
 #ifdef DEBUG
-	print_message("list for case %u evp %u: head: %u tail %u elems %u\n",
-			current_case, evp, list.head, list.tail, list.num_nodes);
+	print_message("list for case %u lru %u: head: %u tail %u elems %u\n",
+			current_case, lru, list.head, list.tail, list.num_nodes);
 #endif
 
 	return &list;
 }
 
-inline struct ocf_lru_list *__wrap_evp_get_cline_list(ocf_cache_t cache,
+inline struct ocf_lru_list *__wrap_lru_get_cline_list(ocf_cache_t cache,
 	ocf_cache_line_t cline)
 {
-	return __wrap_ocf_lru_get_list(NULL, cline % OCF_NUM_EVICTION_LISTS, true);
+	return __wrap_ocf_lru_get_list(NULL, cline % OCF_NUM_LRU_LISTS, true);
 }
 
-struct ocf_lru_meta policy;
+struct ocf_lru_meta g_lru_meta;
 
 struct ocf_lru_meta *__wrap_ocf_metadata_get_lru(
 		struct ocf_cache *cache, ocf_cache_line_t line)
 {
 	unsigned i, j;
 
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
 	{
 		j = 0;
 
 		while (test_cases[j][i][current_case] != -1) {
 			if (test_cases[j][i][current_case] == line) {
 				if (j == 0) {
-					policy.prev = -1;
+					g_lru_meta.prev = -1;
 				} else {
-					policy.prev =
+					g_lru_meta.prev =
 						test_cases[j - 1][i][current_case];
 				}
 
-				policy.next = test_cases[j + 1][i][current_case];
+				g_lru_meta.next = test_cases[j + 1][i][current_case];
 #ifdef DEBUG
 				print_message("[%u] next %u prev %u\n",
-						line, policy.next,
-						policy.prev);
+						line, g_lru_meta.next,
+						g_lru_meta.prev);
 #endif
-				return &policy;
+				return &g_lru_meta;
 			}
 			j++;
 		}
@@ -280,7 +280,7 @@ void __wrap_add_lru_head(ocf_cache_t cache,
 		unsigned int collision_index)
 {
 	unsigned list_head = list->head;
-	unsigned i, j = collision_index % OCF_NUM_EVICTION_LISTS;
+	unsigned i, j = collision_index % OCF_NUM_LRU_LISTS;
 
 	i = 1;
 	while (test_cases[i][j][current_case] != -1)
@@ -294,7 +294,7 @@ void __wrap_add_lru_head(ocf_cache_t cache,
 	test_cases[0][j][current_case] = collision_index;
 
 #ifdef DEBUG
-	print_message("case %u evp %u head set to %u\n", current_case, j, collision_index);
+	print_message("case %u lru %u head set to %u\n", current_case, j, collision_index);
 #endif
 }
@@ -307,7 +307,7 @@ void __wrap_remove_lru_list(ocf_cache_t cache,
 	unsigned i, j;
 
 	found = false;
-	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
+	for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
 	{
 		j = 0;
@@ -328,7 +328,7 @@ void __wrap_remove_lru_list(ocf_cache_t cache,
 	assert(found);
 
 #ifdef DEBUG
-	print_message("case %u removed %u from evp %u\n", current_case, collision_index, i);
+	print_message("case %u removed %u from lru %u\n", current_case, collision_index, i);
 #endif
 }
@@ -355,17 +355,17 @@ static void _lru_run_test(unsigned test_case)
 	unsigned start_pos;
 	current_case = test_case;
 
-	for (start_pos = 0; start_pos < OCF_NUM_EVICTION_LISTS; start_pos++)
+	for (start_pos = 0; start_pos < OCF_NUM_LRU_LISTS; start_pos++)
 	{
 		struct ocf_lru_iter iter;
 		ocf_cache_line_t cache_line, expected_cache_line;
-		unsigned curr_evp = start_pos;
-		unsigned pos[OCF_NUM_EVICTION_LISTS];
+		unsigned curr_lru = start_pos;
+		unsigned pos[OCF_NUM_LRU_LISTS];
 		unsigned i;
 
 		write_test_case_description();
 
-		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
+		for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
 		{
 			pos[i] = -1;
 			while(test_cases[pos[i] + 1][i][test_case] != -1)
@@ -377,26 +377,26 @@ static void _lru_run_test(unsigned test_case)
 
 		do {
 			/* check what is expected to be returned from iterator */
-			if (pos[curr_evp] == -1) {
+			if (pos[curr_lru] == -1) {
 				i = 1;
-				while (i < OCF_NUM_EVICTION_LISTS &&
-						pos[(curr_evp + i) % OCF_NUM_EVICTION_LISTS]
+				while (i < OCF_NUM_LRU_LISTS &&
+						pos[(curr_lru + i) % OCF_NUM_LRU_LISTS]
 						== -1) {
 					i++;
 				}
-				if (i == OCF_NUM_EVICTION_LISTS) {
+				if (i == OCF_NUM_LRU_LISTS) {
 					/* reached end of lists */
 					expected_cache_line = -1;
 				} else {
-					curr_evp = (curr_evp + i) % OCF_NUM_EVICTION_LISTS;
-					expected_cache_line = test_cases[pos[curr_evp]]
-						[curr_evp][test_case];
-					pos[curr_evp]--;
+					curr_lru = (curr_lru + i) % OCF_NUM_LRU_LISTS;
+					expected_cache_line = test_cases[pos[curr_lru]]
+						[curr_lru][test_case];
+					pos[curr_lru]--;
 				}
 			} else {
-				expected_cache_line = test_cases[pos[curr_evp]]
-					[curr_evp][test_case];
-				pos[curr_evp]--;
+				expected_cache_line = test_cases[pos[curr_lru]]
+					[curr_lru][test_case];
+				pos[curr_lru]--;
 			}
 
 			/* get cacheline from iterator */
@@ -404,11 +404,11 @@ static void _lru_run_test(unsigned test_case)
 			assert_int_equal(cache_line, expected_cache_line);
 
-			curr_evp = (curr_evp + 1) % OCF_NUM_EVICTION_LISTS;
+			curr_lru = (curr_lru + 1) % OCF_NUM_LRU_LISTS;
 		} while (cache_line != -1);
 
 		/* make sure all cachelines are visited */
-		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
+		for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
 		{
 			assert_int_equal((unsigned)-1, pos[i]);
 		}
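A closing note on the test scaffolding: the __wrap_* functions in this file rely on GNU ld symbol wrapping (-Wl,--wrap=symbol), the mechanism such unit tests typically use to stub dependencies out of the unit under test. Calls to the real symbol are redirected to __wrap_symbol, and __real_symbol still reaches the original. A two-file illustration with hypothetical names; the wrapped function must live in a separate translation unit, because the linker only rewrites undefined references:

/* answer.c */
int get_answer(void) { return 42; }

/* main.c - build: cc main.c answer.c -Wl,--wrap=get_answer */
#include <stdio.h>

int get_answer(void);
int __real_get_answer(void);

/* every call to get_answer() now lands here */
int __wrap_get_answer(void)
{
	return __real_get_answer() + 1;
}

int main(void)
{
	printf("%d\n", get_answer()); /* prints 43 */
	return 0;
}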