Rename "evp_lru*" functions to "ocf_lru*"

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski, 2021-06-15 21:27:22 +02:00
parent b1143374a8
commit 33e2beac24
10 changed files with 68 additions and 68 deletions


@@ -202,7 +202,7 @@ static void ocf_engine_set_hot(struct ocf_request *req)
         if (status == LOOKUP_HIT) {
             /* Update eviction (LRU) */
-            evp_lru_hot_cline(cache, entry->coll_idx);
+            ocf_lru_hot_cline(cache, entry->coll_idx);
         }
     }
 }
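
The call above promotes a cache line to the most-recently-used position on a lookup hit. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of MRU promotion on a doubly linked list; the types and the lru_promote() helper are illustrative stand-ins, not OCF's actual structures:

#include <stddef.h>

/* Minimal doubly linked LRU node/list model (illustrative only). */
struct lru_node {
    struct lru_node *prev, *next;
};

struct lru_list {
    struct lru_node *head; /* most recently used */
    struct lru_node *tail; /* least recently used, eviction candidate */
};

/* Move @node to the MRU end of @list, e.g. on a cache hit.
 * Assumes @node is already linked into @list. */
static void lru_promote(struct lru_list *list, struct lru_node *node)
{
    if (list->head == node)
        return; /* already most recently used */

    /* unlink from current position */
    if (node->prev)
        node->prev->next = node->next;
    if (node->next)
        node->next->prev = node->prev;
    if (list->tail == node)
        list->tail = node->prev;

    /* relink at the head */
    node->prev = NULL;
    node->next = list->head;
    list->head->prev = node;
    list->head = node;
}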
@@ -518,7 +518,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
     ocf_hb_req_prot_unlock_wr(req);

     if (ocf_req_is_cleaning_required(req)) {
-        evp_lru_clean(req->cache, user_part, req->io_queue,
+        ocf_lru_clean(req->cache, user_part, req->io_queue,
                 128);
     }


@@ -1167,9 +1167,9 @@ static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
     ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
             cache_line);

-    evp_lru_init_cline(cache, cache_line);
-    evp_lru_hot_cline(cache, cache_line);
+    ocf_lru_init_cline(cache, cache_line);
+    ocf_lru_hot_cline(cache, cache_line);

     env_atomic_inc(&core->runtime_meta->cached_clines);
     env_atomic_inc(&core->runtime_meta->


@@ -185,9 +185,9 @@ static void __init_parts_attached(ocf_cache_t cache)
     ocf_part_id_t part_id;

     for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++)
-        evp_lru_init_evp(cache, &cache->user_parts[part_id].part);
+        ocf_lru_init_evp(cache, &cache->user_parts[part_id].part);

-    evp_lru_init_evp(cache, &cache->free);
+    ocf_lru_init_evp(cache, &cache->free);
 }

 static void __init_free(ocf_cache_t cache)


@@ -220,7 +220,7 @@ static void balance_lru_list(ocf_cache_t cache,

 /*-- End of LRU functions*/

-void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
+void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 {
     struct ocf_lru_meta *node;

@@ -231,7 +231,7 @@ void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
     node->next = end_marker;
 }

-static struct ocf_lru_list *evp_lru_get_list(struct ocf_part *part,
+static struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
         uint32_t evp, bool clean)
 {
     return clean ? &part->runtime->lru[evp].clean :

@@ -250,11 +250,11 @@ static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
     ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
     part = &cache->user_parts[part_id].part;

-    return evp_lru_get_list(part, ev_list,
+    return ocf_lru_get_list(part, ev_list,
             !metadata_test_dirty(cache, cline));
 }

-static void evp_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
+static void ocf_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
         struct ocf_part *src_part, struct ocf_lru_list *src_list,
         struct ocf_part *dst_part, struct ocf_lru_list *dst_list)
 {
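
As the hunks above show, each partition shards its LRU into OCF_NUM_EVICTION_LISTS list pairs (clean and dirty), and a line is assigned to a shard by taking the cache line number modulo the list count. A simplified model of that selection logic follows; the struct layout and names are assumptions for illustration only:

#include <stdbool.h>
#include <stdint.h>

#define NUM_EVICTION_LISTS 32 /* stands in for OCF_NUM_EVICTION_LISTS */

struct lru_list {
    uint32_t head, tail; /* cache line indices, end-marker terminated */
};

/* Per-partition runtime: one clean and one dirty list per shard. */
struct part_runtime {
    struct { struct lru_list clean, dirty; } lru[NUM_EVICTION_LISTS];
};

/* Mirrors ocf_lru_get_list(): pick the shard by list index, then the
 * clean or dirty variant. */
static struct lru_list *lru_get_list(struct part_runtime *rt,
        uint32_t ev_list, bool clean)
{
    return clean ? &rt->lru[ev_list].clean : &rt->lru[ev_list].dirty;
}

/* Mirrors evp_get_cline_list(): the shard index comes from the cache
 * line number, clean vs. dirty from the line's dirty bit. */
static struct lru_list *get_cline_list(struct part_runtime *rt,
        uint32_t cline, bool is_dirty)
{
    return lru_get_list(rt, cline % NUM_EVICTION_LISTS, !is_dirty);
}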
@@ -269,7 +269,7 @@ static void evp_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
 }

 /* the caller must hold the metadata lock */
-void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
+void ocf_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 {
     struct ocf_lru_list *list, *free;
     uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);

@@ -283,13 +283,13 @@ void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
     OCF_METADATA_EVICTION_WR_LOCK(cline);

     list = evp_get_cline_list(cache, cline);
-    free = evp_lru_get_list(&cache->free, ev_list, true);
-    evp_lru_move(cache, cline, part, list, &cache->free, free);
+    free = ocf_lru_get_list(&cache->free, ev_list, true);
+    ocf_lru_move(cache, cline, part, list, &cache->free, free);

     OCF_METADATA_EVICTION_WR_UNLOCK(cline);
 }

-static void evp_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+static void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
         struct ocf_part *src_part, struct ocf_part *dst_part)
 {
     uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);

@@ -297,17 +297,17 @@ static void evp_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
     bool clean;

     clean = !metadata_test_dirty(cache, cline);
-    src_list = evp_lru_get_list(src_part, ev_list, clean);
-    dst_list = evp_lru_get_list(dst_part, ev_list, clean);
+    src_list = ocf_lru_get_list(src_part, ev_list, clean);
+    dst_list = ocf_lru_get_list(dst_part, ev_list, clean);

-    evp_lru_move(cache, cline, src_part, src_list, dst_part, dst_list);
+    ocf_lru_move(cache, cline, src_part, src_list, dst_part, dst_list);
 }

 void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
         struct ocf_part *src_part, struct ocf_part *dst_part)
 {
     OCF_METADATA_EVICTION_WR_LOCK(cline);
-    evp_lru_repart_locked(cache, cline, src_part, dst_part);
+    ocf_lru_repart_locked(cache, cline, src_part, dst_part);
     OCF_METADATA_EVICTION_WR_UNLOCK(cline);
 }

@@ -334,7 +334,7 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
     iter->req = req;

     for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
-        iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
+        iter->curr_cline[i] = ocf_lru_get_list(part, i, clean)->tail;
 }

 static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,

@@ -350,7 +350,7 @@ static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
 {
     /* Lock hash buckets for write, cachelines according to user request,
      * however exclusive cacheline access is needed even in case of read
-     * access. _evp_lru_evict_hash_locked tells whether given hash bucket
+     * access. _ocf_lru_evict_hash_locked tells whether given hash bucket
      * is already locked as part of request hash locking (to avoid attempt
      * to acquire the same hash bucket lock twice) */
     lru_iter_init(iter, cache, part, start_evp, true, ocf_req_hash_in_range,
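
The renamed comment above captures a subtle locking rule: eviction needs hash bucket locks, but some buckets may already be locked on behalf of the request, and taking the same bucket lock twice would deadlock. A minimal sketch of the check-before-lock pattern, with hypothetical helpers (the real OCF code uses ocf_req_hash_in_range and the metadata bucket locks):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical record of the bucket range a request already holds. */
struct req_locks {
    uint32_t first_hash, last_hash; /* locked on request entry */
};

static bool req_holds_hash(const struct req_locks *rl, uint32_t hash)
{
    return hash >= rl->first_hash && hash <= rl->last_hash;
}

/* Lock @hash for the eviction path unless the request already holds it.
 * Returns true if this call took the lock, so the caller knows whether
 * it must release it afterwards. */
static bool evict_hash_lock(const struct req_locks *rl, uint32_t hash,
        void (*lock_fn)(uint32_t))
{
    if (req_holds_hash(rl, hash))
        return false; /* already locked as part of request hash locking */
    lock_fn(hash);
    return true;
}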
@@ -473,7 +473,7 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
     ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);

-    list = evp_lru_get_list(part, curr_evp, iter->clean);
+    list = ocf_lru_get_list(part, curr_evp, iter->clean);
     cline = list->tail;

     while (cline != end_marker && !_lru_iter_evition_lock(iter,

@@ -483,7 +483,7 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
     if (cline != end_marker) {
         if (dst_part != part) {
-            evp_lru_repart_locked(cache, cline, part,
+            ocf_lru_repart_locked(cache, cline, part,
                     dst_part);
         } else {
             remove_lru_list(cache, list, cline);

@@ -526,7 +526,7 @@ static inline ocf_cache_line_t lru_iter_free_next(struct ocf_lru_iter *iter,
     ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);

-    list = evp_lru_get_list(free, curr_evp, true);
+    list = ocf_lru_get_list(free, curr_evp, true);
     cline = list->tail;

     while (cline != end_marker && !ocf_cache_line_try_lock_wr(

@@ -535,7 +535,7 @@ static inline ocf_cache_line_t lru_iter_free_next(struct ocf_lru_iter *iter,
     }

     if (cline != end_marker) {
-        evp_lru_repart_locked(cache, cline, free, dst_part);
+        ocf_lru_repart_locked(cache, cline, free, dst_part);
     }

     ocf_metadata_eviction_wr_unlock(&cache->metadata.lock,

@@ -580,7 +580,7 @@ static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
     return cline;
 }

-static void evp_lru_clean_end(void *private_data, int error)
+static void ocf_lru_clean_end(void *private_data, int error)
 {
     struct ocf_part_cleaning_ctx *ctx = private_data;
     unsigned i;

@@ -594,7 +594,7 @@ static void evp_lru_clean_end(void *private_data, int error)
     ocf_refcnt_dec(&ctx->counter);
 }

-static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
+static int ocf_lru_clean_get(ocf_cache_t cache, void *getter_context,
         uint32_t idx, ocf_cache_line_t *line)
 {
     struct ocf_part_cleaning_ctx *ctx = getter_context;

@@ -607,7 +607,7 @@ static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
     return 0;
 }

-void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
+void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
         ocf_queue_t io_queue, uint32_t count)
 {
     struct ocf_part_cleaning_ctx *ctx = &user_part->cleaning;

@@ -617,9 +617,9 @@ void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
         .do_sort = true,
         .cmpl_context = ctx,
-        .cmpl_fn = evp_lru_clean_end,
-        .getter = evp_lru_clean_get,
+        .cmpl_fn = ocf_lru_clean_end,
+        .getter = ocf_lru_clean_get,
         .getter_context = ctx,
         .count = min(count, OCF_EVICTION_CLEAN_SIZE),

@@ -673,7 +673,7 @@ void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
     ocf_cleaner_fire(cache, &attribs);
 }

-bool evp_lru_can_evict(ocf_cache_t cache)
+bool ocf_lru_can_evict(ocf_cache_t cache)
 {
     if (env_atomic_read(&cache->pending_eviction_clines) >=
             OCF_PENDING_EVICTION_LIMIT) {

@@ -683,7 +683,7 @@ bool evp_lru_can_evict(ocf_cache_t cache)
     return true;
 }

-static void evp_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+static void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
         ocf_core_id_t core_id, ocf_part_id_t part_id)
 {
     ocf_core_t core;

@@ -711,7 +711,7 @@ static void evp_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
  * NOTE: all cachelines assigned to the request in this function are marked
  * as LOOKUP_REMAPPED and are write locked.
  */
-uint32_t evp_lru_req_clines(struct ocf_request *req,
+uint32_t ocf_lru_req_clines(struct ocf_request *req,
         struct ocf_part *src_part, uint32_t cline_no)
 {
     struct ocf_alock* alock;
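
The NOTE above is this function's contract: every line it assigns to the request comes back marked LOOKUP_REMAPPED and already write-locked. A caller-side sketch of consuming that contract, using a simplified request map rather than OCF's real types:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum lookup_status { LOOKUP_MISS, LOOKUP_HIT, LOOKUP_REMAPPED };

struct map_entry {
    enum lookup_status status;
    bool wr_locked;
};

/* After the remap call returns, the caller may assume each remapped
 * entry is LOOKUP_REMAPPED and write-locked, so it can fill in
 * metadata without taking cacheline locks a second time. */
static void consume_remapped(struct map_entry *map, uint32_t core_line_count)
{
    for (uint32_t i = 0; i < core_line_count; i++) {
        if (map[i].status != LOOKUP_REMAPPED)
            continue;
        assert(map[i].wr_locked);
        /* ... initialize collision/partition metadata for map[i] ... */
    }
}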
@ -746,7 +746,7 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
i = 0;
while (i < cline_no) {
if (!evp_lru_can_evict(cache))
if (!ocf_lru_can_evict(cache))
break;
if (src_part->id != PARTITION_FREELIST) {
@ -773,7 +773,7 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
ENV_BUG_ON(req->map[req_idx].status != LOOKUP_MISS);
if (src_part->id != PARTITION_FREELIST) {
evp_lru_invalidate(cache, cline, core_id, src_part->id);
ocf_lru_invalidate(cache, cline, core_id, src_part->id);
_lru_unlock_hash(&iter, core_id, core_line);
}
@ -797,7 +797,7 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
}
/* the caller must hold the metadata lock */
void evp_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
void ocf_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
struct ocf_lru_meta *node;
struct ocf_lru_list *list;
@ -839,15 +839,15 @@ static inline void _lru_init(struct ocf_lru_list *list, bool track_hot)
list->track_hot = track_hot;
}
void evp_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
void ocf_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
{
struct ocf_lru_list *clean_list;
struct ocf_lru_list *dirty_list;
uint32_t i;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
clean_list = evp_lru_get_list(part, i, true);
dirty_list = evp_lru_get_list(part, i, false);
clean_list = ocf_lru_get_list(part, i, true);
dirty_list = ocf_lru_get_list(part, i, false);
if (part->id == PARTITION_FREELIST) {
_lru_init(clean_list, false);
@ -860,15 +860,15 @@ void evp_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
env_atomic_set(&part->runtime->curr_size, 0);
}
void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
void ocf_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
ocf_cache_line_t cline)
{
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *clean_list;
struct ocf_lru_list *dirty_list;
clean_list = evp_lru_get_list(part, ev_list, true);
dirty_list = evp_lru_get_list(part, ev_list, false);
clean_list = ocf_lru_get_list(part, ev_list, true);
dirty_list = ocf_lru_get_list(part, ev_list, false);
OCF_METADATA_EVICTION_WR_LOCK(cline);
remove_lru_list(cache, dirty_list, cline);
@ -878,15 +878,15 @@ void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
OCF_METADATA_EVICTION_WR_UNLOCK(cline);
}
void evp_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
void ocf_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
ocf_cache_line_t cline)
{
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *clean_list;
struct ocf_lru_list *dirty_list;
clean_list = evp_lru_get_list(part, ev_list, true);
dirty_list = evp_lru_get_list(part, ev_list, false);
clean_list = ocf_lru_get_list(part, ev_list, true);
dirty_list = ocf_lru_get_list(part, ev_list, false);
OCF_METADATA_EVICTION_WR_LOCK(cline);
remove_lru_list(cache, clean_list, cline);
@ -941,7 +941,7 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
ocf_metadata_set_partition_id(cache, cline, PARTITION_FREELIST);
ev_list = (cline % OCF_NUM_EVICTION_LISTS);
list = evp_lru_get_list(&cache->free, ev_list, true);
list = ocf_lru_get_list(&cache->free, ev_list, true);
add_lru_head(cache, list, cline);
balance_lru_list(cache, list);
@ -1026,7 +1026,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
part = &cache->user_parts[part_id].part;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (clean = 0; clean <= 1; clean++) {
list = evp_lru_get_list(part, i, clean);
list = ocf_lru_get_list(part, i, clean);
cline = list->tail;
while (cline != end_marker) {


@@ -14,18 +14,18 @@ struct ocf_part_runtime;
 struct ocf_part_cleaning_ctx;
 struct ocf_request;

-void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline);
-void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
-bool evp_lru_can_evict(struct ocf_cache *cache);
-uint32_t evp_lru_req_clines(struct ocf_request *req,
+void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline);
+void ocf_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
+bool ocf_lru_can_evict(struct ocf_cache *cache);
+uint32_t ocf_lru_req_clines(struct ocf_request *req,
         struct ocf_part *src_part, uint32_t cline_no);
-void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
-void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_part *part);
-void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_part *part,
+void ocf_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
+void ocf_lru_init_evp(struct ocf_cache *cache, struct ocf_part *part);
+void ocf_lru_dirty_cline(struct ocf_cache *cache, struct ocf_part *part,
         ocf_cache_line_t cline);
-void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_part *part,
+void ocf_lru_clean_cline(struct ocf_cache *cache, struct ocf_part *part,
         ocf_cache_line_t cline);
-void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
+void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
         ocf_queue_t io_queue, uint32_t count);
 void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
         struct ocf_part *src_upart, struct ocf_part *dst_upart);


@@ -42,7 +42,7 @@ static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
         return 0;
     }

-    return evp_lru_req_clines(req, &user_part->part, to_evict);
+    return ocf_lru_req_clines(req, &user_part->part, to_evict);
 }

 static inline uint32_t ocf_evict_user_partitions(ocf_cache_t cache,

@@ -89,7 +89,7 @@ static inline uint32_t ocf_evict_user_partitions(ocf_cache_t cache,
         if (overflown_only)
             to_evict = OCF_MIN(to_evict, overflow_size);

-        evicted += evp_lru_req_clines(req, &user_part->part, to_evict);
+        evicted += ocf_lru_req_clines(req, &user_part->part, to_evict);

         if (evicted >= evict_cline_no) {
             /* Evicted requested number of cache line, stop

@@ -113,7 +113,7 @@ static inline uint32_t ocf_remap_do(struct ocf_request *req)
     /* First attempt to map from freelist */
     if (ocf_lru_num_free(cache) > 0)
-        remapped = evp_lru_req_clines(req, &cache->free, remap_cline_no);
+        remapped = ocf_lru_req_clines(req, &cache->free, remap_cline_no);

     if (remapped >= remap_cline_no)
         return remapped;


@@ -132,7 +132,7 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
          */
         env_atomic_dec(&req->core->runtime_meta->
                 part_counters[part_id].dirty_clines);
-        evp_lru_clean_cline(cache, part, line);
+        ocf_lru_clean_cline(cache, part, line);
         ocf_purge_cleaning_policy(cache, line);
     }
 }

@@ -174,7 +174,7 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
          */
         env_atomic_inc(&req->core->runtime_meta->
                 part_counters[part_id].dirty_clines);
-        evp_lru_dirty_cline(cache, part, line);
+        ocf_lru_dirty_cline(cache, part, line);
     }
 }


@@ -179,7 +179,7 @@ static inline void ocf_purge_cleaning_policy(struct ocf_cache *cache,
 static inline void ocf_purge_eviction_policy(struct ocf_cache *cache,
         ocf_cache_line_t line)
 {
-    evp_lru_rm_cline(cache, line);
+    ocf_lru_rm_cline(cache, line);
 }

 /**


@@ -203,7 +203,7 @@ unsigned current_case;
 struct ocf_lru_list list;

-struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *user_part,
+struct ocf_lru_list *__wrap_ocf_lru_get_list(struct ocf_user_part *user_part,
         uint32_t evp, bool clean)
 {
     unsigned i = 0;

@@ -232,7 +232,7 @@ struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *user_part,
 inline struct ocf_lru_list *__wrap_evp_get_cline_list(ocf_cache_t cache,
         ocf_cache_line_t cline)
 {
-    return __wrap_evp_lru_get_list(NULL, cline % OCF_NUM_EVICTION_LISTS, true);
+    return __wrap_ocf_lru_get_list(NULL, cline % OCF_NUM_EVICTION_LISTS, true);
 }


@@ -53,7 +53,7 @@ uint32_t __wrap_ocf_evict_calculate(ocf_cache_t cache,
     return min(tcache->evictable[user_part->part.id], to_evict);
 }

-uint32_t __wrap_evp_lru_req_clines(struct ocf_request *req,
+uint32_t __wrap_ocf_lru_req_clines(struct ocf_request *req,
         struct ocf_part *src_part, uint32_t cline_no)
 {
     struct test_cache *tcache = (struct test_cache *)req->cache;

@@ -193,10 +193,10 @@ uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
 #define _expect_evict_call(tcache, part_id, req_count, ret_count) \
 do { \
-    expect_value(__wrap_evp_lru_req_clines, src_part, &tcache.cache.user_parts[part_id].part); \
-    expect_value(__wrap_evp_lru_req_clines, cline_no, req_count); \
-    expect_function_call(__wrap_evp_lru_req_clines); \
-    will_return(__wrap_evp_lru_req_clines, ret_count); \
+    expect_value(__wrap_ocf_lru_req_clines, src_part, &tcache.cache.user_parts[part_id].part); \
+    expect_value(__wrap_ocf_lru_req_clines, cline_no, req_count); \
+    expect_function_call(__wrap_ocf_lru_req_clines); \
+    will_return(__wrap_ocf_lru_req_clines, ret_count); \
 } while (false);

 static void ocf_remap_do_test01(void **state)
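
One caveat for the two unit-test files: the __wrap_* fixtures rely on linker-level symbol wrapping (GNU ld's --wrap), so the production rename only stays mocked if the wrapper names and the corresponding wrap flags are renamed in lockstep; otherwise the tests would silently bind to the real implementation. A generic sketch of the mechanism, with an illustrative stub body:

#include <stdint.h>

/* Link the test binary with: -Wl,--wrap=ocf_lru_req_clines
 * GNU ld then redirects every call to ocf_lru_req_clines() into
 * __wrap_ocf_lru_req_clines(), while __real_ocf_lru_req_clines()
 * still names the original definition if one is linked in. */

struct ocf_request;
struct ocf_part;

uint32_t __real_ocf_lru_req_clines(struct ocf_request *req,
        struct ocf_part *src_part, uint32_t cline_no);

uint32_t __wrap_ocf_lru_req_clines(struct ocf_request *req,
        struct ocf_part *src_part, uint32_t cline_no)
{
    (void)req;
    (void)src_part;
    return cline_no; /* pretend every requested line was remapped */
}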