Parallel eviction
Eviction changes that allow evicting (remapping) cachelines while holding the hash bucket write lock instead of the global metadata write lock. Since eviction (replacement) is now tightly coupled with the request, each request uses an eviction size equal to the number of its unmapped cachelines.

Evicting without the global metadata write lock is possible because remapping is always performed while exclusively holding the cacheline (read or write) lock. So for a cacheline on the LRU list we acquire the cacheline lock, safely resolve its hash, and consequently write-lock the hash bucket. Since the cacheline lock is acquired under the hash bucket lock everywhere else (except for this new eviction implementation), we are certain that no one acquires the cacheline lock behind our back. Concurrent eviction threads are eliminated by holding the eviction list lock for the duration of the critical locking operations.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
parent 1411314678
commit 81fc7ab5c5
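The lock ordering described in the commit message is implemented by _lru_iter_evition_lock() in the lru.c diff below. As a minimal sketch of the idea (function names are taken from this commit; iterator state and callers are omitted, and this is an illustration rather than the verbatim implementation):

    /* Sketch: lock a tail-of-LRU cacheline for remap without taking the
     * global metadata write lock. All names below come from this commit. */
    static bool eviction_trylock_sketch(struct ocf_lru_iter *iter,
            ocf_cache_line_t cline,
            ocf_core_id_t *core_id, uint64_t *core_line)
    {
        /* 1. Try-lock the cacheline itself (read or write, matching the
         *    request), so its mapping cannot change under us. */
        if (!_lru_trylock_cacheline(iter, cline))
            return false;

        /* 2. Holding the cacheline lock, the hash can be resolved safely. */
        ocf_metadata_get_core_info(iter->cache, cline, core_id, core_line);

        /* 3. Write-lock the corresponding hash bucket (skipped when the
         *    request already holds it - see _evp_lru_evict_hash_locked()). */
        if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
            _lru_unlock_cacheline(iter, cline);
            return false;
        }

        /* 4. Remapping requires exclusive ownership - back off if any
         *    other user holds this cacheline. */
        if (!ocf_cache_line_is_locked_exclusively(iter->cache, cline)) {
            _lru_unlock_hash(iter, *core_id, *core_line);
            _lru_unlock_cacheline(iter, cline);
            return false;
        }

        return true;
    }

This inverts the usual cacheline-under-hash-bucket order, which is safe only because every other path takes the cacheline lock under the hash bucket lock, and the eviction list lock serializes concurrent evictors.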
@@ -497,6 +497,7 @@ static void _acp_flush(struct acp_context *acp)
        .cmpl_context = acp,
        .cmpl_fn = _acp_flush_end,
        .lock_cacheline = false,
        .lock_metadata = true,
        .do_sort = false,
        .io_queue = cache->cleaner.io_queue,
    };
@@ -816,6 +816,7 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl)
    fctx->attribs.cmpl_context = fctx;
    fctx->attribs.cmpl_fn = alru_clean_complete;
    fctx->attribs.lock_cacheline = true;
    fctx->attribs.lock_metadata = false;
    fctx->attribs.do_sort = true;
    fctx->attribs.io_queue = cache->cleaner.io_queue;
@@ -123,6 +123,15 @@ void ocf_engine_patch_req_info(struct ocf_cache *cache,

    req->info.insert_no++;

    if (req->part_id != ocf_metadata_get_partition_id(cache,
            entry->coll_idx)) {
        /*
         * Need to move this cache line into other partition
         */
        entry->re_part = true;
        req->info.re_part_no++;
    }

    if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
        req->info.seq_no++;
    if (idx + 1 < req->core_line_count &&
@@ -172,13 +181,11 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache,

        break;
    case LOOKUP_INSERTED:
    case LOOKUP_REMAPPED:
        req->info.insert_no++;
        break;
    case LOOKUP_MISS:
        break;
    case LOOKUP_REMAPPED:
        /* remapped cachelines are to be updated via
         * ocf_engine_patch_req_info()
         */
    default:
        ENV_BUG();
        break;
@@ -310,7 +317,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,
    ocf_cache_line_t cache_line;

    if (!ocf_freelist_get_cache_line(cache->freelist, &cache_line)) {
        req->info.mapping_error = 1;
        ocf_req_set_mapping_error(req);
        return;
    }

@@ -353,7 +360,6 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,

            ocf_metadata_end_collision_shared_access(cache,
                    entry->coll_idx);

            break;

        default:
@@ -412,7 +418,6 @@ static void ocf_engine_map(struct ocf_request *req)
                entry->coll_idx, entry->core_line);

        ocf_engine_update_req_info(cache, req, i);

    }

    if (!ocf_req_test_mapping_error(req)) {
@@ -449,7 +454,30 @@ static void _ocf_engine_clean_end(void *private_data, int error)
    }
}

static int _lock_clines(struct ocf_request *req)
static void ocf_engine_evict(struct ocf_request *req)
{
    int status;

    status = space_managment_evict_do(req);
    if (status == LOOKUP_MISS) {
        /* mark error */
        ocf_req_set_mapping_error(req);

        /* unlock cachelines locked during eviction */
        ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
                req);

        /* request cleaning */
        ocf_req_set_clean_eviction(req);

        /* unmap inserted and replaced cachelines */
        ocf_engine_map_hndl_error(req->cache, req);
    }

    return;
}

static int lock_clines(struct ocf_request *req)
{
    struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(req->cache);
    enum ocf_engine_lock_type lock_type =
@@ -465,77 +493,69 @@ static int _lock_clines(struct ocf_request *req)
    }
}

static inline int ocf_prepare_clines_evict(struct ocf_request *req)
{
    int lock_status = -OCF_ERR_NO_LOCK;
    bool part_has_space;

    ocf_engine_traverse(req);

    part_has_space = ocf_part_has_space(req);
    if (!part_has_space) {
        /* adding more cachelines to target partition would overflow
           it - requesting eviction from target partition only */
        ocf_req_set_part_evict(req);
    } else {
        /* evict from any partition */
        ocf_req_clear_part_evict(req);
    }

    ocf_engine_evict(req);

    if (!ocf_req_test_mapping_error(req)) {
        ocf_promotion_req_purge(req->cache->promotion_policy, req);
        lock_status = lock_clines(req);
        if (lock_status < 0)
            ocf_req_set_mapping_error(req);
    }

    return lock_status;
}

static inline int ocf_prepare_clines_miss(struct ocf_request *req)
{
    int lock_status = -OCF_ERR_NO_LOCK;
    struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;

    /* requests to disabled partitions go in pass-through */
    if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
        ocf_req_set_mapping_error(req);
        ocf_hb_req_prot_unlock_rd(req);
        return lock_status;
    }

    if (!ocf_part_has_space(req)) {
        ocf_hb_req_prot_unlock_rd(req);
        goto eviction;
    }

    /* Mapping must be performed holding (at least) hash-bucket write lock */
    ocf_hb_req_prot_lock_upgrade(req);
    if (!ocf_part_has_space(req))
        return ocf_prepare_clines_evict(req);

    ocf_engine_map(req);

    if (!ocf_req_test_mapping_error(req)) {
        lock_status = _lock_clines(req);
        lock_status = lock_clines(req);
        if (lock_status < 0) {
            /* Mapping succeeded, but we failed to acquire cacheline lock.
             * Don't try to evict, just return error to caller */
            ocf_req_set_mapping_error(req);
        }
        ocf_hb_req_prot_unlock_wr(req);
        return lock_status;
    }

    ocf_hb_req_prot_unlock_wr(req);

eviction:
    ocf_metadata_start_exclusive_access(metadata_lock);

    /* repeat traversation to pick up latest metadata status */
    ocf_engine_traverse(req);

    if (!ocf_part_has_space(req))
        ocf_req_set_part_evict(req);
    else
        ocf_req_clear_part_evict(req);

    if (space_managment_evict_do(req->cache, req,
            ocf_engine_unmapped_count(req)) == LOOKUP_MISS) {
        ocf_req_set_mapping_error(req);
        goto unlock;
    }

    ocf_engine_map(req);
    if (ocf_req_test_mapping_error(req))
        goto unlock;

    lock_status = _lock_clines(req);
    if (lock_status < 0)
        ocf_req_set_mapping_error(req);

unlock:
    ocf_metadata_end_exclusive_access(metadata_lock);

    return lock_status;
    return ocf_prepare_clines_evict(req);
}

int ocf_engine_prepare_clines(struct ocf_request *req)
{
    struct ocf_user_part *part = &req->cache->user_parts[req->part_id];
    bool mapped;
    bool promote = true;
    int lock = -OCF_ERR_NO_LOCK;
    int result;

    /* Calculate hashes for hash-bucket locking */
    ocf_req_hash(req);
@@ -550,7 +570,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req)

    mapped = ocf_engine_is_mapped(req);
    if (mapped) {
        lock = _lock_clines(req);
        lock = lock_clines(req);
        ocf_hb_req_prot_unlock_rd(req);
        return lock;
    }
@@ -564,7 +584,17 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
        return lock;
    }

    return ocf_prepare_clines_miss(req);
    /* Mapping must be performed holding (at least) hash-bucket write lock */
    ocf_hb_req_prot_lock_upgrade(req);
    result = ocf_prepare_clines_miss(req);
    ocf_hb_req_prot_unlock_wr(req);

    if (ocf_req_test_clean_eviction(req)) {
        ocf_eviction_flush_dirty(req->cache, part, req->io_queue,
                128);
    }

    return result;
}

static int _ocf_engine_clean_getter(struct ocf_cache *cache,
@@ -598,6 +628,7 @@ void ocf_engine_clean(struct ocf_request *req)
    /* Initialize attributes for cleaner */
    struct ocf_cleaner_attribs attribs = {
        .lock_cacheline = false,
        .lock_metadata = false,

        .cmpl_context = req,
        .cmpl_fn = _ocf_engine_clean_end,
@@ -6,6 +6,7 @@
#include "eviction.h"
#include "ops.h"
#include "../utils/utils_part.h"
#include "../engine/engine_common.h"

struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
    [ocf_eviction_lru] = {
@@ -16,12 +17,13 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
        .init_evp = evp_lru_init_evp,
        .dirty_cline = evp_lru_dirty_cline,
        .clean_cline = evp_lru_clean_cline,
        .flush_dirty = evp_lru_clean,
        .name = "lru",
    },
};

static uint32_t ocf_evict_calculate(ocf_cache_t cache,
        struct ocf_user_part *part, uint32_t to_evict, bool roundup)
        struct ocf_user_part *part, uint32_t to_evict)
{

    uint32_t curr_part_size = ocf_part_get_occupancy(part);
@@ -35,33 +37,34 @@ static uint32_t ocf_evict_calculate(ocf_cache_t cache,
        return 0;
    }

    if (roundup && to_evict < OCF_TO_EVICTION_MIN)
        to_evict = OCF_TO_EVICTION_MIN;

    if (to_evict > (curr_part_size - min_part_size))
        to_evict = curr_part_size - min_part_size;

    return to_evict;
}

static inline uint32_t ocf_evict_part_do(ocf_cache_t cache,
        ocf_queue_t io_queue, const uint32_t evict_cline_no,
static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
        struct ocf_user_part *target_part)
{
    uint32_t unmapped = ocf_engine_unmapped_count(req);
    uint32_t to_evict = 0;

    if (!evp_lru_can_evict(cache))
    if (!evp_lru_can_evict(req->cache))
        return 0;

    to_evict = ocf_evict_calculate(cache, target_part, evict_cline_no,
            false);
    to_evict = ocf_evict_calculate(req->cache, target_part, unmapped);

    return ocf_eviction_need_space(cache, io_queue,
            target_part, to_evict);
    if (to_evict < unmapped) {
        /* cannot evict enough cachelines to map request,
           so no purpose in evicting anything */
        return 0;
    }

    return ocf_eviction_need_space(req->cache, req, target_part, to_evict);
}

static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
        ocf_queue_t io_queue, uint32_t evict_cline_no,
        struct ocf_request *req, uint32_t evict_cline_no,
        bool overflown_only, int16_t max_priority)
{
    uint32_t to_evict = 0, evicted = 0;
@@ -98,7 +101,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
        }

        to_evict = ocf_evict_calculate(cache, part,
                evict_cline_no - evicted, true);
                evict_cline_no - evicted);
        if (to_evict == 0) {
            /* No cache lines to evict for this partition */
            continue;
@@ -107,8 +110,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
        if (overflown_only)
            to_evict = OCF_MIN(to_evict, overflow_size);

        evicted += ocf_eviction_need_space(cache, io_queue,
                part, to_evict);
        evicted += ocf_eviction_need_space(cache, req, part, to_evict);

        if (evicted >= evict_cline_no) {
            /* Evicted requested number of cache line, stop
@@ -122,10 +124,12 @@ out:
    return evicted;
}

static inline uint32_t ocf_evict_do(ocf_cache_t cache,
        ocf_queue_t io_queue, uint32_t evict_cline_no,
        struct ocf_user_part *target_part)
static inline uint32_t ocf_evict_do(struct ocf_request *req)
{
    ocf_cache_t cache = req->cache;
    ocf_part_id_t target_part_id = req->part_id;
    struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
    uint32_t evict_cline_no = ocf_engine_unmapped_count(req);
    uint32_t evicted;

    /* First attempt to evict overflown partitions in order to
@@ -134,7 +138,7 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
     * free its cachelines regardless of destination partition
     * priority. */

    evicted = ocf_evict_partitions(cache, io_queue, evict_cline_no,
    evicted = ocf_evict_partitions(cache, req, evict_cline_no,
            true, OCF_IO_CLASS_PRIO_PINNED);
    if (evicted >= evict_cline_no)
        return evicted;
@@ -142,35 +146,26 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
     * partitions with priority <= target partition and attempt
     * to evict from those. */
    evict_cline_no -= evicted;
    evicted += ocf_evict_partitions(cache, io_queue, evict_cline_no,
    evicted += ocf_evict_partitions(cache, req, evict_cline_no,
            false, target_part->config->priority);

    return evicted;
}

int space_managment_evict_do(struct ocf_cache *cache,
        struct ocf_request *req, uint32_t evict_cline_no)
int space_managment_evict_do(struct ocf_request *req)
{
    uint32_t needed = ocf_engine_unmapped_count(req);
    uint32_t evicted;
    uint32_t free;
    struct ocf_user_part *req_part = &cache->user_parts[req->part_id];
    struct ocf_user_part *req_part = &req->cache->user_parts[req->part_id];

    if (ocf_req_part_evict(req)) {
        evicted = ocf_evict_part_do(cache, req->io_queue, evict_cline_no,
                req_part);
        evicted = ocf_evict_part_do(req, req_part);
    } else {
        free = ocf_freelist_num_free(cache->freelist);
        if (evict_cline_no <= free)
            return LOOKUP_INSERTED;

        evict_cline_no -= free;

        evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
        evicted = ocf_evict_do(req);
    }

    if (evict_cline_no <= evicted)
    if (needed <= evicted)
        return LOOKUP_INSERTED;

    ocf_req_set_mapping_error(req);
    return LOOKUP_MISS;
}
@@ -10,7 +10,6 @@
#include "lru.h"
#include "lru_structs.h"

#define OCF_TO_EVICTION_MIN 128UL
#define OCF_PENDING_EVICTION_LIMIT 512UL

#define OCF_NUM_EVICTION_LISTS 32
@@ -40,11 +39,9 @@ struct eviction_policy_ops {
    void (*rm_cline)(ocf_cache_t cache,
            ocf_cache_line_t cline);
    bool (*can_evict)(ocf_cache_t cache);
    uint32_t (*req_clines)(ocf_cache_t cache,
            ocf_queue_t io_queue, struct ocf_user_part *part,
    uint32_t (*req_clines)(struct ocf_request *req, struct ocf_user_part *part,
            uint32_t cline_no);
    void (*hot_cline)(ocf_cache_t cache,
            ocf_cache_line_t cline);
    void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
    void (*init_evp)(ocf_cache_t cache, struct ocf_user_part *part);
    void (*dirty_cline)(ocf_cache_t cache,
            struct ocf_user_part *part,
@@ -52,6 +49,8 @@ struct eviction_policy_ops {
    void (*clean_cline)(ocf_cache_t cache,
            struct ocf_user_part *part,
            uint32_t cline_no);
    void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *part,
            ocf_queue_t io_queue, uint32_t count);
    const char *name;
};

@@ -64,8 +63,7 @@ extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
 * 'LOOKUP_HIT' if evicted enough cachelines to serve @req
 * 'LOOKUP_MISS' otherwise
 */
int space_managment_evict_do(ocf_cache_t cache,
        struct ocf_request *req, uint32_t evict_cline_no);
int space_managment_evict_do(struct ocf_request *req);

int space_management_free(ocf_cache_t cache, uint32_t count);
@@ -12,6 +12,7 @@
#include "../mngt/ocf_mngt_common.h"
#include "../engine/engine_zero.h"
#include "../ocf_request.h"
#include "../engine/engine_common.h"

#define OCF_EVICTION_MAX_SCAN 1024

@@ -261,7 +262,8 @@ void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
}

static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
        struct ocf_user_part *part, uint32_t start_evp, bool clean)
        struct ocf_user_part *part, uint32_t start_evp, bool clean,
        bool cl_lock_write, _lru_hash_locked_pfn hash_locked, void *context)
{
    uint32_t i;

@@ -275,11 +277,47 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
    iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) % OCF_NUM_EVICTION_LISTS;
    iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
    iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
    iter->clean = clean;
    iter->cl_lock_write = cl_lock_write;
    iter->hash_locked = hash_locked;
    iter->context = context;

    for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
        iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
}

static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
        ocf_cache_t cache, struct ocf_user_part *part,
        uint32_t start_evp)
{
    /* Lock cachelines for read, non-exclusive access */
    lru_iter_init(iter, cache, part, start_evp, false, false,
            NULL, NULL);
}

static bool _evp_lru_evict_hash_locked(void *context,
        ocf_core_id_t core_id, uint64_t core_line)
{
    struct ocf_request *req = context;

    return ocf_req_hash_in_range(req, core_id, core_line);
}

static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
        ocf_cache_t cache, struct ocf_user_part *part,
        uint32_t start_evp, bool cl_lock_write,
        struct ocf_request *req)
{
    /* Lock hash buckets for write, cachelines according to user request,
     * however exclusive cacheline access is needed even in case of read
     * access. _evp_lru_evict_hash_locked tells whether given hash bucket
     * is already locked as part of request hash locking (to avoid attempt
     * to acquire the same hash bucket lock twice) */
    lru_iter_init(iter, cache, part, start_evp, true, cl_lock_write,
            _evp_lru_evict_hash_locked, req);
}

static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
{
    unsigned increment;
@@ -292,6 +330,8 @@ static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
    return iter->evp;
}

static inline bool _lru_evp_is_empty(struct ocf_lru_iter *iter)
{
    return !(iter->next_avail_evp & (1ULL << (OCF_NUM_EVICTION_LISTS - 1)));
@@ -308,143 +348,253 @@ static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
    return iter->num_avail_evps == 0;
}

/* get next non-empty lru list if available */
static inline ocf_cache_line_t lru_iter_next(struct ocf_lru_iter *iter)
static bool inline _lru_trylock_cacheline(struct ocf_lru_iter *iter,
        ocf_cache_line_t cline)
{
    struct lru_eviction_policy_meta *node;
    uint32_t curr_evp;
    ocf_cache_line_t ret;
    struct ocf_cache_line_concurrency *c =
            ocf_cache_line_concurrency(iter->cache);

    return iter->cl_lock_write ?
            ocf_cache_line_try_lock_wr(c, cline) :
            ocf_cache_line_try_lock_rd(c, cline);
}

static void inline _lru_unlock_cacheline(struct ocf_lru_iter *iter,
        ocf_cache_line_t cline)
{
    struct ocf_cache_line_concurrency *c =
            ocf_cache_line_concurrency(iter->cache);

    if (iter->cl_lock_write)
        ocf_cache_line_unlock_wr(c, cline);
    else
        ocf_cache_line_unlock_rd(c, cline);
}

static bool inline _lru_trylock_hash(struct ocf_lru_iter *iter,
        ocf_core_id_t core_id, uint64_t core_line)
{
    if (iter->hash_locked != NULL && iter->hash_locked(
            iter->context,
            core_id, core_line)) {
        return true;
    }

    return ocf_hb_cline_naked_trylock_wr(
            &iter->cache->metadata.lock,
            core_id, core_line);
}

static void inline _lru_unlock_hash(struct ocf_lru_iter *iter,
        ocf_core_id_t core_id, uint64_t core_line)
{
    if (iter->hash_locked != NULL && iter->hash_locked(
            iter->context,
            core_id, core_line)) {
        return;
    }

    ocf_hb_cline_naked_unlock_wr(
            &iter->cache->metadata.lock,
            core_id, core_line);
}

static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
        ocf_cache_line_t cache_line,
        ocf_core_id_t *core_id, uint64_t *core_line)

{
    if (!_lru_trylock_cacheline(iter, cache_line))
        return false;

    ocf_metadata_get_core_info(iter->cache, cache_line,
            core_id, core_line);

    if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
        _lru_unlock_cacheline(iter, cache_line);
        return false;
    }

    if (!ocf_cache_line_is_locked_exclusively(iter->cache,
            cache_line)) {
        _lru_unlock_hash(iter, *core_id, *core_line);
        _lru_unlock_cacheline(iter, cache_line);
        return false;
    }

    return true;
}

/* Get next clean cacheline from tail of lru lists. Caller must not hold any
 * eviction list lock. Returned cacheline is read or write locked, depending on
 * iter->write_lock. Returned cacheline has corresponding metadata hash bucket
 * locked. Cacheline is moved to the head of lru list before being returned */
static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
        ocf_core_id_t *core_id, uint64_t *core_line)
{
    uint32_t curr_evp;
    ocf_cache_line_t cline;
    ocf_cache_t cache = iter->cache;
    struct ocf_user_part *part = iter->part;
    struct ocf_lru_list *list;

    do {
        curr_evp = _lru_next_evp(iter);

        while (iter->curr_cline[curr_evp] == end_marker) {
            if (!_lru_evp_is_empty(iter)) {
        ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);

        list = evp_lru_get_list(part, curr_evp, iter->clean);

        cline = list->tail;
        while (cline != end_marker && !_lru_iter_evition_lock(iter,
                cline, core_id, core_line)) {
            cline = ocf_metadata_get_eviction_policy(
                    iter->cache, cline)->lru.prev;
        }

        if (cline != end_marker) {
            remove_lru_list(cache, list, cline);
            add_lru_head(cache, list, cline);
            balance_lru_list(cache, list);
        }

        ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, curr_evp);

        if (cline == end_marker && !_lru_evp_is_empty(iter)) {
            /* mark list as empty */
            _lru_evp_set_empty(iter);
        }
        if (_lru_evp_all_empty(iter)) {
            /* all lists empty */
            return end_marker;
    } while (cline == end_marker && !_lru_evp_all_empty(iter));

    return cline;
}
/* Get next dirty cacheline from tail of lru lists. Caller must hold all
 * eviction list locks during entire iteration proces. Returned cacheline
 * is read or write locked, depending on iter->write_lock */
static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
{
    uint32_t curr_evp;
    ocf_cache_line_t cline;

    do {
        curr_evp = _lru_next_evp(iter);
        cline = iter->curr_cline[curr_evp];

        while (cline != end_marker && !_lru_trylock_cacheline(iter,
                cline)) {
            cline = ocf_metadata_get_eviction_policy(
                    iter->cache, cline)->lru.prev;
        }
        if (cline != end_marker) {
            iter->curr_cline[curr_evp] =
                ocf_metadata_get_eviction_policy(
                        iter->cache , cline)->lru.prev;
        }

        node = &ocf_metadata_get_eviction_policy(iter->cache,
                iter->curr_cline[curr_evp])->lru;
        ret = iter->curr_cline[curr_evp];
        iter->curr_cline[curr_evp] = node->prev;
        if (cline == end_marker && !_lru_evp_is_empty(iter)) {
            /* mark list as empty */
            _lru_evp_set_empty(iter);
        }
    } while (cline == end_marker && !_lru_evp_all_empty(iter));

    return ret;
    return cline;
}

static void evp_lru_clean_end(void *private_data, int error)
{
    struct ocf_lru_iter *iter = private_data;
    struct ocf_part_cleaning_ctx *ctx = private_data;
    unsigned i;

    ocf_refcnt_dec(&iter->part->cleaning);
    for (i = 0; i < OCF_EVICTION_CLEAN_SIZE; i++) {
        if (ctx->cline[i] != end_marker)
            ocf_cache_line_unlock_rd(ctx->cache->device->concurrency
                    .cache_line, ctx->cline[i]);
    }

static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
        uint32_t item, ocf_cache_line_t *line)
    ocf_refcnt_dec(&ctx->counter);
}

static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
        uint32_t idx, ocf_cache_line_t *line)
{
    struct ocf_lru_iter *iter = getter_context;
    ocf_cache_line_t cline;
    struct ocf_part_cleaning_ctx *ctx = getter_context;

    while (true) {
        cline = lru_iter_next(iter);
    if (ctx->cline[idx] == end_marker)
        return -1;

        if (cline == end_marker)
            break;
    ENV_BUG_ON(!metadata_test_dirty(ctx->cache, ctx->cline[idx]));
    *line = ctx->cline[idx];

        /* Prevent evicting already locked items */
        if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
                cline)) {
            continue;
        }

        ENV_BUG_ON(!metadata_test_dirty(cache, cline));

        *line = cline;
        return 0;
    }

    return -1;
}

static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
        struct ocf_user_part *part, uint32_t count)
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
        ocf_queue_t io_queue, uint32_t count)
{
    struct ocf_refcnt *counter = &part->cleaning;
    struct ocf_part_cleaning_ctx *ctx = &part->cleaning;
    struct ocf_cleaner_attribs attribs = {
        .lock_cacheline = true,
        .lock_cacheline = false,
        .lock_metadata = true,
        .do_sort = true,

        .cmpl_context = &part->eviction_clean_iter,
        .cmpl_context = &part->cleaning,
        .cmpl_fn = evp_lru_clean_end,

        .getter = evp_lru_clean_getter,
        .getter_context = &part->eviction_clean_iter,
        .getter = evp_lru_clean_get,
        .getter_context = &part->cleaning,

        .count = count > 32 ? 32 : count,
        .count = min(count, OCF_EVICTION_CLEAN_SIZE),

        .io_queue = io_queue
    };
    ocf_cache_line_t *cline = part->cleaning.cline;
    struct ocf_lru_iter iter;
    unsigned evp;
    int cnt;
    unsigned i;
    unsigned lock_idx;

    if (ocf_mngt_cache_is_locked(cache))
        return;

    cnt = ocf_refcnt_inc(counter);
    cnt = ocf_refcnt_inc(&ctx->counter);
    if (!cnt) {
        /* cleaner disabled by management operation */
        return;
    }

    if (cnt > 1) {
        /* cleaning already running for this partition */
        ocf_refcnt_dec(counter);
        ocf_refcnt_dec(&ctx->counter);
        return;
    }

    lru_iter_init(&part->eviction_clean_iter, cache, part,
            part->eviction_clean_iter.evp, false);
    part->cleaning.cache = cache;
    evp = io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;

    lock_idx = ocf_metadata_concurrency_next_idx(io_queue);
    ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);

    OCF_METADATA_EVICTION_WR_LOCK_ALL();

    lru_iter_cleaning_init(&iter, cache, part, evp);
    i = 0;
    while (i < OCF_EVICTION_CLEAN_SIZE) {
        cline[i] = lru_iter_cleaning_next(&iter);
        if (cline[i] == end_marker)
            break;
        i++;
    }
    while (i < OCF_EVICTION_CLEAN_SIZE)
        cline[i++] = end_marker;

    OCF_METADATA_EVICTION_WR_UNLOCK_ALL();

    ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);

    ocf_cleaner_fire(cache, &attribs);
}

static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
{
    env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
}

static void evp_lru_zero_line(ocf_cache_t cache, ocf_queue_t io_queue,
        ocf_cache_line_t line)
{
    struct ocf_request *req;
    ocf_core_id_t id;
    uint64_t addr, core_line;

    ocf_metadata_get_core_info(cache, line, &id, &core_line);
    addr = core_line * ocf_line_size(cache);

    req = ocf_req_new(io_queue, &cache->core[id], addr,
            ocf_line_size(cache), OCF_WRITE);
    if (!req)
        return;

    if (req->d2c) {
        /* cache device is being detached */
        ocf_req_put(req);
        return;
    }

    req->info.internal = true;
    req->complete = evp_lru_zero_line_complete;

    env_atomic_inc(&cache->pending_eviction_clines);

    ocf_engine_zero_line(req);
}

bool evp_lru_can_evict(ocf_cache_t cache)
{
    if (env_atomic_read(&cache->pending_eviction_clines) >=
@@ -455,73 +605,86 @@ bool evp_lru_can_evict(ocf_cache_t cache)
    return true;
}

static bool dirty_pages_present(ocf_cache_t cache, struct ocf_user_part *part)
{
    uint32_t i;

    for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
        if (evp_lru_get_list(part, i, false)->tail != end_marker)
            return true;
    }

    return false;
}

/* the caller must hold the metadata lock */
uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
uint32_t evp_lru_req_clines(struct ocf_request *req,
        struct ocf_user_part *part, uint32_t cline_no)
{
    struct ocf_lru_iter iter;
    uint32_t i;
    ocf_cache_line_t cline;
    uint64_t core_line;
    ocf_core_id_t core_id;
    ocf_cache_t cache = req->cache;
    bool cl_write_lock =
        (req->engine_cbs->get_lock_type(req) == ocf_engine_lock_write);
    unsigned evp;
    unsigned req_idx = 0;

    if (cline_no == 0)
        return 0;

    lru_iter_init(&iter, cache, part, part->next_eviction_list, true);
    if (unlikely(ocf_engine_unmapped_count(req) < cline_no)) {
        ocf_cache_log(req->cache, log_err, "Not enough space in"
                "request: unmapped %u, requested %u",
                ocf_engine_unmapped_count(req),
                cline_no);
        ENV_BUG();
    }

    evp = req->io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;

    lru_iter_eviction_init(&iter, cache, part, evp, cl_write_lock, req);

    i = 0;
    while (i < cline_no) {
        cline = lru_iter_next(&iter);
        if (!evp_lru_can_evict(cache))
            break;

        cline = lru_iter_eviction_next(&iter, &core_id, &core_line);

        if (cline == end_marker)
            break;

        if (!evp_lru_can_evict(cache))
            break;

        /* Prevent evicting already locked items */
        if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
                cline)) {
            continue;
        }

        ENV_BUG_ON(metadata_test_dirty(cache, cline));

        if (ocf_volume_is_atomic(&cache->device->volume)) {
            /* atomic cache, we have to trim cache lines before
             * eviction
             */
            evp_lru_zero_line(cache, io_queue, cline);
            continue;
        /* TODO: if atomic mode is restored, need to zero metadata
         * before proceeding with cleaning (see version <= 20.12) */

        /* find next unmapped cacheline in request */
        while (req_idx + 1 < req->core_line_count &&
                req->map[req_idx].status != LOOKUP_MISS) {
            req_idx++;
        }

        ENV_BUG_ON(req->map[req_idx].status != LOOKUP_MISS);

        ocf_metadata_start_collision_shared_access(
                cache, cline);
        set_cache_line_invalid_no_flush(cache, 0,
                ocf_line_end_sector(cache),
                cline);
        metadata_clear_valid_sec(cache, cline, 0, ocf_line_end_sector(cache));
        ocf_metadata_remove_from_collision(cache, cline, part->id);
        ocf_metadata_end_collision_shared_access(
                cache, cline);

        _lru_unlock_hash(&iter, core_id, core_line);

        env_atomic_dec(&req->core->runtime_meta->cached_clines);
        env_atomic_dec(&req->core->runtime_meta->
                part_counters[part->id].cached_clines);

        ocf_map_cache_line(req, req_idx, cline);

        req->map[req_idx].status = LOOKUP_REMAPPED;
        ocf_engine_patch_req_info(cache, req, req_idx);

        if (cl_write_lock)
            req->map[req_idx].wr_locked = true;
        else
            req->map[req_idx].rd_locked = true;

        ++req_idx;
        ++i;
    }

    part->next_eviction_list = iter.evp;

    if (i < cline_no && dirty_pages_present(cache, part))
        evp_lru_clean(cache, io_queue, part, cline_no - i);

    /* Return number of clines that were really evicted */
    return i;
}
@@ -9,11 +9,12 @@
#include "lru_structs.h"

struct ocf_user_part;
struct ocf_request;

void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_cache *cache, ocf_queue_t io_queue,
uint32_t evp_lru_req_clines(struct ocf_request *req,
        struct ocf_user_part *part, uint32_t cline_no);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_user_part *part);
@@ -21,5 +22,6 @@ void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_user_part *part,
        uint32_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_user_part *part,
        uint32_t cline);

void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
        ocf_queue_t io_queue, uint32_t count);
#endif
@@ -52,8 +52,8 @@ static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
    return true;
}

static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
        ocf_queue_t io_queue, struct ocf_user_part *part,
static inline uint32_t ocf_eviction_need_space(ocf_cache_t cache,
        struct ocf_request *req, struct ocf_user_part *part,
        uint32_t clines)
{
    uint8_t type;
@@ -64,11 +64,7 @@ static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
    ENV_BUG_ON(type >= ocf_eviction_max);

    if (likely(evict_policy_ops[type].req_clines)) {
        /*
         * This is called under METADATA WR lock. No need to get
         * eviction lock.
         */
        result = evict_policy_ops[type].req_clines(cache, io_queue,
        result = evict_policy_ops[type].req_clines(req,
                part, clines);
    }

@@ -101,4 +97,18 @@ static inline void ocf_eviction_initialize(struct ocf_cache *cache,
    }
}

static inline void ocf_eviction_flush_dirty(ocf_cache_t cache,
        struct ocf_user_part *part, ocf_queue_t io_queue,
        uint32_t count)
{
    uint8_t type = cache->conf_meta->eviction_policy_type;

    ENV_BUG_ON(type >= ocf_eviction_max);

    if (likely(evict_policy_ops[type].flush_dirty)) {
        evict_policy_ops[type].flush_dirty(cache, part, io_queue,
                count);
    }
}

#endif /* LAYER_EVICTION_POLICY_OPS_H_ */
@@ -33,9 +33,13 @@ struct ocf_user_part_runtime {
    struct cleaning_policy cleaning;
};

typedef bool ( *_lru_hash_locked_pfn)(void *context,
        ocf_core_id_t core_id, uint64_t core_line);

/* Iterator state, visiting all eviction lists within a partition
   in round robin order */
struct ocf_lru_iter {
struct ocf_lru_iter
{
    /* cache object */
    ocf_cache_t cache;
    /* target partition */
@@ -49,16 +53,30 @@ struct ocf_lru_iter {
    uint32_t num_avail_evps;
    /* current eviction list index */
    uint32_t evp;
    /* callback to determine whether given hash bucket is already
     * locked by the caller */
    _lru_hash_locked_pfn hash_locked;
    /* hash_locked private data */
    void *context;
    /* 1 if iterating over clean lists, 0 if over dirty */
    bool clean : 1;
    /* 1 if cacheline is to be locked for write, 0 if for read*/
    bool cl_lock_write : 1;
};

#define OCF_EVICTION_CLEAN_SIZE 32U

struct ocf_part_cleaning_ctx {
    ocf_cache_t cache;
    struct ocf_refcnt counter;
    ocf_cache_line_t cline[OCF_EVICTION_CLEAN_SIZE];
};

struct ocf_user_part {
    struct ocf_user_part_config *config;
    struct ocf_user_part_runtime *runtime;
    struct ocf_refcnt cleaning;
    ocf_part_id_t id;

    struct ocf_lru_iter eviction_clean_iter;
    uint32_t next_eviction_list;
    struct ocf_part_cleaning_ctx cleaning;
    struct ocf_lst_entry lst_valid;
};
@@ -169,7 +169,7 @@ static void __init_partitions(ocf_cache_t cache)

    /* Add other partition to the cache and make it as dummy */
    for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
        ocf_refcnt_freeze(&cache->user_parts[i_part].cleaning);
        ocf_refcnt_freeze(&cache->user_parts[i_part].cleaning.counter);

        if (i_part == PARTITION_DEFAULT)
            continue;
@@ -439,6 +439,7 @@ static void _ocf_mngt_flush_container(

    fc->req = req;
    fc->attribs.lock_cacheline = true;
    fc->attribs.lock_metadata = false;
    fc->attribs.cmpl_context = fc;
    fc->attribs.cmpl_fn = _ocf_mngt_flush_portion_end;
    fc->attribs.io_queue = cache->mngt_queue;
@@ -21,6 +21,9 @@ struct ocf_queue {
    /* per-queue free running global metadata lock index */
    unsigned lock_idx;

    /* per-queue free running eviction list index */
    unsigned eviction_idx;

    /* Tracing reference counter */
    env_atomic64 trace_ref_cntr;
@@ -33,6 +33,9 @@ struct ocf_req_info {
    uint32_t mapping_error : 1;
    /*!< Core lines in this request were not mapped into cache */

    uint32_t clean_eviction : 1;
    /*!< Eviction failed, need to request cleaning */

    uint32_t core_error : 1;
    /*!< Error occured during I/O on core device */

@@ -104,6 +107,7 @@ struct ocf_request {
    /*!< OCF IO associated with request */

    const struct ocf_engine_callbacks *engine_cbs;
    /*!< Engine owning the request */

    env_atomic ref_count;
    /*!< Reference usage count, once OCF request reaches zero it
@@ -396,6 +400,16 @@ static inline bool ocf_req_test_mapping_error(struct ocf_request *req)
    return req->info.mapping_error;
}

static inline void ocf_req_set_clean_eviction(struct ocf_request *req)
{
    req->info.clean_eviction = true;
}

static inline bool ocf_req_test_clean_eviction(struct ocf_request *req)
{
    return req->info.clean_eviction;
}

/**
 * @brief Return OCF request reference count
 *
@@ -9,8 +9,7 @@
#include "ocf/ocf_debug.h"
#include "utils/utils_cache_line.h"

#define SEQ_CUTOFF_FULL_MARGIN \
        (OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
#define SEQ_CUTOFF_FULL_MARGIN OCF_PENDING_EVICTION_LIMIT

static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache,
        struct ocf_request *req)
@@ -12,6 +12,7 @@
#include "utils_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"
#include "../ocf_queue_priv.h"

#define OCF_UTILS_CLEANER_DEBUG 0

@@ -847,6 +848,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
    int err;
    ocf_core_id_t core_id;
    uint64_t core_sector;
    bool skip;

    /* Allocate master request */
    master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
@@ -869,7 +871,6 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
    env_atomic_inc(&master->master_remaining);

    for (i = 0; i < count; i++) {

        /* when request hasn't yet been allocated or is just issued */
        if (!req) {
            if (max > count - i) {
@@ -900,12 +901,23 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
            continue;
        }

        /* Get mapping info */
        ocf_metadata_get_core_info(cache, cache_line, &core_id,
                &core_sector);

        if (attribs->lock_metadata) {
            ocf_hb_cline_prot_lock_rd(&cache->metadata.lock,
                    req->lock_idx, core_id, core_sector);
        }

        skip = false;

        /* when line already cleaned - rare condition under heavy
         * I/O workload.
         */
        if (!metadata_test_dirty(cache, cache_line)) {
            OCF_DEBUG_MSG(cache, "Not dirty");
            continue;
            skip = true;
        }

        if (!metadata_test_valid_any(cache, cache_line)) {
@@ -916,12 +928,16 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
             * Cache line (sector) cannot be dirty and not valid
             */
            ENV_BUG();
            continue;
            skip = true;
        }

        /* Get mapping info */
        ocf_metadata_get_core_info(cache, cache_line, &core_id,
                &core_sector);
        if (attribs->lock_metadata) {
            ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock,
                    req->lock_idx, core_id, core_sector);
        }

        if (skip)
            continue;

        if (unlikely(!cache->core[core_id].opened)) {
            OCF_DEBUG_MSG(cache, "Core object inactive");
@@ -945,6 +961,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
            i_out = 0;
            req = NULL;
        }

    }

    if (req) {
@@ -1036,7 +1053,7 @@ void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
    ocf_part_id_t part_id;

    for_each_part(cache, curr_part, part_id)
        ocf_refcnt_freeze(&curr_part->cleaning);
        ocf_refcnt_freeze(&curr_part->cleaning.counter);
}

void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
@@ -1045,7 +1062,7 @@ void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
    ocf_part_id_t part_id;

    for_each_part(cache, curr_part, part_id)
        ocf_refcnt_unfreeze(&curr_part->cleaning);
        ocf_refcnt_unfreeze(&curr_part->cleaning.counter);
}

static void ocf_cleaner_refcnt_register_zero_cb_finish(void *priv)
@@ -1069,7 +1086,7 @@ void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,

    for_each_part(cache, curr_part, part_id) {
        env_atomic_inc(&ctx->waiting);
        ocf_refcnt_register_zero_cb(&curr_part->cleaning,
        ocf_refcnt_register_zero_cb(&curr_part->cleaning.counter,
                ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
    }
@@ -27,6 +27,7 @@ typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache,
 */
struct ocf_cleaner_attribs {
    uint8_t lock_cacheline : 1; /*!< Cleaner to lock cachelines on its own */
    uint8_t lock_metadata : 1; /*!< Cleaner to lock metadata on its own */

    uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */

@@ -40,7 +41,7 @@ struct ocf_cleaner_attribs {
    void *getter_context;
    /*!< Context for getting cache lines */
    uint32_t getter_item;
    /*!< Additional variable that can be used by cleaner caller
    /*!< Additional variable that can be used by cleaner call
     * to iterate over items
     */
@@ -103,7 +103,12 @@ void ocf_part_move(struct ocf_request *req)
            continue;
        }

        if (entry->status != LOOKUP_HIT) {
        /* Moving cachelines to another partition is needed only
         * for those already mapped before this request, which
         * indicates either HIT or REMAPPED.
         */
        if (entry->status != LOOKUP_HIT &&
                entry->status != LOOKUP_REMAPPED) {
            /* No HIT */
            continue;
        }
@@ -2,8 +2,8 @@
 * <tested_file_path>src/engine/engine_common.c</tested_file_path>
 * <tested_function>ocf_prepare_clines_miss</tested_function>
 * <functions_to_leave>
 * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
 * ONE FUNCTION PER LINE
 * ocf_prepare_clines_evict
 * ocf_engine_evict
 * </functions_to_leave>
 */

@@ -36,6 +36,11 @@

#include "engine/engine_common.c/prepare_clines_miss_generated_wraps.c"

struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t cache)
{
    return NULL;
}

void __wrap_ocf_req_hash_lock_upgrade(struct ocf_request *req)
{
}
@@ -66,13 +71,6 @@ void __wrap_ocf_metadata_end_exclusive_access(
{
}

int __wrap_space_managment_evict_do(struct ocf_cache *cache,
        struct ocf_request *req, uint32_t evict_cline_no)
{
    function_called();
    return mock();
}

bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part)
{
    return mock();
@@ -93,9 +91,21 @@ void __wrap_ocf_req_set_mapping_error(struct ocf_request *req)
    function_called();
}

int __wrap_space_managment_evict_do(struct ocf_request *req)
{
    function_called();
    return mock();
}

uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
{
    return 100;
}

static void ocf_prepare_clines_miss_test01(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };
    print_test_description("Target part is disabled and empty\n");
    will_return(__wrap_ocf_part_is_enabled, false);
    expect_function_call(__wrap_ocf_req_set_mapping_error);
@@ -104,7 +114,9 @@ static void ocf_prepare_clines_miss_test01(void **state)

static void ocf_prepare_clines_miss_test02(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };

    print_test_description("Target part is disabled but has cachelines assigned.\n");
    print_test_description("\tMark mapping error\n");

@@ -116,20 +128,18 @@ static void ocf_prepare_clines_miss_test02(void **state)

static void ocf_prepare_clines_miss_test03(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };

    print_test_description("Target part is enabled but doesn't have enough space.\n");
    print_test_description("\tEviction is ok and cachelines lock is acquired.\n");

    will_return(__wrap_ocf_part_is_enabled, true);
    will_return(__wrap_ocf_part_has_space, false);

    will_return(__wrap_ocf_part_has_space, false);
    will_return_always(__wrap_ocf_part_has_space, false);
    expect_function_call(__wrap_space_managment_evict_do);
    will_return(__wrap_space_managment_evict_do, LOOKUP_MAPPED);
    will_return_always(__wrap_space_managment_evict_do, LOOKUP_INSERTED);

    expect_function_call(__wrap_ocf_engine_map);

    will_return(__wrap_ocf_req_test_mapping_error, false);
    will_return_always(__wrap_ocf_req_test_mapping_error, false);

    will_return(__wrap_lock_clines, 0);
    expect_function_call(__wrap_lock_clines);
@@ -139,57 +149,38 @@ static void ocf_prepare_clines_miss_test03(void **state)

static void ocf_prepare_clines_miss_test04(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };

    print_test_description("Target part is enabled but doesn't have enough space.\n");
    print_test_description("\tEviction failed\n");

    will_return(__wrap_ocf_part_is_enabled, true);
    will_return(__wrap_ocf_part_has_space, false);
    will_return_always(__wrap_ocf_part_has_space, false);

    will_return(__wrap_ocf_part_has_space, false);
    expect_function_call(__wrap_space_managment_evict_do);
    will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
    expect_function_call(__wrap_ocf_req_set_mapping_error);

    assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}

static void ocf_prepare_clines_miss_test05(void **state)
{
    struct ocf_request req = {};
    print_test_description("Target part is enabled but doesn't have enough space.\n");
    print_test_description("Eviction is ok, but mapping failed.\n");

    will_return(__wrap_ocf_part_has_space, false);
    will_return(__wrap_ocf_part_has_space, false);

    expect_function_call(__wrap_space_managment_evict_do);
    will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

    will_return(__wrap_ocf_part_is_enabled, true);

    expect_function_call(__wrap_ocf_engine_map);
    will_return(__wrap_ocf_req_test_mapping_error, true);
    will_return_always(__wrap_ocf_req_test_mapping_error, true);

    assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}

static void ocf_prepare_clines_miss_test06(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };

    print_test_description("Target part is enabled but doesn't have enough space.\n");
    print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n");

    will_return(__wrap_ocf_part_has_space, false);
    will_return(__wrap_ocf_part_has_space, false);
    will_return_always(__wrap_ocf_part_has_space, false);

    expect_function_call(__wrap_space_managment_evict_do);
    will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

    will_return(__wrap_ocf_part_is_enabled, true);

    expect_function_call(__wrap_ocf_engine_map);
    will_return(__wrap_ocf_req_test_mapping_error, false);
    will_return_always(__wrap_ocf_req_test_mapping_error, false);

    expect_function_call(__wrap_lock_clines);
    will_return(__wrap_lock_clines, -OCF_ERR_NO_LOCK);
@@ -201,20 +192,20 @@ static void ocf_prepare_clines_miss_test06(void **state)

static void ocf_prepare_clines_miss_test07(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };

    print_test_description("Target part is enabled but doesn't have enough space.\n");
    print_test_description("Eviction and mapping were ok, lock not acquired.\n");

    will_return(__wrap_ocf_part_has_space, false);
    will_return(__wrap_ocf_part_has_space, false);
    will_return_always(__wrap_ocf_part_has_space, false);

    expect_function_call(__wrap_space_managment_evict_do);
    will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

    will_return(__wrap_ocf_part_is_enabled, true);

    expect_function_call(__wrap_ocf_engine_map);
    will_return(__wrap_ocf_req_test_mapping_error, false);
    will_return_always(__wrap_ocf_req_test_mapping_error, false);

    expect_function_call(__wrap_lock_clines);
    will_return(__wrap_lock_clines, OCF_LOCK_NOT_ACQUIRED);
@@ -224,15 +215,17 @@ static void ocf_prepare_clines_miss_test07(void **state)

static void ocf_prepare_clines_miss_test08(void **state)
{
    struct ocf_request req = {};
    struct ocf_cache cache;
    struct ocf_request req = {.cache = &cache };

    print_test_description("Target part is enabled has enough space.\n");
    print_test_description("\tMapping and cacheline lock are both ok\n");

    will_return(__wrap_ocf_part_is_enabled, true);
    will_return(__wrap_ocf_part_has_space, true);
    will_return_always(__wrap_ocf_part_has_space, true);

    expect_function_call(__wrap_ocf_engine_map);
    will_return(__wrap_ocf_req_test_mapping_error, false);
    will_return_always(__wrap_ocf_req_test_mapping_error, false);

    expect_function_call(__wrap_lock_clines);
    will_return(__wrap_lock_clines, OCF_LOCK_ACQUIRED);
@@ -247,7 +240,6 @@ int main(void)
    cmocka_unit_test(ocf_prepare_clines_miss_test02),
    cmocka_unit_test(ocf_prepare_clines_miss_test03),
    cmocka_unit_test(ocf_prepare_clines_miss_test04),
    cmocka_unit_test(ocf_prepare_clines_miss_test05),
    cmocka_unit_test(ocf_prepare_clines_miss_test06),
    cmocka_unit_test(ocf_prepare_clines_miss_test07),
    cmocka_unit_test(ocf_prepare_clines_miss_test08)
@ -27,9 +27,9 @@ struct test_cache
|
||||
{
|
||||
struct ocf_cache cache;
|
||||
struct ocf_user_part_config part[OCF_IO_CLASS_MAX];
|
||||
struct ocf_user_part upart[OCF_IO_CLASS_MAX];
|
||||
uint32_t overflow[OCF_IO_CLASS_MAX];
|
||||
uint32_t evictable[OCF_IO_CLASS_MAX];
|
||||
uint32_t req_unmapped;
|
||||
};
|
||||
|
||||
bool __wrap_ocf_eviction_can_evict(ocf_cache_t cache)
|
||||
@ -62,10 +62,12 @@ uint32_t __wrap_ocf_eviction_need_space(struct ocf_cache *cache,
|
||||
|
||||
tcache->overflow[part->id] -= overflown_consumed;
|
||||
tcache->evictable[part->id] -= clines;
|
||||
tcache->req_unmapped -= clines;
|
||||
|
||||
check_expected(part);
|
||||
check_expected(clines);
|
||||
function_called();
|
||||
|
||||
return mock();
|
||||
}
|
||||
|
||||
@ -157,7 +159,7 @@ static struct ocf_lst_entry *_list_getter(
|
||||
{
|
||||
struct test_cache* tcache = cache;
|
||||
|
||||
return &tcache->upart[idx].lst_valid;
|
||||
return &tcache->cache.user_parts[idx].lst_valid;
|
||||
}
|
||||
|
||||
static void init_part_list(struct test_cache *tcache)
|
||||
@ -165,23 +167,30 @@ static void init_part_list(struct test_cache *tcache)
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
|
||||
tcache->upart[i].id = i;
|
||||
tcache->upart[i].config = &tcache->part[i];
|
||||
tcache->upart[i].config->priority = i+1;
|
||||
tcache->upart[i].config->flags.eviction = 1;
|
||||
tcache->cache.user_parts[i].id = i;
|
||||
tcache->cache.user_parts[i].config = &tcache->part[i];
|
||||
tcache->cache.user_parts[i].config->priority = i+1;
|
||||
tcache->cache.user_parts[i].config->flags.eviction = 1;
|
||||
}
|
||||
|
||||
ocf_lst_init((ocf_cache_t)tcache, &tcache->cache.lst_part, OCF_IO_CLASS_MAX,
|
||||
_list_getter, ocf_part_lst_cmp_valid);
|
||||
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
|
||||
ocf_lst_init_entry(&tcache->cache.lst_part, &tcache->upart[i].lst_valid);
|
||||
ocf_lst_init_entry(&tcache->cache.lst_part, &tcache->cache.user_parts[i].lst_valid);
|
||||
ocf_lst_add_tail(&tcache->cache.lst_part, i);
|
||||
}
|
||||
}
|
||||
|
||||

uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
{
struct test_cache* tcache = (struct test_cache*)req->cache;

return tcache->req_unmapped;
}

#define _expect_evict_call(tcache, part_id, req_count, ret_count) \
do { \
expect_value(__wrap_ocf_eviction_need_space, part, &tcache.upart[part_id]); \
expect_value(__wrap_ocf_eviction_need_space, part, &tcache.cache.user_parts[part_id]); \
expect_value(__wrap_ocf_eviction_need_space, clines, req_count); \
expect_function_call(__wrap_ocf_eviction_need_space); \
will_return(__wrap_ocf_eviction_need_space, ret_count); \
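
For reference, a single invocation such as _expect_evict_call(tcache, 10, 50, 50) expands to an ordinary cmocka expectation block, shown here against the updated cache.user_parts layout and assuming the macro closes with the customary } while (0), which falls outside the visible hunk:

    do {
        expect_value(__wrap_ocf_eviction_need_space, part,
                &tcache.cache.user_parts[10]);
        expect_value(__wrap_ocf_eviction_need_space, clines, 50);
        expect_function_call(__wrap_ocf_eviction_need_space);
        will_return(__wrap_ocf_eviction_need_space, 50);
    } while (0);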

@ -190,6 +199,7 @@ static void init_part_list(struct test_cache *tcache)
static void ocf_evict_do_test01(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
unsigned evicted;

print_test_description("one IO class, no overflow\n");

@ -197,16 +207,17 @@ static void ocf_evict_do_test01(void **state)
init_part_list(&tcache);

tcache.evictable[10] = 100;
tcache.req_unmapped = 50;

_expect_evict_call(tcache, 10, 50, 50);

evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 50, &tcache.upart[0]);
evicted = ocf_evict_do(&req);
assert_int_equal(evicted, 50);
}

static void ocf_evict_do_test02(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
unsigned i;
unsigned evicted;

@ -216,16 +227,18 @@ static void ocf_evict_do_test02(void **state)

tcache.evictable[10] = 100;
tcache.overflow[10] = 100;
tcache.req_unmapped = 50;

_expect_evict_call(tcache, 10, 50, 50);

evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 50, &tcache.upart[0]);
evicted = ocf_evict_do(&req);
assert_int_equal(evicted, 50);
}

static void ocf_evict_do_test03(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
unsigned i;
unsigned evicted;

@ -237,19 +250,21 @@ static void ocf_evict_do_test03(void **state)
tcache.evictable[12] = 100;
tcache.evictable[16] = 100;
tcache.evictable[17] = 100;
tcache.req_unmapped = 350;

_expect_evict_call(tcache, 10, 100, 100);
_expect_evict_call(tcache, 12, 100, 100);
_expect_evict_call(tcache, 16, 100, 100);
_expect_evict_call(tcache, 17, 50, 50);

evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 350, &tcache.upart[0]);
evicted = ocf_evict_do(&req);
assert_int_equal(evicted, 350);
}
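
The repeated call-site change is the crux of these hunks: ocf_evict_do() now takes only the request, and the eviction size comes from the request's unmapped cacheline count instead of an explicit clines argument. The expectations describe partitions visited in order, each contributing at most its evictable count, which in test03 works out to 100 + 100 + 100 + 50 = 350. A self-contained toy model of that loop, inferred from the test expectations rather than taken from OCF's actual implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PARTS 22 /* enough for io classes 10..18 used in these tests */

    static uint32_t evictable[NUM_PARTS];

    /* visit partitions in order; each gives up at most its evictable count */
    static uint32_t evict_do_model(uint32_t unmapped)
    {
        uint32_t evicted = 0;
        unsigned part;

        for (part = 0; part < NUM_PARTS && evicted < unmapped; part++) {
            uint32_t chunk = unmapped - evicted;

            if (chunk > evictable[part])
                chunk = evictable[part];
            evictable[part] -= chunk;
            evicted += chunk;
        }
        return evicted;
    }

    int main(void)
    {
        /* mirrors ocf_evict_do_test03: three full classes plus a partial one */
        evictable[10] = evictable[12] = evictable[16] = evictable[17] = 100;
        printf("evicted %u\n", (unsigned)evict_do_model(350)); /* evicted 350 */
        return 0;
    }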

static void ocf_evict_do_test04(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
unsigned i;
unsigned evicted;

@ -266,6 +281,7 @@ static void ocf_evict_do_test04(void **state)
tcache.evictable[17] = 100;
tcache.evictable[18] = 100;
tcache.overflow[18] = 100;
tcache.req_unmapped = 580;

_expect_evict_call(tcache, 12, 40, 40);
_expect_evict_call(tcache, 14, 100, 100);

@ -275,7 +291,7 @@ static void ocf_evict_do_test04(void **state)
_expect_evict_call(tcache, 16, 100, 100);
_expect_evict_call(tcache, 17, 80, 80);

evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 580, &tcache.upart[0]);
evicted = ocf_evict_do(&req);
assert_int_equal(evicted, 580);
}
int main(void)

@ -10,6 +10,8 @@
* _lru_evp_set_empty
* _lru_evp_all_empty
* ocf_rotate_right
* lru_iter_eviction_next
* lru_iter_cleaning_next
* </functions_to_leave>
*/

@ -157,7 +159,26 @@ void write_test_case_description(void)
test_case++;
}

/* transform cacheline numbers so that they remain unique but have
* assignment to list modulo OCF_NUM_EVICTION_LISTS */
for (test_case = 0; test_case < num_cases; test_case++) {
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
j = 0;
while (test_cases[j][i][test_case] != -1) {
test_cases[j][i][test_case] = test_cases[j][i][test_case] *
OCF_NUM_EVICTION_LISTS + i;
j++;
}
}
}

#ifdef DEBUG
static bool desc_printed = false;

if (desc_printed)
return;
desc_printed = true;

for (test_case = 0; test_case < num_cases; test_case++) {
print_message("test case no %d\n", test_case);
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
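
The transform added above keeps every cacheline number unique while pinning its list assignment: element v on list i becomes v * OCF_NUM_EVICTION_LISTS + i, so distinct (v, i) pairs stay distinct and the result still selects list i under modulo. A worked instance, with 32 standing in for OCF_NUM_EVICTION_LISTS (the actual constant is defined elsewhere in the tree):

    #define NUM_LISTS 32 /* stand-in for OCF_NUM_EVICTION_LISTS */

    static inline int transform(int v, int i)
    {
        /* unique per (v, i); transform(v, i) % NUM_LISTS == i */
        return v * NUM_LISTS + i;
    }

    /* e.g. transform(3, 2) == 98, and 98 % 32 == 2 */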

@ -196,6 +217,11 @@ struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *part,
list.num_nodes = i;
}

#ifdef DEBUG
print_message("list for case %u evp %u: head: %u tail %u elems %u\n",
current_case, evp, list.head, list.tail, list.num_nodes);
#endif

return &list;
}

@ -245,6 +271,76 @@ union eviction_policy_meta *__wrap_ocf_metadata_get_eviction_policy(
}

void __wrap_add_lru_head(ocf_cache_t cache,
struct ocf_lru_list *list,
unsigned int collision_index)
{
unsigned list_head = list->head;
unsigned i, j = collision_index % OCF_NUM_EVICTION_LISTS;

i = 1;
while (test_cases[i][j][current_case] != -1)
i++;

test_cases[i+1][j][current_case] = -1;

while (i--)
test_cases[i + 1][j][current_case] = test_cases[i][j][current_case];

test_cases[0][j][current_case] = collision_index;

#ifdef DEBUG
print_message("case %u evp %u head set to %u\n", current_case, j, collision_index);
#endif
}
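
__wrap_add_lru_head maintains the tests' array model of an LRU list: row j of test_cases[j][i][case] holds the j-th element of list i, with -1 as terminator, and a head insert shifts the row right by one slot. A stripped-down, runnable rendition of the same shift (names illustrative):

    /* rows are -1-terminated; insert new_head at index 0 */
    static void model_add_head(int *row, int new_head)
    {
        int i = 0;

        while (row[i] != -1)
            i++;            /* i == current element count */
        row[i + 1] = -1;    /* terminator moves one slot right */
        while (i--)
            row[i + 1] = row[i];
        row[0] = new_head;
    }

The wrapper's scan starting at i = 1 rather than 0 ends up equivalent for the contiguous rows used here, since a row never has a hole at index 0.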

void __wrap_remove_lru_list(ocf_cache_t cache,
struct ocf_lru_list *list,
unsigned int collision_index)
{
bool found;
unsigned i, j;

found = false;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
{
j = 0;

while (test_cases[j][i][current_case] != -1) {
if (!found && test_cases[j][i][current_case] == collision_index) {
assert_int_equal(test_cases[0][i][current_case], list->head);
found = true;
}
if (found)
test_cases[j][i][current_case] = test_cases[j+1][i][current_case];
j++;
}

if (found)
break;
}

assert(found);

#ifdef DEBUG
print_message("case %u removed %u from evp %u\n", current_case, collision_index, i);
#endif
}
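
__wrap_remove_lru_list is the inverse operation: it locates collision_index in one of the rows, asserts that the list head seen by the code under test matches the model's row head, and closes the gap by shifting the rest of the row left. The two lock wrappers that follow simply return true, so the iterator tests exercise traversal order without any real cacheline locking. A matching removal for the model sketched above (illustrative):

    /* remove the element at index at_idx by shifting the tail left */
    static void model_remove(int *row, int at_idx)
    {
        int j = at_idx;

        do {
            row[j] = row[j + 1];
            j++;
        } while (row[j - 1] != -1);
    }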

bool __wrap__lru_lock(struct ocf_lru_iter *iter,
ocf_cache_line_t cache_line,
ocf_core_id_t *core_id, uint64_t *core_line)
{
return true;
}

bool __wrap__lru_trylock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
{
return true;
}

static void _lru_run_test(unsigned test_case)
{
unsigned start_pos;

@ -258,6 +354,8 @@ static void _lru_run_test(unsigned test_case)
unsigned pos[OCF_NUM_EVICTION_LISTS];
unsigned i;

write_test_case_description();

for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
{
pos[i] = -1;

@ -265,12 +363,10 @@ static void _lru_run_test(unsigned test_case)
pos[i]++;
}

lru_iter_init(&iter, NULL, NULL, start_pos, false);
lru_iter_init(&iter, NULL, NULL, start_pos, false, false, false,
NULL, NULL);
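
Note that lru_iter_init() grew from five arguments to nine. The header is not part of this diff, so the parameter names are not visible; judging from the call site and the commit message, the extra booleans and trailing pointers plausibly select clean-versus-dirty traversal and carry the cacheline-locking context that parallel eviction introduces. A hypothetical prototype for orientation only, every name below being a guess rather than the real declaration:

    void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
            struct ocf_user_part *part, uint32_t start_evp,
            bool clean_only, bool cl_lock_write, bool hash_locked,
            void *lock_cb, void *lock_ctx);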

do {
/* get cacheline from iterator */
cache_line = lru_iter_next(&iter);

/* check what is expected to be returned from iterator */
if (pos[curr_evp] == -1) {
i = 1;

@ -294,6 +390,9 @@ static void _lru_run_test(unsigned test_case)
pos[curr_evp]--;
}

/* get cacheline from iterator */
cache_line = lru_iter_cleaning_next(&iter);

assert_int_equal(cache_line, expected_cache_line);

curr_evp = (curr_evp + 1) % OCF_NUM_EVICTION_LISTS;

@ -475,7 +574,5 @@ int main(void)

print_message("Unit test for lru_iter_next\n");

write_test_case_description();

return cmocka_run_group_tests(tests, NULL, NULL);
}