commit 6a93303d26
@@ -398,7 +398,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,

 	if (info.status == LOOKUP_HIT &&
 			metadata_test_dirty(cache, info.coll_idx)) {
 		locked = ocf_cache_line_try_lock_rd(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				info.coll_idx);
 	}
@@ -474,7 +474,7 @@ static void _acp_flush_end(void *priv, int error)

 	for (i = 0; i < flush->size; i++) {
 		ocf_cache_line_unlock_rd(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				flush->data[i].cache_line);
 		ACP_DEBUG_END(acp, flush->data[i].cache_line);
 	}
@@ -496,7 +496,8 @@ static void _acp_flush(struct acp_context *acp)
 	struct ocf_cleaner_attribs attribs = {
 		.cmpl_context = acp,
 		.cmpl_fn = _acp_flush_end,
-		.cache_line_lock = false,
+		.lock_cacheline = false,
+		.lock_metadata = true,
 		.do_sort = false,
 		.io_queue = cache->cleaner.io_queue,
 	};
@@ -682,8 +682,7 @@ static bool block_is_busy(struct ocf_cache *cache,
 	if (!cache->core[core_id].opened)
 		return true;

-	if (ocf_cache_line_is_used(
-			cache->device->concurrency.cache_line,
+	if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 			cache_line)) {
 		return true;
 	}
@@ -816,7 +815,8 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl)

 	fctx->attribs.cmpl_context = fctx;
 	fctx->attribs.cmpl_fn = alru_clean_complete;
-	fctx->attribs.cache_line_lock = true;
+	fctx->attribs.lock_cacheline = true;
+	fctx->attribs.lock_metadata = false;
 	fctx->attribs.do_sort = true;
 	fctx->attribs.io_queue = cache->cleaner.io_queue;

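The flag rename above splits the cleaner's single locking switch into two independent ones. For orientation, the combinations this commit configures (summarized from the cleaner hunks here and from ocf_engine_clean() further down; this table is not part of the source):

/*
 *                           .lock_cacheline  .lock_metadata
 *   ACP   _acp_flush()           false            true    (lines already
 *                                                          read-locked by ACP)
 *   ALRU  perform_cleaning()     true             false
 *   core  ocf_engine_clean()     false            false   (request already
 *                                                          holds the locks)
 */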
@@ -710,9 +710,19 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr
 	__unlock_waiters_list(c, line, flags);
 }

+static inline bool _ocf_req_needs_cl_lock(struct ocf_request *req, unsigned entry)
+{
+	/* Remapped cachelines are assigned cacheline lock individually
+	 * during eviction
+	 */
+	return req->map[entry].status != LOOKUP_MISS &&
+			req->map[entry].status != LOOKUP_REMAPPED;
+}
+
 /* Try to read-lock request without adding waiters. Function should be called
  * under read lock, multiple threads may attempt to acquire the lock
- * concurrently. */
+ * concurrently.
+ */
 static int _ocf_req_trylock_rd(struct ocf_cache_line_concurrency *c,
 		struct ocf_request *req)
 {
@@ -725,8 +735,8 @@ static int _ocf_req_trylock_rd(struct ocf_cache_line_concurrency *c,
 	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));

 	for (i = 0; i < req->core_line_count; i++) {
-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (!_ocf_req_needs_cl_lock(req, i)) {
+			/* nothing to lock */
 			continue;
 		}

@@ -750,6 +760,11 @@ static int _ocf_req_trylock_rd(struct ocf_cache_line_concurrency *c,
 	if (ret == OCF_LOCK_NOT_ACQUIRED) {
 		/* Request is not locked, discard acquired locks */
 		for (; i >= 0; i--) {
+			if (!_ocf_req_needs_cl_lock(req, i)) {
+				/* nothing to discard */
+				continue;
+			}
+
 			line = req->map[i].coll_idx;

 			if (req->map[i].rd_locked) {
@@ -781,8 +796,8 @@ static int _ocf_req_lock_rd(struct ocf_cache_line_concurrency *c,

 	for (i = 0; i < req->core_line_count; i++) {

-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (!_ocf_req_needs_cl_lock(req, i)) {
+			/* nothing to lock */
 			env_atomic_dec(&req->lock_remaining);
 			continue;
 		}
@@ -808,6 +823,9 @@ static int _ocf_req_lock_rd(struct ocf_cache_line_concurrency *c,

err:
	for (; i >= 0; i--) {
+		if (!_ocf_req_needs_cl_lock(req, i))
+			continue;
+
 		__remove_line_from_waiters_list(c, req, i, req,
 				OCF_READ);
 	}
@@ -847,8 +865,8 @@ static int _ocf_req_trylock_wr(struct ocf_cache_line_concurrency *c,
 	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));

 	for (i = 0; i < req->core_line_count; i++) {
-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (!_ocf_req_needs_cl_lock(req, i)) {
+			/* nothing to lock */
 			continue;
 		}

@@ -872,6 +890,9 @@ static int _ocf_req_trylock_wr(struct ocf_cache_line_concurrency *c,
 	if (ret == OCF_LOCK_NOT_ACQUIRED) {
 		/* Request is not locked, discard acquired locks */
 		for (; i >= 0; i--) {
+			if (!_ocf_req_needs_cl_lock(req, i))
+				continue;
+
 			line = req->map[i].coll_idx;

 			if (req->map[i].wr_locked) {
@@ -904,8 +925,8 @@ static int _ocf_req_lock_wr(struct ocf_cache_line_concurrency *c,

 	for (i = 0; i < req->core_line_count; i++) {

-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (!_ocf_req_needs_cl_lock(req, i)) {
+			/* nothing to lock */
 			env_atomic_dec(&req->lock_remaining);
 			continue;
 		}
@@ -931,6 +952,9 @@ static int _ocf_req_lock_wr(struct ocf_cache_line_concurrency *c,

err:
	for (; i >= 0; i--) {
+		if (!_ocf_req_needs_cl_lock(req, i))
+			continue;
+
 		__remove_line_from_waiters_list(c, req, i, req,
 				OCF_WRITE);
 	}
@@ -968,15 +992,16 @@ void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c, struct ocf_request
 	OCF_DEBUG_RQ(req, "Unlock");

 	for (i = 0; i < req->core_line_count; i++) {
+		ENV_BUG_ON(req->map[i].wr_locked);
+
-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (req->map[i].status == LOOKUP_MISS)
 			continue;
-		}
+
+		if (!req->map[i].rd_locked)
+			continue;

 		line = req->map[i].coll_idx;

-		ENV_BUG_ON(!req->map[i].rd_locked);
 		ENV_BUG_ON(line >= c->num_clines);

 		__unlock_cache_line_rd(c, line);
@@ -995,15 +1020,16 @@ void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c, struct ocf_request
 	OCF_DEBUG_RQ(req, "Unlock");

 	for (i = 0; i < req->core_line_count; i++) {
+		ENV_BUG_ON(req->map[i].rd_locked);
+
-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (req->map[i].status == LOOKUP_MISS)
 			continue;
-		}
+
+		if (!req->map[i].wr_locked)
+			continue;

 		line = req->map[i].coll_idx;

-		ENV_BUG_ON(!req->map[i].wr_locked);
 		ENV_BUG_ON(line >= c->num_clines);

 		__unlock_cache_line_wr(c, line);
@@ -1022,11 +1048,8 @@ void ocf_req_unlock(struct ocf_cache_line_concurrency *c, struct ocf_request *re
 	OCF_DEBUG_RQ(req, "Unlock");

 	for (i = 0; i < req->core_line_count; i++) {
-
-		if (req->map[i].status == LOOKUP_MISS) {
-			/* MISS nothing to lock */
+		if (req->map[i].status == LOOKUP_MISS)
 			continue;
-		}

 		line = req->map[i].coll_idx;
 		ENV_BUG_ON(line >= c->num_clines);
@@ -1039,8 +1062,6 @@ void ocf_req_unlock(struct ocf_cache_line_concurrency *c, struct ocf_request *re
 		} else if (req->map[i].wr_locked) {
 			__unlock_cache_line_wr(c, line);
 			req->map[i].wr_locked = false;
-		} else {
-			ENV_BUG();
 		}
 	}
 }
@@ -1104,6 +1125,25 @@ bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
 	return are;
 }

+/* NOTE: it is the caller's responsibility to assure that no one acquires
+ * a lock in background */
+bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
+		ocf_cache_line_t line)
+{
+	struct ocf_cache_line_concurrency *c =
+			ocf_cache_line_concurrency(cache);
+	env_atomic *access = &c->access[line];
+	int val = env_atomic_read(access);
+
+	ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE);
+
+	if (ocf_cache_line_are_waiters(c, line))
+		return false;
+
+	return val == OCF_CACHE_LINE_ACCESS_ONE_RD ||
+			val == OCF_CACHE_LINE_ACCESS_WR;
+}
+
 /*
  *
  */
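The helper added above answers one question: is the current thread the sole holder of the line's lock? A minimal usage sketch, assuming the try-lock API from this file — the helper name below is illustrative, but the pattern mirrors the _lru_iter_evition_lock() function added later in this patch:

/* Illustrative sketch, not part of the patch */
static bool try_take_line_exclusively(struct ocf_cache *cache,
		struct ocf_cache_line_concurrency *c, ocf_cache_line_t line)
{
	if (!ocf_cache_line_try_lock_rd(c, line))
		return false;

	/* Valid only while no one can start acquiring this lock in the
	 * background, e.g. while the line's hash bucket lock is held */
	if (!ocf_cache_line_is_locked_exclusively(cache, line)) {
		ocf_cache_line_unlock_rd(c, line);
		return false;
	}

	return true; /* single reader: safe to treat as exclusive owner */
}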
@@ -147,6 +147,9 @@ bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line);

+bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
+		ocf_cache_line_t line);
+
 /**
  * @brief un_lock request map info entry from write or read access.
  *
@@ -199,4 +202,16 @@ void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line);

+/**
+ * @brief Get cacheline concurrency context
+ *
+ * @param cache - cache instance
+ * @return cacheline concurrency context
+ */
+static inline struct ocf_cache_line_concurrency *
+ocf_cache_line_concurrency(ocf_cache_t cache)
+{
+	return cache->device->concurrency.cache_line;
+}
+
 #endif /* OCF_CONCURRENCY_H_ */
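This getter is the pivot of the mechanical part of the commit; every call site changes the same way. Before/after, taken directly from the hunks in this diff:

	/* before */
	ocf_req_unlock(req->cache->device->concurrency.cache_line, req);

	/* after */
	ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);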
@@ -423,6 +423,28 @@ void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
 	for (hash = _MIN_HASH(req); hash <= _MAX_HASH(req); \
 			hash = _HASH_NEXT(req, hash))

+/* Returns true if the given LBA (determined by core_id
+ * and core_line) resolves to a hash value that is within the
+ * set of hashes for the given request (i.e. after the request
+ * hash buckets are locked, the given core line is hash bucket
+ * locked as well).
+ */
+bool ocf_req_hash_in_range(struct ocf_request *req,
+		ocf_core_id_t core_id, uint64_t core_line)
+{
+	ocf_cache_line_t hash = ocf_metadata_hash_func(
+			req->cache, core_line, core_id);
+
+	if (!_HAS_GAP(req)) {
+		return (hash >= _MIN_HASH(req) &&
+				hash <= _MAX_HASH(req));
+	}
+
+	return (hash >= _MIN_HASH(req) && hash <= _GAP_START(req)) ||
+			(hash > _GAP_START(req) + _GAP_VAL(req) &&
+					hash <= _MAX_HASH(req));
+}
+
 void ocf_hb_req_prot_lock_rd(struct ocf_request *req)
 {
 	ocf_cache_line_t hash;
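A worked example of the gap logic may help; the numbers are illustrative and the _GAP_* macro internals are assumed, since they are not shown in this diff:

/* 32 hash buckets; request core lines hash to {30, 31, 0, 1} (wrap-around):
 *   _MIN_HASH = 0, _MAX_HASH = 31, gap covers hashes 2..29
 *   hash == 1  -> true   (before the gap)
 *   hash == 17 -> false  (inside the gap: not locked with the request)
 *   hash == 30 -> true   (after the gap)
 */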
@@ -142,16 +142,24 @@ void ocf_hb_id_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
 void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
 		unsigned lock_idx, ocf_cache_line_t hash);

-/* caller must hold global metadata read lock */
+/* Caller must hold global metadata read lock when acquiring naked hash bucket
+ * lock.
+ */
 bool ocf_hb_cline_naked_trylock_rd(struct ocf_metadata_lock *metadata_lock,
 		uint32_t core_id, uint64_t core_line);
 void ocf_hb_cline_naked_unlock_rd(struct ocf_metadata_lock *metadata_lock,
 		uint32_t core_id, uint64_t core_line);

 bool ocf_hb_cline_naked_trylock_wr(struct ocf_metadata_lock *metadata_lock,
 		uint32_t core_id, uint64_t core_line);
 void ocf_hb_cline_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
 		uint32_t core_id, uint64_t core_line);
+void ocf_hb_id_naked_lock_wr(struct ocf_metadata_lock *metadata_lock,
+		ocf_cache_line_t hash);
+void ocf_hb_id_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
+		ocf_cache_line_t hash);
+
+bool ocf_req_hash_in_range(struct ocf_request *req,
+		ocf_core_id_t core_id, uint64_t core_line);

 /* lock entire request in deadlock-free manner */
 void ocf_hb_req_prot_lock_rd(struct ocf_request *req);

@@ -11,7 +11,8 @@ struct ocf_request;

 #define LOOKUP_HIT 5
 #define LOOKUP_MISS 6
-#define LOOKUP_MAPPED 8
+#define LOOKUP_INSERTED 8
+#define LOOKUP_REMAPPED 9

 typedef enum {
 	/* modes inherited from user API */
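The status split above drives the rest of the patch. As far as this diff shows, the lifecycle is (summary, not source text):

/*
 *   LOOKUP_HIT      - line was already mapped when the request was looked up
 *   LOOKUP_MISS     - not mapped yet; may become INSERTED or REMAPPED
 *   LOOKUP_INSERTED - mapped during this request from the freelist
 *                     (see ocf_engine_map_cache_line() below)
 *   LOOKUP_REMAPPED - mapped during this request by evicting another line;
 *                     such entries already carry their own cacheline lock,
 *                     which is why _ocf_req_needs_cl_lock() skips them
 */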
@@ -64,7 +64,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
 		ocf_engine_invalidate(req);
 	} else {
-		ocf_req_unlock(cache->device->concurrency.cache_line, req);
+		ocf_req_unlock(ocf_cache_line_concurrency(cache), req);

 		/* put the request at the last point of the completion path */
 		ocf_req_put(req);
@@ -93,20 +93,67 @@ static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
 	return -1;
 }

-void ocf_engine_update_req_info(struct ocf_cache *cache,
-		struct ocf_request *req, uint32_t entry)
+/* Returns true if core lines on index 'entry' and 'entry + 1' within the request
+ * are physically contiguous.
+ */
+static inline bool ocf_engine_clines_phys_cont(struct ocf_request *req,
+		uint32_t entry)
 {
+	struct ocf_map_info *entry1, *entry2;
+	ocf_cache_line_t phys1, phys2;
+
+	entry1 = &req->map[entry];
+	entry2 = &req->map[entry + 1];
+
+	if (entry1->status == LOOKUP_MISS || entry2->status == LOOKUP_MISS)
+		return false;
+
+	phys1 = ocf_metadata_map_lg2phy(req->cache, entry1->coll_idx);
+	phys2 = ocf_metadata_map_lg2phy(req->cache, entry2->coll_idx);
+
+	return phys1 < phys2 && phys1 + 1 == phys2;
+}
+
+void ocf_engine_patch_req_info(struct ocf_cache *cache,
+		struct ocf_request *req, uint32_t idx)
+{
+	struct ocf_map_info *entry = &req->map[idx];
+
+	ENV_BUG_ON(entry->status != LOOKUP_REMAPPED);
+
+	req->info.insert_no++;
+
+	if (req->part_id != ocf_metadata_get_partition_id(cache,
+			entry->coll_idx)) {
+		/*
+		 * Need to move this cache line into other partition
+		 */
+		entry->re_part = true;
+		req->info.re_part_no++;
+	}
+
+	if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
+		req->info.seq_no++;
+	if (idx + 1 < req->core_line_count &&
+			ocf_engine_clines_phys_cont(req, idx)) {
+		req->info.seq_no++;
+	}
+}
+
+static void ocf_engine_update_req_info(struct ocf_cache *cache,
+		struct ocf_request *req, uint32_t idx)
+{
 	uint8_t start_sector = 0;
 	uint8_t end_sector = ocf_line_end_sector(cache);
-	struct ocf_map_info *_entry = &(req->map[entry]);
+	struct ocf_map_info *entry = &(req->map[idx]);

-	start_sector = ocf_map_line_start_sector(req, entry);
-	end_sector = ocf_map_line_end_sector(req, entry);
+	start_sector = ocf_map_line_start_sector(req, idx);
+	end_sector = ocf_map_line_end_sector(req, idx);

 	/* Handle return value */
-	switch (_entry->status) {
+	switch (entry->status) {
 	case LOOKUP_HIT:
-		if (metadata_test_valid_sec(cache, _entry->coll_idx,
+		if (metadata_test_valid_sec(cache, entry->coll_idx,
 				start_sector, end_sector)) {
 			req->info.hit_no++;
 		} else {
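A small worked example of the new seq_no bookkeeping (illustrative layout, not from the patch):

/* 3-line request mapped to physical cache lines:
 *   idx:   0   1   2
 *   phys:  7   8   20
 *
 * ocf_engine_clines_phys_cont(req, 0) -> true   (7 + 1 == 8)
 * ocf_engine_clines_phys_cont(req, 1) -> false  (8 + 1 != 20)
 * => req->info.seq_no == 1, while a fully sequential request would
 *    need seq_no == core_line_count - 1 == 2
 */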
@@ -114,29 +161,30 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
 		}

 		/* Check request is dirty */
-		if (metadata_test_dirty(cache, _entry->coll_idx)) {
+		if (metadata_test_dirty(cache, entry->coll_idx)) {
 			req->info.dirty_any++;

 			/* Check if cache line is fully dirty */
-			if (metadata_test_dirty_all_sec(cache, _entry->coll_idx,
+			if (metadata_test_dirty_all_sec(cache, entry->coll_idx,
 					start_sector, end_sector))
 				req->info.dirty_all++;
 		}

 		if (req->part_id != ocf_metadata_get_partition_id(cache,
-				_entry->coll_idx)) {
+				entry->coll_idx)) {
 			/*
 			 * Need to move this cache line into other partition
 			 */
-			_entry->re_part = true;
+			entry->re_part = true;
 			req->info.re_part_no++;
 		}

 		break;
-	case LOOKUP_MISS:
-		req->info.seq_req = false;
+	case LOOKUP_INSERTED:
+	case LOOKUP_REMAPPED:
+		req->info.insert_no++;
 		break;
-	case LOOKUP_MAPPED:
+	case LOOKUP_MISS:
 		break;
 	default:
 		ENV_BUG();
@@ -144,17 +192,29 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
 	}

-	/* Check if cache hit is sequential */
-	if (req->info.seq_req && entry) {
-		if (ocf_metadata_map_lg2phy(cache,
-			(req->map[entry - 1].coll_idx)) + 1 !=
-			ocf_metadata_map_lg2phy(cache,
-			_entry->coll_idx)) {
-			req->info.seq_req = false;
-		}
-	}
+	if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
+		req->info.seq_no++;
}

+static void ocf_engine_set_hot(struct ocf_request *req)
+{
+	struct ocf_cache *cache = req->cache;
+	struct ocf_map_info *entry;
+	uint8_t status;
+	unsigned i;
+
+	for (i = 0; i < req->core_line_count; i++) {
+		entry = &(req->map[i]);
+		status = entry->status;
+
+		if (status == LOOKUP_HIT || status == LOOKUP_INSERTED) {
+			/* Update eviction (LRU) */
+			ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
+		}
+	}
+}
+
-void ocf_engine_traverse(struct ocf_request *req)
+static void ocf_engine_lookup(struct ocf_request *req)
 {
 	uint32_t i;
 	uint64_t core_line;
@@ -165,7 +225,6 @@ void ocf_engine_traverse(struct ocf_request *req)
 	OCF_DEBUG_TRACE(req->cache);

 	ocf_req_clear_info(req);
-	req->info.seq_req = true;

 	for (i = 0, core_line = req->core_line_first;
 			core_line <= req->core_line_last; core_line++, i++) {
@@ -176,8 +235,6 @@ void ocf_engine_traverse(struct ocf_request *req)
 				core_line);

 		if (entry->status != LOOKUP_HIT) {
-			req->info.seq_req = false;
-
 			/* There is miss then lookup for next map entry */
 			OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
 					entry->core_line);
@@ -187,14 +244,16 @@ void ocf_engine_traverse(struct ocf_request *req)
 		OCF_DEBUG_PARAM(cache, "Hit, cache line %u, core line = %llu",
 				entry->coll_idx, entry->core_line);

-		/* Update eviction (LRU) */
-		ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
-
 		ocf_engine_update_req_info(cache, req, i);
 	}

-	OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
-			"Yes" : "No");
+	OCF_DEBUG_PARAM(cache, "Sequential - %s", ocf_engine_is_sequential(req)
+			? "Yes" : "No");
 }

+void ocf_engine_traverse(struct ocf_request *req)
+{
+	ocf_engine_lookup(req);
+	ocf_engine_set_hot(req);
+}
+
 int ocf_engine_check(struct ocf_request *req)
@@ -209,7 +268,6 @@ int ocf_engine_check(struct ocf_request *req)
 	OCF_DEBUG_TRACE(req->cache);

 	ocf_req_clear_info(req);
-	req->info.seq_req = true;

 	for (i = 0, core_line = req->core_line_first;
 			core_line <= req->core_line_last; core_line++, i++) {
@@ -217,14 +275,12 @@ int ocf_engine_check(struct ocf_request *req)
 		struct ocf_map_info *entry = &(req->map[i]);

 		if (entry->status == LOOKUP_MISS) {
-			req->info.seq_req = false;
 			continue;
 		}

 		if (_ocf_engine_check_map_entry(cache, entry, core_id)) {
 			/* Mapping is invalid */
 			entry->invalid = true;
-			req->info.seq_req = false;

 			OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
 					entry->coll_idx);
@@ -240,38 +296,26 @@ int ocf_engine_check(struct ocf_request *req)
 		}
 	}

-	OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
-			"Yes" : "No");
+	OCF_DEBUG_PARAM(cache, "Sequential - %s", ocf_engine_is_sequential(req)
+			? "Yes" : "No");

 	return result;
 }

-static void ocf_engine_map_cache_line(struct ocf_request *req,
-		uint64_t core_line, unsigned int hash_index,
-		ocf_cache_line_t *cache_line)
+void ocf_map_cache_line(struct ocf_request *req,
+		unsigned int idx, ocf_cache_line_t cache_line)
 {
-	struct ocf_cache *cache = req->cache;
+	ocf_cache_t cache = req->cache;
 	ocf_core_id_t core_id = ocf_core_get_id(req->core);
-	ocf_part_id_t part_id = req->part_id;
 	ocf_cleaning_t clean_policy_type;
-
-	if (!ocf_freelist_get_cache_line(cache->freelist, cache_line)) {
-		ocf_req_set_mapping_error(req);
-		return;
-	}
-
-	ocf_metadata_add_to_partition(cache, part_id, *cache_line);
+	unsigned int hash_index = req->map[idx].hash;
+	uint64_t core_line = req->core_line_first + idx;

 	/* Add the block to the corresponding collision list */
-	ocf_metadata_start_collision_shared_access(cache, *cache_line);
+	ocf_metadata_start_collision_shared_access(cache, cache_line);
 	ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
-			*cache_line);
-	ocf_metadata_end_collision_shared_access(cache, *cache_line);
-
-	ocf_eviction_init_cache_line(cache, *cache_line);
-
-	/* Update LRU:: Move this node to head of lru list. */
-	ocf_eviction_set_hot_cache_line(cache, *cache_line);
+			cache_line);
+	ocf_metadata_end_collision_shared_access(cache, cache_line);

 	/* Update dirty cache-block list */
 	clean_policy_type = cache->conf_meta->cleaning_policy_type;
@@ -280,7 +324,29 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,

 	if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL)
 		cleaning_policy_ops[clean_policy_type].
-				init_cache_block(cache, *cache_line);
+				init_cache_block(cache, cache_line);
+
+	req->map[idx].coll_idx = cache_line;
 }

+static void ocf_engine_map_cache_line(struct ocf_request *req,
+		unsigned int idx)
+{
+	struct ocf_cache *cache = req->cache;
+	ocf_cache_line_t cache_line;
+
+	if (!ocf_freelist_get_cache_line(cache->freelist, &cache_line)) {
+		ocf_req_set_mapping_error(req);
+		return;
+	}
+
+	ocf_metadata_add_to_partition(cache, req->part_id, cache_line);
+
+	ocf_map_cache_line(req, idx, cache_line);
+
+	/* Update LRU:: Move this node to head of lru list. */
+	ocf_eviction_init_cache_line(cache, cache_line);
+}
+
 static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
@@ -297,7 +363,8 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
 	case LOOKUP_MISS:
 		break;

-	case LOOKUP_MAPPED:
+	case LOOKUP_INSERTED:
+	case LOOKUP_REMAPPED:
 		OCF_DEBUG_RQ(req, "Canceling cache line %u",
 				entry->coll_idx);

@@ -312,7 +379,6 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,

 		ocf_metadata_end_collision_shared_access(cache,
 				entry->coll_idx);
-
 		break;

 	default:
@@ -328,7 +394,6 @@ static void ocf_engine_map(struct ocf_request *req)
 	uint32_t i;
 	struct ocf_map_info *entry;
 	uint64_t core_line;
-	int status = LOOKUP_MAPPED;
 	ocf_core_id_t core_id = ocf_core_get_id(req->core);

 	if (!ocf_engine_unmapped_count(req))
@@ -341,7 +406,6 @@ static void ocf_engine_map(struct ocf_request *req)
 	}

 	ocf_req_clear_info(req);
-	req->info.seq_req = true;

 	OCF_DEBUG_TRACE(req->cache);

@@ -351,30 +415,26 @@ static void ocf_engine_map(struct ocf_request *req)

 		ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);

-		if (entry->status != LOOKUP_HIT) {
-			ocf_engine_map_cache_line(req, entry->core_line,
-					entry->hash, &entry->coll_idx);
-
-			if (ocf_req_test_mapping_error(req)) {
-				/*
-				 * Eviction error (mapping error), need to
-				 * clean, return and do pass through
-				 */
-				OCF_DEBUG_RQ(req, "Eviction ERROR when mapping");
-				ocf_engine_map_hndl_error(cache, req);
-				break;
-			}
-
-			entry->status = status;
-		}
+		/* attempt mapping only if no mapping error previously,
+		 * otherwise continue the loop anyway to have request fully
+		 * traversed after map()
+		 */
+		if (entry->status != LOOKUP_HIT &&
+				!ocf_req_test_mapping_error(req)) {
+			ocf_engine_map_cache_line(req, i);
+			if (!ocf_req_test_mapping_error(req))
+				entry->status = LOOKUP_INSERTED;
+		}
+
+		if (entry->status != LOOKUP_MISS)
+			ocf_engine_update_req_info(cache, req, i);

 		OCF_DEBUG_PARAM(req->cache,
 				"%s, cache line %u, core line = %llu",
-				entry->status == LOOKUP_HIT ? "Hit" : "Map",
+				entry->status == LOOKUP_HIT ? "Hit" :
+				entry->status == LOOKUP_MISS ? "Miss" :
+				"Insert",
 				entry->coll_idx, entry->core_line);
-
-		ocf_engine_update_req_info(cache, req, i);
 	}

 	if (!ocf_req_test_mapping_error(req)) {
@@ -383,8 +443,8 @@ static void ocf_engine_map(struct ocf_request *req)
 		ocf_promotion_req_purge(cache->promotion_policy, req);
 	}

-	OCF_DEBUG_PARAM(req->cache, "Sequential - %s", req->info.seq_req ?
-			"Yes" : "No");
+	OCF_DEBUG_PARAM(req->cache, "Sequential - %s",
+			ocf_engine_is_sequential(req) ? "Yes" : "No");
 }

 static void _ocf_engine_clean_end(void *private_data, int error)
@@ -396,7 +456,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 		req->error |= error;

 		/* End request and do no further processing */
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
 				req);

 		/* Complete request */
@@ -411,96 +471,117 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 	}
 }

-static int lock_clines(struct ocf_request *req,
-		const struct ocf_engine_callbacks *engine_cbs)
+static void ocf_engine_evict(struct ocf_request *req)
 {
-	enum ocf_engine_lock_type lock_type = engine_cbs->get_lock_type(req);
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
+	int status;
+
+	status = space_managment_evict_do(req);
+	if (status == LOOKUP_MISS) {
+		/* mark error */
+		ocf_req_set_mapping_error(req);
+
+		/* unlock cachelines locked during eviction */
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
+				req);
+
+		/* request cleaning */
+		ocf_req_set_clean_eviction(req);
+
+		/* unmap inserted and replaced cachelines */
+		ocf_engine_map_hndl_error(req->cache, req);
+	}
+
+	return;
+}
+
+static int lock_clines(struct ocf_request *req)
+{
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(req->cache);
+	enum ocf_engine_lock_type lock_type =
+			req->engine_cbs->get_lock_type(req);

 	switch (lock_type) {
 	case ocf_engine_lock_write:
-		return ocf_req_async_lock_wr(c, req, engine_cbs->resume);
+		return ocf_req_async_lock_wr(c, req, req->engine_cbs->resume);
 	case ocf_engine_lock_read:
-		return ocf_req_async_lock_rd(c, req, engine_cbs->resume);
+		return ocf_req_async_lock_rd(c, req, req->engine_cbs->resume);
 	default:
 		return OCF_LOCK_ACQUIRED;
 	}
 }

-static inline int ocf_prepare_clines_miss(struct ocf_request *req,
-		const struct ocf_engine_callbacks *engine_cbs)
+/* Attempt to map cachelines marked as LOOKUP_MISS by evicting from cache.
+ * Caller must assure that request map info is up to date (request
+ * is traversed).
+ */
+static inline int ocf_prepare_clines_evict(struct ocf_request *req)
+{
+	int lock_status = -OCF_ERR_NO_LOCK;
+	bool part_has_space;
+
+	part_has_space = ocf_part_has_space(req);
+	if (!part_has_space) {
+		/* adding more cachelines to target partition would overflow
+		   it - requesting eviction from target partition only */
+		ocf_req_set_part_evict(req);
+	} else {
+		/* evict from any partition */
+		ocf_req_clear_part_evict(req);
+	}
+
+	ocf_engine_evict(req);
+
+	if (!ocf_req_test_mapping_error(req)) {
+		ocf_promotion_req_purge(req->cache->promotion_policy, req);
+		lock_status = lock_clines(req);
+		if (lock_status < 0)
+			ocf_req_set_mapping_error(req);
+	}
+
+	return lock_status;
+}
+
+static inline int ocf_prepare_clines_miss(struct ocf_request *req)
 {
 	int lock_status = -OCF_ERR_NO_LOCK;
-	struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;

 	/* requests to disabled partitions go in pass-through */
 	if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
 		ocf_req_set_mapping_error(req);
-		ocf_hb_req_prot_unlock_rd(req);
 		return lock_status;
 	}

 	if (!ocf_part_has_space(req)) {
-		ocf_hb_req_prot_unlock_rd(req);
-		goto eviction;
+		ocf_engine_lookup(req);
+		return ocf_prepare_clines_evict(req);
 	}

-	/* Mapping must be performed holding (at least) hash-bucket write lock */
-	ocf_hb_req_prot_lock_upgrade(req);
-
 	ocf_engine_map(req);
-
 	if (!ocf_req_test_mapping_error(req)) {
-		lock_status = lock_clines(req, engine_cbs);
+		lock_status = lock_clines(req);
 		if (lock_status < 0) {
 			/* Mapping succeeded, but we failed to acquire cacheline lock.
 			 * Don't try to evict, just return error to caller */
 			ocf_req_set_mapping_error(req);
 		}
-		ocf_hb_req_prot_unlock_wr(req);
 		return lock_status;
 	}

-	ocf_hb_req_prot_unlock_wr(req);
-
-eviction:
-	ocf_metadata_start_exclusive_access(metadata_lock);
-
-	/* repeat traversation to pick up latest metadata status */
-	ocf_engine_traverse(req);
-
-	if (!ocf_part_has_space(req))
-		ocf_req_set_part_evict(req);
-	else
-		ocf_req_clear_part_evict(req);
-
-	if (space_managment_evict_do(req->cache, req,
-			ocf_engine_unmapped_count(req)) == LOOKUP_MISS) {
-		ocf_req_set_mapping_error(req);
-		goto unlock;
-	}
-
-	ocf_engine_map(req);
-	if (ocf_req_test_mapping_error(req))
-		goto unlock;
-
-	lock_status = lock_clines(req, engine_cbs);
-	if (lock_status < 0)
-		ocf_req_set_mapping_error(req);
-
-unlock:
-	ocf_metadata_end_exclusive_access(metadata_lock);
-
-	return lock_status;
+	/* Request mapping failed, but it is fully traversed as a side
+	 * effect of ocf_engine_map(), so no need to repeat the traversation
+	 * before eviction.
+	 */
+	req->info.mapping_error = false;
+	return ocf_prepare_clines_evict(req);
 }

-int ocf_engine_prepare_clines(struct ocf_request *req,
-		const struct ocf_engine_callbacks *engine_cbs)
+int ocf_engine_prepare_clines(struct ocf_request *req)
 {
+	struct ocf_user_part *part = &req->cache->user_parts[req->part_id];
 	bool mapped;
 	bool promote = true;
 	int lock = -OCF_ERR_NO_LOCK;
+	int result;

 	/* Calculate hashes for hash-bucket locking */
 	ocf_req_hash(req);
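Stripped of diff noise, the rewritten miss path reads roughly as follows (paraphrased control flow, not literal source):

/* ocf_prepare_clines_miss(req)        - entered under HB write lock
 *   partition disabled  -> set mapping error, return
 *   partition full      -> ocf_engine_lookup(), ocf_prepare_clines_evict()
 *   ocf_engine_map(req)
 *   mapping ok          -> lock_clines(req), return
 *   mapping failed      -> clear error, ocf_prepare_clines_evict()
 *
 * ocf_prepare_clines_evict(req)
 *   pick eviction scope (target partition only vs. whole cache)
 *   ocf_engine_evict(req) -> space_managment_evict_do(req)
 *   on success -> promotion purge, lock_clines(req)
 */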
@@ -510,13 +591,14 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
 	 * not change during traversation */
 	ocf_hb_req_prot_lock_rd(req);

-	/* Traverse to check if request is mapped fully */
-	ocf_engine_traverse(req);
+	/* check CL status */
+	ocf_engine_lookup(req);

 	mapped = ocf_engine_is_mapped(req);
 	if (mapped) {
-		lock = lock_clines(req, engine_cbs);
+		lock = lock_clines(req);
 		ocf_hb_req_prot_unlock_rd(req);
+		ocf_engine_set_hot(req);
 		return lock;
 	}

@@ -529,7 +611,20 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
 		return lock;
 	}

-	return ocf_prepare_clines_miss(req, engine_cbs);
+	/* Mapping must be performed holding (at least) hash-bucket write lock */
+	ocf_hb_req_prot_lock_upgrade(req);
+	result = ocf_prepare_clines_miss(req);
+	ocf_hb_req_prot_unlock_wr(req);
+
+	if (ocf_req_test_clean_eviction(req)) {
+		ocf_eviction_flush_dirty(req->cache, part, req->io_queue,
+				128);
+	}
+
+	if (!ocf_req_test_mapping_error(req))
+		ocf_engine_set_hot(req);
+
+	return result;
 }

 static int _ocf_engine_clean_getter(struct ocf_cache *cache,
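Put together, the lock choreography of the rewritten entry point (summary reconstructed from the two hunks above, not source text):

/* ocf_engine_prepare_clines(req):
 *   ocf_req_hash(req)
 *   ocf_hb_req_prot_lock_rd(req)         - hash buckets, read
 *   ocf_engine_lookup(req)
 *   fully mapped -> lock_clines(), HB read unlock, set_hot(), done
 *   ocf_hb_req_prot_lock_upgrade(req)    - hash buckets, read -> write
 *   ocf_prepare_clines_miss(req)
 *   ocf_hb_req_prot_unlock_wr(req)
 *   clean-eviction flagged -> ocf_eviction_flush_dirty(..., 128)
 *   no mapping error       -> ocf_engine_set_hot(req)
 */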
@@ -562,7 +657,8 @@ void ocf_engine_clean(struct ocf_request *req)
 {
 	/* Initialize attributes for cleaner */
 	struct ocf_cleaner_attribs attribs = {
-		.cache_line_lock = false,
+		.lock_cacheline = false,
+		.lock_metadata = false,

 		.cmpl_context = req,
 		.cmpl_fn = _ocf_engine_clean_end,
@@ -706,8 +802,7 @@ static int _ocf_engine_refresh(struct ocf_request *req)
 	req->complete(req, req->error);

 	/* Release WRITE lock of request */
-	ocf_req_unlock(req->cache->device->concurrency.cache_line,
-			req);
+	ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);

 	/* Release OCF request */
 	ocf_req_put(req);
@@ -112,6 +112,9 @@ static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req)
 	return req->core_line_count - (req->info.hit_no + req->info.invalid_no);
 }

+void ocf_map_cache_line(struct ocf_request *req,
+		unsigned int idx, ocf_cache_line_t cache_line);
+
 /**
  * @brief Get number of cache lines to repart
  *
@@ -124,6 +127,12 @@ static inline uint32_t ocf_engine_repart_count(struct ocf_request *req)
 	return req->info.re_part_no;
 }

+static inline uint32_t ocf_engine_is_sequential(struct ocf_request *req)
+{
+	return req->info.hit_no + req->info.insert_no == req->core_line_count
+			&& req->info.seq_no == req->core_line_count - 1;
+}
+
 /**
  * @brief Get number of IOs to perform cache read or write
  *
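For instance, with illustrative numbers: a 4-line request with hit_no == 3, insert_no == 1 and seq_no == 3 satisfies both conditions, so ocf_engine_io_count() below collapses it into a single cache I/O:

/* hit_no(3) + insert_no(1) == core_line_count(4)  -> mapped in full
 * seq_no(3) == core_line_count(4) - 1             -> physically contiguous
 * => ocf_engine_is_sequential(req) != 0, io_count == 1
 */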
@@ -133,7 +142,7 @@ static inline uint32_t ocf_engine_repart_count(struct ocf_request *req)
  */
 static inline uint32_t ocf_engine_io_count(struct ocf_request *req)
 {
-	return req->info.seq_req ? 1 : req->core_line_count;
+	return ocf_engine_is_sequential(req) ? 1 : req->core_line_count;
 }

 static inline
@@ -233,11 +242,10 @@ struct ocf_engine_callbacks
  * @param req OCF request
  *
  * @returns eviction status
- * @retval LOOKUP_MAPPED successfully evicted required number of cachelines
+ * @retval LOOKUP_INSERTED successfully evicted required number of cachelines
  * @retval LOOKUP_MISS eviction failure
  */
-int ocf_engine_prepare_clines(struct ocf_request *req,
-		const struct ocf_engine_callbacks *engine_cbs);
+int ocf_engine_prepare_clines(struct ocf_request *req);

 /**
  * @brief Traverse OCF request (lookup cache)
@@ -262,12 +270,14 @@ void ocf_engine_traverse(struct ocf_request *req);
 int ocf_engine_check(struct ocf_request *req);

 /**
- * @brief Update OCF request info
+ * @brief Update OCF request info after evicting a cacheline
  *
  * @param cache OCF cache instance
  * @param req OCF request
+ * @param idx cacheline index within the request
  */
-void ocf_engine_update_req_info(struct ocf_cache *cache,
-		struct ocf_request *req, uint32_t entry);
+void ocf_engine_patch_req_info(struct ocf_cache *cache,
+		struct ocf_request *req, uint32_t idx);

 /**
  * @brief Update OCF request block statistics for an exported object
@@ -147,7 +147,7 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");

 	/* Release WRITE lock of request */
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 	if (req->error) {
 		ocf_metadata_error(req->cache);
@@ -236,7 +236,7 @@ static int _ocf_discard_step(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				req, _ocf_discard_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
@@ -46,8 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_READ);
 		ocf_engine_push_req_front_pt(req);
 	} else {
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);

 		/* Complete request */
 		req->complete(req, req->error);
@@ -132,7 +131,7 @@ int ocf_read_fast(struct ocf_request *req)
 	if (hit && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}

@@ -204,7 +203,7 @@ int ocf_write_fast(struct ocf_request *req)
 	if (mapped && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}

@@ -31,7 +31,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to flush metadata to cache");

-	ocf_req_unlock(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 	/* Put OCF request - decrease reference counter */
 	ocf_req_put(req);
@@ -34,7 +34,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
 	/* Complete request */
 	req->complete(req, req->error);

-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);

 	/* Release OCF request */
 	ocf_req_put(req);
@@ -24,8 +24,8 @@

 static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
 {
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(
+			req->cache);

 	if (error)
 		req->error |= error;
@@ -242,8 +242,9 @@ int ocf_read_generic(struct ocf_request *req)

 	/* Set resume call backs */
 	req->io_if = &_io_if_read_generic_resume;
+	req->engine_cbs = &_rd_engine_callbacks;

-	lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);
+	lock = ocf_engine_prepare_clines(req);

 	if (!ocf_req_test_mapping_error(req)) {
 		if (lock >= 0) {
@@ -60,7 +60,7 @@ static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");

-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 	req->complete(req, req->error);

@@ -189,10 +189,11 @@ int ocf_write_wb(struct ocf_request *req)

 	/* Set resume io_if */
 	req->io_if = &_io_if_wb_resume;
+	req->engine_cbs = &_wb_engine_callbacks;

 	/* TODO: Handle fits into dirty */

-	lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks);
+	lock = ocf_engine_prepare_clines(req);

 	if (!ocf_req_test_mapping_error(req)) {
 		if (lock >= 0) {
@@ -25,7 +25,7 @@ static const struct ocf_io_if _io_if_wi_update_metadata = {

 int _ocf_write_wi_next_pass(struct ocf_request *req)
 {
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 	if (req->wi_second_pass) {
 		req->complete(req, req->error);
@@ -75,7 +75,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");

-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 	req->complete(req, req->error);

@@ -128,8 +128,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");

 	if (req->error) {
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 		req->complete(req, req->error);

@@ -200,7 +199,7 @@ int ocf_write_wi(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, _ocf_write_wi_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
@@ -33,7 +33,7 @@ static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to read data from cache");

-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);

 	/* Complete request */
 	req->complete(req, req->error);
@@ -169,8 +169,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
 	if (!req->info.dirty_any || req->error) {
 		OCF_DEBUG_RQ(req, "Completion");
 		req->complete(req, req->error);
-		ocf_req_unlock_rd(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 		ocf_req_put(req);
 		return;
 	}
@@ -238,7 +237,7 @@ int ocf_read_wo(struct ocf_request *req)
 		 * lock request for READ access
 		 */
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}

@@ -34,8 +34,7 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
 		ocf_engine_invalidate(req);
 	} else {
 		/* Unlock request from WRITE access */
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 		/* Complete request */
 		req->complete(req, req->info.core_error ? req->error : 0);
@@ -178,8 +177,9 @@ int ocf_write_wt(struct ocf_request *req)

 	/* Set resume io_if */
 	req->io_if = &_io_if_wt_resume;
+	req->engine_cbs = &_wt_engine_callbacks;

-	lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks);
+	lock = ocf_engine_prepare_clines(req);

 	if (!ocf_req_test_mapping_error(req)) {
 		if (lock >= 0) {
@@ -31,7 +31,7 @@ static int ocf_zero_purge(struct ocf_request *req)
 		ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
 	}

-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

 	req->complete(req, req->error);

@@ -153,7 +153,7 @@ void ocf_engine_zero_line(struct ocf_request *req)

 	/* Some cache line are mapped, lock request for WRITE access */
 	lock = ocf_req_async_lock_wr(
-			req->cache->device->concurrency.cache_line,
+			ocf_cache_line_concurrency(req->cache),
 			req, ocf_engine_on_resume);

 	if (lock >= 0) {
@@ -6,6 +6,7 @@
 #include "eviction.h"
 #include "ops.h"
 #include "../utils/utils_part.h"
+#include "../engine/engine_common.h"

 struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
 	[ocf_eviction_lru] = {
@@ -16,12 +17,13 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
 		.init_evp = evp_lru_init_evp,
 		.dirty_cline = evp_lru_dirty_cline,
 		.clean_cline = evp_lru_clean_cline,
+		.flush_dirty = evp_lru_clean,
 		.name = "lru",
 	},
 };

 static uint32_t ocf_evict_calculate(ocf_cache_t cache,
-		struct ocf_user_part *part, uint32_t to_evict, bool roundup)
+		struct ocf_user_part *part, uint32_t to_evict)
 {

 	uint32_t curr_part_size = ocf_part_get_occupancy(part);
@@ -35,33 +37,34 @@ static uint32_t ocf_evict_calculate(ocf_cache_t cache,
 		return 0;
 	}

-	if (roundup && to_evict < OCF_TO_EVICTION_MIN)
-		to_evict = OCF_TO_EVICTION_MIN;
-
 	if (to_evict > (curr_part_size - min_part_size))
 		to_evict = curr_part_size - min_part_size;

 	return to_evict;
 }

-static inline uint32_t ocf_evict_part_do(ocf_cache_t cache,
-		ocf_queue_t io_queue, const uint32_t evict_cline_no,
+static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
 		struct ocf_user_part *target_part)
 {
+	uint32_t unmapped = ocf_engine_unmapped_count(req);
 	uint32_t to_evict = 0;

-	if (!evp_lru_can_evict(cache))
+	if (!evp_lru_can_evict(req->cache))
 		return 0;

-	to_evict = ocf_evict_calculate(cache, target_part, evict_cline_no,
-			false);
+	to_evict = ocf_evict_calculate(req->cache, target_part, unmapped);

-	return ocf_eviction_need_space(cache, io_queue,
-			target_part, to_evict);
+	if (to_evict < unmapped) {
+		/* cannot evict enough cachelines to map request,
+		   so no purpose in evicting anything */
+		return 0;
+	}
+
+	return ocf_eviction_need_space(req->cache, req, target_part, to_evict);
 }

 static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
-		ocf_queue_t io_queue, uint32_t evict_cline_no,
+		struct ocf_request *req, uint32_t evict_cline_no,
 		bool overflown_only, int16_t max_priority)
 {
 	uint32_t to_evict = 0, evicted = 0;
@@ -98,7 +101,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
 		}

 		to_evict = ocf_evict_calculate(cache, part,
-				evict_cline_no - evicted, true);
+				evict_cline_no - evicted);
 		if (to_evict == 0) {
 			/* No cache lines to evict for this partition */
 			continue;
@@ -107,8 +110,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
 		if (overflown_only)
 			to_evict = OCF_MIN(to_evict, overflow_size);

-		evicted += ocf_eviction_need_space(cache, io_queue,
-				part, to_evict);
+		evicted += ocf_eviction_need_space(cache, req, part, to_evict);

 		if (evicted >= evict_cline_no) {
 			/* Evicted requested number of cache line, stop
@@ -122,10 +124,12 @@ out:
 	return evicted;
 }

-static inline uint32_t ocf_evict_do(ocf_cache_t cache,
-		ocf_queue_t io_queue, uint32_t evict_cline_no,
-		struct ocf_user_part *target_part)
+static inline uint32_t ocf_evict_do(struct ocf_request *req)
 {
+	ocf_cache_t cache = req->cache;
+	ocf_part_id_t target_part_id = req->part_id;
+	struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
+	uint32_t evict_cline_no = ocf_engine_unmapped_count(req);
 	uint32_t evicted;

 	/* First attempt to evict overflown partitions in order to
@@ -134,7 +138,7 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
 	 * free its cachelines regardless of destination partition
 	 * priority. */

-	evicted = ocf_evict_partitions(cache, io_queue, evict_cline_no,
+	evicted = ocf_evict_partitions(cache, req, evict_cline_no,
 			true, OCF_IO_CLASS_PRIO_PINNED);
 	if (evicted >= evict_cline_no)
 		return evicted;
@@ -142,35 +146,26 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
 	 * partitions with priority <= target partition and attempt
 	 * to evict from those. */
 	evict_cline_no -= evicted;
-	evicted += ocf_evict_partitions(cache, io_queue, evict_cline_no,
+	evicted += ocf_evict_partitions(cache, req, evict_cline_no,
 			false, target_part->config->priority);

 	return evicted;
 }

-int space_managment_evict_do(struct ocf_cache *cache,
-		struct ocf_request *req, uint32_t evict_cline_no)
+int space_managment_evict_do(struct ocf_request *req)
 {
+	uint32_t needed = ocf_engine_unmapped_count(req);
 	uint32_t evicted;
-	uint32_t free;
-	struct ocf_user_part *req_part = &cache->user_parts[req->part_id];
+	struct ocf_user_part *req_part = &req->cache->user_parts[req->part_id];

 	if (ocf_req_part_evict(req)) {
-		evicted = ocf_evict_part_do(cache, req->io_queue, evict_cline_no,
-				req_part);
+		evicted = ocf_evict_part_do(req, req_part);
 	} else {
-		free = ocf_freelist_num_free(cache->freelist);
-		if (evict_cline_no <= free)
-			return LOOKUP_MAPPED;
-
-		evict_cline_no -= free;
-
-		evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
+		evicted = ocf_evict_do(req);
 	}

-	if (evict_cline_no <= evicted)
-		return LOOKUP_MAPPED;
+	if (needed <= evicted)
+		return LOOKUP_INSERTED;

 	ocf_req_set_mapping_error(req);
 	return LOOKUP_MISS;
 }
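Callers consume the new contract the way ocf_engine_evict() does in the engine hunk above; condensed (paraphrase, not source):

	status = space_managment_evict_do(req);
	if (status == LOOKUP_MISS) {
		/* could not free enough lines; the callee has already set
		 * the mapping error flag, so undo partial insertions */
		ocf_engine_map_hndl_error(req->cache, req);
	}
	/* LOOKUP_INSERTED: enough cachelines freed to map the request */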
@@ -10,7 +10,6 @@
 #include "lru.h"
 #include "lru_structs.h"

-#define OCF_TO_EVICTION_MIN 128UL
 #define OCF_PENDING_EVICTION_LIMIT 512UL

 #define OCF_NUM_EVICTION_LISTS 32
@@ -40,11 +39,9 @@ struct eviction_policy_ops {
 	void (*rm_cline)(ocf_cache_t cache,
 			ocf_cache_line_t cline);
 	bool (*can_evict)(ocf_cache_t cache);
-	uint32_t (*req_clines)(ocf_cache_t cache,
-			ocf_queue_t io_queue, struct ocf_user_part *part,
+	uint32_t (*req_clines)(struct ocf_request *req, struct ocf_user_part *part,
 			uint32_t cline_no);
-	void (*hot_cline)(ocf_cache_t cache,
-			ocf_cache_line_t cline);
+	void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
 	void (*init_evp)(ocf_cache_t cache, struct ocf_user_part *part);
 	void (*dirty_cline)(ocf_cache_t cache,
 			struct ocf_user_part *part,
@@ -52,6 +49,8 @@ struct eviction_policy_ops {
 	void (*clean_cline)(ocf_cache_t cache,
 			struct ocf_user_part *part,
 			uint32_t cline_no);
+	void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *part,
+			ocf_queue_t io_queue, uint32_t count);
 	const char *name;
 };

@@ -64,8 +63,7 @@ extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
 * 'LOOKUP_HIT' if evicted enough cachelines to serve @req
 * 'LOOKUP_MISS' otherwise
 */
-int space_managment_evict_do(ocf_cache_t cache,
-		struct ocf_request *req, uint32_t evict_cline_no);
+int space_managment_evict_do(struct ocf_request *req);

 int space_management_free(ocf_cache_t cache, uint32_t count);

|
@ -12,6 +12,7 @@
|
||||
#include "../mngt/ocf_mngt_common.h"
|
||||
#include "../engine/engine_zero.h"
|
||||
#include "../ocf_request.h"
|
||||
#include "../engine/engine_common.h"
|
||||
|
||||
#define OCF_EVICTION_MAX_SCAN 1024
|
||||
|
||||
@@ -261,7 +262,8 @@ void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 }

 static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
-		struct ocf_user_part *part, uint32_t start_evp, bool clean)
+		struct ocf_user_part *part, uint32_t start_evp, bool clean,
+		bool cl_lock_write, _lru_hash_locked_pfn hash_locked, void *context)
 {
 	uint32_t i;

@@ -275,11 +277,47 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
 	iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) % OCF_NUM_EVICTION_LISTS;
 	iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
 	iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
+	iter->clean = clean;
+	iter->cl_lock_write = cl_lock_write;
+	iter->hash_locked = hash_locked;
+	iter->context = context;

 	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
 		iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
 }

+static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
+		ocf_cache_t cache, struct ocf_user_part *part,
+		uint32_t start_evp)
+{
+	/* Lock cachelines for read, non-exclusive access */
+	lru_iter_init(iter, cache, part, start_evp, false, false,
+			NULL, NULL);
+}
+
+static bool _evp_lru_evict_hash_locked(void *context,
+		ocf_core_id_t core_id, uint64_t core_line)
+{
+	struct ocf_request *req = context;
+
+	return ocf_req_hash_in_range(req, core_id, core_line);
+}
+
+static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
+		ocf_cache_t cache, struct ocf_user_part *part,
+		uint32_t start_evp, bool cl_lock_write,
+		struct ocf_request *req)
+{
+	/* Lock hash buckets for write, cachelines according to user request,
+	 * however exclusive cacheline access is needed even in case of read
+	 * access. _evp_lru_evict_hash_locked tells whether given hash bucket
+	 * is already locked as part of request hash locking (to avoid attempt
+	 * to acquire the same hash bucket lock twice) */
+	lru_iter_init(iter, cache, part, start_evp, true, cl_lock_write,
+			_evp_lru_evict_hash_locked, req);
+}
+
+
 static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
 {
 	unsigned increment;
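How the two initializers above are meant to be driven — a sketch of assumed usage built from the iterator functions in this file (the eviction-side next() appears in the following hunk):

	struct ocf_lru_iter iter;
	ocf_cache_line_t cline;
	ocf_core_id_t core_id;
	uint64_t core_line;

	lru_iter_eviction_init(&iter, cache, part, start_evp,
			cl_lock_write, req);
	while ((cline = lru_iter_eviction_next(&iter, &core_id,
			&core_line)) != end_marker) {
		/* cline is exclusively locked and its hash bucket is
		 * write-locked here: safe to remap/evict */
	}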
@@ -292,6 +330,8 @@ static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
 	return iter->evp;
 }

+
+
 static inline bool _lru_evp_is_empty(struct ocf_lru_iter *iter)
 {
 	return !(iter->next_avail_evp & (1ULL << (OCF_NUM_EVICTION_LISTS - 1)));
@ -308,144 +348,253 @@ static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
	return iter->num_avail_evps == 0;
}

/* get next non-empty lru list if available */
static inline ocf_cache_line_t lru_iter_next(struct ocf_lru_iter *iter)
static bool inline _lru_trylock_cacheline(struct ocf_lru_iter *iter,
		ocf_cache_line_t cline)
{
	struct lru_eviction_policy_meta *node;
	uint32_t curr_evp;
	ocf_cache_line_t ret;
	struct ocf_cache_line_concurrency *c =
			ocf_cache_line_concurrency(iter->cache);

	return iter->cl_lock_write ?
			ocf_cache_line_try_lock_wr(c, cline) :
			ocf_cache_line_try_lock_rd(c, cline);
}

static void inline _lru_unlock_cacheline(struct ocf_lru_iter *iter,
		ocf_cache_line_t cline)
{
	struct ocf_cache_line_concurrency *c =
			ocf_cache_line_concurrency(iter->cache);

	if (iter->cl_lock_write)
		ocf_cache_line_unlock_wr(c, cline);
	else
		ocf_cache_line_unlock_rd(c, cline);
}

static bool inline _lru_trylock_hash(struct ocf_lru_iter *iter,
		ocf_core_id_t core_id, uint64_t core_line)
{
	if (iter->hash_locked != NULL && iter->hash_locked(
			iter->context,
			core_id, core_line)) {
		return true;
	}

	return ocf_hb_cline_naked_trylock_wr(
			&iter->cache->metadata.lock,
			core_id, core_line);
}

static void inline _lru_unlock_hash(struct ocf_lru_iter *iter,
		ocf_core_id_t core_id, uint64_t core_line)
{
	if (iter->hash_locked != NULL && iter->hash_locked(
			iter->context,
			core_id, core_line)) {
		return;
	}

	ocf_hb_cline_naked_unlock_wr(
			&iter->cache->metadata.lock,
			core_id, core_line);
}

static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
		ocf_cache_line_t cache_line,
		ocf_core_id_t *core_id, uint64_t *core_line)

{
	if (!_lru_trylock_cacheline(iter, cache_line))
		return false;

	ocf_metadata_get_core_info(iter->cache, cache_line,
			core_id, core_line);

	if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
		_lru_unlock_cacheline(iter, cache_line);
		return false;
	}

	if (!ocf_cache_line_is_locked_exclusively(iter->cache,
			cache_line)) {
		_lru_unlock_hash(iter, *core_id, *core_line);
		_lru_unlock_cacheline(iter, cache_line);
		return false;
	}

	return true;
}
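
_lru_iter_evition_lock above follows an acquire-in-order, roll-back-on-failure pattern: cacheline lock first, then hash bucket, then an exclusivity check, undoing the earlier locks whenever a later step fails. A self-contained sketch of that generic pattern, with stand-in types rather than the OCF locking API:

#include <stdbool.h>

struct two_locks {
	bool a_held;
	bool b_held;
};

static bool try_lock_a(struct two_locks *l)
{
	if (l->a_held)
		return false;
	l->a_held = true;
	return true;
}

static bool try_lock_b(struct two_locks *l)
{
	if (l->b_held)
		return false;
	l->b_held = true;
	return true;
}

static void unlock_a(struct two_locks *l)
{
	l->a_held = false;
}

/* Either both locks are held on return (true), or neither (false). */
static bool try_lock_both(struct two_locks *l)
{
	if (!try_lock_a(l))
		return false;          /* nothing to undo yet */
	if (!try_lock_b(l)) {
		unlock_a(l);           /* roll back the partial acquisition */
		return false;
	}
	return true;
}

int main(void)
{
	struct two_locks l = { false, false };

	return try_lock_both(&l) ? 0 : 1;
}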

/* Get next clean cacheline from tail of lru lists. Caller must not hold any
 * eviction list lock. Returned cacheline is read or write locked, depending on
 * iter->write_lock. Returned cacheline has corresponding metadata hash bucket
 * locked. Cacheline is moved to the head of lru list before being returned */
static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
		ocf_core_id_t *core_id, uint64_t *core_line)
{
	uint32_t curr_evp;
	ocf_cache_line_t cline;
	ocf_cache_t cache = iter->cache;
	struct ocf_user_part *part = iter->part;
	struct ocf_lru_list *list;

	do {
		curr_evp = _lru_next_evp(iter);

		while (iter->curr_cline[curr_evp] == end_marker) {
			if (!_lru_evp_is_empty(iter)) {
		ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);

		list = evp_lru_get_list(part, curr_evp, iter->clean);

		cline = list->tail;
		while (cline != end_marker && !_lru_iter_evition_lock(iter,
				cline, core_id, core_line)) {
			cline = ocf_metadata_get_eviction_policy(
					iter->cache, cline)->lru.prev;
		}

		if (cline != end_marker) {
			remove_lru_list(cache, list, cline);
			add_lru_head(cache, list, cline);
			balance_lru_list(cache, list);
		}

		ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, curr_evp);

		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
			/* mark list as empty */
			_lru_evp_set_empty(iter);
		}
			if (_lru_evp_all_empty(iter)) {
				/* all lists empty */
				return end_marker;
			}
	} while (cline == end_marker && !_lru_evp_all_empty(iter));

	return cline;
}

/* Get next dirty cacheline from tail of lru lists. Caller must hold all
 * eviction list locks during entire iteration process. Returned cacheline
 * is read or write locked, depending on iter->write_lock */
static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
{
	uint32_t curr_evp;
	ocf_cache_line_t cline;

	do {
		curr_evp = _lru_next_evp(iter);
		cline = iter->curr_cline[curr_evp];

		while (cline != end_marker && !_lru_trylock_cacheline(iter,
				cline)) {
			cline = ocf_metadata_get_eviction_policy(
					iter->cache, cline)->lru.prev;
		}
		if (cline != end_marker) {
			iter->curr_cline[curr_evp] =
				ocf_metadata_get_eviction_policy(
						iter->cache , cline)->lru.prev;
		}

		node = &ocf_metadata_get_eviction_policy(iter->cache,
				iter->curr_cline[curr_evp])->lru;
		ret = iter->curr_cline[curr_evp];
		iter->curr_cline[curr_evp] = node->prev;
		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
			/* mark list as empty */
			_lru_evp_set_empty(iter);
		}
	} while (cline == end_marker && !_lru_evp_all_empty(iter));

	return ret;
	return cline;
}

static void evp_lru_clean_end(void *private_data, int error)
{
	struct ocf_lru_iter *iter = private_data;
	struct ocf_part_cleaning_ctx *ctx = private_data;
	unsigned i;

	ocf_refcnt_dec(&iter->part->cleaning);
	for (i = 0; i < OCF_EVICTION_CLEAN_SIZE; i++) {
		if (ctx->cline[i] != end_marker)
			ocf_cache_line_unlock_rd(ctx->cache->device->concurrency
					.cache_line, ctx->cline[i]);
	}

	ocf_refcnt_dec(&ctx->counter);
}

static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
		uint32_t item, ocf_cache_line_t *line)
static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
		uint32_t idx, ocf_cache_line_t *line)
{
	struct ocf_lru_iter *iter = getter_context;
	ocf_cache_line_t cline;

	while (true) {
		cline = lru_iter_next(iter);

		if (cline == end_marker)
			break;

		/* Prevent evicting already locked items */
		if (ocf_cache_line_is_used(
				cache->device->concurrency.cache_line,
				cline)) {
			continue;
		}

		ENV_BUG_ON(!metadata_test_dirty(cache, cline));

		*line = cline;
		return 0;
	}
	struct ocf_part_cleaning_ctx *ctx = getter_context;

	if (ctx->cline[idx] == end_marker)
		return -1;

	ENV_BUG_ON(!metadata_test_dirty(ctx->cache, ctx->cline[idx]));
	*line = ctx->cline[idx];

	return 0;
}
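
The rewritten getter reduces to an array lookup because the lines are already selected and locked in evp_lru_clean below. Under the cleaner's getter contract, the callback fills *line for a given index and returns 0, or returns nonzero when there is no entry. A hedged sketch of a consumer of such a contract (illustrative names, not the OCF internals):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t cache_line_t;
#define END_MARKER ((cache_line_t)-1)
#define CLEAN_SIZE 32u

struct clean_ctx {
	cache_line_t cline[CLEAN_SIZE];
};

/* Getter: 0 on success, nonzero when index idx has no entry. */
static int clean_get(struct clean_ctx *ctx, uint32_t idx, cache_line_t *line)
{
	if (idx >= CLEAN_SIZE || ctx->cline[idx] == END_MARKER)
		return -1;
	*line = ctx->cline[idx];
	return 0;
}

int main(void)
{
	struct clean_ctx ctx = { .cline = { 7, 3, END_MARKER } };
	cache_line_t line;
	uint32_t idx;

	/* consumer walks indices until the getter reports no more items */
	for (idx = 0; clean_get(&ctx, idx, &line) == 0; idx++)
		printf("clean line %u\n", (unsigned)line);
	return 0;
}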

static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
		struct ocf_user_part *part, uint32_t count)
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
		ocf_queue_t io_queue, uint32_t count)
{
	struct ocf_refcnt *counter = &part->cleaning;
	struct ocf_part_cleaning_ctx *ctx = &part->cleaning;
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = true,
		.lock_cacheline = false,
		.lock_metadata = true,
		.do_sort = true,

		.cmpl_context = &part->eviction_clean_iter,
		.cmpl_context = &part->cleaning,
		.cmpl_fn = evp_lru_clean_end,

		.getter = evp_lru_clean_getter,
		.getter_context = &part->eviction_clean_iter,
		.getter = evp_lru_clean_get,
		.getter_context = &part->cleaning,

		.count = count > 32 ? 32 : count,
		.count = min(count, OCF_EVICTION_CLEAN_SIZE),

		.io_queue = io_queue
	};
	ocf_cache_line_t *cline = part->cleaning.cline;
	struct ocf_lru_iter iter;
	unsigned evp;
	int cnt;
	unsigned i;
	unsigned lock_idx;

	if (ocf_mngt_cache_is_locked(cache))
		return;

	cnt = ocf_refcnt_inc(counter);
	cnt = ocf_refcnt_inc(&ctx->counter);
	if (!cnt) {
		/* cleaner disabled by management operation */
		return;
	}

	if (cnt > 1) {
		/* cleaning already running for this partition */
		ocf_refcnt_dec(counter);
		ocf_refcnt_dec(&ctx->counter);
		return;
	}

	lru_iter_init(&part->eviction_clean_iter, cache, part,
			part->eviction_clean_iter.evp, false);
	part->cleaning.cache = cache;
	evp = io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;

	lock_idx = ocf_metadata_concurrency_next_idx(io_queue);
	ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);

	OCF_METADATA_EVICTION_WR_LOCK_ALL();

	lru_iter_cleaning_init(&iter, cache, part, evp);
	i = 0;
	while (i < OCF_EVICTION_CLEAN_SIZE) {
		cline[i] = lru_iter_cleaning_next(&iter);
		if (cline[i] == end_marker)
			break;
		i++;
	}
	while (i < OCF_EVICTION_CLEAN_SIZE)
		cline[i++] = end_marker;

	OCF_METADATA_EVICTION_WR_UNLOCK_ALL();

	ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);

	ocf_cleaner_fire(cache, &attribs);
}

static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
{
	env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
}

static void evp_lru_zero_line(ocf_cache_t cache, ocf_queue_t io_queue,
		ocf_cache_line_t line)
{
	struct ocf_request *req;
	ocf_core_id_t id;
	uint64_t addr, core_line;

	ocf_metadata_get_core_info(cache, line, &id, &core_line);
	addr = core_line * ocf_line_size(cache);

	req = ocf_req_new(io_queue, &cache->core[id], addr,
			ocf_line_size(cache), OCF_WRITE);
	if (!req)
		return;

	if (req->d2c) {
		/* cache device is being detached */
		ocf_req_put(req);
		return;
	}

	req->info.internal = true;
	req->complete = evp_lru_zero_line_complete;

	env_atomic_inc(&cache->pending_eviction_clines);

	ocf_engine_zero_line(req);
}

bool evp_lru_can_evict(ocf_cache_t cache)
{
	if (env_atomic_read(&cache->pending_eviction_clines) >=
@ -456,74 +605,86 @@ bool evp_lru_can_evict(ocf_cache_t cache)
	return true;
}

static bool dirty_pages_present(ocf_cache_t cache, struct ocf_user_part *part)
{
	uint32_t i;

	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
		if (evp_lru_get_list(part, i, false)->tail != end_marker)
			return true;
	}

	return false;
}

/* the caller must hold the metadata lock */
uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
uint32_t evp_lru_req_clines(struct ocf_request *req,
		struct ocf_user_part *part, uint32_t cline_no)
{
	struct ocf_lru_iter iter;
	uint32_t i;
	ocf_cache_line_t cline;
	uint64_t core_line;
	ocf_core_id_t core_id;
	ocf_cache_t cache = req->cache;
	bool cl_write_lock =
		(req->engine_cbs->get_lock_type(req) == ocf_engine_lock_write);
	unsigned evp;
	unsigned req_idx = 0;

	if (cline_no == 0)
		return 0;

	lru_iter_init(&iter, cache, part, part->next_eviction_list, true);
	if (unlikely(ocf_engine_unmapped_count(req) < cline_no)) {
		ocf_cache_log(req->cache, log_err, "Not enough space in"
				"request: unmapped %u, requested %u",
				ocf_engine_unmapped_count(req),
				cline_no);
		ENV_BUG();
	}

	evp = req->io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;

	lru_iter_eviction_init(&iter, cache, part, evp, cl_write_lock, req);

	i = 0;
	while (i < cline_no) {
		cline = lru_iter_next(&iter);
		if (!evp_lru_can_evict(cache))
			break;

		cline = lru_iter_eviction_next(&iter, &core_id, &core_line);

		if (cline == end_marker)
			break;

		if (!evp_lru_can_evict(cache))
			break;

		/* Prevent evicting already locked items */
		if (ocf_cache_line_is_used(
				cache->device->concurrency.cache_line,
				cline)) {
			continue;
		}

		ENV_BUG_ON(metadata_test_dirty(cache, cline));

		if (ocf_volume_is_atomic(&cache->device->volume)) {
			/* atomic cache, we have to trim cache lines before
			 * eviction
			 */
			evp_lru_zero_line(cache, io_queue, cline);
			continue;
		/* TODO: if atomic mode is restored, need to zero metadata
		 * before proceeding with cleaning (see version <= 20.12) */

		/* find next unmapped cacheline in request */
		while (req_idx + 1 < req->core_line_count &&
				req->map[req_idx].status != LOOKUP_MISS) {
			req_idx++;
		}

		ENV_BUG_ON(req->map[req_idx].status != LOOKUP_MISS);

		ocf_metadata_start_collision_shared_access(
				cache, cline);
		set_cache_line_invalid_no_flush(cache, 0,
				ocf_line_end_sector(cache),
				cline);
		metadata_clear_valid_sec(cache, cline, 0, ocf_line_end_sector(cache));
		ocf_metadata_remove_from_collision(cache, cline, part->id);
		ocf_metadata_end_collision_shared_access(
				cache, cline);

		_lru_unlock_hash(&iter, core_id, core_line);

		env_atomic_dec(&req->core->runtime_meta->cached_clines);
		env_atomic_dec(&req->core->runtime_meta->
				part_counters[part->id].cached_clines);

		ocf_map_cache_line(req, req_idx, cline);

		req->map[req_idx].status = LOOKUP_REMAPPED;
		ocf_engine_patch_req_info(cache, req, req_idx);

		if (cl_write_lock)
			req->map[req_idx].wr_locked = true;
		else
			req->map[req_idx].rd_locked = true;

		++req_idx;
		++i;
	}

	part->next_eviction_list = iter.evp;

	if (i < cline_no && dirty_pages_present(cache, part))
		evp_lru_clean(cache, io_queue, part, cline_no - i);

	/* Return number of clines that were really evicted */
	return i;
}
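
A noteworthy detail in the loop above is the pairing of each freed cacheline with the next unmapped entry of the request, found by scanning the map for LOOKUP_MISS and marking the slot LOOKUP_REMAPPED. A free-standing sketch of that scan, simplified for illustration only:

#include <stdio.h>

enum { LOOKUP_MISS = 0, LOOKUP_HIT = 1, LOOKUP_REMAPPED = 2 };

/* Return the index of the next LOOKUP_MISS entry at or after 'from',
 * or 'count' if none is left. */
static unsigned next_unmapped(const int *status, unsigned count, unsigned from)
{
	while (from < count && status[from] != LOOKUP_MISS)
		from++;
	return from;
}

int main(void)
{
	int status[] = { LOOKUP_HIT, LOOKUP_MISS, LOOKUP_REMAPPED, LOOKUP_MISS };
	unsigned idx = next_unmapped(status, 4, 0);

	while (idx < 4) {
		status[idx] = LOOKUP_REMAPPED; /* pair with a freed cacheline */
		printf("remapped request entry %u\n", idx);
		idx = next_unmapped(status, 4, idx + 1);
	}
	return 0;
}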


@ -9,11 +9,12 @@
#include "lru_structs.h"

struct ocf_user_part;
struct ocf_request;

void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_cache *cache, ocf_queue_t io_queue,
uint32_t evp_lru_req_clines(struct ocf_request *req,
		struct ocf_user_part *part, uint32_t cline_no);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_user_part *part);
@ -21,5 +22,6 @@ void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_user_part *part,
		uint32_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_user_part *part,
		uint32_t cline);

void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
		ocf_queue_t io_queue, uint32_t count);
#endif
@ -52,8 +52,8 @@ static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
	return true;
}

static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
		ocf_queue_t io_queue, struct ocf_user_part *part,
static inline uint32_t ocf_eviction_need_space(ocf_cache_t cache,
		struct ocf_request *req, struct ocf_user_part *part,
		uint32_t clines)
{
	uint8_t type;
@ -64,11 +64,7 @@ static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
	ENV_BUG_ON(type >= ocf_eviction_max);

	if (likely(evict_policy_ops[type].req_clines)) {
		/*
		 * This is called under METADATA WR lock. No need to get
		 * eviction lock.
		 */
		result = evict_policy_ops[type].req_clines(cache, io_queue,
		result = evict_policy_ops[type].req_clines(req,
				part, clines);
	}

@ -101,4 +97,18 @@ static inline void ocf_eviction_initialize(struct ocf_cache *cache,
	}
}

static inline void ocf_eviction_flush_dirty(ocf_cache_t cache,
		struct ocf_user_part *part, ocf_queue_t io_queue,
		uint32_t count)
{
	uint8_t type = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(type >= ocf_eviction_max);

	if (likely(evict_policy_ops[type].flush_dirty)) {
		evict_policy_ops[type].flush_dirty(cache, part, io_queue,
				count);
	}
}

#endif /* LAYER_EVICTION_POLICY_OPS_H_ */

@ -52,7 +52,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
	uint64_t start_line, end_line;
	int ret = 0;
	struct ocf_cache_line_concurrency *c =
		cache->device->concurrency.cache_line;
		ocf_cache_line_concurrency(cache);

	start_line = ocf_bytes_2_lines(cache, start_byte);
	end_line = ocf_bytes_2_lines(cache, end_byte);

@ -33,9 +33,13 @@ struct ocf_user_part_runtime {
	struct cleaning_policy cleaning;
};

typedef bool ( *_lru_hash_locked_pfn)(void *context,
		ocf_core_id_t core_id, uint64_t core_line);

/* Iterator state, visiting all eviction lists within a partition
   in round robin order */
struct ocf_lru_iter {
struct ocf_lru_iter
{
	/* cache object */
	ocf_cache_t cache;
	/* target partition */
@ -49,16 +53,30 @@ struct ocf_lru_iter {
	uint32_t num_avail_evps;
	/* current eviction list index */
	uint32_t evp;
	/* callback to determine whether given hash bucket is already
	 * locked by the caller */
	_lru_hash_locked_pfn hash_locked;
	/* hash_locked private data */
	void *context;
	/* 1 if iterating over clean lists, 0 if over dirty */
	bool clean : 1;
	/* 1 if cacheline is to be locked for write, 0 if for read */
	bool cl_lock_write : 1;
};

#define OCF_EVICTION_CLEAN_SIZE 32U

struct ocf_part_cleaning_ctx {
	ocf_cache_t cache;
	struct ocf_refcnt counter;
	ocf_cache_line_t cline[OCF_EVICTION_CLEAN_SIZE];
};
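
The structure above replaces the per-partition iterator with a fixed batch of up to OCF_EVICTION_CLEAN_SIZE pre-selected lines plus a refcount gating a single cleaner instance. A toy model of that lifecycle, with deliberately simplified semantics (in OCF a zero refcount result means cleaning was disabled by management, which this sketch omits):

#include <stdint.h>
#include <stdio.h>

#define CLEAN_SIZE 32u
#define END_MARKER UINT32_MAX

struct cleaning_batch {
	int refcnt;                    /* gates a single cleaner at a time */
	uint32_t cline[CLEAN_SIZE];    /* pre-selected, pre-locked lines */
};

static int batch_try_start(struct cleaning_batch *b)
{
	return ++b->refcnt == 1;       /* only the first caller proceeds */
}

static void batch_finish(struct cleaning_batch *b)
{
	unsigned i;

	/* END_MARKER padding tells the completion path where to stop */
	for (i = 0; i < CLEAN_SIZE && b->cline[i] != END_MARKER; i++)
		printf("unlock line %u\n", (unsigned)b->cline[i]);
	b->refcnt--;
}

int main(void)
{
	struct cleaning_batch b = { 0 };
	unsigned i, n = 0;

	if (!batch_try_start(&b))
		return 0;

	b.cline[n++] = 11;             /* collected under eviction locks */
	b.cline[n++] = 42;
	for (i = n; i < CLEAN_SIZE; i++)
		b.cline[i] = END_MARKER;

	batch_finish(&b);              /* in OCF: the completion callback */
	return 0;
}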

struct ocf_user_part {
	struct ocf_user_part_config *config;
	struct ocf_user_part_runtime *runtime;
	struct ocf_refcnt cleaning;
	ocf_part_id_t id;

	struct ocf_lru_iter eviction_clean_iter;
	uint32_t next_eviction_list;
	struct ocf_part_cleaning_ctx cleaning;
	struct ocf_lst_entry lst_valid;
};

@ -169,7 +169,7 @@ static void __init_partitions(ocf_cache_t cache)

	/* Add other partition to the cache and make it as dummy */
	for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
		ocf_refcnt_freeze(&cache->user_parts[i_part].cleaning);
		ocf_refcnt_freeze(&cache->user_parts[i_part].cleaning.counter);

		if (i_part == PARTITION_DEFAULT)
			continue;

@ -81,7 +81,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
	}

	if (!ocf_cache_line_try_lock_wr(
			cache->device->concurrency.cache_line,
			ocf_cache_line_concurrency(cache),
			curr_cline)) {
		break;
	}
@ -90,7 +90,8 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
	ocf_purge_cleaning_policy(cache, curr_cline);
	ocf_metadata_sparse_cache_line(cache, curr_cline);

	ocf_cache_line_unlock_wr(cache->device->concurrency.cache_line,
	ocf_cache_line_unlock_wr(
			ocf_cache_line_concurrency(cache),
			curr_cline);

	if (prev_cline != cache->device->collision_table_entries)
@ -438,7 +438,8 @@ static void _ocf_mngt_flush_container(
	req->priv = fc;

	fc->req = req;
	fc->attribs.cache_line_lock = true;
	fc->attribs.lock_cacheline = true;
	fc->attribs.lock_metadata = false;
	fc->attribs.cmpl_context = fc;
	fc->attribs.cmpl_fn = _ocf_mngt_flush_portion_end;
	fc->attribs.io_queue = cache->mngt_queue;
@ -21,6 +21,9 @@ struct ocf_queue {
	/* per-queue free running global metadata lock index */
	unsigned lock_idx;

	/* per-queue free running eviction list index */
	unsigned eviction_idx;

	/* Tracing reference counter */
	env_atomic64 trace_ref_cntr;

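The new eviction_idx is a free-running counter, so each queue starts its next eviction scan on a different list and concurrent queues tend to spread across the eviction lists. A toy model of that round-robin start-list selection:

#include <stdio.h>

#define NUM_EVICTION_LISTS 32u

/* Each queue advances its own free-running counter, so concurrent
 * queues tend to begin scanning on different eviction lists. */
struct queue_model {
	unsigned eviction_idx;
};

static unsigned next_start_list(struct queue_model *q)
{
	return q->eviction_idx++ % NUM_EVICTION_LISTS;
}

int main(void)
{
	struct queue_model q = { 0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("scan starts at list %u\n", next_start_list(&q));
	return 0;
}
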
@ -18,6 +18,8 @@ struct ocf_req_info {
	unsigned int hit_no;
	unsigned int invalid_no;
	unsigned int re_part_no;
	unsigned int seq_no;
	unsigned int insert_no;

	uint32_t dirty_all;
	/*!< Number of dirty lines in request */
@ -25,15 +27,15 @@ struct ocf_req_info {
	uint32_t dirty_any;
	/*!< Indicates that at least one request is dirty */

	uint32_t seq_req : 1;
	/*!< Sequential cache request flag. */

	uint32_t flush_metadata : 1;
	/*!< This bit tells if metadata flushing is required */

	uint32_t mapping_error : 1;
	/*!< Core lines in this request were not mapped into cache */

	uint32_t clean_eviction : 1;
	/*!< Eviction failed, need to request cleaning */

	uint32_t core_error : 1;
	/*!< Error occurred during I/O on core device */

@ -104,6 +106,9 @@ struct ocf_request {
	struct ocf_io_internal ioi;
	/*!< OCF IO associated with request */

	const struct ocf_engine_callbacks *engine_cbs;
	/*!< Engine owning the request */

	env_atomic ref_count;
	/*!< Reference usage count, once OCF request reaches zero it
	 * will be de-initialized. Get/Put method are intended to modify
@ -395,6 +400,16 @@ static inline bool ocf_req_test_mapping_error(struct ocf_request *req)
	return req->info.mapping_error;
}

static inline void ocf_req_set_clean_eviction(struct ocf_request *req)
{
	req->info.clean_eviction = true;
}

static inline bool ocf_req_test_clean_eviction(struct ocf_request *req)
{
	return req->info.clean_eviction;
}

/**
 * @brief Return OCF request reference count
 *
@ -9,8 +9,7 @@
#include "ocf/ocf_debug.h"
#include "utils/utils_cache_line.h"

#define SEQ_CUTOFF_FULL_MARGIN \
		(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
#define SEQ_CUTOFF_FULL_MARGIN OCF_PENDING_EVICTION_LIMIT

static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache,
		struct ocf_request *req)
@ -44,8 +44,7 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
	 * only valid bits
	 */
	if (!is_valid && !ocf_cache_line_are_waiters(
			cache->device->concurrency.cache_line,
			line)) {
			ocf_cache_line_concurrency(cache), line)) {
		ocf_purge_eviction_policy(cache, line);
		ocf_metadata_remove_cache_line(cache, line);
	}
@ -12,6 +12,7 @@
#include "utils_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"
#include "../ocf_queue_priv.h"

#define OCF_UTILS_CLEANER_DEBUG 0

@ -46,7 +47,7 @@ static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
		return NULL;

	req->info.internal = true;
	req->info.cleaner_cache_line_lock = attribs->cache_line_lock;
	req->info.cleaner_cache_line_lock = attribs->lock_cacheline;

	/* Allocate pages for cleaning IO */
	req->data = ctx_data_alloc(cache->owner,
@ -213,8 +214,7 @@ static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)

	OCF_DEBUG_TRACE(req->cache);

	return ocf_req_async_lock_rd(
			req->cache->device->concurrency.cache_line,
	return ocf_req_async_lock_rd(ocf_cache_line_concurrency(req->cache),
			req, _ocf_cleaner_on_resume);
}

@ -323,7 +323,6 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)

	OCF_DEBUG_TRACE(req->cache);

	ocf_metadata_start_exclusive_access(&cache->metadata.lock);
	/* Update metadata */
	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->status == LOOKUP_MISS)
@ -336,22 +335,29 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)

		cache_line = iter->coll_idx;

		if (!metadata_test_dirty(cache, cache_line))
			continue;
		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock,
				req->lock_idx, req->map[i].core_id,
				req->map[i].core_line);

		if (metadata_test_dirty(cache, cache_line)) {
			ocf_metadata_get_core_and_part_id(cache, cache_line,
					&core_id, &req->part_id);
			req->core = &cache->core[core_id];

		ocf_metadata_start_collision_shared_access(cache, cache_line);
		set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
				i);
		ocf_metadata_end_collision_shared_access(cache, cache_line);
			ocf_metadata_start_collision_shared_access(cache,
					cache_line);
			set_cache_line_clean(cache, 0,
					ocf_line_end_sector(cache), req, i);
			ocf_metadata_end_collision_shared_access(cache,
					cache_line);
		}

		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock,
				req->lock_idx, req->map[i].core_id,
				req->map[i].core_line);
	}

	ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end);
	ocf_metadata_end_exclusive_access(&cache->metadata.lock);

	return 0;
}

@ -577,6 +583,7 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req)
{
	uint32_t i;
	struct ocf_map_info *iter;
	ocf_cache_t cache = req->cache;

	OCF_DEBUG_TRACE(req->cache);

@ -595,7 +602,15 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req)
		if (iter->status == LOOKUP_MISS)
			continue;

		ocf_hb_cline_prot_lock_rd(&cache->metadata.lock,
				req->lock_idx, req->map[i].core_id,
				req->map[i].core_line);

		_ocf_cleaner_core_submit_io(req, iter);

		ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock,
				req->lock_idx, req->map[i].core_id,
				req->map[i].core_line);
	}

	/* Protect IO completion race */
@ -833,6 +848,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
	int err;
	ocf_core_id_t core_id;
	uint64_t core_sector;
	bool skip;

	/* Allocate master request */
	master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
@ -855,7 +871,6 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
	env_atomic_inc(&master->master_remaining);

	for (i = 0; i < count; i++) {

		/* when request hasn't yet been allocated or is just issued */
		if (!req) {
			if (max > count - i) {
@ -886,12 +901,23 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
			continue;
		}

		/* Get mapping info */
		ocf_metadata_get_core_info(cache, cache_line, &core_id,
				&core_sector);

		if (attribs->lock_metadata) {
			ocf_hb_cline_prot_lock_rd(&cache->metadata.lock,
					req->lock_idx, core_id, core_sector);
		}

		skip = false;

		/* when line already cleaned - rare condition under heavy
		 * I/O workload.
		 */
		if (!metadata_test_dirty(cache, cache_line)) {
			OCF_DEBUG_MSG(cache, "Not dirty");
			continue;
			skip = true;
		}

		if (!metadata_test_valid_any(cache, cache_line)) {
@ -902,12 +928,16 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
			 * Cache line (sector) cannot be dirty and not valid
			 */
			ENV_BUG();
			continue;
			skip = true;
		}

		/* Get mapping info */
		ocf_metadata_get_core_info(cache, cache_line, &core_id,
				&core_sector);
		if (attribs->lock_metadata) {
			ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock,
					req->lock_idx, core_id, core_sector);
		}

		if (skip)
			continue;

		if (unlikely(!cache->core[core_id].opened)) {
			OCF_DEBUG_MSG(cache, "Core object inactive");
@ -931,6 +961,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
			i_out = 0;
			req = NULL;
		}

	}

	if (req) {
@ -1022,7 +1053,7 @@ void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
	ocf_part_id_t part_id;

	for_each_part(cache, curr_part, part_id)
		ocf_refcnt_freeze(&curr_part->cleaning);
		ocf_refcnt_freeze(&curr_part->cleaning.counter);
}

void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
@ -1031,7 +1062,7 @@ void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
	ocf_part_id_t part_id;

	for_each_part(cache, curr_part, part_id)
		ocf_refcnt_unfreeze(&curr_part->cleaning);
		ocf_refcnt_unfreeze(&curr_part->cleaning.counter);
}

static void ocf_cleaner_refcnt_register_zero_cb_finish(void *priv)
@ -1055,7 +1086,7 @@ void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,

	for_each_part(cache, curr_part, part_id) {
		env_atomic_inc(&ctx->waiting);
		ocf_refcnt_register_zero_cb(&curr_part->cleaning,
		ocf_refcnt_register_zero_cb(&curr_part->cleaning.counter,
				ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
	}

@ -26,7 +26,8 @@ typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache,
 * @brief Cleaning attributes for clean request
 */
struct ocf_cleaner_attribs {
	uint8_t cache_line_lock : 1; /*!< Clean under cache line lock */
	uint8_t lock_cacheline : 1; /*!< Cleaner to lock cachelines on its own */
	uint8_t lock_metadata : 1; /*!< Cleaner to lock metadata on its own */

	uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */

@ -40,7 +41,7 @@ struct ocf_cleaner_attribs {
	void *getter_context;
	/*!< Context for getting cache lines */
	uint32_t getter_item;
	/*!< Additional variable that can be used by cleaner caller
	/*!< Additional variable that can be used by cleaner call
	 * to iterate over items
	 */

@ -103,7 +103,12 @@ void ocf_part_move(struct ocf_request *req)
			continue;
		}

		if (entry->status != LOOKUP_HIT) {
		/* Moving cachelines to another partition is needed only
		 * for those already mapped before this request, which
		 * indicates either HIT or REMAPPED.
		 */
		if (entry->status != LOOKUP_HIT &&
				entry->status != LOOKUP_REMAPPED) {
			/* No HIT */
			continue;
		}

@ -2,8 +2,8 @@
 * <tested_file_path>src/engine/engine_common.c</tested_file_path>
 * <tested_function>ocf_prepare_clines_miss</tested_function>
 * <functions_to_leave>
 * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
 * ONE FUNCTION PER LINE
 * ocf_prepare_clines_evict
 * ocf_engine_evict
 * </functions_to_leave>
 */

@ -36,6 +36,11 @@

#include "engine/engine_common.c/prepare_clines_miss_generated_wraps.c"

struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t cache)
{
	return NULL;
}

void __wrap_ocf_req_hash_lock_upgrade(struct ocf_request *req)
{
}
@ -66,13 +71,6 @@ void __wrap_ocf_metadata_end_exclusive_access(
{
}

int __wrap_space_managment_evict_do(struct ocf_cache *cache,
		struct ocf_request *req, uint32_t evict_cline_no)
{
	function_called();
	return mock();
}

bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part)
{
	return mock();
@ -93,9 +91,21 @@ void __wrap_ocf_req_set_mapping_error(struct ocf_request *req)
	function_called();
}

int __wrap_space_managment_evict_do(struct ocf_request *req)
{
	function_called();
	return mock();
}

uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
{
	return 100;
}

static void ocf_prepare_clines_miss_test01(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };
	print_test_description("Target part is disabled and empty\n");
	will_return(__wrap_ocf_part_is_enabled, false);
	expect_function_call(__wrap_ocf_req_set_mapping_error);
@ -104,7 +114,9 @@ static void ocf_prepare_clines_miss_test01(void **state)

static void ocf_prepare_clines_miss_test02(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };

	print_test_description("Target part is disabled but has cachelines assigned.\n");
	print_test_description("\tMark mapping error\n");

@ -116,20 +128,18 @@ static void ocf_prepare_clines_miss_test02(void **state)

static void ocf_prepare_clines_miss_test03(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };

	print_test_description("Target part is enabled but doesn't have enough space.\n");
	print_test_description("\tEviction is ok and cachelines lock is acquired.\n");

	will_return(__wrap_ocf_part_is_enabled, true);
	will_return(__wrap_ocf_part_has_space, false);

	will_return(__wrap_ocf_part_has_space, false);
	will_return_always(__wrap_ocf_part_has_space, false);
	expect_function_call(__wrap_space_managment_evict_do);
	will_return(__wrap_space_managment_evict_do, LOOKUP_MAPPED);
	will_return_always(__wrap_space_managment_evict_do, LOOKUP_INSERTED);

	expect_function_call(__wrap_ocf_engine_map);

	will_return(__wrap_ocf_req_test_mapping_error, false);
	will_return_always(__wrap_ocf_req_test_mapping_error, false);

	will_return(__wrap_lock_clines, 0);
	expect_function_call(__wrap_lock_clines);
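
The tests here lean on cmocka's will_return_always in place of repeated will_return calls, which keeps a stub valid however many times the code under test invokes it. A minimal hedged example of the difference, using the standard cmocka API:

#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <setjmp.h>
#include <cmocka.h>

static int stubbed(void)
{
	return mock();
}

static void test_always(void **state)
{
	(void)state;

	/* will_return queues exactly one value per call; will_return_always
	 * keeps returning the same value no matter how many calls follow. */
	will_return_always(stubbed, 42);

	assert_int_equal(stubbed(), 42);
	assert_int_equal(stubbed(), 42);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_always),
	};

	return cmocka_run_group_tests(tests, NULL, NULL);
}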
@ -139,57 +149,38 @@ static void ocf_prepare_clines_miss_test03(void **state)

static void ocf_prepare_clines_miss_test04(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };

	print_test_description("Target part is enabled but doesn't have enough space.\n");
	print_test_description("\tEviction failed\n");

	will_return(__wrap_ocf_part_is_enabled, true);
	will_return(__wrap_ocf_part_has_space, false);
	will_return_always(__wrap_ocf_part_has_space, false);

	will_return(__wrap_ocf_part_has_space, false);
	expect_function_call(__wrap_space_managment_evict_do);
	will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
	expect_function_call(__wrap_ocf_req_set_mapping_error);

	assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}

static void ocf_prepare_clines_miss_test05(void **state)
{
	struct ocf_request req = {};
	print_test_description("Target part is enabled but doesn't have enough space.\n");
	print_test_description("Eviction is ok, but mapping failed.\n");

	will_return(__wrap_ocf_part_has_space, false);
	will_return(__wrap_ocf_part_has_space, false);

	expect_function_call(__wrap_space_managment_evict_do);
	will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

	will_return(__wrap_ocf_part_is_enabled, true);

	expect_function_call(__wrap_ocf_engine_map);
	will_return(__wrap_ocf_req_test_mapping_error, true);
	will_return_always(__wrap_ocf_req_test_mapping_error, true);

	assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}

static void ocf_prepare_clines_miss_test06(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };

	print_test_description("Target part is enabled but doesn't have enough space.\n");
	print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n");

	will_return(__wrap_ocf_part_has_space, false);
	will_return(__wrap_ocf_part_has_space, false);
	will_return_always(__wrap_ocf_part_has_space, false);

	expect_function_call(__wrap_space_managment_evict_do);
	will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

	will_return(__wrap_ocf_part_is_enabled, true);

	expect_function_call(__wrap_ocf_engine_map);
	will_return(__wrap_ocf_req_test_mapping_error, false);
	will_return_always(__wrap_ocf_req_test_mapping_error, false);

	expect_function_call(__wrap_lock_clines);
	will_return(__wrap_lock_clines, -OCF_ERR_NO_LOCK);
@ -201,20 +192,20 @@ static void ocf_prepare_clines_miss_test06(void **state)

static void ocf_prepare_clines_miss_test07(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };

	print_test_description("Target part is enabled but doesn't have enough space.\n");
	print_test_description("Eviction and mapping were ok, lock not acquired.\n");

	will_return(__wrap_ocf_part_has_space, false);
	will_return(__wrap_ocf_part_has_space, false);
	will_return_always(__wrap_ocf_part_has_space, false);

	expect_function_call(__wrap_space_managment_evict_do);
	will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

	will_return(__wrap_ocf_part_is_enabled, true);

	expect_function_call(__wrap_ocf_engine_map);
	will_return(__wrap_ocf_req_test_mapping_error, false);
	will_return_always(__wrap_ocf_req_test_mapping_error, false);

	expect_function_call(__wrap_lock_clines);
	will_return(__wrap_lock_clines, OCF_LOCK_NOT_ACQUIRED);
@ -224,15 +215,17 @@ static void ocf_prepare_clines_miss_test07(void **state)

static void ocf_prepare_clines_miss_test08(void **state)
{
	struct ocf_request req = {};
	struct ocf_cache cache;
	struct ocf_request req = {.cache = &cache };

	print_test_description("Target part is enabled has enough space.\n");
	print_test_description("\tMapping and cacheline lock are both ok\n");

	will_return(__wrap_ocf_part_is_enabled, true);
	will_return(__wrap_ocf_part_has_space, true);
	will_return_always(__wrap_ocf_part_has_space, true);

	expect_function_call(__wrap_ocf_engine_map);
	will_return(__wrap_ocf_req_test_mapping_error, false);
	will_return_always(__wrap_ocf_req_test_mapping_error, false);

	expect_function_call(__wrap_lock_clines);
	will_return(__wrap_lock_clines, OCF_LOCK_ACQUIRED);
@ -247,7 +240,6 @@ int main(void)
		cmocka_unit_test(ocf_prepare_clines_miss_test02),
		cmocka_unit_test(ocf_prepare_clines_miss_test03),
		cmocka_unit_test(ocf_prepare_clines_miss_test04),
		cmocka_unit_test(ocf_prepare_clines_miss_test05),
		cmocka_unit_test(ocf_prepare_clines_miss_test06),
		cmocka_unit_test(ocf_prepare_clines_miss_test07),
		cmocka_unit_test(ocf_prepare_clines_miss_test08)

@ -27,9 +27,9 @@ struct test_cache
{
	struct ocf_cache cache;
	struct ocf_user_part_config part[OCF_IO_CLASS_MAX];
	struct ocf_user_part upart[OCF_IO_CLASS_MAX];
	uint32_t overflow[OCF_IO_CLASS_MAX];
	uint32_t evictable[OCF_IO_CLASS_MAX];
	uint32_t req_unmapped;
};

bool __wrap_ocf_eviction_can_evict(ocf_cache_t cache)
@ -62,10 +62,12 @@ uint32_t __wrap_ocf_eviction_need_space(struct ocf_cache *cache,

	tcache->overflow[part->id] -= overflown_consumed;
	tcache->evictable[part->id] -= clines;
	tcache->req_unmapped -= clines;

	check_expected(part);
	check_expected(clines);
	function_called();

	return mock();
}

@ -157,7 +159,7 @@ static struct ocf_lst_entry *_list_getter(
{
	struct test_cache* tcache = cache;

	return &tcache->upart[idx].lst_valid;
	return &tcache->cache.user_parts[idx].lst_valid;
}

static void init_part_list(struct test_cache *tcache)
@ -165,23 +167,30 @@ static void init_part_list(struct test_cache *tcache)
	unsigned i;

	for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
		tcache->upart[i].id = i;
		tcache->upart[i].config = &tcache->part[i];
		tcache->upart[i].config->priority = i+1;
		tcache->upart[i].config->flags.eviction = 1;
		tcache->cache.user_parts[i].id = i;
		tcache->cache.user_parts[i].config = &tcache->part[i];
		tcache->cache.user_parts[i].config->priority = i+1;
		tcache->cache.user_parts[i].config->flags.eviction = 1;
	}

	ocf_lst_init((ocf_cache_t)tcache, &tcache->cache.lst_part, OCF_IO_CLASS_MAX,
			_list_getter, ocf_part_lst_cmp_valid);
	for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
		ocf_lst_init_entry(&tcache->cache.lst_part, &tcache->upart[i].lst_valid);
		ocf_lst_init_entry(&tcache->cache.lst_part, &tcache->cache.user_parts[i].lst_valid);
		ocf_lst_add_tail(&tcache->cache.lst_part, i);
	}
}

uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
{
	struct test_cache* tcache = (struct test_cache*)req->cache;

	return tcache->req_unmapped;
}

#define _expect_evict_call(tcache, part_id, req_count, ret_count) \
do { \
	expect_value(__wrap_ocf_eviction_need_space, part, &tcache.upart[part_id]); \
	expect_value(__wrap_ocf_eviction_need_space, part, &tcache.cache.user_parts[part_id]); \
	expect_value(__wrap_ocf_eviction_need_space, clines, req_count); \
	expect_function_call(__wrap_ocf_eviction_need_space); \
	will_return(__wrap_ocf_eviction_need_space, ret_count); \
@ -190,6 +199,7 @@ static void init_part_list(struct test_cache *tcache)
static void ocf_evict_do_test01(void **state)
{
	struct test_cache tcache = {};
	struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
	unsigned evicted;

	print_test_description("one IO class, no overflow\n");
@ -197,16 +207,17 @@ static void ocf_evict_do_test01(void **state)
	init_part_list(&tcache);

	tcache.evictable[10] = 100;
	tcache.req_unmapped = 50;

	_expect_evict_call(tcache, 10, 50, 50);

	evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 50, &tcache.upart[0]);
	evicted = ocf_evict_do(&req);
	assert_int_equal(evicted, 50);
}

static void ocf_evict_do_test02(void **state)
{
	struct test_cache tcache = {};
	struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
	unsigned i;
	unsigned evicted;

@ -216,16 +227,18 @@ static void ocf_evict_do_test02(void **state)

	tcache.evictable[10] = 100;
	tcache.overflow[10] = 100;
	tcache.req_unmapped = 50;

	_expect_evict_call(tcache, 10, 50, 50);

	evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 50, &tcache.upart[0]);
	evicted = ocf_evict_do(&req);
	assert_int_equal(evicted, 50);
}

static void ocf_evict_do_test03(void **state)
{
	struct test_cache tcache = {};
	struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
	unsigned i;
	unsigned evicted;

@ -237,19 +250,21 @@ static void ocf_evict_do_test03(void **state)
	tcache.evictable[12] = 100;
	tcache.evictable[16] = 100;
	tcache.evictable[17] = 100;
	tcache.req_unmapped = 350;

	_expect_evict_call(tcache, 10, 100, 100);
	_expect_evict_call(tcache, 12, 100, 100);
	_expect_evict_call(tcache, 16, 100, 100);
	_expect_evict_call(tcache, 17, 50, 50);

	evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 350, &tcache.upart[0]);
	evicted = ocf_evict_do(&req);
	assert_int_equal(evicted, 350);
}

static void ocf_evict_do_test04(void **state)
{
	struct test_cache tcache = {};
	struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
	unsigned i;
	unsigned evicted;

@ -266,6 +281,7 @@ static void ocf_evict_do_test04(void **state)
	tcache.evictable[17] = 100;
	tcache.evictable[18] = 100;
	tcache.overflow[18] = 100;
	tcache.req_unmapped = 580;

	_expect_evict_call(tcache, 12, 40, 40);
	_expect_evict_call(tcache, 14, 100, 100);
@ -275,7 +291,7 @@ static void ocf_evict_do_test04(void **state)
	_expect_evict_call(tcache, 16, 100, 100);
	_expect_evict_call(tcache, 17, 80, 80);

	evicted = ocf_evict_do((ocf_cache_t *)&tcache, NULL, 580, &tcache.upart[0]);
	evicted = ocf_evict_do(&req);
	assert_int_equal(evicted, 580);
}
int main(void)
@ -10,6 +10,8 @@
 * _lru_evp_set_empty
 * _lru_evp_all_empty
 * ocf_rotate_right
 * lru_iter_eviction_next
 * lru_iter_cleaning_next
 * </functions_to_leave>
 */

@ -157,7 +159,26 @@ void write_test_case_description(void)
		test_case++;
	}

	/* transform cacheline numbers so that they remain unique but have
	 * assignment to list modulo OCF_NUM_EVICTION_LISTS */
	for (test_case = 0; test_case < num_cases; test_case++) {
		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
			j = 0;
			while (test_cases[j][i][test_case] != -1) {
				test_cases[j][i][test_case] = test_cases[j][i][test_case] *
						OCF_NUM_EVICTION_LISTS + i;
				j++;
			}
		}
	}

#ifdef DEBUG
	static bool desc_printed = false;

	if (desc_printed)
		return;
	desc_printed = true;

	for (test_case = 0; test_case < num_cases; test_case++) {
		print_message("test case no %d\n", test_case);
		for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
@ -196,6 +217,11 @@ struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *part,
		list.num_nodes = i;
	}

#ifdef DEBUG
	print_message("list for case %u evp %u: head: %u tail %u elems %u\n",
			current_case, evp, list.head, list.tail, list.num_nodes);
#endif

	return &list;
}

@ -245,6 +271,76 @@ union eviction_policy_meta *__wrap_ocf_metadata_get_eviction_policy(
}


void __wrap_add_lru_head(ocf_cache_t cache,
		struct ocf_lru_list *list,
		unsigned int collision_index)
{
	unsigned list_head = list->head;
	unsigned i, j = collision_index % OCF_NUM_EVICTION_LISTS;

	i = 1;
	while (test_cases[i][j][current_case] != -1)
		i++;

	test_cases[i+1][j][current_case] = -1;

	while (i--)
		test_cases[i + 1][j][current_case] = test_cases[i][j][current_case];

	test_cases[0][j][current_case] = collision_index;

#ifdef DEBUG
	print_message("case %u evp %u head set to %u\n", current_case, j, collision_index);
#endif
}


void __wrap_remove_lru_list(ocf_cache_t cache,
		struct ocf_lru_list *list,
		unsigned int collision_index)
{
	bool found;
	unsigned i, j;

	found = false;
	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
	{
		j = 0;

		while (test_cases[j][i][current_case] != -1) {
			if (!found && test_cases[j][i][current_case] == collision_index) {
				assert_int_equal(test_cases[0][i][current_case], list->head);
				found = true;
			}
			if (found)
				test_cases[j][i][current_case] = test_cases[j+1][i][current_case];
			j++;
		}

		if (found)
			break;
	}

	assert(found);

#ifdef DEBUG
	print_message("case %u removed %u from evp %u\n", current_case, collision_index, i);
#endif
}

bool __wrap__lru_lock(struct ocf_lru_iter *iter,
		ocf_cache_line_t cache_line,
		ocf_core_id_t *core_id, uint64_t *core_line)
{
	return true;
}

bool __wrap__lru_trylock_cacheline(struct ocf_lru_iter *iter,
		ocf_cache_line_t cline)
{
	return true;
}

static void _lru_run_test(unsigned test_case)
{
	unsigned start_pos;
@ -258,6 +354,8 @@ static void _lru_run_test(unsigned test_case)
	unsigned pos[OCF_NUM_EVICTION_LISTS];
	unsigned i;

	write_test_case_description();

	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
	{
		pos[i] = -1;
@ -265,12 +363,10 @@ static void _lru_run_test(unsigned test_case)
			pos[i]++;
	}

	lru_iter_init(&iter, NULL, NULL, start_pos, false);
	lru_iter_init(&iter, NULL, NULL, start_pos, false, false, false,
			NULL, NULL);

	do {
		/* get cacheline from iterator */
		cache_line = lru_iter_next(&iter);

		/* check what is expected to be returned from iterator */
		if (pos[curr_evp] == -1) {
			i = 1;
@ -294,6 +390,9 @@ static void _lru_run_test(unsigned test_case)
			pos[curr_evp]--;
		}

		/* get cacheline from iterator */
		cache_line = lru_iter_cleaning_next(&iter);

		assert_int_equal(cache_line, expected_cache_line);

		curr_evp = (curr_evp + 1) % OCF_NUM_EVICTION_LISTS;
@ -475,7 +574,5 @@ int main(void)

	print_message("Unit test for lru_iter_next\n");

	write_test_case_description();

	return cmocka_run_group_tests(tests, NULL, NULL);
}