Remove partition list

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Adam Rutkowski 2021-04-25 04:01:25 +02:00
parent 87f834c793
commit 4f217b91a5
41 changed files with 556 additions and 2119 deletions

View File

@ -654,7 +654,7 @@ void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
int core_id, uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID,
return ocf_metadata_actor(cache, PARTITION_UNSPECIFIED,
core_id, start_byte, end_byte,
cleaning_policy_acp_purge_block);
}

View File

@ -233,15 +233,6 @@ bool ocf_cache_line_are_waiters(struct ocf_alock *alock,
return !ocf_alock_waitlist_is_empty(alock, line);
}
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_alock *alock =
ocf_cache_line_concurrency(cache);
return ocf_alock_is_locked_exclusively(alock, line);
}
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_alock *alock)
{
return ocf_alock_waitlist_count(alock);

View File

@ -141,9 +141,6 @@ bool ocf_cache_line_is_used(struct ocf_alock *c,
bool ocf_cache_line_are_waiters(struct ocf_alock *c,
ocf_cache_line_t line);
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief Unlock request map info entry from write or read access.
*

View File

@ -11,8 +11,7 @@ struct ocf_request;
#define LOOKUP_HIT 5
#define LOOKUP_MISS 6
#define LOOKUP_INSERTED 8
#define LOOKUP_REMAPPED 9
#define LOOKUP_REMAPPED 8
typedef enum {
/* modes inherited from user API */

View File

@ -7,7 +7,6 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_freelist.h"
#include "engine_common.h"
#define OCF_ENGINE_DEBUG_IO_NAME "common"
#include "engine_debug.h"
@ -123,15 +122,6 @@ void ocf_engine_patch_req_info(struct ocf_cache *cache,
req->info.insert_no++;
if (req->part_id != ocf_metadata_get_partition_id(cache,
entry->coll_idx)) {
/*
* Need to move this cache line into other partition
*/
entry->re_part = true;
req->info.re_part_no++;
}
if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
req->info.seq_no++;
if (idx + 1 < req->core_line_count &&
@ -152,8 +142,7 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache,
ENV_BUG_ON(entry->status != LOOKUP_HIT &&
entry->status != LOOKUP_MISS &&
entry->status != LOOKUP_REMAPPED &&
entry->status != LOOKUP_INSERTED);
entry->status != LOOKUP_REMAPPED);
/* Handle return value */
if (entry->status == LOOKUP_HIT) {
@ -187,10 +176,8 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache,
}
if (entry->status == LOOKUP_INSERTED ||
entry->status == LOOKUP_REMAPPED) {
if (entry->status == LOOKUP_REMAPPED)
req->info.insert_no++;
}
/* Check if cache hit is sequential */
if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
@ -336,26 +323,6 @@ void ocf_map_cache_line(struct ocf_request *req,
}
static void ocf_engine_map_cache_line(struct ocf_request *req,
unsigned int idx)
{
struct ocf_cache *cache = req->cache;
ocf_cache_line_t cache_line;
if (!ocf_freelist_get_cache_line(cache->freelist, &cache_line)) {
ocf_req_set_mapping_error(req);
return;
}
ocf_metadata_add_to_partition(cache, req->part_id, cache_line);
ocf_map_cache_line(req, idx, cache_line);
/* Update LRU:: Move this node to head of lru list. */
ocf_eviction_init_cache_line(cache, cache_line);
ocf_eviction_set_hot_cache_line(cache, cache_line);
}
static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
struct ocf_request *req)
{
@ -370,7 +337,6 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
case LOOKUP_MISS:
break;
case LOOKUP_INSERTED:
case LOOKUP_REMAPPED:
OCF_DEBUG_RQ(req, "Canceling cache line %u",
entry->coll_idx);
@ -395,56 +361,6 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
}
}
static void ocf_engine_map(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
uint32_t i;
struct ocf_map_info *entry;
uint64_t core_line;
ocf_core_id_t core_id = ocf_core_get_id(req->core);
ocf_req_clear_info(req);
OCF_DEBUG_TRACE(req->cache);
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
entry = &(req->map[i]);
ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);
/* attempt mapping only if no mapping error previously,
* otherwise continue the loop anyway to have request fully
* traversed after map()
*/
if (entry->status != LOOKUP_HIT &&
!ocf_req_test_mapping_error(req)) {
ocf_engine_map_cache_line(req, i);
if (!ocf_req_test_mapping_error(req))
entry->status = LOOKUP_INSERTED;
}
if (entry->status != LOOKUP_MISS)
ocf_engine_update_req_info(cache, req, i);
OCF_DEBUG_PARAM(req->cache,
"%s, cache line %u, core line = %llu",
entry->status == LOOKUP_HIT ? "Hit" :
entry->status == LOOKUP_MISS ? "Miss" :
"Insert",
entry->coll_idx, entry->core_line);
}
if (!ocf_req_test_mapping_error(req)) {
/* request has been inserted into cache - purge it from promotion
* policy */
ocf_promotion_req_purge(cache->promotion_policy, req);
}
OCF_DEBUG_PARAM(req->cache, "Sequential - %s",
ocf_engine_is_sequential(req) ? "Yes" : "No");
}
static void _ocf_engine_clean_end(void *private_data, int error)
{
struct ocf_request *req = private_data;
@ -495,26 +411,22 @@ static void ocf_engine_evict(struct ocf_request *req)
static int lock_clines(struct ocf_request *req)
{
struct ocf_alock *c = ocf_cache_line_concurrency(req->cache);
enum ocf_engine_lock_type lock_type =
req->engine_cbs->get_lock_type(req);
int lock_type = OCF_WRITE;
switch (lock_type) {
case ocf_engine_lock_write:
return ocf_req_async_lock_wr(c, req, req->engine_cbs->resume);
case ocf_engine_lock_read:
return ocf_req_async_lock_rd(c, req, req->engine_cbs->resume);
default:
return OCF_LOCK_ACQUIRED;
}
if (req->rw == OCF_READ && ocf_engine_is_hit(req))
lock_type = OCF_READ;
return lock_type == OCF_WRITE ?
ocf_req_async_lock_wr(c, req, req->engine_cbs->resume) :
ocf_req_async_lock_rd(c, req, req->engine_cbs->resume);
}
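
With the per-engine get_lock_type() callbacks removed further down (engine_rd.c, engine_wb.c, engine_wt.c), the lock decision above collapses into a single rule. A minimal standalone model of that rule (standard C only, illustrative names, not OCF code):

#include <stdbool.h>

enum lock_kind { LOCK_RD, LOCK_WR };

/* Write (exclusive) cacheline lock everywhere, except for a read request
 * that fully hits in cache, which may take a shared (read) lock. */
static enum lock_kind lock_kind_for(bool req_is_read, bool full_hit)
{
        return (req_is_read && full_hit) ? LOCK_RD : LOCK_WR;
}
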
/* Attempt to map cachelines marked as LOOKUP_MISS by evicting from cache.
/* Attempt to map cachelines marked as LOOKUP_MISS.
* Caller must assure that request map info is up to date (request
* is traversed).
*/
static inline int ocf_prepare_clines_evict(struct ocf_request *req)
static inline void ocf_prepare_clines_miss(struct ocf_request *req)
{
int lock_status = -OCF_ERR_NO_LOCK;
bool part_has_space;
part_has_space = ocf_user_part_has_space(req);
@ -529,52 +441,8 @@ static inline int ocf_prepare_clines_evict(struct ocf_request *req)
ocf_engine_evict(req);
if (!ocf_req_test_mapping_error(req)) {
if (!ocf_req_test_mapping_error(req))
ocf_promotion_req_purge(req->cache->promotion_policy, req);
lock_status = lock_clines(req);
if (lock_status < 0)
ocf_req_set_mapping_error(req);
}
return lock_status;
}
static inline int ocf_prepare_clines_miss(struct ocf_request *req)
{
int lock_status = -OCF_ERR_NO_LOCK;
/* requests to disabled partitions go in pass-through */
if (!ocf_user_part_is_enabled(&req->cache->user_parts[req->part_id])) {
ocf_req_set_mapping_error(req);
return lock_status;
}
/* NOTE: ocf_user_part_has_space() below uses potentially stale request
* statistics (collected before hash bucket lock had been upgraded).
* It is ok since this check is opportunistic, as partition occupancy
* is also subject to change. */
if (!ocf_user_part_has_space(req)) {
ocf_engine_lookup(req);
return ocf_prepare_clines_evict(req);
}
ocf_engine_map(req);
if (!ocf_req_test_mapping_error(req)) {
lock_status = lock_clines(req);
if (lock_status < 0) {
/* Mapping succeeded, but we failed to acquire cacheline lock.
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
return lock_status;
}
/* Request mapping failed, but it is fully traversed as a side
* effect of ocf_engine_map(), so no need to repeat the traversal
* before eviction.
* */
req->info.mapping_error = false;
return ocf_prepare_clines_evict(req);
}
int ocf_engine_prepare_clines(struct ocf_request *req)
@ -583,7 +451,12 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
bool mapped;
bool promote = true;
int lock = -OCF_ERR_NO_LOCK;
int result;
/* requests to disabled partitions go in pass-through */
if (!ocf_user_part_is_enabled(user_part)) {
ocf_req_set_mapping_error(req);
return -OCF_ERR_NO_LOCK;
}
/* Calculate hashes for hash-bucket locking */
ocf_req_hash(req);
@ -599,6 +472,9 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
if (mapped) {
lock = lock_clines(req);
if (lock < 0)
ocf_req_set_mapping_error(req);
else
ocf_engine_set_hot(req);
ocf_hb_req_prot_unlock_rd(req);
return lock;
@ -615,6 +491,10 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
/* Mapping must be performed holding (at least) hash-bucket write lock */
ocf_hb_req_prot_lock_upgrade(req);
/* Repeat lookup after upgrading lock */
ocf_engine_lookup(req);
if (unlikely(ocf_engine_is_mapped(req))) {
lock = lock_clines(req);
ocf_engine_set_hot(req);
@ -622,9 +502,19 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
return lock;
}
result = ocf_prepare_clines_miss(req);
ocf_prepare_clines_miss(req);
if (!ocf_req_test_mapping_error(req)) {
lock = lock_clines(req);
if (lock < 0) {
/* Mapping succeeded, but we failed to acquire cacheline lock.
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
}
if (!ocf_req_test_mapping_error(req))
ocf_engine_set_hot(req);
ocf_hb_req_prot_unlock_wr(req);
if (ocf_req_test_clean_eviction(req)) {
@ -632,7 +522,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
128);
}
return result;
return lock;
}
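
For orientation, a standalone model (standard C, not OCF code; the booleans stand in for the checks visible in the diff above) of the decision flow ocf_engine_prepare_clines() follows after this change, where mapping and cacheline locking are now separate steps:

#include <stdbool.h>

/* Illustrative outcomes; OCF itself returns a lock status / sets mapping error. */
enum prep_outcome { PREP_FAILED, PREP_LOCKED_OR_PENDING };

struct prep_state {
        bool part_enabled;   /* target partition enabled? */
        bool mapped;         /* lookup found every cacheline of the request */
        bool remapped_ok;    /* ocf_prepare_clines_miss() mapped the rest */
        bool lock_ok;        /* lock_clines() acquired or queued the lock */
};

static enum prep_outcome prepare_clines_model(const struct prep_state *s)
{
        if (!s->part_enabled)
                return PREP_FAILED;          /* request goes pass-through */

        if (!s->mapped && !s->remapped_ok)
                return PREP_FAILED;          /* no free or evictable lines */

        /* cacheline locking happens only after mapping, in both paths */
        return s->lock_ok ? PREP_LOCKED_OR_PENDING : PREP_FAILED;
}
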
static int _ocf_engine_clean_getter(struct ocf_cache *cache,

View File

@ -210,19 +210,6 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id,
uint64_t core_line);
/**
* @brief Request cacheline lock type
*/
enum ocf_engine_lock_type
{
/** No lock */
ocf_engine_lock_none = 0,
/** Write lock */
ocf_engine_lock_write,
/** Read lock */
ocf_engine_lock_read,
};
/**
* @brief Engine-specific callbacks for common request handling routine
*
@ -230,9 +217,6 @@ enum ocf_engine_lock_type
*/
struct ocf_engine_callbacks
{
/** Specify locking requirements after request is mapped */
enum ocf_engine_lock_type (*get_lock_type)(struct ocf_request *req);
/** Resume handling after acquiring asynchronous lock */
ocf_req_async_lock_cb resume;
};

View File

@ -210,17 +210,8 @@ static const struct ocf_io_if _io_if_read_generic_resume = {
.write = _ocf_read_generic_do,
};
static enum ocf_engine_lock_type ocf_rd_get_lock_type(struct ocf_request *req)
{
if (ocf_engine_is_hit(req))
return ocf_engine_lock_read;
else
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _rd_engine_callbacks =
{
.get_lock_type = ocf_rd_get_lock_type,
.resume = ocf_engine_on_resume,
};

View File

@ -168,14 +168,8 @@ int ocf_write_wb_do(struct ocf_request *req)
return 0;
}
static enum ocf_engine_lock_type ocf_wb_get_lock_type(struct ocf_request *req)
{
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _wb_engine_callbacks =
{
.get_lock_type = ocf_wb_get_lock_type,
.resume = ocf_engine_on_resume,
};

View File

@ -155,14 +155,8 @@ static const struct ocf_io_if _io_if_wt_resume = {
.write = _ocf_write_wt_do,
};
static enum ocf_engine_lock_type ocf_wt_get_lock_type(struct ocf_request *req)
{
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _wt_engine_callbacks =
{
.get_lock_type = ocf_wt_get_lock_type,
.resume = ocf_engine_on_resume,
};

View File

@ -131,18 +131,26 @@ static inline uint32_t ocf_evict_do(struct ocf_request *req)
ocf_part_id_t target_part_id = req->part_id;
struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
uint32_t evict_cline_no = ocf_engine_unmapped_count(req);
uint32_t evicted;
uint32_t evicted = 0;
/* First attempt to evict overflown partitions in order to
/* First attempt to map from freelist */
if (ocf_lru_num_free(cache) > 0) {
evicted = ocf_eviction_need_space(cache, req, &cache->free,
evict_cline_no);
}
if (evicted >= evict_cline_no)
return evicted;
/* Attempt to evict overflown partitions in order to
* achieve configured maximum size. Ignoring partitions
* priority in this case, as overflown partitions should
* free their cachelines regardless of destination partition
* priority. */
evicted = ocf_evict_user_partitions(cache, req, evict_cline_no,
evicted += ocf_evict_user_partitions(cache, req, evict_cline_no,
true, OCF_IO_CLASS_PRIO_PINNED);
if (evicted >= evict_cline_no)
return evicted;
/* Not enough cachelines in overflown partitions. Go through
* partitions with priority <= target partition and attempt
* to evict from those. */
@ -166,7 +174,7 @@ int space_managment_evict_do(struct ocf_request *req)
}
if (needed <= evicted)
return LOOKUP_INSERTED;
return LOOKUP_REMAPPED;
return LOOKUP_MISS;
}
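
For reference, a standalone model (standard C, illustrative names only, not OCF code) of the allocation order ocf_evict_do() follows after this change: free lines first, then overflown partitions, then partitions of lower or equal priority:

#include <stdint.h>

struct evict_sources {
        uint32_t free_lines;       /* cachelines on the freelist partition */
        uint32_t overflown_lines;  /* lines in partitions above their max size */
        uint32_t lower_prio_lines; /* lines in partitions of <= priority */
};

static uint32_t take(uint32_t *from, uint32_t want)
{
        uint32_t n = (*from < want) ? *from : want;

        *from -= n;
        return n;
}

static uint32_t evict_do_model(struct evict_sources *s, uint32_t needed)
{
        uint32_t got = 0;

        got += take(&s->free_lines, needed);             /* 1. freelist */
        if (got >= needed)
                return got;

        got += take(&s->overflown_lines, needed - got);  /* 2. overflown parts */
        if (got >= needed)
                return got;

        got += take(&s->lower_prio_lines, needed - got); /* 3. by priority */
        return got;
}
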

View File

@ -15,6 +15,9 @@
#define OCF_NUM_EVICTION_LISTS 32
struct ocf_part;
struct ocf_user_part;
struct ocf_part_runtime;
struct ocf_part_cleaning_ctx;
struct ocf_request;
struct eviction_policy {
@ -43,12 +46,10 @@ struct eviction_policy_ops {
uint32_t cline_no);
void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
void (*init_evp)(ocf_cache_t cache, struct ocf_part *part);
void (*dirty_cline)(ocf_cache_t cache,
struct ocf_part *part,
uint32_t cline_no);
void (*clean_cline)(ocf_cache_t cache,
struct ocf_part *part,
uint32_t cline_no);
void (*dirty_cline)(ocf_cache_t cache, struct ocf_part *part,
ocf_cache_line_t cline);
void (*clean_cline)(ocf_cache_t cache, struct ocf_part *part,
ocf_cache_line_t cline);
void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *user_part,
ocf_queue_t io_queue, uint32_t count);
const char *name;
@ -67,4 +68,19 @@ int space_managment_evict_do(struct ocf_request *req);
int space_management_free(ocf_cache_t cache, uint32_t count);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor);
void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_upart, struct ocf_part *dst_upart);
uint32_t ocf_lru_num_free(ocf_cache_t cache);
#endif

View File

@ -245,28 +245,76 @@ static struct ocf_lru_list *evp_lru_get_list(struct ocf_part *part,
static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
struct ocf_part *part = &cache->user_parts[part_id].part;
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
ocf_part_id_t part_id;
struct ocf_part *part;
part_id = ocf_metadata_get_partition_id(cache, cline);
ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
part = &cache->user_parts[part_id].part;
return evp_lru_get_list(part, ev_list,
!metadata_test_dirty(cache, cline));
}
static void evp_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_part, struct ocf_lru_list *src_list,
struct ocf_part *dst_part, struct ocf_lru_list *dst_list)
{
remove_lru_list(cache, src_list, cline);
balance_lru_list(cache, src_list);
add_lru_head(cache, dst_list, cline);
balance_lru_list(cache, dst_list);
env_atomic_dec(&src_part->runtime->curr_size);
env_atomic_inc(&dst_part->runtime->curr_size);
ocf_metadata_set_partition_id(cache, cline, dst_part->id);
}
/* the caller must hold the metadata lock */
void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
struct ocf_lru_list *list;
struct ocf_lru_list *list, *free;
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
ocf_part_id_t part_id;
struct ocf_part *part;
part_id = ocf_metadata_get_partition_id(cache, cline);
ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
part = &cache->user_parts[part_id].part;
list = evp_get_cline_list(cache, cline);
remove_lru_list(cache, list, cline);
balance_lru_list(cache, list);
free = evp_lru_get_list(&cache->free, ev_list, true);
evp_lru_move(cache, cline, part, list, &cache->free, free);
}
static void evp_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_part, struct ocf_part *dst_part)
{
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *src_list, *dst_list;
bool clean;
clean = !metadata_test_dirty(cache, cline);
src_list = evp_lru_get_list(src_part, ev_list, clean);
dst_list = evp_lru_get_list(dst_part, ev_list, clean);
evp_lru_move(cache, cline, src_part, src_list, dst_part, dst_list);
}
void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_part, struct ocf_part *dst_part)
{
OCF_METADATA_EVICTION_WR_LOCK(cline);
evp_lru_repart_locked(cache, cline, src_part, dst_part);
OCF_METADATA_EVICTION_WR_UNLOCK(cline);
}
static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
struct ocf_part *part, uint32_t start_evp, bool clean,
bool cl_lock_write, _lru_hash_locked_pfn hash_locked,
struct ocf_request *req)
_lru_hash_locked_pfn hash_locked, struct ocf_request *req)
{
uint32_t i;
@ -275,13 +323,14 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
ENV_BUILD_BUG_ON(OCF_NUM_EVICTION_LISTS > sizeof(iter->evp) * 8);
iter->cache = cache;
iter->c = ocf_cache_line_concurrency(cache);
iter->part = part;
/* set iterator value to start_evp - 1 modulo OCF_NUM_EVICTION_LISTS */
iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) % OCF_NUM_EVICTION_LISTS;
iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) %
OCF_NUM_EVICTION_LISTS;
iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
iter->clean = clean;
iter->cl_lock_write = cl_lock_write;
iter->hash_locked = hash_locked;
iter->req = req;
@ -290,26 +339,23 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
}
static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
ocf_cache_t cache, struct ocf_part *part,
uint32_t start_evp)
ocf_cache_t cache, struct ocf_part *part, uint32_t start_evp)
{
/* Lock cachelines for read, non-exclusive access */
lru_iter_init(iter, cache, part, start_evp, false, false,
NULL, NULL);
lru_iter_init(iter, cache, part, start_evp, false, NULL, NULL);
}
static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
ocf_cache_t cache, struct ocf_part *part,
uint32_t start_evp, bool cl_lock_write,
struct ocf_request *req)
uint32_t start_evp, struct ocf_request *req)
{
/* Lock hash buckets for write, cachelines according to user request,
* however exclusive cacheline access is needed even in case of read
* access. _evp_lru_evict_hash_locked tells whether given hash bucket
* is already locked as part of request hash locking (to avoid attempt
* to acquire the same hash bucket lock twice) */
lru_iter_init(iter, cache, part, start_evp, true, cl_lock_write,
ocf_req_hash_in_range, req);
lru_iter_init(iter, cache, part, start_evp, true, ocf_req_hash_in_range,
req);
}
@ -343,29 +389,6 @@ static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
return iter->num_avail_evps == 0;
}
static bool inline _lru_trylock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
{
struct ocf_alock *c =
ocf_cache_line_concurrency(iter->cache);
return iter->cl_lock_write ?
ocf_cache_line_try_lock_wr(c, cline) :
ocf_cache_line_try_lock_rd(c, cline);
}
static void inline _lru_unlock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
{
struct ocf_alock *c =
ocf_cache_line_concurrency(iter->cache);
if (iter->cl_lock_write)
ocf_cache_line_unlock_wr(c, cline);
else
ocf_cache_line_unlock_rd(c, cline);
}
static bool inline _lru_trylock_hash(struct ocf_lru_iter *iter,
ocf_core_id_t core_id, uint64_t core_line)
{
@ -399,7 +422,7 @@ static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
{
struct ocf_request *req = iter->req;
if (!_lru_trylock_cacheline(iter, cache_line))
if (!ocf_cache_line_try_lock_wr(iter->c, cache_line))
return false;
ocf_metadata_get_core_info(iter->cache, cache_line,
@ -409,19 +432,18 @@ static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
if (*core_id == ocf_core_get_id(req->core) &&
*core_line >= req->core_line_first &&
*core_line <= req->core_line_last) {
_lru_unlock_cacheline(iter, cache_line);
ocf_cache_line_unlock_wr(iter->c, cache_line);
return false;
}
if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
_lru_unlock_cacheline(iter, cache_line);
ocf_cache_line_unlock_wr(iter->c, cache_line);
return false;
}
if (!ocf_cache_line_is_locked_exclusively(iter->cache,
cache_line)) {
if (ocf_cache_line_are_waiters(iter->c, cache_line)) {
_lru_unlock_hash(iter, *core_id, *core_line);
_lru_unlock_cacheline(iter, cache_line);
ocf_cache_line_unlock_wr(iter->c, cache_line);
return false;
}
@ -429,11 +451,17 @@ static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
}
/* Get next clean cacheline from tail of lru lists. Caller must not hold any
* eviction list lock. Returned cacheline is read or write locked, depending on
* iter->write_lock. Returned cacheline has corresponding metadata hash bucket
* locked. Cacheline is moved to the head of lru list before being returned */
* eviction list lock.
* - returned cacheline is write locked
* - returned cacheline has the corresponding metadata hash bucket write locked
* - cacheline is moved to the head of destination partition lru list before
* being returned.
* All this is packed into a single function to lock LRU list once per each
* replaced cacheline.
**/
static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
ocf_core_id_t *core_id, uint64_t *core_line)
struct ocf_part *dst_part, ocf_core_id_t *core_id,
uint64_t *core_line)
{
uint32_t curr_evp;
ocf_cache_line_t cline;
@ -456,12 +484,65 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
}
if (cline != end_marker) {
if (dst_part != part) {
evp_lru_repart_locked(cache, cline, part,
dst_part);
} else {
remove_lru_list(cache, list, cline);
add_lru_head(cache, list, cline);
balance_lru_list(cache, list);
}
}
ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, curr_evp);
ocf_metadata_eviction_wr_unlock(&cache->metadata.lock,
curr_evp);
if (cline == end_marker && !_lru_evp_is_empty(iter)) {
/* mark list as empty */
_lru_evp_set_empty(iter);
}
} while (cline == end_marker && !_lru_evp_all_empty(iter));
return cline;
}
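
A usage sketch of the iterator above (hypothetical caller, simplified; the real consumer is evp_lru_req_clines() further down). It assumes only the signatures shown in this diff and illustrates the contract: each returned line is write locked, hash locked, and already moved to the destination partition's LRU:

static unsigned take_n_victims(struct ocf_request *req, struct ocf_part *src,
                struct ocf_part *dst, unsigned n)
{
        struct ocf_lru_iter iter;
        ocf_core_id_t core_id;
        uint64_t core_line;
        ocf_cache_line_t cline;
        unsigned taken = 0;

        /* hash buckets write locked, victim cachelines write locked */
        lru_iter_eviction_init(&iter, req->cache, src, 0, req);

        while (taken < n) {
                cline = lru_iter_eviction_next(&iter, dst, &core_id,
                                &core_line);
                if (cline == end_marker)
                        break;
                /* cline: write locked, repartitioned to dst, hash locked */
                ++taken;
        }

        return taken;
}
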
/* Get next clean cacheline from tail of free lru lists. Caller must not hold any
* eviction list lock.
* - returned cacheline is write locked
* - cacheline is moved to the head of destination partition lru list before
* being returned.
* All this is packed into a single function to lock LRU list once per each
* replaced cacheline.
**/
static inline ocf_cache_line_t lru_iter_free_next(struct ocf_lru_iter *iter,
struct ocf_part *dst_part)
{
uint32_t curr_evp;
ocf_cache_line_t cline;
ocf_cache_t cache = iter->cache;
struct ocf_part *free = iter->part;
struct ocf_lru_list *list;
do {
curr_evp = _lru_next_evp(iter);
ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);
list = evp_lru_get_list(free, curr_evp, true);
cline = list->tail;
while (cline != end_marker && !ocf_cache_line_try_lock_wr(
iter->c, cline)) {
cline = ocf_metadata_get_eviction_policy(
iter->cache, cline)->lru.prev;
}
if (cline != end_marker) {
evp_lru_repart_locked(cache, cline, free, dst_part);
}
ocf_metadata_eviction_wr_unlock(&cache->metadata.lock,
curr_evp);
if (cline == end_marker && !_lru_evp_is_empty(iter)) {
/* mark list as empty */
@ -484,8 +565,8 @@ static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
curr_evp = _lru_next_evp(iter);
cline = iter->curr_cline[curr_evp];
while (cline != end_marker && !_lru_trylock_cacheline(iter,
cline)) {
while (cline != end_marker && !ocf_cache_line_try_lock_rd(
iter->c, cline)) {
cline = ocf_metadata_get_eviction_policy(
iter->cache, cline)->lru.prev;
}
@ -607,9 +688,36 @@ bool evp_lru_can_evict(ocf_cache_t cache)
return true;
}
/* the caller must hold the metadata lock */
static void evp_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
ocf_core_id_t core_id, ocf_part_id_t part_id)
{
ocf_core_t core;
ocf_metadata_start_collision_shared_access(
cache, cline);
metadata_clear_valid_sec(cache, cline, 0,
ocf_line_end_sector(cache));
ocf_metadata_remove_from_collision(cache, cline, part_id);
ocf_metadata_end_collision_shared_access(
cache, cline);
core = ocf_cache_get_core(cache, core_id);
env_atomic_dec(&core->runtime_meta->cached_clines);
env_atomic_dec(&core->runtime_meta->
part_counters[part_id].cached_clines);
}
/* Assign cachelines from src_part to the request req. src_part is either
* user partition (if inserted in the cache) or freelist partition. In case
* of a user partition, mapped cachelines are invalidated (evicted from the
* cache) before remapping.
* NOTE: the caller must hold the metadata read lock and hash bucket write
* lock for the entire request LBA range.
* NOTE: all cachelines assigned to the request in this function are marked
* as LOOKUP_REMAPPED and are write locked.
*/
uint32_t evp_lru_req_clines(struct ocf_request *req,
struct ocf_part *part, uint32_t cline_no)
struct ocf_part *src_part, uint32_t cline_no)
{
struct ocf_alock* alock;
struct ocf_lru_iter iter;
@ -617,12 +725,11 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
ocf_cache_line_t cline;
uint64_t core_line;
ocf_core_id_t core_id;
ocf_core_t core;
ocf_cache_t cache = req->cache;
bool cl_write_lock =
(req->engine_cbs->get_lock_type(req) == ocf_engine_lock_write);
unsigned evp;
unsigned req_idx = 0;
struct ocf_part *dst_part;
if (cline_no == 0)
return 0;
@ -635,16 +742,24 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
ENV_BUG();
}
ENV_BUG_ON(req->part_id == PARTITION_FREELIST);
dst_part = &cache->user_parts[req->part_id].part;
evp = req->io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;
lru_iter_eviction_init(&iter, cache, part, evp, cl_write_lock, req);
lru_iter_eviction_init(&iter, cache, src_part, evp, req);
i = 0;
while (i < cline_no) {
if (!evp_lru_can_evict(cache))
break;
cline = lru_iter_eviction_next(&iter, &core_id, &core_line);
if (src_part->id != PARTITION_FREELIST) {
cline = lru_iter_eviction_next(&iter, dst_part, &core_id,
&core_line);
} else {
cline = lru_iter_free_next(&iter, dst_part);
}
if (cline == end_marker)
break;
@ -662,19 +777,10 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
ENV_BUG_ON(req->map[req_idx].status != LOOKUP_MISS);
ocf_metadata_start_collision_shared_access(
cache, cline);
metadata_clear_valid_sec(cache, cline, 0, ocf_line_end_sector(cache));
ocf_metadata_remove_from_collision(cache, cline, part->id);
ocf_metadata_end_collision_shared_access(
cache, cline);
core = ocf_cache_get_core(cache, core_id);
env_atomic_dec(&core->runtime_meta->cached_clines);
env_atomic_dec(&core->runtime_meta->
part_counters[part->id].cached_clines);
if (src_part->id != PARTITION_FREELIST) {
evp_lru_invalidate(cache, cline, core_id, src_part->id);
_lru_unlock_hash(&iter, core_id, core_line);
}
ocf_map_cache_line(req, req_idx, cline);
@ -682,13 +788,13 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
ocf_engine_patch_req_info(cache, req, req_idx);
alock = ocf_cache_line_concurrency(iter.cache);
ocf_alock_mark_index_locked(alock, req, req_idx, true);
req->alock_rw = cl_write_lock ? OCF_WRITE : OCF_READ;
req->alock_rw = OCF_WRITE;
++req_idx;
++i;
/* Number of cachelines to evict have to match space in the request */
/* Number of cachelines to evict have to match space in the
* request */
ENV_BUG_ON(req_idx == req->core_line_count && i != cline_no );
}
@ -750,10 +856,12 @@ void evp_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
_lru_init(clean_list);
_lru_init(dirty_list);
}
env_atomic_set(&part->runtime->curr_size, 0);
}
void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
uint32_t cline)
ocf_cache_line_t cline)
{
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *clean_list;
@ -771,7 +879,7 @@ void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
}
void evp_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
uint32_t cline)
ocf_cache_line_t cline)
{
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *clean_list;
@ -788,3 +896,162 @@ void evp_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
OCF_METADATA_EVICTION_WR_UNLOCK(cline);
}
static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
ocf_cache_line_t phys)
{
ocf_cache_line_t lg;
ocf_cache_line_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
if (phys == collision_table_entries)
return collision_table_entries;
lg = ocf_metadata_map_phy2lg(cache, phys);
while (metadata_test_valid_any(cache, lg) &&
phys + 1 < collision_table_entries) {
++phys;
if (phys == collision_table_entries)
break;
lg = ocf_metadata_map_phy2lg(cache, phys);
}
return phys;
}
/* put invalid cachelines on freelist partition lru list */
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
{
ocf_cache_line_t phys, cline;
ocf_cache_line_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
struct ocf_lru_list *list;
unsigned ev_list;
unsigned i;
phys = 0;
for (i = 0; i < num_free_clines; i++) {
/* find first invalid cacheline */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys == collision_table_entries);
cline = ocf_metadata_map_phy2lg(cache, phys);
++phys;
ocf_metadata_set_partition_id(cache, cline, PARTITION_FREELIST);
ev_list = (cline % OCF_NUM_EVICTION_LISTS);
list = evp_lru_get_list(&cache->free, ev_list, true);
add_lru_head(cache, list, cline);
balance_lru_list(cache, list);
}
/* we should have reached the last invalid cache line */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys != collision_table_entries);
env_atomic_set(&cache->free.runtime->curr_size, num_free_clines);
}
static bool _is_cache_line_acting(struct ocf_cache *cache,
uint32_t cache_line, ocf_core_id_t core_id,
uint64_t start_line, uint64_t end_line)
{
ocf_core_id_t tmp_core_id;
uint64_t core_line;
ocf_metadata_get_core_info(cache, cache_line,
&tmp_core_id, &core_line);
if (core_id != OCF_CORE_ID_INVALID) {
if (core_id != tmp_core_id)
return false;
if (core_line < start_line || core_line > end_line)
return false;
} else if (tmp_core_id == OCF_CORE_ID_INVALID) {
return false;
}
return true;
}
/*
* Iterates over cache lines that belong to the core device with
* core ID = core_id whose core byte addresses are in the range
* [start_byte, end_byte] and applies actor(cache, cache_line) to all
* matching cache lines
*
* set partition_id to PARTITION_UNSPECIFIED to not care about partition_id
*
* global metadata write lock must be held before calling this function
*/
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor)
{
uint32_t step = 0;
uint64_t start_line, end_line;
int ret = 0;
struct ocf_alock *c = ocf_cache_line_concurrency(cache);
int clean;
struct ocf_lru_list *list;
struct ocf_part *part;
unsigned i, cline;
struct lru_eviction_policy_meta *node;
start_line = ocf_bytes_2_lines(cache, start_byte);
end_line = ocf_bytes_2_lines(cache, end_byte);
if (part_id == PARTITION_UNSPECIFIED) {
for (cline = 0; cline < cache->device->collision_table_entries;
++cline) {
if (_is_cache_line_acting(cache, cline, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(c, cline))
ret = -OCF_ERR_AGAIN;
else
actor(cache, cline);
}
OCF_COND_RESCHED_DEFAULT(step);
}
return ret;
}
ENV_BUG_ON(part_id == PARTITION_FREELIST);
part = &cache->user_parts[part_id].part;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (clean = 0; clean <= 1; clean++) {
list = evp_lru_get_list(part, i, clean);
cline = list->tail;
while (cline != end_marker) {
node = &ocf_metadata_get_eviction_policy(cache,
cline)->lru;
if (!_is_cache_line_acting(cache, cline,
core_id, start_line,
end_line)) {
cline = node->prev;
continue;
}
if (ocf_cache_line_is_used(c, cline))
ret = -OCF_ERR_AGAIN;
else
actor(cache, cline);
cline = node->prev;
OCF_COND_RESCHED_DEFAULT(step);
}
}
}
return ret;
}
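
A small usage sketch of ocf_metadata_actor() in its new home (hypothetical actor, not part of this commit); with PARTITION_UNSPECIFIED it scans the whole collision table, otherwise it walks the partition's LRU lists as above:

static env_atomic matched_lines;

/* Hypothetical actor: count matching cache lines instead of modifying them. */
static void count_line_actor(struct ocf_cache *cache, ocf_cache_line_t line)
{
        env_atomic_inc(&matched_lines);
}

static int count_core_lines(struct ocf_cache *cache, ocf_core_id_t core_id,
                uint64_t start_byte, uint64_t end_byte)
{
        env_atomic_set(&matched_lines, 0);

        /* PARTITION_UNSPECIFIED: do not restrict the scan to one partition */
        return ocf_metadata_actor(cache, PARTITION_UNSPECIFIED, core_id,
                        start_byte, end_byte, count_line_actor);
}
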
uint32_t ocf_lru_num_free(ocf_cache_t cache)
{
return env_atomic_read(&cache->free.runtime->curr_size);
}

View File

@ -10,19 +10,21 @@
struct ocf_part;
struct ocf_user_part;
struct ocf_part_runtime;
struct ocf_part_cleaning_ctx;
struct ocf_request;
void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_request *req,
struct ocf_part *part, uint32_t cline_no);
struct ocf_part *src_part, uint32_t cline_no);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_part *part);
void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_part *part,
uint32_t cline);
ocf_cache_line_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_part *part,
uint32_t cline);
ocf_cache_line_t cline);
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
ocf_queue_t io_queue, uint32_t count);
#endif

View File

@ -63,10 +63,8 @@ static inline uint32_t ocf_eviction_need_space(ocf_cache_t cache,
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].req_clines)) {
result = evict_policy_ops[type].req_clines(req,
part, clines);
}
if (likely(evict_policy_ops[type].req_clines))
result = evict_policy_ops[type].req_clines(req, part, clines);
return result;
}
@ -91,9 +89,7 @@ static inline void ocf_eviction_initialize(struct ocf_cache *cache,
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].init_evp)) {
OCF_METADATA_EVICTION_WR_LOCK_ALL();
evict_policy_ops[type].init_evp(cache, part);
OCF_METADATA_EVICTION_WR_UNLOCK_ALL();
}
}

View File

@ -14,7 +14,6 @@
#include "metadata_segment.h"
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_def_priv.h"
#include "../ocf_freelist.h"
#include "../ocf_priv.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
@ -94,7 +93,7 @@ static ocf_cache_line_t ocf_metadata_get_entries(
return OCF_USER_IO_CLASS_MAX + 1;
case metadata_segment_part_runtime:
return OCF_USER_IO_CLASS_MAX + 1;
return OCF_NUM_PARTITIONS;
case metadata_segment_core_config:
return OCF_CORE_MAX;
@ -580,6 +579,8 @@ static int ocf_metadata_init_fixed_size(struct ocf_cache *cache,
&part_runtime_meta[i].runtime;
cache->user_parts[i].part.id = i;
}
cache->free.runtime = &part_runtime_meta[PARTITION_FREELIST].runtime;
cache->free.id = PARTITION_FREELIST;
/* Set core metadata */
core_meta_config = METADATA_MEM_POOL(ctrl,
@ -1154,10 +1155,13 @@ static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
ocf_core_t core = ocf_cache_get_core(cache, core_id);
ocf_part_id_t part_id;
ocf_cache_line_t hash_index;
struct ocf_part_runtime *part;
part_id = PARTITION_DEFAULT;
part = cache->user_parts[part_id].part.runtime;
ocf_metadata_add_to_partition(cache, part_id, cache_line);
ocf_metadata_set_partition_id(cache, cache_line, part_id);
env_atomic_inc(&part->curr_size);
hash_index = ocf_metadata_hash_func(cache, core_line, core_id);
ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,

View File

@ -15,10 +15,6 @@ struct ocf_metadata_list_info {
/*!< Previous cache line in collision list */
ocf_cache_line_t next_col;
/*!< Next cache line in collision list*/
ocf_cache_line_t partition_prev;
/*!< Previous cache line in the same partition*/
ocf_cache_line_t partition_next;
/*!< Next cache line in the same partition*/
ocf_part_id_t partition_id : 8;
/*!< ID of partition where is assigned this cache line*/
} __attribute__((packed));

View File

@ -5,91 +5,8 @@
#include "ocf/ocf.h"
#include "metadata.h"
#include "../ocf_freelist.h"
#include "../utils/utils_cache_line.h"
static bool _is_cache_line_acting(struct ocf_cache *cache,
uint32_t cache_line, ocf_core_id_t core_id,
uint64_t start_line, uint64_t end_line)
{
ocf_core_id_t tmp_core_id;
uint64_t core_line;
ocf_metadata_get_core_info(cache, cache_line,
&tmp_core_id, &core_line);
if (core_id != OCF_CORE_ID_INVALID) {
if (core_id != tmp_core_id)
return false;
if (core_line < start_line || core_line > end_line)
return false;
} else if (tmp_core_id == OCF_CORE_ID_INVALID) {
return false;
}
return true;
}
/*
* Iterates over cache lines that belong to the core device with
* core ID = core_id whose core byte addresses are in the range
* [start_byte, end_byte] and applies actor(cache, cache_line) to all
* matching cache lines
*
* set partition_id to PARTITION_INVALID to not care about partition_id
*
* METADATA lock must be held before calling this function
*/
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor)
{
uint32_t step = 0;
ocf_cache_line_t i, next_i;
uint64_t start_line, end_line;
int ret = 0;
struct ocf_alock *c =
ocf_cache_line_concurrency(cache);
start_line = ocf_bytes_2_lines(cache, start_byte);
end_line = ocf_bytes_2_lines(cache, end_byte);
if (part_id != PARTITION_INVALID) {
for (i = cache->user_parts[part_id].part.runtime->head;
i != cache->device->collision_table_entries;
i = next_i) {
next_i = ocf_metadata_get_partition_next(cache, i);
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(c, i))
ret = -OCF_ERR_AGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
} else {
for (i = 0; i < cache->device->collision_table_entries; ++i) {
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(c, i))
ret = -OCF_ERR_AGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
}
return ret;
}
/* the caller must hold the relevant cache block concurrency reader lock
* and the metadata lock
*/
@ -100,10 +17,6 @@ void ocf_metadata_remove_cache_line(struct ocf_cache *cache,
ocf_metadata_get_partition_id(cache, cache_line);
ocf_metadata_remove_from_collision(cache, cache_line, partition_id);
ocf_metadata_remove_from_partition(cache, partition_id, cache_line);
ocf_freelist_put_cache_line(cache->freelist, cache_line);
}
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
@ -128,6 +41,6 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
return ocf_metadata_actor(cache, PARTITION_UNSPECIFIED, core_id,
start_byte, end_byte, ocf_metadata_sparse_cache_line);
}

View File

@ -30,12 +30,4 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor);
#endif /* __METADATA_MISC_H__ */

View File

@ -8,9 +8,8 @@
#include "metadata_internal.h"
#include "../utils/utils_user_part.h"
void ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
ocf_part_id_t ocf_metadata_get_partition_id(struct ocf_cache *cache,
ocf_cache_line_t line)
{
const struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
@ -19,26 +18,13 @@ void ocf_metadata_get_partition_info(struct ocf_cache *cache,
info = ocf_metadata_raw_rd_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
if (part_id)
*part_id = info->partition_id;
if (next_line)
*next_line = info->partition_next;
if (prev_line)
*prev_line = info->partition_prev;
} else {
ocf_metadata_error(cache);
if (part_id)
*part_id = PARTITION_DEFAULT;
if (next_line)
*next_line = cache->device->collision_table_entries;
if (prev_line)
*prev_line = cache->device->collision_table_entries;
}
ENV_BUG_ON(!info);
return info->partition_id;
}
void ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
void ocf_metadata_set_partition_id(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
@ -48,178 +34,7 @@ void ocf_metadata_set_partition_next(struct ocf_cache *cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->partition_next = next_line;
else
ocf_metadata_error(cache);
}
void ocf_metadata_set_partition_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->partition_prev = prev_line;
else
ocf_metadata_error(cache);
}
void ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
info->partition_id = part_id;
info->partition_next = next_line;
info->partition_prev = prev_line;
} else {
else
ocf_metadata_error(cache);
}
}
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void update_partition_head(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
struct ocf_part *part = &cache->user_parts[part_id].part;
part->runtime->head = line;
}
/* Adds the given collision_index to the _head_ of the Partition list */
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
ocf_cache_line_t line_head;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
struct ocf_user_part *user_part = &cache->user_parts[part_id];
struct ocf_part *part = &user_part->part;
ENV_BUG_ON(!(line < line_entries));
ocf_metadata_partition_lock(&cache->metadata.lock, part_id);
/* First node to be added. */
if (!part->runtime->curr_size) {
update_partition_head(cache, part_id, line);
ocf_metadata_set_partition_info(cache, line, part_id,
line_entries, line_entries);
if (!ocf_user_part_is_valid(user_part)) {
/* Partition becomes empty, and is not valid
* update list of partitions
*/
ocf_user_part_sort(cache);
}
} else {
/* Not the first node to be added. */
line_head = part->runtime->head;
ENV_BUG_ON(!(line_head < line_entries));
ocf_metadata_set_partition_info(cache, line, part_id,
line_head, line_entries);
ocf_metadata_set_partition_prev(cache, line_head, line);
update_partition_head(cache, part_id, line);
}
part->runtime->curr_size++;
ocf_metadata_partition_unlock(&cache->metadata.lock, part_id);
}
/* Deletes the node with the given collision_index from the Partition list */
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
int is_head, is_tail;
ocf_cache_line_t prev_line, next_line;
uint32_t line_entries = cache->device->collision_table_entries;
struct ocf_user_part *user_part = &cache->user_parts[part_id];
struct ocf_part *part = &user_part->part;
ENV_BUG_ON(!(line < line_entries));
ocf_metadata_partition_lock(&cache->metadata.lock, part_id);
/* Get Partition info */
ocf_metadata_get_partition_info(cache, line, NULL,
&next_line, &prev_line);
/* Find out if this node is Partition _head_ */
is_head = (prev_line == line_entries);
is_tail = (next_line == line_entries);
/* Case 1: If we are head and there is only one node. So unlink node
* and set that there is no node left in the list.
*/
if (is_head && (part->runtime->curr_size == 1)) {
ocf_metadata_set_partition_info(cache, line,
part_id, line_entries, line_entries);
update_partition_head(cache, part_id, line_entries);
if (!ocf_user_part_is_valid(user_part)) {
/* Partition becomes not empty, and is not valid
* update list of partitions
*/
ocf_user_part_sort(cache);
}
} else if (is_head) {
/* Case 2: else if this collision_index is partition list head,
* but many nodes, update head and return
*/
ENV_BUG_ON(!(next_line < line_entries));
update_partition_head(cache, part_id, next_line);
ocf_metadata_set_partition_next(cache, line, line_entries);
ocf_metadata_set_partition_prev(cache, next_line,
line_entries);
} else if (is_tail) {
/* Case 3: else if this collision_index is partition list tail
*/
ENV_BUG_ON(!(prev_line < line_entries));
ocf_metadata_set_partition_prev(cache, line, line_entries);
ocf_metadata_set_partition_next(cache, prev_line,
line_entries);
} else {
/* Case 4: else this collision_index is a middle node.
* There is no change to the head and the tail pointers.
*/
ENV_BUG_ON(!(next_line < line_entries));
ENV_BUG_ON(!(prev_line < line_entries));
/* Update prev and next nodes */
ocf_metadata_set_partition_next(cache, prev_line, next_line);
ocf_metadata_set_partition_prev(cache, next_line, prev_line);
/* Update the given node */
ocf_metadata_set_partition_info(cache, line, part_id,
line_entries, line_entries);
}
part->runtime->curr_size--;
ocf_metadata_partition_unlock(&cache->metadata.lock, part_id);
}

View File

@ -10,62 +10,16 @@
#include "../ocf_cache_priv.h"
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
#define PARTITION_UNSPECIFIED ((ocf_part_id_t)-1)
#define PARTITION_FREELIST OCF_USER_IO_CLASS_MAX + 1
#define PARTITION_SIZE_MIN 0
#define PARTITION_SIZE_MAX 100
void ocf_metadata_get_partition_info(
ocf_part_id_t ocf_metadata_get_partition_id(struct ocf_cache *cache,
ocf_cache_line_t line);
void ocf_metadata_set_partition_id(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
ocf_cache_line_t *prev_line);
static inline ocf_part_id_t ocf_metadata_get_partition_id(
struct ocf_cache *cache, ocf_cache_line_t line)
{
ocf_part_id_t part_id;
ocf_metadata_get_partition_info(cache, line, &part_id, NULL, NULL);
return part_id;
}
static inline ocf_cache_line_t ocf_metadata_get_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line)
{
ocf_cache_line_t next;
ocf_metadata_get_partition_info(cache, line, NULL, &next, NULL);
return next;
}
static inline ocf_cache_line_t ocf_metadata_get_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line)
{
ocf_cache_line_t prev;
ocf_metadata_get_partition_info(cache, line, NULL, NULL, &prev);
return prev;
}
void ocf_metadata_set_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next_line);
void ocf_metadata_set_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t prev_line);
void ocf_metadata_set_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t part_id, ocf_cache_line_t next_line,
ocf_cache_line_t prev_line);
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
ocf_part_id_t part_id);
#endif /* __METADATA_PARTITION_H__ */
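
With the per-partition collision list gone, partition membership is just the partition_id field plus the LRU lists. A hedged sketch (hypothetical helper, not part of this commit) of how a cacheline would now move between partitions, going through ocf_lru_repart() from eviction/lru.c above instead of the removed add_to/remove_from_partition calls:

static void move_line_to_part(ocf_cache_t cache, ocf_cache_line_t line,
                ocf_part_id_t dst_id)
{
        ocf_part_id_t src_id = ocf_metadata_get_partition_id(cache, line);
        struct ocf_part *src = &cache->user_parts[src_id].part;
        struct ocf_part *dst = &cache->user_parts[dst_id].part;

        if (src_id == dst_id)
                return;

        /* updates both LRU lists, both curr_size counters and partition_id */
        ocf_lru_repart(cache, line, src, dst);
}
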

View File

@ -10,6 +10,8 @@
#include "../cleaning/cleaning.h"
#include "../eviction/eviction.h"
#define OCF_NUM_PARTITIONS OCF_USER_IO_CLASS_MAX + 2
struct ocf_user_part_config {
char name[OCF_IO_CLASS_NAME_MAX];
uint32_t min_size;
@ -27,8 +29,7 @@ struct ocf_user_part_config {
};
struct ocf_part_runtime {
uint32_t curr_size;
uint32_t head;
env_atomic curr_size;
struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
};
@ -43,6 +44,8 @@ struct ocf_lru_iter
ocf_cache_line_t curr_cline[OCF_NUM_EVICTION_LISTS];
/* cache object */
ocf_cache_t cache;
/* cacheline concurrency */
struct ocf_alock *c;
/* target partition */
struct ocf_part *part;
/* available (non-empty) eviction list bitmap rotated so that current
@ -59,8 +62,6 @@ struct ocf_lru_iter
struct ocf_request *req;
/* 1 if iterating over clean lists, 0 if over dirty */
bool clean : 1;
/* 1 if cacheline is to be locked for write, 0 if for read*/
bool cl_lock_write : 1;
};
#define OCF_EVICTION_CLEAN_SIZE 32U

View File

@ -11,6 +11,7 @@
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../metadata/metadata_io.h"
#include "../metadata/metadata_partition_structs.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_cache_line.h"
@ -22,7 +23,6 @@
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_ctx_priv.h"
#include "../ocf_freelist.h"
#include "../cleaning/cleaning.h"
#include "../promotion/ops.h"
@ -123,8 +123,6 @@ struct ocf_cache_attach_context {
* load or recovery
*/
bool freelist_inited : 1;
bool concurrency_inited : 1;
} flags;
@ -182,26 +180,22 @@ static void __init_partitions(ocf_cache_t cache)
}
}
static void __init_user_parts_attached(ocf_cache_t cache)
static void __init_parts_attached(ocf_cache_t cache)
{
struct ocf_part *part;
ocf_part_id_t part_id;
for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++) {
part = &cache->user_parts[part_id].part;
for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++)
ocf_eviction_initialize(cache, &cache->user_parts[part_id].part);
part->runtime->head = cache->device->collision_table_entries;
part->runtime->curr_size = 0;
ocf_eviction_initialize(cache, part);
}
ocf_eviction_initialize(cache, &cache->free);
}
static void __init_freelist(ocf_cache_t cache)
static void __init_free(ocf_cache_t cache)
{
uint64_t free_clines = ocf_metadata_collision_table_entries(cache) -
ocf_get_cache_occupancy(cache);
ocf_freelist_populate(cache->freelist, free_clines);
ocf_lru_populate(cache, free_clines);
}
static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
@ -301,8 +295,8 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
ocf_metadata_init_hash_table(cache);
ocf_metadata_init_collision(cache);
__init_user_parts_attached(cache);
__init_freelist(cache);
__init_parts_attached(cache);
__init_free(cache);
result = __init_cleaning_policy(cache);
if (result) {
@ -321,7 +315,7 @@ static void init_attached_data_structures_recovery(ocf_cache_t cache)
{
ocf_metadata_init_hash_table(cache);
ocf_metadata_init_collision(cache);
__init_user_parts_attached(cache);
__init_parts_attached(cache);
__reset_stats(cache);
__init_metadata_version(cache);
}
@ -477,7 +471,8 @@ void _ocf_mngt_load_init_instance_complete(void *priv, int error)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
}
__init_freelist(cache);
if (context->metadata.shutdown_status != ocf_metadata_clean_shutdown)
__init_free(cache);
cleaning_policy = cache->conf_meta->cleaning_policy_type;
if (!cleaning_policy_ops[cleaning_policy].initialize)
@ -996,12 +991,6 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
context->flags.attached_metadata_inited = true;
ret = ocf_freelist_init(&cache->freelist, cache);
if (ret)
OCF_PL_FINISH_RET(pipeline, ret);
context->flags.freelist_inited = true;
ret = ocf_concurrency_init(cache);
if (ret)
OCF_PL_FINISH_RET(pipeline, ret);
@ -1147,9 +1136,6 @@ static void _ocf_mngt_attach_handle_error(
if (context->flags.concurrency_inited)
ocf_concurrency_deinit(cache);
if (context->flags.freelist_inited)
ocf_freelist_deinit(cache->freelist);
if (context->flags.volume_inited)
ocf_volume_deinit(&cache->device->volume);
@ -2025,7 +2011,6 @@ static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
ocf_metadata_deinit_variable_size(cache);
ocf_concurrency_deinit(cache);
ocf_freelist_deinit(cache->freelist);
ocf_volume_deinit(&cache->device->volume);

View File

@ -21,7 +21,6 @@
#include "ocf_logger_priv.h"
#include "ocf/ocf_trace.h"
#include "promotion/promotion.h"
#include "ocf_freelist.h"
#define DIRTY_FLUSHED 1
#define DIRTY_NOT_FLUSHED 0
@ -80,7 +79,7 @@ struct ocf_cache {
struct ocf_lst user_part_list;
struct ocf_user_part user_parts[OCF_USER_IO_CLASS_MAX + 1];
ocf_freelist_t freelist;
struct ocf_part free;
ocf_eviction_t eviction_policy_init;

View File

@ -1,432 +0,0 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata/metadata.h"
struct ocf_freelist_part {
ocf_cache_line_t head;
ocf_cache_line_t tail;
env_atomic64 curr_size;
};
struct ocf_freelist {
/* parent cache */
struct ocf_cache *cache;
/* partition list array */
struct ocf_freelist_part *part;
/* freelist lock array */
env_spinlock *lock;
/* number of free lists */
uint32_t count;
/* next slowpath victim idx */
env_atomic slowpath_victim_idx;
/* total number of free lines */
env_atomic64 total_free;
};
static void ocf_freelist_lock(ocf_freelist_t freelist, uint32_t ctx)
{
env_spinlock_lock(&freelist->lock[ctx]);
}
static int ocf_freelist_trylock(ocf_freelist_t freelist, uint32_t ctx)
{
return env_spinlock_trylock(&freelist->lock[ctx]);
}
static void ocf_freelist_unlock(ocf_freelist_t freelist, uint32_t ctx)
{
env_spinlock_unlock(&freelist->lock[ctx]);
}
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void _ocf_freelist_remove_cache_line(ocf_freelist_t freelist,
uint32_t ctx, ocf_cache_line_t cline)
{
struct ocf_cache *cache = freelist->cache;
struct ocf_freelist_part *freelist_part = &freelist->part[ctx];
int is_head, is_tail;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ocf_cache_line_t prev, next;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
freelist->cache);
uint32_t free;
ENV_BUG_ON(cline >= line_entries);
/* Get Partition info */
ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev);
/* Find out if this node is Partition _head_ */
is_head = (prev == line_entries);
is_tail = (next == line_entries);
free = env_atomic64_read(&freelist_part->curr_size);
/* Case 1: If we are head and there is only one node. So unlink node
* and set that there is no node left in the list.
*/
if (is_head && free == 1) {
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
freelist_part->head = line_entries;
freelist_part->tail = line_entries;
} else if (is_head) {
/* Case 2: else if this collision_index is partition list head,
* but many nodes, update head and return
*/
ENV_BUG_ON(next >= line_entries);
freelist_part->head = next;
ocf_metadata_set_partition_prev(cache, next, line_entries);
ocf_metadata_set_partition_next(cache, cline, line_entries);
} else if (is_tail) {
/* Case 3: else if this cline is partition list tail */
ENV_BUG_ON(prev >= line_entries);
freelist_part->tail = prev;
ocf_metadata_set_partition_prev(cache, cline, line_entries);
ocf_metadata_set_partition_next(cache, prev, line_entries);
} else {
/* Case 4: else this collision_index is a middle node.
* There is no change to the head and the tail pointers.
*/
ENV_BUG_ON(next >= line_entries || prev >= line_entries);
/* Update prev and next nodes */
ocf_metadata_set_partition_prev(cache, next, prev);
ocf_metadata_set_partition_next(cache, prev, next);
/* Update the given node */
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
}
env_atomic64_dec(&freelist_part->curr_size);
env_atomic64_dec(&freelist->total_free);
}
static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
ocf_cache_line_t phys)
{
ocf_cache_line_t lg;
ocf_cache_line_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
if (phys == collision_table_entries)
return collision_table_entries;
lg = ocf_metadata_map_phy2lg(cache, phys);
while (metadata_test_valid_any(cache, lg)) {
++phys;
if (phys == collision_table_entries)
break;
lg = ocf_metadata_map_phy2lg(cache, phys);
}
return phys;
}
/* Assign unused cachelines to freelist */
void ocf_freelist_populate(ocf_freelist_t freelist,
ocf_cache_line_t num_free_clines)
{
unsigned step = 0;
ocf_cache_t cache = freelist->cache;
unsigned num_freelists = freelist->count;
ocf_cache_line_t prev, next, idx;
ocf_cache_line_t phys;
ocf_cache_line_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
unsigned freelist_idx;
uint64_t freelist_size;
phys = 0;
for (freelist_idx = 0; freelist_idx < num_freelists; freelist_idx++)
{
/* calculate current freelist size */
freelist_size = num_free_clines / num_freelists;
if (freelist_idx < (num_free_clines % num_freelists))
++freelist_size;
env_atomic64_set(&freelist->part[freelist_idx].curr_size,
freelist_size);
if (!freelist_size) {
/* init empty freelist and move to next one */
freelist->part[freelist_idx].head =
collision_table_entries;
freelist->part[freelist_idx].tail =
collision_table_entries;
continue;
}
/* find first invalid cacheline */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys == collision_table_entries);
idx = ocf_metadata_map_phy2lg(cache, phys);
++phys;
/* store freelist head */
freelist->part[freelist_idx].head = idx;
/* link freelist elements using partition list */
prev = collision_table_entries;
while (--freelist_size) {
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys == collision_table_entries);
next = ocf_metadata_map_phy2lg(cache, phys);
++phys;
ocf_metadata_set_partition_info(cache, idx,
PARTITION_INVALID, next, prev);
prev = idx;
idx = next;
OCF_COND_RESCHED_DEFAULT(step);
}
/* terminate partition list */
ocf_metadata_set_partition_info(cache, idx, PARTITION_INVALID,
collision_table_entries, prev);
/* store freelist tail */
freelist->part[freelist_idx].tail = idx;
}
/* we should have reached the last invalid cache line */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys != collision_table_entries);
env_atomic64_set(&freelist->total_free, num_free_clines);
}
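/*
* Editor's note: a minimal standalone sketch (not OCF code) of the split
* arithmetic used by ocf_freelist_populate() above - each per-execution-
* context freelist gets num_free_clines / num_freelists entries and the
* remainder is spread over the first lists, e.g. 8 free lines over
* 3 contexts gives sizes 3, 3 and 2.
*/
#include <stdio.h>
int main(void)
{
	unsigned num_free_clines = 8, num_freelists = 3, idx;
	for (idx = 0; idx < num_freelists; idx++) {
		unsigned size = num_free_clines / num_freelists;
		if (idx < (num_free_clines % num_freelists))
			++size;
		printf("freelist %u gets %u cache lines\n", idx, size);
	}
	return 0;
}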
static void ocf_freelist_add_cache_line(ocf_freelist_t freelist,
uint32_t ctx, ocf_cache_line_t line)
{
struct ocf_cache *cache = freelist->cache;
struct ocf_freelist_part *freelist_part = &freelist->part[ctx];
ocf_cache_line_t tail;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
freelist->cache);
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ENV_BUG_ON(line >= line_entries);
if (env_atomic64_read(&freelist_part->curr_size) == 0) {
freelist_part->head = line;
freelist_part->tail = line;
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, line_entries);
} else {
tail = freelist_part->tail;
ENV_BUG_ON(tail >= line_entries);
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, tail);
ocf_metadata_set_partition_next(cache, tail, line);
freelist_part->tail = line;
}
env_atomic64_inc(&freelist_part->curr_size);
env_atomic64_inc(&freelist->total_free);
}
typedef enum {
OCF_FREELIST_ERR_NOLOCK = 1,
OCF_FREELIST_ERR_LIST_EMPTY,
} ocf_freelist_get_err_t;
static ocf_freelist_get_err_t ocf_freelist_get_cache_line_ctx(
ocf_freelist_t freelist, uint32_t ctx, bool can_wait,
ocf_cache_line_t *cline)
{
if (env_atomic64_read(&freelist->part[ctx].curr_size) == 0)
return -OCF_FREELIST_ERR_LIST_EMPTY;
if (!can_wait && ocf_freelist_trylock(freelist, ctx))
return -OCF_FREELIST_ERR_NOLOCK;
if (can_wait)
ocf_freelist_lock(freelist, ctx);
if (env_atomic64_read(&freelist->part[ctx].curr_size) == 0) {
ocf_freelist_unlock(freelist, ctx);
return -OCF_FREELIST_ERR_LIST_EMPTY;
}
*cline = freelist->part[ctx].head;
_ocf_freelist_remove_cache_line(freelist, ctx, *cline);
ocf_freelist_unlock(freelist, ctx);
return 0;
}
static int get_next_victim_freelist(ocf_freelist_t freelist)
{
int ctx, next;
do {
ctx = env_atomic_read(&freelist->slowpath_victim_idx);
next = (ctx + 1) % freelist->count;
} while (ctx != env_atomic_cmpxchg(&freelist->slowpath_victim_idx, ctx,
next));
return ctx;
}
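/*
* Editor's note: an illustrative (non-OCF) version of the same lock-free
* round-robin victim selection, written with C11 atomics; the identifiers
* below are made up for the sketch. The returned value is the context
* whose freelist the slow path tries next.
*/
#include <stdatomic.h>
static atomic_int victim_idx;
static int next_victim(int count)
{
	int ctx, next;
	do {
		ctx = atomic_load(&victim_idx);
		next = (ctx + 1) % count;
		/* retry if another thread advanced the index concurrently */
	} while (!atomic_compare_exchange_weak(&victim_idx, &ctx, next));
	return ctx;
}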
static bool ocf_freelist_get_cache_line_slow(ocf_freelist_t freelist,
ocf_cache_line_t *cline)
{
int i, ctx;
int err;
bool lock_err;
/* try slowpath without waiting on lock */
lock_err = false;
for (i = 0; i < freelist->count; i++) {
ctx = get_next_victim_freelist(freelist);
err = ocf_freelist_get_cache_line_ctx(freelist, ctx, false,
cline);
if (!err)
return true;
if (err == -OCF_FREELIST_ERR_NOLOCK)
lock_err = true;
}
if (!lock_err) {
/* Slowpath failed due to empty freelists - no point in
* iterating through contexts to attempt slowpath with full
* lock */
return false;
}
/* slow path with waiting on lock */
for (i = 0; i < freelist->count; i++) {
ctx = get_next_victim_freelist(freelist);
if (!ocf_freelist_get_cache_line_ctx(freelist, ctx, true,
cline)) {
return true;
}
}
return false;
}
static bool ocf_freelist_get_cache_line_fast(ocf_freelist_t freelist,
ocf_cache_line_t *cline)
{
bool ret;
uint32_t ctx = env_get_execution_context();
ret = !ocf_freelist_get_cache_line_ctx(freelist, ctx, false, cline);
env_put_execution_context(ctx);
return ret;
}
bool ocf_freelist_get_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t *cline)
{
if (env_atomic64_read(&freelist->total_free) == 0)
return false;
if (!ocf_freelist_get_cache_line_fast(freelist, cline))
return ocf_freelist_get_cache_line_slow(freelist, cline);
return true;
}
void ocf_freelist_put_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t cline)
{
uint32_t ctx = env_get_execution_context();
ocf_freelist_lock(freelist, ctx);
ocf_freelist_add_cache_line(freelist, ctx, cline);
ocf_freelist_unlock(freelist, ctx);
env_put_execution_context(ctx);
}
int ocf_freelist_init(ocf_freelist_t *freelist, struct ocf_cache *cache)
{
uint32_t num;
int i;
int result;
ocf_freelist_t tmp_freelist;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
cache);
tmp_freelist = env_vzalloc(sizeof(*tmp_freelist));
if (!tmp_freelist)
return -OCF_ERR_NO_MEM;
num = env_get_execution_context_count();
tmp_freelist->cache = cache;
tmp_freelist->count = num;
env_atomic64_set(&tmp_freelist->total_free, 0);
tmp_freelist->lock = env_vzalloc(sizeof(tmp_freelist->lock[0]) * num);
tmp_freelist->part = env_vzalloc(sizeof(tmp_freelist->part[0]) * num);
if (!tmp_freelist->lock || !tmp_freelist->part) {
result = -OCF_ERR_NO_MEM;
goto free_allocs;
}
for (i = 0; i < num; i++) {
result = env_spinlock_init(&tmp_freelist->lock[i]);
if (result)
goto spinlock_err;
tmp_freelist->part[i].head = line_entries;
tmp_freelist->part[i].tail = line_entries;
env_atomic64_set(&tmp_freelist->part[i].curr_size, 0);
}
*freelist = tmp_freelist;
return 0;
spinlock_err:
while (i--)
env_spinlock_destroy(&tmp_freelist->lock[i]);
free_allocs:
env_vfree(tmp_freelist->lock);
env_vfree(tmp_freelist->part);
env_vfree(tmp_freelist);
return result;
}
void ocf_freelist_deinit(ocf_freelist_t freelist)
{
int i;
for (i = 0; i < freelist->count; i++)
env_spinlock_destroy(&freelist->lock[i]);
env_vfree(freelist->lock);
env_vfree(freelist->part);
env_vfree(freelist);
}
ocf_cache_line_t ocf_freelist_num_free(ocf_freelist_t freelist)
{
return env_atomic64_read(&freelist->total_free);
}

View File

@ -1,34 +0,0 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_FREELIST_H__
#define __OCF_FREELIST_H__
#include "ocf_cache_priv.h"
struct ocf_freelist;
typedef struct ocf_freelist *ocf_freelist_t;
/* Init / deinit freelist runtime structures */
int ocf_freelist_init(ocf_freelist_t *freelist, struct ocf_cache *cache);
void ocf_freelist_deinit(ocf_freelist_t freelist);
/* Assign unused cachelines to freelist */
void ocf_freelist_populate(ocf_freelist_t freelist,
ocf_cache_line_t num_free_clines);
/* Get cacheline from freelist */
bool ocf_freelist_get_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t *cline);
/* Put cacheline back to freelist */
void ocf_freelist_put_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t cline);
/* Return total number of free cachelines */
ocf_cache_line_t ocf_freelist_num_free(ocf_freelist_t freelist);
#endif /* __OCF_FREELIST_H__ */
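Editor's note: the interface above lends itself to a simple init/populate/get/put flow; a hypothetical, self-contained caller sketch (the function name and arguments are placeholders, not taken from OCF) might have looked like this:
#include "ocf_freelist.h"
static void freelist_usage_sketch(struct ocf_cache *cache,
		ocf_cache_line_t num_free_clines)
{
	ocf_freelist_t freelist;
	ocf_cache_line_t cline;
	if (ocf_freelist_init(&freelist, cache))
		return;
	/* hand every unused cacheline over to the freelist */
	ocf_freelist_populate(freelist, num_free_clines);
	/* take a cacheline for mapping, give it back once invalidated */
	if (ocf_freelist_get_cache_line(freelist, &cline))
		ocf_freelist_put_cache_line(freelist, cline);
	ocf_freelist_deinit(freelist);
}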

View File

@ -13,6 +13,7 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
struct ocf_io_class_info *info)
{
ocf_part_id_t part_id = io_class;
struct ocf_part *part;
OCF_CHECK_NULL(cache);
@ -33,9 +34,11 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
return -OCF_ERR_INVAL;
}
part = &cache->user_parts[part_id].part;
info->priority = cache->user_parts[part_id].config->priority;
info->curr_size = ocf_cache_is_device_attached(cache) ?
cache->user_parts[part_id].part.runtime->curr_size : 0;
env_atomic_read(&part->runtime->curr_size) : 0;
info->min_size = cache->user_parts[part_id].config->min_size;
info->max_size = cache->user_parts[part_id].config->max_size;

View File

@ -19,8 +19,8 @@ static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache,
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN + req->core_line_count);
return (ocf_lru_num_free(cache) <= SEQ_CUTOFF_FULL_MARGIN +
req->core_line_count);
}
static int ocf_seq_cutoff_stream_cmp(struct ocf_rb_node *n1,

View File

@ -222,7 +222,7 @@ bool nhit_req_should_promote(ocf_promotion_policy_t policy,
uint64_t core_line;
uint64_t occupied_cachelines =
ocf_metadata_collision_table_entries(policy->owner) -
ocf_freelist_num_free(policy->owner->freelist);
ocf_lru_num_free(policy->owner);
cfg = (struct nhit_promotion_policy_config*)policy->config;

View File

@ -790,23 +790,6 @@ bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
return empty;
}
/* NOTE: it is the caller's responsibility to ensure that no one acquires
* a lock in the background */
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int val = env_atomic_read(access);
ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE);
if (!ocf_alock_waitlist_is_empty(alock, entry))
return false;
return val == OCF_CACHE_LINE_ACCESS_ONE_RD ||
val == OCF_CACHE_LINE_ACCESS_WR;
}
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);

View File

@ -53,9 +53,6 @@ int ocf_alock_lock_wr(struct ocf_alock *alock,
bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
ocf_cache_line_t entry);
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
ocf_cache_line_t entry);
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock);
size_t ocf_alock_obj_size(void);

View File

@ -27,9 +27,11 @@ static int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->part.runtime->curr_size : 0;
env_atomic_read(&p1->part.runtime->curr_size)
: 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->part.runtime->curr_size : 0;
env_atomic_read(&p2->part.runtime->curr_size)
: 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;
@ -104,11 +106,11 @@ void ocf_user_part_move(struct ocf_request *req)
}
/* Moving cachelines to another partition is needed only
* for those already mapped before this request, which
* indicates either HIT or REMAPPED.
* for those already mapped before this request and remapped
* cachelines are assigned to target partition during eviction.
* So only hit cachelines are interesting.
*/
if (entry->status != LOOKUP_HIT &&
entry->status != LOOKUP_REMAPPED) {
if (entry->status != LOOKUP_HIT) {
/* No HIT */
continue;
}
@ -127,9 +129,6 @@ void ocf_user_part_move(struct ocf_request *req)
continue;
}
/* Remove from old eviction */
ocf_eviction_purge_cache_line(cache, line);
if (metadata_test_dirty(cache, line)) {
/*
* Remove cline from cleaning - this is for ioclass
@ -142,13 +141,8 @@ void ocf_user_part_move(struct ocf_request *req)
purge_cache_block(cache, line);
}
/* Let's change partition */
ocf_metadata_remove_from_partition(cache, id_old, line);
ocf_metadata_add_to_partition(cache, id_new, line);
/* Add to new eviction */
ocf_eviction_init_cache_line(cache, line);
ocf_eviction_set_hot_cache_line(cache, line);
ocf_lru_repart(cache, line, &cache->user_parts[id_old].part,
&cache->user_parts[id_new].part);
/* Check if cache line is dirty. If yes then need to change
* cleaning policy and update partition dirty clines

View File

@ -53,7 +53,7 @@ static inline ocf_part_id_t ocf_user_part_class2id(ocf_cache_t cache, uint64_t c
static inline uint32_t ocf_part_get_occupancy(struct ocf_part *part)
{
return part->runtime->curr_size;
return env_atomic_read(&part->runtime->curr_size);
}
static inline uint32_t ocf_user_part_get_min_size(ocf_cache_t cache,

View File

@ -16,7 +16,7 @@ MAIN_DIRECTORY_OF_UNIT_TESTS = "../tests/"
# Paths to all directories, in which tests are stored. All paths should be relative to
# MAIN_DIRECTORY_OF_UNIT_TESTS
DIRECTORIES_WITH_TESTS_LIST = ["cleaning/", "metadata/", "mngt/", "concurrency/", "engine/",
"eviction/", "utils/", "promotion/", "ocf_freelist.c/"]
"eviction/", "utils/", "promotion/"]
# Paths to all directories containing files with sources. All paths should be relative to
# MAIN_DIRECTORY_OF_TESTED_PROJECT

View File

@ -4,6 +4,10 @@
* <functions_to_leave>
* ocf_prepare_clines_evict
* ocf_engine_evict
* ocf_req_set_mapping_error
* ocf_req_test_mapping_error
* ocf_req_set_part_evict
* ocf_req_part_evict
* </functions_to_leave>
*/
@ -22,7 +26,6 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_freelist.h"
#include "engine_common.h"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
@ -71,45 +74,27 @@ void __wrap_ocf_metadata_end_exclusive_access(
{
}
bool __wrap_ocf_user_part_is_enabled(struct ocf_user_part *target_part)
{
return mock();
}
void __wrap_ocf_engine_map(struct ocf_request *req)
{
function_called();
}
bool __wrap_ocf_req_test_mapping_error(struct ocf_request *req)
{
return mock();
}
void __wrap_ocf_req_set_mapping_error(struct ocf_request *req)
{
function_called();
}
int __wrap_space_managment_evict_do(struct ocf_request *req)
{
function_called();
return mock();
}
uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
{
return 100;
}
static void ocf_prepare_clines_miss_test01(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is disabled and empty\n");
will_return(__wrap_ocf_user_part_is_enabled, false);
expect_function_call(__wrap_ocf_req_set_mapping_error);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
print_test_description("Target part doesn't have enough space.\n");
print_test_description("\tEviction success\n");
will_return_always(__wrap_ocf_user_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return_always(__wrap_space_managment_evict_do, LOOKUP_REMAPPED);
ocf_prepare_clines_miss(&req);
assert(!ocf_req_test_mapping_error(&req));
assert(ocf_req_part_evict(&req));
}
static void ocf_prepare_clines_miss_test02(void **state)
@ -117,13 +102,17 @@ static void ocf_prepare_clines_miss_test02(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is disabled but has cachelines assigned.\n");
print_test_description("\tMark mapping error\n");
print_test_description("Target part doesn't have enough space.\n");
print_test_description("\tEviction failed\n");
will_return(__wrap_ocf_user_part_is_enabled, false);
expect_function_call(__wrap_ocf_req_set_mapping_error);
will_return_always(__wrap_ocf_user_part_has_space, false);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
ocf_prepare_clines_miss(&req);
assert(ocf_req_test_mapping_error(&req));
assert(ocf_req_part_evict(&req));
}
static void ocf_prepare_clines_miss_test03(void **state)
@ -131,20 +120,16 @@ static void ocf_prepare_clines_miss_test03(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("\tEviction is ok and cachelines lock is acquired.\n");
print_test_description("Target part has enough space.\n");
print_test_description("\tEviction success\n");
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_user_part_has_space, false);
will_return_always(__wrap_ocf_user_part_has_space, true);
expect_function_call(__wrap_space_managment_evict_do);
will_return_always(__wrap_space_managment_evict_do, LOOKUP_INSERTED);
will_return_always(__wrap_space_managment_evict_do, LOOKUP_REMAPPED);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
will_return(__wrap_lock_clines, 0);
expect_function_call(__wrap_lock_clines);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), 0);
ocf_prepare_clines_miss(&req);
assert(!ocf_req_test_mapping_error(&req));
assert(!ocf_req_part_evict(&req));
}
static void ocf_prepare_clines_miss_test04(void **state)
@ -152,85 +137,17 @@ static void ocf_prepare_clines_miss_test04(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Target part has enough space.\n");
print_test_description("\tEviction failed\n");
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_user_part_has_space, false);
will_return_always(__wrap_ocf_user_part_has_space, true);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
expect_function_call(__wrap_ocf_req_set_mapping_error);
will_return_always(__wrap_ocf_req_test_mapping_error, true);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}
static void ocf_prepare_clines_miss_test06(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n");
will_return_always(__wrap_ocf_user_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
expect_function_call(__wrap_lock_clines);
will_return(__wrap_lock_clines, -OCF_ERR_NO_LOCK);
expect_function_call(__wrap_ocf_req_set_mapping_error);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}
static void ocf_prepare_clines_miss_test07(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Eviction and mapping were ok, lock not acquired.\n");
will_return_always(__wrap_ocf_user_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
expect_function_call(__wrap_lock_clines);
will_return(__wrap_lock_clines, OCF_LOCK_NOT_ACQUIRED);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_NOT_ACQUIRED);
}
static void ocf_prepare_clines_miss_test08(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled has enough space.\n");
print_test_description("\tMapping and cacheline lock are both ok\n");
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_user_part_has_space, true);
expect_function_call(__wrap_ocf_engine_map);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
expect_function_call(__wrap_lock_clines);
will_return(__wrap_lock_clines, OCF_LOCK_ACQUIRED);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_ACQUIRED);
ocf_prepare_clines_miss(&req);
assert(ocf_req_test_mapping_error(&req));
assert(!ocf_req_part_evict(&req));
}
int main(void)
@ -240,9 +157,6 @@ int main(void)
cmocka_unit_test(ocf_prepare_clines_miss_test02),
cmocka_unit_test(ocf_prepare_clines_miss_test03),
cmocka_unit_test(ocf_prepare_clines_miss_test04),
cmocka_unit_test(ocf_prepare_clines_miss_test06),
cmocka_unit_test(ocf_prepare_clines_miss_test07),
cmocka_unit_test(ocf_prepare_clines_miss_test08)
};
print_message("Unit test for ocf_prepare_clines_miss\n");

View File

@ -27,11 +27,17 @@ struct test_cache
{
struct ocf_cache cache;
struct ocf_user_part_config part[OCF_USER_IO_CLASS_MAX];
struct ocf_part_runtime runtime[OCF_USER_IO_CLASS_MAX];
uint32_t overflow[OCF_USER_IO_CLASS_MAX];
uint32_t evictable[OCF_USER_IO_CLASS_MAX];
uint32_t req_unmapped;
};
uint32_t __wrap_ocf_lru_num_free(ocf_cache_t cache)
{
return 0;
}
bool __wrap_ocf_eviction_can_evict(ocf_cache_t cache)
{
return true;
@ -58,7 +64,9 @@ uint32_t __wrap_ocf_eviction_need_space(struct ocf_cache *cache,
uint32_t clines)
{
struct test_cache *tcache = (struct test_cache *)cache;
unsigned overflown_consumed = min(clines, tcache->overflow[part->id]);
unsigned overflown_consumed;
overflown_consumed = min(clines, tcache->overflow[part->id]);
tcache->overflow[part->id] -= overflown_consumed;
tcache->evictable[part->id] -= clines;
@ -102,9 +110,11 @@ int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->part.runtime->curr_size : 0;
env_atomic_read(&p1->part.runtime->curr_size)
: 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->part.runtime->curr_size : 0;
env_atomic_read(&p2->part.runtime->curr_size)
: 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;

View File

@ -39,6 +39,11 @@
static union eviction_policy_meta meta[META_COUNT];
struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t cache)
{
return NULL;
}
union eviction_policy_meta*
__wrap_ocf_metadata_get_eviction_policy(ocf_cache_t cache, ocf_cache_line_t line)
{

View File

@ -40,6 +40,11 @@
//#define DEBUG
struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t cache)
{
return NULL;
}
ocf_cache_line_t test_cases[10 * OCF_NUM_EVICTION_LISTS][OCF_NUM_EVICTION_LISTS][20];
unsigned num_cases = 20;
@ -335,12 +340,17 @@ bool __wrap__lru_lock(struct ocf_lru_iter *iter,
return true;
}
bool __wrap__lru_trylock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
bool __wrap_ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
return true;
}
bool __wrap_ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
return false;
}
static void _lru_run_test(unsigned test_case)
{
unsigned start_pos;

View File

@ -1,382 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_get_cache_line</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* ocf_freelist_populate
* next_phys_invalid
* ocf_freelist_lock
* ocf_freelist_trylock
* ocf_freelist_unlock
* _ocf_freelist_remove_cache_line
* ocf_freelist_get_cache_line_fast
* ocf_freelist_get_cache_line_slow
* ocf_freelist_add_cache_line
* ocf_freelist_get_cache_line_ctx
* get_next_victim_freelist
* ocf_freelist_put_cache_line
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_get_put_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
return mock();
}
unsigned __wrap_env_get_execution_context_count(void)
{
return mock();
}
unsigned __wrap_env_get_execution_context(void)
{
return mock();
}
void __wrap_env_put_execution_context(unsigned ctx)
{
}
/* simulate no striping */
ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
{
return phy;
}
bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
{
return mock();
}
/* metadata partition info interface mock: */
#define max_clines 100
struct {
ocf_cache_line_t prev;
ocf_cache_line_t next;
} partition_list[max_clines];
void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
assert_int_equal(part_id, PARTITION_INVALID);
partition_list[line].prev = prev_line;
partition_list[line].next = next_line;
}
void __wrap_ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
{
if (part_id)
*part_id = PARTITION_INVALID;
if (prev_line)
*prev_line = partition_list[line].prev;
if (next_line)
*next_line = partition_list[line].next;
}
void __wrap_ocf_metadata_set_partition_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
partition_list[line].prev = prev_line;
}
void __wrap_ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
{
partition_list[line].next = next_line;
}
static void ocf_freelist_get_cache_line_get_fast(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify get free cache line get fast path");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/* now the following cachelines are on the per-context lists:
* ctx 0: 0, 1, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7
*/
/* get cline from context 1 */
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 3);
/* ctx 0: 0, 1, 2
* ctx 1: _, 4, 5
* ctx 2: 6, 7 */
/* get cline from context 2 */
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 6);
/* ctx 0: 0, 1, 2
* ctx 1: _, 4, 5
* ctx 2: _, 7 */
/* get cline from context 1 */
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 4);
/* ctx 0: 0, 1, 2
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 0 */
will_return(__wrap_env_get_execution_context, 0);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 0);
/* ctx 0: _, 1, 2
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 0 */
will_return(__wrap_env_get_execution_context, 0);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 1);
/* ctx 0: _, _, 2
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 0 */
will_return(__wrap_env_get_execution_context, 0);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 2);
/* ctx 0: _, _, _,
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 2 */
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 7);
/* ctx 0: _, _, _,
* ctx 1: _, _, 5
* ctx 2: _, _ */
/* get cline from context 1 */
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 5);
/* ctx 0: _, _, _,
* ctx 1: _, _, _
* ctx 2: _, _ */
ocf_freelist_deinit(freelist);
}
static void ocf_freelist_get_cache_line_get_slow(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify get free cache line get slow path");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
/* always return exec ctx 0 */
will_return_maybe(__wrap_env_get_execution_context, 0);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/* now the following cachelines are on the per-context lists:
* ctx 0: 0, 1, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7
*/
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 0);
/* ctx 0: _, 1, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 1);
/* ctx 0: _, _, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 2);
/* ctx 0: _, _, _
* ctx 1: 3, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 3);
/* ctx 0: _, _, _
* ctx 1: _, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 6);
/* ctx 0: _, _, _
* ctx 1: _, 4, 5
* ctx 2: _, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 4);
/* ctx 0: _, _, _
* ctx 1: _, _, 5
* ctx 2: _, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 7);
/* ctx 0: _, _, _
* ctx 1: _, _, 5
* ctx 2: _, _ */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 5);
/* ctx 0: _, _, _,
* ctx 1: _, _, _
* ctx 2: _, _ */
ocf_freelist_deinit(freelist);
}
static void ocf_freelist_get_cache_line_put(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify freelist cacheline put");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/* get some clines from the freelists */
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
/* ctx 0:
* ctx 1: 4, 5
* ctx 2: 7 */
will_return(__wrap_env_get_execution_context, 1);
ocf_freelist_put_cache_line(freelist, 0);
will_return(__wrap_env_get_execution_context, 1);
ocf_freelist_put_cache_line(freelist, 2);
will_return(__wrap_env_get_execution_context, 2);
ocf_freelist_put_cache_line(freelist, 3);
/* ctx 0:
* ctx 1: 4, 5, 0, 2
* ctx 2: 7, 3*/
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 4);
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 5);
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 0);
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 2);
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 7);
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 3);
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_get_cache_line_get_fast),
cmocka_unit_test(ocf_freelist_get_cache_line_get_slow),
cmocka_unit_test(ocf_freelist_get_cache_line_put)
};
print_message("Unit test for ocf_freelist_get_cache_line\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,68 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_populate</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_init_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
function_called();
return mock();
}
ocf_cache_line_t __wrap_env_get_execution_context_count(ocf_cache_t cache)
{
function_called();
return mock();
}
static void ocf_freelist_init_test01(void **state)
{
unsigned num_cls = 9;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
ocf_cache_t cache = 0x1234;
print_test_description("Freelist initialization test");
expect_function_call(__wrap_ocf_metadata_collision_table_entries);
will_return(__wrap_ocf_metadata_collision_table_entries, num_cls);
expect_function_call(__wrap_env_get_execution_context_count);
will_return(__wrap_env_get_execution_context_count, num_ctxts);
ocf_freelist_init(&freelist, cache);
assert(freelist != NULL);
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_init_test01)
};
print_message("Unit test of ocf_freelist_init\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,213 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_get_cache_line</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* ocf_freelist_populate
* next_phys_invalid
* ocf_freelist_unlock
* _ocf_freelist_remove_cache_line
* ocf_freelist_get_cache_line_fast
* ocf_freelist_get_cache_line_slow
* ocf_freelist_add_cache_line
* ocf_freelist_get_cache_line_ctx
* get_next_victim_freelist
* ocf_freelist_put_cache_line
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_get_put_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
return mock();
}
unsigned __wrap_env_get_execution_context_count(void)
{
return mock();
}
unsigned __wrap_env_get_execution_context(void)
{
return mock();
}
void __wrap_env_put_execution_context(unsigned ctx)
{
}
/* simulate no striping */
ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
{
return phy;
}
bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
{
return mock();
}
void __wrap_ocf_freelist_lock(ocf_freelist_t freelist, uint32_t ctx)
{
function_called();
check_expected(ctx);
}
int __wrap_ocf_freelist_trylock(ocf_freelist_t freelist, uint32_t ctx)
{
function_called();
check_expected(ctx);
return mock();
}
/* metadata partition info interface mock: */
#define max_clines 100
struct {
ocf_cache_line_t prev;
ocf_cache_line_t next;
} partition_list[max_clines];
void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
assert_int_equal(part_id, PARTITION_INVALID);
partition_list[line].prev = prev_line;
partition_list[line].next = next_line;
}
void __wrap_ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
{
if (part_id)
*part_id = PARTITION_INVALID;
if (prev_line)
*prev_line = partition_list[line].prev;
if (next_line)
*next_line = partition_list[line].next;
}
void __wrap_ocf_metadata_set_partition_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
partition_list[line].prev = prev_line;
}
void __wrap_ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
{
partition_list[line].next = next_line;
}
static void ocf_freelist_get_put_locks(void **state)
{
unsigned num_cls = 4;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify lock/trylock sequence in get free cacheline");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
/* simulate context 1 for the entire test duration */
will_return_maybe(__wrap_env_get_execution_context, 1);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/****************************************************************/
/* verify fast path locking - successful trylock */
/* ctx 0: 0, 3
* ctx 1: 1
* ctx 2: 2
* slowpath next victim: 0
*/
expect_value(__wrap_ocf_freelist_trylock, ctx, 1);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 0);
ocf_freelist_get_cache_line(freelist, &line);
/****************************************************************/
/* verify fast path locking - successful trylock in slowpath */
/* ctx 0: 0, 3
* ctx 1:
* ctx 2: 2
* slowpath next victim: 0 */
/* we expect trylock for context 0, since context 1 has empty list */
expect_value(__wrap_ocf_freelist_trylock, ctx, 0);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 0);
ocf_freelist_get_cache_line(freelist, &line);
/****************************************************************/
/* verify fast path locking - trylock failure in slowpath */
/* ctx 0: 3
* ctx 1:
* ctx 2: 2
* slowpath next victim: 1 */
/* fastpath will fail immediately - context 1 list is empty */
/* next slowpath victim context (1) is empty - will move to ctx 2 */
/* so now we expect trylock for context no 2 - injecting error here */
expect_value(__wrap_ocf_freelist_trylock, ctx, 2);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 1);
/* slowpath will attempt to trylock next non-empty context - 0
* - injecting error here as well */
expect_value(__wrap_ocf_freelist_trylock, ctx, 0);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 1);
/* slowpath trylock loop failed - expecting full lock */
expect_value(__wrap_ocf_freelist_lock, ctx, 2);
expect_function_call(__wrap_ocf_freelist_lock);
/* execute freelist_get_cache_line */
ocf_freelist_get_cache_line(freelist, &line);
/****************************************************************/
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_get_put_locks)
};
print_message("Unit test for ocf_freelist_get_cache_line locking\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,138 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_populate</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* ocf_freelist_populate
* next_phys_invalid
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_populate_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
return mock();
}
ocf_cache_line_t __wrap_env_get_execution_context_count(ocf_cache_t cache)
{
return mock();
}
/* simulate no striping */
ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
{
return phy;
}
bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
{
return mock();
}
void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
print_message("%s %u %u %u\n", __func__, prev_line, line, next_line);
check_expected(line);
check_expected(part_id);
check_expected(next_line);
check_expected(prev_line);
}
#define expect_set_info(curr, part, next, prev) \
expect_value(__wrap_ocf_metadata_set_partition_info, line, curr); \
expect_value(__wrap_ocf_metadata_set_partition_info, part_id, part); \
expect_value(__wrap_ocf_metadata_set_partition_info, next_line, next); \
expect_value(__wrap_ocf_metadata_set_partition_info, prev_line, prev);
static void ocf_freelist_populate_test01(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
print_test_description("Verify proper set_partition_info order and arguments - empty cache");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
ocf_freelist_init(&freelist, NULL);
expect_set_info(0, PARTITION_INVALID, 1 , num_cls);
expect_set_info(1, PARTITION_INVALID, 2 , 0);
expect_set_info(2, PARTITION_INVALID, num_cls, 1);
expect_set_info(3, PARTITION_INVALID, 4 , num_cls);
expect_set_info(4, PARTITION_INVALID, 5 , 3);
expect_set_info(5, PARTITION_INVALID, num_cls, 4);
expect_set_info(6, PARTITION_INVALID, 7 , num_cls);
expect_set_info(7, PARTITION_INVALID, num_cls, 6);
ocf_freelist_populate(freelist, num_cls);
ocf_freelist_deinit(freelist);
}
static void ocf_freelist_populate_test02(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
print_test_description("Verify proper set_partition_info order and arguments - some valid clines");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
ocf_freelist_init(&freelist, NULL);
/* simulate only cachelines 2, 3, 4, 7 invalid */
will_return(__wrap_metadata_test_valid_any, true);
will_return(__wrap_metadata_test_valid_any, true);
will_return(__wrap_metadata_test_valid_any, false);
will_return(__wrap_metadata_test_valid_any, false);
will_return(__wrap_metadata_test_valid_any, false);
will_return(__wrap_metadata_test_valid_any, true);
will_return(__wrap_metadata_test_valid_any, true);
will_return(__wrap_metadata_test_valid_any, false);
expect_set_info(2, PARTITION_INVALID, 3 , num_cls);
expect_set_info(3, PARTITION_INVALID, num_cls, 2);
expect_set_info(4, PARTITION_INVALID, num_cls, num_cls);
expect_set_info(7, PARTITION_INVALID, num_cls, num_cls);
ocf_freelist_populate(freelist, 4);
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_populate_test01),
cmocka_unit_test(ocf_freelist_populate_test02)
};
print_message("Unit test of src/ocf_freelist.c\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}