diff --git a/env/posix/ocf_env.h b/env/posix/ocf_env.h
index bda31bd..b144104 100644
--- a/env/posix/ocf_env.h
+++ b/env/posix/ocf_env.h
@@ -46,6 +46,7 @@
#define min(a,b) MIN(a,b)
#define ENV_PRIu64 "lu"
+#define ENV_PRId64 "ld"
typedef uint8_t u8;
typedef uint16_t u16;
diff --git a/inc/ocf_io_class.h b/inc/ocf_io_class.h
index c8cb05f..88ca597 100644
--- a/inc/ocf_io_class.h
+++ b/inc/ocf_io_class.h
@@ -40,8 +40,8 @@ struct ocf_io_class_info {
uint32_t max_size;
/*!< Maximum number of cache lines that might be assigned into
- * this IO class. If current size reach maximum size no more
- * allocation for this IO class takes place
+ * this IO class. If the current size reaches the maximum size, some of
+ * the IO class's cache lines are evicted.
*/
uint8_t eviction_policy_type;
diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c
index 9e8b0ee..6eb5083 100644
--- a/src/engine/engine_common.c
+++ b/src/engine/engine_common.c
@@ -14,6 +14,7 @@
#include "../utils/utils_cache_line.h"
#include "../ocf_request.h"
#include "../utils/utils_cleaner.h"
+#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../promotion/promotion.h"
@@ -127,7 +128,8 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
/*
* Need to move this cache line into other partition
*/
- _entry->re_part = req->info.re_part = true;
+ _entry->re_part = true;
+ req->info.re_part_no++;
}
break;
@@ -254,7 +256,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,
ocf_cleaning_t clean_policy_type;
if (!ocf_freelist_get_cache_line(cache->freelist, cache_line)) {
- req->info.mapping_error = 1;
+ ocf_req_set_mapping_error(req);
return;
}
@@ -334,7 +336,7 @@ static void ocf_engine_map(struct ocf_request *req)
if (ocf_engine_unmapped_count(req) >
ocf_freelist_num_free(cache->freelist)) {
- req->info.mapping_error = 1;
+ ocf_req_set_mapping_error(req);
return;
}
@@ -353,7 +355,7 @@ static void ocf_engine_map(struct ocf_request *req)
ocf_engine_map_cache_line(req, entry->core_line,
entry->hash, &entry->coll_idx);
- if (req->info.mapping_error) {
+ if (ocf_req_test_mapping_error(req)) {
/*
* Eviction error (mapping error), need to
* clean, return and do pass through
@@ -375,7 +377,7 @@ static void ocf_engine_map(struct ocf_request *req)
}
- if (!req->info.mapping_error) {
+ if (!ocf_req_test_mapping_error(req)) {
/* request has been inserted into cache - purge it from promotion
* policy */
ocf_promotion_req_purge(cache->promotion_policy, req);
@@ -408,15 +410,6 @@ static void _ocf_engine_clean_end(void *private_data, int error)
}
}
-static int ocf_engine_evict(struct ocf_request *req)
-{
- if (!ocf_engine_unmapped_count(req))
- return 0;
-
- return space_managment_evict_do(req->cache, req,
- ocf_engine_unmapped_count(req));
-}
-
static int lock_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
@@ -432,13 +425,139 @@ static int lock_clines(struct ocf_request *req,
}
}
+static inline int ocf_prepare_clines_hit(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs)
+{
+ int lock_status = -OCF_ERR_NO_LOCK;
+ struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
+ uint32_t clines_to_evict;
+ int res;
+
+ /* Cachelines are mapped to an enabled target partition - no eviction or
+ * repart is needed, just acquire the cacheline locks */
+ if (ocf_part_is_enabled(&req->cache->user_parts[req->part_id]) &&
+ !ocf_engine_needs_repart(req)) {
+ lock_status = lock_clines(req, engine_cbs);
+ ocf_req_hash_unlock_rd(req);
+ return lock_status;
+ }
+
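+ /* Either the target partition is disabled or some cachelines need to be
+ * moved into it - check how much space it has left */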
+ res = ocf_part_check_space(req, &clines_to_evict);
+
+ if (res == OCF_PART_HAS_SPACE)
+ lock_status = lock_clines(req, engine_cbs);
+
+ /* Since the target part is empty and disabled, the request should be
+ * submitted in pass-through */
+ if (res == OCF_PART_IS_DISABLED)
+ ocf_req_set_mapping_error(req);
+
+ ocf_req_hash_unlock_rd(req);
+
+ if (res != OCF_PART_IS_FULL)
+ return lock_status;
+
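+ /* Target partition is full - evict under the exclusive metadata lock;
+ * the space check is repeated since the state may have changed after the
+ * hash-bucket lock was released */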
+ ocf_metadata_start_exclusive_access(metadata_lock);
+ ocf_part_check_space(req, &clines_to_evict);
+
+ if (space_managment_evict_do(req->cache, req, clines_to_evict) ==
+ LOOKUP_MISS) {
+ ocf_req_set_mapping_error(req);
+ goto unlock;
+ }
+
+ if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
+ /* Target part is disabled but had some cachelines assigned. Submit
+ * request in pass-through after eviction has been made */
+ ocf_req_set_mapping_error(req);
+ goto unlock;
+ }
+
+ lock_status = lock_clines(req, engine_cbs);
+
+unlock:
+ ocf_metadata_end_exclusive_access(metadata_lock);
+
+ return lock_status;
+}
+
+static inline int ocf_prepare_clines_miss(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs)
+{
+ int lock_status = -OCF_ERR_NO_LOCK;
+ struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
+ uint32_t clines_to_evict = 0;
+ int res;
+
+ /* Mapping must be performed holding (at least) hash-bucket write lock */
+ ocf_req_hash_lock_upgrade(req);
+
+ /* Check whether the partition occupancy threshold has not been reached yet
+ * and the cache is not out of free cachelines */
+ res = ocf_part_check_space(req, &clines_to_evict);
+ if (res == OCF_PART_IS_DISABLED) {
+ ocf_req_set_mapping_error(req);
+ ocf_req_hash_unlock_wr(req);
+ return lock_status;
+ }
+
+ if (res == OCF_PART_HAS_SPACE) {
+ ocf_engine_map(req);
+ if (ocf_req_test_mapping_error(req)) {
+ goto eviction;
+ }
+
+ lock_status = lock_clines(req, engine_cbs);
+ if (lock_status < 0) {
+ /* Mapping succeeded, but we failed to acquire cacheline lock.
+ * Don't try to evict, just return error to caller */
+ ocf_req_set_mapping_error(req);
+ }
+
+ ocf_req_hash_unlock_wr(req);
+ return lock_status;
+ }
+
+eviction:
+ ocf_req_hash_unlock_wr(req);
+ ocf_metadata_start_exclusive_access(metadata_lock);
+
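+ /* Recalculate the number of cachelines to evict - the occupancy may have
+ * changed while no lock was held */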
+ ocf_part_check_space(req, &clines_to_evict);
+
+ if (space_managment_evict_do(req->cache, req, clines_to_evict) ==
+ LOOKUP_MISS) {
+ ocf_req_set_mapping_error(req);
+ goto unlock;
+ }
+
+ if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
+ /* Partition is disabled but it had cachelines assigned. Now that they
+ * are evicted, don't try to map cachelines - we don't want to insert
+ * new ones - the request should be submitted in pass-through mode
+ * instead */
+ ocf_req_set_mapping_error(req);
+ goto unlock;
+ }
+
+ ocf_engine_map(req);
+ if (ocf_req_test_mapping_error(req))
+ goto unlock;
+
+ lock_status = lock_clines(req, engine_cbs);
+ if (lock_status < 0)
+ ocf_req_set_mapping_error(req);
+
+unlock:
+ ocf_metadata_end_exclusive_access(metadata_lock);
+
+ return lock_status;
+}
+
int ocf_engine_prepare_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
bool mapped;
bool promote = true;
- int lock = -ENOENT;
- struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
+ int lock = -OCF_ERR_NO_LOCK;
/* Calculate hashes for hash-bucket locking */
ocf_req_hash(req);
@@ -452,50 +571,19 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
- if (mapped) {
- /* Request cachelines are already mapped, acquire cacheline
- * lock */
- lock = lock_clines(req, engine_cbs);
- } else {
- /* check if request should promote cachelines */
- promote = ocf_promotion_req_should_promote(
- req->cache->promotion_policy, req);
- if (!promote)
- req->info.mapping_error = 1;
- }
+ if (mapped)
+ return ocf_prepare_clines_hit(req, engine_cbs);
- if (mapped || !promote) {
- /* Will not attempt mapping - release hash bucket lock */
+ /* check if request should promote cachelines */
+ promote = ocf_promotion_req_should_promote(
+ req->cache->promotion_policy, req);
+ if (!promote) {
+ ocf_req_set_mapping_error(req);
ocf_req_hash_unlock_rd(req);
- } else {
- /* Need to map (potentially evict) cachelines. Mapping must be
- * performed holding (at least) hash-bucket write lock */
- ocf_req_hash_lock_upgrade(req);
- ocf_engine_map(req);
- if (!req->info.mapping_error)
- lock = lock_clines(req, engine_cbs);
- ocf_req_hash_unlock_wr(req);
-
- if (req->info.mapping_error) {
- /* Not mapped - evict cachelines under global exclusive
- * lock*/
- ocf_metadata_start_exclusive_access(metadata_lock);
-
- /* Now there is exclusive access for metadata. May
- * traverse once again and evict cachelines if needed.
- */
- if (ocf_engine_evict(req) == LOOKUP_MAPPED)
- ocf_engine_map(req);
-
- if (!req->info.mapping_error)
- lock = lock_clines(req, engine_cbs);
-
- ocf_metadata_end_exclusive_access(metadata_lock);
- }
+ return lock;
}
-
- return lock;
+ return ocf_prepare_clines_miss(req, engine_cbs);
}
static int _ocf_engine_clean_getter(struct ocf_cache *cache,
diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h
index 2557c64..8ec9d0c 100644
--- a/src/engine/engine_common.h
+++ b/src/engine/engine_common.h
@@ -47,6 +47,20 @@ static inline bool ocf_engine_is_hit(struct ocf_request *req)
*/
#define ocf_engine_is_miss(req) (!ocf_engine_is_hit(req))
+/**
+ * @brief Check if any of the request's cache lines need to be reassigned to
+ * the request's target partition
+ *
+ * @param req OCF request
+ *
+ * @retval true some of the request's cache lines need to be reassigned to
+ * the target partition
+ * @retval false all of the request's cache lines are already assigned to
+ * the target partition
+ */
+static inline bool ocf_engine_needs_repart(struct ocf_request *req)
+{
+ return req->info.re_part_no > 0;
+}
+
/**
* @brief Check if all cache lines are mapped fully
*
@@ -98,6 +112,18 @@ static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req)
return req->core_line_count - (req->info.hit_no + req->info.invalid_no);
}
+/**
+ * @brief Get number of cache lines to repart
+ *
+ * @param req OCF request
+ *
+ * @retval Number of cache lines to repart
+ */
+static inline uint32_t ocf_engine_repart_count(struct ocf_request *req)
+{
+ return req->info.re_part_no;
+}
+
/**
* @brief Get number of IOs to perform cache read or write
*
diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c
index b8b867f..23a828b 100644
--- a/src/engine/engine_fast.c
+++ b/src/engine/engine_fast.c
@@ -69,7 +69,7 @@ static int _ocf_read_fast_do(struct ocf_request *req)
/* Get OCF request - increase reference counter */
ocf_req_get(req);
- if (req->info.re_part) {
+ if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -108,6 +108,7 @@ int ocf_read_fast(struct ocf_request *req)
{
bool hit;
int lock = OCF_LOCK_NOT_ACQUIRED;
+ bool part_has_space = false;
/* Get OCF request - increase reference counter */
ocf_req_get(req);
@@ -124,14 +125,18 @@ int ocf_read_fast(struct ocf_request *req)
ocf_engine_traverse(req);
hit = ocf_engine_is_hit(req);
- if (hit) {
+
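+ /* Fast path must not trigger eviction - if the target partition is out of
+ * space, fall back to the generic path */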
+ if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE)
+ part_has_space = true;
+
+ if (hit && part_has_space) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req);
- if (hit) {
+ if (hit && part_has_space) {
OCF_DEBUG_RQ(req, "Fast path success");
if (lock >= 0) {
@@ -154,10 +159,7 @@ int ocf_read_fast(struct ocf_request *req)
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
- if (hit)
- return OCF_FAST_PATH_YES;
- else
- return OCF_FAST_PATH_NO;
+ return (hit && part_has_space) ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}
/* __ __ _ _ ______ _ _____ _ _
@@ -177,6 +179,7 @@ int ocf_write_fast(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
+ bool part_has_space = false;
/* Get OCF request - increase reference counter */
ocf_req_get(req);
@@ -193,14 +196,18 @@ int ocf_write_fast(struct ocf_request *req)
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
- if (mapped) {
+
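+ /* As in the read fast path, don't take the fast path if the target
+ * partition would require eviction */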
+ if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE)
+ part_has_space = true;
+
+ if (mapped && part_has_space) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req);
- if (mapped) {
+ if (mapped && part_has_space) {
if (lock >= 0) {
OCF_DEBUG_RQ(req, "Fast path success");
@@ -223,6 +230,5 @@ int ocf_write_fast(struct ocf_request *req)
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
- return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
-
+ return (mapped && part_has_space) ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}
diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c
index 5485fcf..9144d84 100644
--- a/src/engine/engine_pt.c
+++ b/src/engine/engine_pt.c
@@ -67,7 +67,7 @@ int ocf_read_pt_do(struct ocf_request *req)
return 0;
}
- if (req->info.re_part) {
+ if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c
index 566f83e..b118262 100644
--- a/src/engine/engine_rd.c
+++ b/src/engine/engine_rd.c
@@ -172,7 +172,7 @@ static int _ocf_read_generic_do(struct ocf_request *req)
ocf_req_hash_unlock_rd(req);
}
- if (req->info.re_part) {
+ if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -243,7 +243,7 @@ int ocf_read_generic(struct ocf_request *req)
lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);
- if (!req->info.mapping_error) {
+ if (!ocf_req_test_mapping_error(req)) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c
index 305125e..a018969 100644
--- a/src/engine/engine_wb.c
+++ b/src/engine/engine_wb.c
@@ -121,7 +121,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
* 3. Then continue processing request (flush metadata)
*/
- if (req->info.re_part) {
+ if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -189,7 +189,7 @@ int ocf_write_wb(struct ocf_request *req)
lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks);
- if (!req->info.mapping_error) {
+ if (!ocf_req_test_mapping_error(req)) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c
index 00bd303..26d1163 100644
--- a/src/engine/engine_wt.c
+++ b/src/engine/engine_wt.c
@@ -118,7 +118,7 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
ocf_req_hash_unlock_wr(req);
}
- if (req->info.re_part) {
+ if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -183,7 +183,7 @@ int ocf_write_wt(struct ocf_request *req)
lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks);
- if (!req->info.mapping_error) {
+ if (!ocf_req_test_mapping_error(req)) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c
index 5a41935..a61e9df 100644
--- a/src/eviction/eviction.c
+++ b/src/eviction/eviction.c
@@ -20,10 +20,14 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
},
};
-static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
- uint32_t to_evict)
+static uint32_t ocf_evict_calculate(ocf_cache_t cache,
+ struct ocf_user_part *part, uint32_t to_evict, bool roundup)
{
- if (part->runtime->curr_size <= part->config->min_size) {
+
+ uint32_t curr_part_size = ocf_part_get_occupancy(part);
+ uint32_t min_part_size = ocf_part_get_min_size(cache, part);
+
+ if (curr_part_size <= min_part_size) {
/*
* Cannot evict from this partition because current size
* is less than minimum size
@@ -31,15 +35,31 @@ static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
return 0;
}
- if (to_evict < OCF_TO_EVICTION_MIN)
+ if (roundup && to_evict < OCF_TO_EVICTION_MIN)
to_evict = OCF_TO_EVICTION_MIN;
- if (to_evict > (part->runtime->curr_size - part->config->min_size))
- to_evict = part->runtime->curr_size - part->config->min_size;
+ if (to_evict > (curr_part_size - min_part_size))
+ to_evict = curr_part_size - min_part_size;
return to_evict;
}
+static inline uint32_t ocf_evict_part_do(ocf_cache_t cache,
+ ocf_queue_t io_queue, const uint32_t evict_cline_no,
+ struct ocf_user_part *target_part)
+{
+ uint32_t to_evict = 0;
+
+ if (!evp_lru_can_evict(cache))
+ return 0;
+
+ to_evict = ocf_evict_calculate(cache, target_part, evict_cline_no,
+ false);
+
+ return ocf_eviction_need_space(cache, io_queue,
+ target_part, to_evict);
+}
+
static inline uint32_t ocf_evict_do(ocf_cache_t cache,
ocf_queue_t io_queue, const uint32_t evict_cline_no,
struct ocf_user_part *target_part)
@@ -67,16 +87,13 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
/* It seams that no more partition for eviction */
break;
}
- if (part_id == target_part->id) {
- /* Omit targeted, evict from different first */
- continue;
- }
if (evicted >= evict_cline_no) {
/* Evicted requested number of cache line, stop */
goto out;
}
- to_evict = ocf_evict_calculate(part, evict_cline_no);
+ to_evict = ocf_evict_calculate(cache, part, evict_cline_no,
+ true);
if (to_evict == 0) {
/* No cache lines to evict for this partition */
continue;
@@ -86,18 +103,6 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
part, to_evict);
}
- if (!ocf_eviction_can_evict(cache))
- goto out;
-
- if (evicted < evict_cline_no) {
- /* Now we can evict form targeted partition */
- to_evict = ocf_evict_calculate(target_part, evict_cline_no);
- if (to_evict) {
- evicted += ocf_eviction_need_space(cache, io_queue,
- target_part, to_evict);
- }
- }
-
out:
return evicted;
}
@@ -109,16 +114,22 @@ int space_managment_evict_do(struct ocf_cache *cache,
uint32_t free;
struct ocf_user_part *req_part = &cache->user_parts[req->part_id];
- free = ocf_freelist_num_free(cache->freelist);
- if (evict_cline_no <= free)
- return LOOKUP_MAPPED;
+ if (ocf_req_part_evict(req)) {
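+ /* Target partition reached its occupancy limit - evict only from
+ * that partition */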
+ evicted = ocf_evict_part_do(cache, req->io_queue, evict_cline_no,
+ req_part);
+ } else {
+ free = ocf_freelist_num_free(cache->freelist);
+ if (evict_cline_no <= free)
+ return LOOKUP_MAPPED;
- evict_cline_no -= free;
- evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
+ evict_cline_no -= free;
+
+ evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
+ }
if (evict_cline_no <= evicted)
return LOOKUP_MAPPED;
- req->info.mapping_error |= true;
+ ocf_req_set_mapping_error(req);
return LOOKUP_MISS;
}
diff --git a/src/eviction/eviction.h b/src/eviction/eviction.h
index 4843d37..a385c68 100644
--- a/src/eviction/eviction.h
+++ b/src/eviction/eviction.h
@@ -58,11 +58,11 @@ struct eviction_policy_ops {
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
/*
- * Deallocates space from low priority partitions.
+ * Deallocates space according to eviction priorities.
*
- * Returns -1 on error
- * or the destination partition ID for the free buffers
- * (it matches label and is part of the object (#core_id) IO group)
+ * @returns:
+ * 'LOOKUP_MAPPED' if enough cachelines were evicted to serve @req
+ * 'LOOKUP_MISS' otherwise
*/
int space_managment_evict_do(ocf_cache_t cache,
struct ocf_request *req, uint32_t evict_cline_no);
diff --git a/src/metadata/metadata_partition.h b/src/metadata/metadata_partition.h
index 61beb0e..2ce804d 100644
--- a/src/metadata/metadata_partition.h
+++ b/src/metadata/metadata_partition.h
@@ -11,7 +11,7 @@
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
-#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1)
+#define PARTITION_SIZE_MAX 100
void ocf_metadata_get_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
diff --git a/src/metadata/metadata_partition_structs.h b/src/metadata/metadata_partition_structs.h
index baa8c88..8e93a0a 100644
--- a/src/metadata/metadata_partition_structs.h
+++ b/src/metadata/metadata_partition_structs.h
@@ -11,26 +11,26 @@
#include "../eviction/eviction.h"
struct ocf_user_part_config {
- char name[OCF_IO_CLASS_NAME_MAX];
- uint32_t min_size;
- uint32_t max_size;
- int16_t priority;
- ocf_cache_mode_t cache_mode;
- struct {
- uint8_t valid : 1;
- uint8_t added : 1;
- uint8_t eviction : 1;
- /*!< This bits is setting during partition sorting,
- * and means that can evict from this partition
- */
- } flags;
+ char name[OCF_IO_CLASS_NAME_MAX];
+ uint32_t min_size;
+ uint32_t max_size;
+ int16_t priority;
+ ocf_cache_mode_t cache_mode;
+ struct {
+ uint8_t valid : 1;
+ uint8_t added : 1;
+ uint8_t eviction : 1;
+ /*!< This bit is set during partition sorting and
+ * means that eviction from this partition is allowed
+ */
+ } flags;
};
struct ocf_user_part_runtime {
- uint32_t curr_size;
- uint32_t head;
- struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
- struct cleaning_policy cleaning;
+ uint32_t curr_size;
+ uint32_t head;
+ struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
+ struct cleaning_policy cleaning;
};
/* Iterator state, visiting all eviction lists within a partition
diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c
index 481c061..a8ece82 100644
--- a/src/mngt/ocf_mngt_io_class.c
+++ b/src/mngt/ocf_mngt_io_class.c
@@ -31,6 +31,8 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
uint32_t max_size, uint8_t priority, bool valid)
{
uint32_t size;
+ struct ocf_lst_entry *iter;
+ uint32_t iter_id;
if (!name)
return -OCF_ERR_INVAL;
@@ -41,6 +43,9 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
if (cache->user_parts[part_id].config->flags.valid)
return -OCF_ERR_INVAL;
+ if (min_size > max_size)
+ return -OCF_ERR_INVAL;
+
if (max_size > PARTITION_SIZE_MAX)
return -OCF_ERR_INVAL;
@@ -51,6 +56,14 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
return -OCF_ERR_INVAL;
}
+ for_each_lst(&cache->lst_part, iter, iter_id) {
+ if (iter_id == part_id) {
+ ocf_cache_log(cache, log_err,
+ "Part with id %hu already exists\n", part_id);
+ return -OCF_ERR_INVAL;
+ }
+ }
+
size = sizeof(cache->user_parts[part_id].config->name);
if (env_strncpy(cache->user_parts[part_id].config->name, size, name, size))
return -OCF_ERR_INVAL;
@@ -77,8 +90,7 @@ static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
if (min > max)
return -OCF_ERR_INVAL;
- if (_ocf_mngt_count_parts_min_size(cache) + min
- >= cache->device->collision_table_entries) {
+ if (_ocf_mngt_count_parts_min_size(cache) + min > PARTITION_SIZE_MAX) {
/* Illegal configuration in which sum of all min_sizes exceeds
* cache size.
*/
@@ -126,17 +138,17 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
/* Try set partition size */
if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
ocf_cache_log(cache, log_info,
- "Setting IO class size, id: %u, name: '%s' "
- "[ ERROR ]\n", part_id, dest_part->config->name);
+ "Setting IO class size, id: %u, name: '%s', max size: %u%%"
+ " [ ERROR ]\n", part_id, dest_part->config->name, max);
return -OCF_ERR_INVAL;
}
ocf_part_set_prio(cache, dest_part, prio);
dest_part->config->cache_mode = cache_mode;
ocf_cache_log(cache, log_info,
- "Updating unclassified IO class, id: "
- "%u [ OK ]\n", part_id);
-
+ "Updating unclassified IO class, id: %u, name :'%s',"
+ "max size: %u%% [ OK ]\n",
+ part_id, dest_part->config->name, max);
return 0;
}
@@ -150,23 +162,23 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
/* Try set partition size */
if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
ocf_cache_log(cache, log_info,
- "Setting IO class size, id: %u, name: '%s' "
- "[ ERROR ]\n", part_id, dest_part->config->name);
+ "Setting IO class size, id: %u, name: '%s', max size %u%%"
+ "[ ERROR ]\n", part_id, dest_part->config->name, max);
return -OCF_ERR_INVAL;
}
if (ocf_part_is_valid(dest_part)) {
/* Updating existing */
ocf_cache_log(cache, log_info, "Updating existing IO "
- "class, id: %u, name: '%s' [ OK ]\n",
- part_id, dest_part->config->name);
+ "class, id: %u, name: '%s', max size %u%% [ OK ]\n",
+ part_id, dest_part->config->name, max);
} else {
/* Adding new */
ocf_part_set_valid(cache, part_id, true);
ocf_cache_log(cache, log_info, "Adding new IO class, "
- "id: %u, name: '%s' [ OK ]\n", part_id,
- dest_part->config->name);
+ "id: %u, name: '%s', max size %u%% [ OK ]\n", part_id,
+ dest_part->config->name, max);
}
ocf_part_set_prio(cache, dest_part, prio);
diff --git a/src/ocf_request.h b/src/ocf_request.h
index 2d9c7f8..08431dc 100644
--- a/src/ocf_request.h
+++ b/src/ocf_request.h
@@ -13,9 +13,10 @@
struct ocf_req_allocator;
struct ocf_req_info {
- /* Number of hits, invalid, misses. */
+ /* Number of hits, invalid, misses, reparts. */
unsigned int hit_no;
unsigned int invalid_no;
+ unsigned int re_part_no;
uint32_t dirty_all;
/*!< Number of dirty line in request*/
@@ -32,11 +33,6 @@ struct ocf_req_info {
uint32_t mapping_error : 1;
/*!< Core lines in this request were not mapped into cache */
- uint32_t re_part : 1;
- /*!< This bit indicate that in the request some cache lines
- * has to be moved to another partition
- */
-
uint32_t core_error : 1;
/*!< Error occured during I/O on core device */
@@ -191,6 +187,9 @@ struct ocf_request {
uint8_t wi_second_pass : 1;
/*!< Set after first pass of WI write is completed */
+ uint8_t part_evict : 1;
+ /*!< Some cachelines from the request's target partition must be evicted */
+
log_sid_t sid;
/*!< Tracing sequence ID */
@@ -332,6 +331,40 @@ void ocf_req_clear_map(struct ocf_request *req);
*/
void ocf_req_hash(struct ocf_request *req);
+/**
+ * @brief Request should trigger eviction from its target partition
+ *
+ * @param req - OCF request
+ */
+static inline void ocf_req_set_part_evict(struct ocf_request *req)
+{
+ req->part_evict = true;
+}
+
+/**
+ * @brief Request shouldn't trigger eviction from its target partition
+ *
+ * @param req - OCF request
+ */
+static inline void ocf_req_clear_part_evict(struct ocf_request *req)
+{
+ req->part_evict = false;
+}
+
+/**
+ * @brief Check whether request should trigger eviction from its target
+ * partition or from any partition
+ *
+ * @param req - OCF request
+ * @return true - Eviction should be triggered from the request's target partition
+ * @return false - Eviction should be triggered with respect to eviction
+ * priority
+ */
+static inline bool ocf_req_part_evict(struct ocf_request *req)
+{
+ return req->part_evict;
+}
+
int ocf_req_set_dirty(struct ocf_request *req);
/**
@@ -348,6 +381,16 @@ static inline void ocf_req_clear(struct ocf_request *req)
env_atomic_set(&req->req_remaining, 0);
}
+static inline void ocf_req_set_mapping_error(struct ocf_request *req)
+{
+ req->info.mapping_error = true;
+}
+
+static inline bool ocf_req_test_mapping_error(struct ocf_request *req)
+{
+ return req->info.mapping_error;
+}
+
/**
* @brief Return OCF request reference count
*
diff --git a/src/ocf_stats.c b/src/ocf_stats.c
index 102c9fb..2f5e37f 100644
--- a/src/ocf_stats.c
+++ b/src/ocf_stats.c
@@ -276,10 +276,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
struct ocf_stats_io_class *stats)
{
ocf_cache_t cache;
- uint32_t cache_occupancy_total = 0;
struct ocf_counters_part *part_stat;
- ocf_core_t i_core;
- ocf_core_id_t i_core_id;
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(stats);
@@ -292,11 +289,6 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
if (!ocf_part_is_valid(&cache->user_parts[part_id]))
return -OCF_ERR_IO_CLASS_NOT_EXIST;
- for_each_core(cache, i_core, i_core_id) {
- cache_occupancy_total += env_atomic_read(
- &i_core->runtime_meta->cached_clines);
- }
-
part_stat = &core->counters->part_counters[part_id];
stats->occupancy_clines = env_atomic_read(&core->runtime_meta->
@@ -304,8 +296,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
stats->dirty_clines = env_atomic_read(&core->runtime_meta->
part_counters[part_id].dirty_clines);
- stats->free_clines = cache->conf_meta->cachelines -
- cache_occupancy_total;
+ stats->free_clines = 0;
copy_req_stats(&stats->read_reqs, &part_stat->read_reqs);
copy_req_stats(&stats->write_reqs, &part_stat->write_reqs);
diff --git a/src/ocf_stats_builder.c b/src/ocf_stats_builder.c
index 604928b..d418fe2 100644
--- a/src/ocf_stats_builder.c
+++ b/src/ocf_stats_builder.c
@@ -229,15 +229,9 @@ static void _ocf_stats_part_fill(ocf_cache_t cache, ocf_part_id_t part_id,
_lines4k(stats->occupancy_clines, cache_line_size),
_lines4k(cache_size, cache_line_size));
- if (part_id == PARTITION_DEFAULT) {
- _set(&usage->free,
- _lines4k(stats->free_clines, cache_line_size),
- _lines4k(cache_size, cache_line_size));
- } else {
- _set(&usage->free,
- _lines4k(0, cache_line_size),
- _lines4k(0, cache_line_size));
- }
+ _set(&usage->free,
+ _lines4k(stats->free_clines, cache_line_size),
+ _lines4k(cache_size, cache_line_size));
_set(&usage->clean,
_lines4k(stats->occupancy_clines - stats->dirty_clines,
diff --git a/src/utils/utils_part.c b/src/utils/utils_part.c
index 3da853b..7ee28d8 100644
--- a/src/utils/utils_part.c
+++ b/src/utils/utils_part.c
@@ -190,3 +190,82 @@ void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
}
}
}
+
+static inline uint32_t ocf_part_evict_size(struct ocf_request *req)
+{
+ uint32_t needed_cache_lines, part_available, cache_lines_to_evict;
+ uint32_t part_occupancy, part_occupancy_debt;
+ struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
+ uint32_t part_occupancy_limit =
+ ocf_part_get_max_size(req->cache, target_part);
+
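+ /* The request needs cachelines both for unmapped core lines and for lines
+ * that have to be moved into the target partition */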
+ needed_cache_lines = ocf_engine_repart_count(req) +
+ ocf_engine_unmapped_count(req);
+
+ part_occupancy = ocf_part_get_occupancy(target_part);
+
+ if (part_occupancy_limit >= part_occupancy) {
+ part_available = part_occupancy_limit - part_occupancy;
+ part_occupancy_debt = 0;
+ } else {
+ /* Occupancy is greater than the occupancy limit. Evict the missing number
+ * of cachelines, but no more than the single eviction limit */
+ part_occupancy_debt = min((uint32_t)OCF_PENDING_EVICTION_LIMIT,
+ part_occupancy - part_occupancy_limit);
+ part_available = 0;
+ }
+
+ if (ocf_freelist_num_free(req->cache->freelist) <
+ ocf_engine_unmapped_count(req)) {
+ /* Number of cachelines to insert is greater than the number of free
+ * cachelines */
+ if (part_available >= needed_cache_lines) {
+ /* Cache is full, but the target part's occupancy limit is not reached
+ */
+ ocf_req_clear_part_evict(req);
+ cache_lines_to_evict = needed_cache_lines;
+ } else {
+ /* Cache is full and the target part reached its occupancy limit */
+ ocf_req_set_part_evict(req);
+ cache_lines_to_evict = needed_cache_lines - part_available;
+ }
+
+ } else if (part_available < needed_cache_lines) {
+ /* Enough free cache lines, but the partition reached its occupancy
+ * limit */
+ cache_lines_to_evict = needed_cache_lines - part_available;
+ ocf_req_set_part_evict(req);
+
+ } else if (part_available >= needed_cache_lines) {
+ /* Enough free cachelines available and they can be assigned to target
+ * partition */
+ cache_lines_to_evict = 0;
+
+ }
+
+ return cache_lines_to_evict + part_occupancy_debt;
+}
+
+uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict)
+{
+ uint32_t ret = OCF_PART_IS_FULL;
+ uint32_t _to_evict;
+ struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
+
+ if (!ocf_part_is_enabled(target_part) &&
+ ocf_part_get_occupancy(target_part) == 0) {
+ /* Partition is disabled and has no cachelines assigned - the request can
+ * be handled in pass-through. If the partition still had cachelines
+ * assigned, eviction would have to be triggered instead */
+ return OCF_PART_IS_DISABLED;
+ }
+
+ _to_evict = ocf_part_evict_size(req);
+
+ if (_to_evict == 0)
+ ret = OCF_PART_HAS_SPACE;
+
+ if (to_evict)
+ *to_evict = _to_evict;
+
+ return ret;
+}
diff --git a/src/utils/utils_part.h b/src/utils/utils_part.h
index fcf63da..17c5fb2 100644
--- a/src/utils/utils_part.h
+++ b/src/utils/utils_part.h
@@ -8,6 +8,7 @@
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"
void ocf_part_init(struct ocf_cache *cache);
@@ -50,6 +51,38 @@ static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
return PARTITION_DEFAULT;
}
+static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part)
+{
+ return part->runtime->curr_size;
+}
+
+static inline uint32_t ocf_part_get_min_size(ocf_cache_t cache,
+ struct ocf_user_part *part)
+{
+ uint64_t ioclass_size;
+
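+ /* min_size is expressed as a percentage of the total cache size */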
+ ioclass_size = part->config->min_size * cache->conf_meta->cachelines;
+
+ ioclass_size /= 100;
+
+ return (uint32_t)ioclass_size;
+}
+
+
+static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache,
+ struct ocf_user_part *part)
+{
+ uint64_t ioclass_size, max_size, cache_size;
+
+ max_size = part->config->max_size;
+ cache_size = cache->conf_meta->cachelines;
+
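+ /* max_size is expressed as a percentage of the total cache size; round up
+ * so that a non-zero limit always maps to at least one cacheline */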
+ ioclass_size = max_size * cache_size;
+ ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100);
+
+ return (uint32_t)ioclass_size;
+}
+
void ocf_part_move(struct ocf_request *req);
#define for_each_part(cache, part, id) \
@@ -61,7 +94,27 @@ static inline void ocf_part_sort(struct ocf_cache *cache)
ocf_lst_sort(&cache->lst_part);
}
-static inline ocf_cache_mode_t ocf_part_get_cache_mode(struct ocf_cache *cache,
+static inline bool ocf_part_is_enabled(struct ocf_user_part *part)
+{
+ return part->config->max_size != 0;
+}
+
+#define OCF_PART_HAS_SPACE 0
+#define OCF_PART_IS_FULL 1
+#define OCF_PART_IS_DISABLED 2
+/**
+ * Check whether there are enough free cachelines to serve the request. If the
+ * partition occupancy limit is reached, `req->part_evict` is set to true.
+ * Otherwise the flag is set to false and eviction from any partition should be
+ * triggered.
+ *
+ * @return
+ * OCF_PART_HAS_SPACE when there is enough space to serve the request
+ * OCF_PART_IS_FULL when some cachelines need to be evicted to serve the request
+ * OCF_PART_IS_DISABLED when caching for the particular partition is disabled
+ */
+uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict);
+
+static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py
index d067c0f..9644342 100644
--- a/tests/functional/pyocf/types/cache.py
+++ b/tests/functional/pyocf/types/cache.py
@@ -25,6 +25,7 @@ from ..ocf import OcfLib
from .shared import (
Uuid,
OcfError,
+ OcfErrorCode,
CacheLineSize,
CacheLines,
OcfCompletion,
@@ -34,6 +35,7 @@ from ..utils import Size, struct_to_dict
from .core import Core
from .queue import Queue
from .stats.cache import CacheInfo
+from .ioclass import IoClassesInfo, IoClassInfo
from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
@@ -75,6 +77,9 @@ class ConfValidValues:
promotion_nhit_trigger_threshold_range = range(0, 100)
+CACHE_MODE_NONE = -1
+
+
class CacheMode(IntEnum):
WT = 0
WB = 1
@@ -299,6 +304,93 @@ class Cache:
if status:
raise OcfError("Error setting cache seq cut off policy", status)
+ def get_partition_info(self, part_id: int):
+ ioclass_info = IoClassInfo()
+ self.read_lock()
+
+ status = self.owner.lib.ocf_cache_io_class_get_info(
+ self.cache_handle, part_id, byref(ioclass_info)
+ )
+
+ self.read_unlock()
+ if status:
+ raise OcfError("Error retriving ioclass info", status)
+
+ return {
+ "_name": ioclass_info._name.decode("ascii"),
+ "_cache_mode": ioclass_info._cache_mode,
+ "_priority": int(ioclass_info._priority),
+ "_curr_size": (ioclass_info._curr_size),
+ "_min_size": int(ioclass_info._min_size),
+ "_max_size": int(ioclass_info._max_size),
+ "_eviction_policy_type": int(ioclass_info._eviction_policy_type),
+ "_cleaning_policy_type": int(ioclass_info._cleaning_policy_type),
+ }
+
+ def add_partition(
+ self, part_id: int, name: str, min_size: int, max_size: int, priority: int, valid: bool
+ ):
+ self.write_lock()
+
+ _name = name.encode("ascii")
+
+ status = self.owner.lib.ocf_mngt_add_partition_to_cache(
+ self.cache_handle, part_id, _name, min_size, max_size, priority, valid
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error adding partition to cache", status)
+
+ def configure_partition(
+ self,
+ part_id: int,
+ name: str,
+ min_size: int,
+ max_size: int,
+ priority: int,
+ cache_mode=CACHE_MODE_NONE,
+ ):
+ ioclasses_info = IoClassesInfo()
+
+ self.read_lock()
+
+ for i in range(IoClassesInfo.MAX_IO_CLASSES):
+ ioclass_info = IoClassInfo()
+ status = self.owner.lib.ocf_cache_io_class_get_info(
+ self.cache_handle, i, byref(ioclass_info)
+ )
+ if status not in [0, -OcfErrorCode.OCF_ERR_IO_CLASS_NOT_EXIST]:
+ raise OcfError("Error retriving existing ioclass config", status)
+ ioclasses_info._config[i]._class_id = i
+ ioclasses_info._config[i]._name = (
+ ioclass_info._name if len(ioclass_info._name) > 0 else 0
+ )
+ ioclasses_info._config[i]._prio = ioclass_info._priority
+ ioclasses_info._config[i]._cache_mode = ioclass_info._cache_mode
+ ioclasses_info._config[i]._min_size = ioclass_info._min_size
+ ioclasses_info._config[i]._max_size = ioclass_info._max_size
+
+ self.read_unlock()
+
+ ioclasses_info._config[part_id]._name = name.encode("ascii")
+ ioclasses_info._config[part_id]._cache_mode = int(cache_mode)
+ ioclasses_info._config[part_id]._prio = priority
+ ioclasses_info._config[part_id]._min_size = min_size
+ ioclasses_info._config[part_id]._max_size = max_size
+
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_io_classes_configure(
+ self.cache_handle, byref(ioclasses_info)
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error adding partition to cache", status)
+
def configure_device(
self, device, force=False, perform_test=True, cache_line_size=None
):
@@ -339,6 +431,21 @@ class Cache:
if c.results["error"]:
raise OcfError("Attaching cache device failed", c.results["error"])
+ def detach_device(self):
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+
+ self.owner.lib.ocf_mngt_cache_detach(
+ self.cache_handle, c, None
+ )
+
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Attaching cache device failed", c.results["error"])
+
def load_cache(self, device):
self.configure_device(device)
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
@@ -605,3 +712,17 @@ lib.ocf_mngt_cache_cleaning_set_param.argtypes = [
c_uint32,
]
lib.ocf_mngt_cache_cleaning_set_param.restype = c_int
+lib.ocf_cache_io_class_get_info.restype = c_int
+lib.ocf_cache_io_class_get_info.argtypes = [c_void_p, c_uint32, c_void_p]
+lib.ocf_mngt_add_partition_to_cache.restype = c_int
+lib.ocf_mngt_add_partition_to_cache.argtypes = [
+ c_void_p,
+ c_uint16,
+ c_char_p,
+ c_uint32,
+ c_uint32,
+ c_uint8,
+ c_bool,
+]
+lib.ocf_mngt_cache_io_classes_configure.restype = c_int
+lib.ocf_mngt_cache_io_classes_configure.argtypes = [c_void_p, c_void_p]
diff --git a/tests/functional/pyocf/types/ioclass.py b/tests/functional/pyocf/types/ioclass.py
new file mode 100644
index 0000000..ca84ad9
--- /dev/null
+++ b/tests/functional/pyocf/types/ioclass.py
@@ -0,0 +1,36 @@
+#
+# Copyright(c) 2019-2020 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint8, c_uint32, c_int, c_int16, c_uint16, c_char, c_char_p, Structure
+
+
+class IoClassInfo(Structure):
+ MAX_IO_CLASS_NAME_SIZE = 1024
+ _fields_ = [
+ ("_name", c_char * MAX_IO_CLASS_NAME_SIZE),
+ ("_cache_mode", c_int),
+ ("_priority", c_int16),
+ ("_curr_size", c_uint32),
+ ("_min_size", c_uint32),
+ ("_max_size", c_uint32),
+ ("_eviction_policy_type", c_uint8),
+ ("_cleaning_policy_type", c_int),
+ ]
+
+
+class IoClassConfig(Structure):
+ _fields_ = [
+ ("_class_id", c_uint32),
+ ("_name", c_char_p),
+ ("_prio", c_uint16),
+ ("_cache_mode", c_int),
+ ("_min_size", c_uint32),
+ ("_max_size", c_uint32),
+ ]
+
+
+class IoClassesInfo(Structure):
+ MAX_IO_CLASSES = 33
+ _fields_ = [("_config", IoClassConfig * MAX_IO_CLASSES)]
diff --git a/tests/functional/tests/management/test_attach_cache.py b/tests/functional/tests/management/test_attach_cache.py
new file mode 100644
index 0000000..5ec8002
--- /dev/null
+++ b/tests/functional/tests/management/test_attach_cache.py
@@ -0,0 +1,121 @@
+#
+# Copyright(c) 2019-2020 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int, c_void_p, byref, c_uint32, memmove, cast
+from random import randrange
+from itertools import count
+
+import pytest
+
+from pyocf.ocf import OcfLib
+from pyocf.types.cache import (
+ Cache,
+ CacheMode,
+ MetadataLayout,
+ EvictionPolicy,
+ CleaningPolicy,
+)
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import (
+ OcfError,
+ OcfCompletion,
+ CacheLines,
+ CacheLineSize,
+ SeqCutOffPolicy,
+)
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", [CacheMode.WB, CacheMode.WT, CacheMode.WO])
+@pytest.mark.parametrize("new_cache_size", [25, 45])
+def test_attach_different_size(
+ pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize
+):
+ """Start cache and add partition with limited occupancy. Fill partition with data,
+ attach cache with different size and trigger IO. Verify if occupancy threshold is
+ respected with both original and new cache device.
+ """
+ cache_device = Volume(Size.from_MiB(35))
+ core_device = Volume(Size.from_MiB(100))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ cache.configure_partition(
+ part_id=1, name="test_part", min_size=0, max_size=50, priority=1
+ )
+
+ cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
+
+ cache_size = cache.get_stats()["conf"]["size"]
+
+ block_size = 4096
+ data = bytes(block_size)
+
+ for i in range(cache_size.blocks_4k):
+ io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
+
+ part_current_size = CacheLines(
+ cache.get_partition_info(part_id=1)["_curr_size"], cls
+ )
+
+ assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
+
+ cache.detach_device()
+ new_cache_device = Volume(Size.from_MiB(new_cache_size))
+ cache.attach_device(new_cache_device, force=True)
+
+ cache_size = cache.get_stats()["conf"]["size"]
+
+ for i in range(cache_size.blocks_4k):
+ io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
+
+ part_current_size = CacheLines(
+ cache.get_partition_info(part_id=1)["_curr_size"], cls
+ )
+
+ assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
+
+
+def io_to_exp_obj(core, address, size, data, offset, direction, target_ioclass, flags):
+ return _io(
+ core.new_io,
+ core.cache.get_default_queue(),
+ address,
+ size,
+ data,
+ offset,
+ direction,
+ target_ioclass,
+ flags,
+ )
+
+
+def _io(new_io, queue, address, size, data, offset, direction, target_ioclass, flags):
+ io = new_io(queue, address, size, direction, target_ioclass, flags)
+ if direction == IoDir.READ:
+ _data = Data.from_bytes(bytes(size))
+ else:
+ _data = Data.from_bytes(data, offset, size)
+ ret = __io(io, queue, address, size, _data, direction)
+ if not ret and direction == IoDir.READ:
+ memmove(cast(data, c_void_p).value + offset, _data.handle, size)
+ return ret
+
+
+def __io(io, queue, address, size, data, direction):
+ io.set_data(data, 0)
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+ return int(completion.results["err"])
diff --git a/tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c b/tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c
new file mode 100644
index 0000000..d66bc8a
--- /dev/null
+++ b/tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c
@@ -0,0 +1,198 @@
+/*
+ * src/engine/engine_common.c
+ * ocf_prepare_clines_hit
+ *
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ *
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "../ocf_priv.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../ocf_freelist.h"
+#include "engine_common.h"
+#include "engine_debug.h"
+#include "../utils/utils_cache_line.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_part.h"
+#include "../metadata/metadata.h"
+#include "../eviction/eviction.h"
+#include "../promotion/promotion.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#include "engine/engine_common.c/prepare_clines_miss_generated_wraps.c"
+
+void __wrap_ocf_req_hash_unlock_rd(struct ocf_request *req)
+{
+}
+
+uint32_t __wrap_ocf_part_check_space(struct ocf_request *req,
+ uint32_t *to_evict)
+{
+ return mock();
+}
+
+int __wrap_lock_clines(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap_ocf_metadata_start_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+}
+
+void __wrap_ocf_metadata_end_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+}
+
+int __wrap_space_managment_evict_do(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t evict_cline_no)
+{
+ return mock();
+}
+
+bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part)
+{
+ return mock();
+}
+
+bool __wrap_ocf_engine_needs_repart(struct ocf_request *req)
+{
+ return mock();
+}
+
+void __wrap_ocf_req_set_mapping_error(struct ocf_request *req)
+{
+ function_called();
+}
+
+static void ocf_prepare_clines_hit_test01(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Request is hit and part is enabled\n");
+ will_return(__wrap_ocf_part_is_enabled, true);
+ will_return(__wrap_ocf_engine_needs_repart, false);
+
+ will_return(__wrap_lock_clines, 0);
+ expect_function_call(__wrap_lock_clines);
+
+ assert_int_equal(ocf_prepare_clines_hit(&req, NULL), 0);
+}
+
+static void ocf_prepare_clines_hit_test02(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Request is hit but part is disabled - tigger eviction\n");
+ will_return(__wrap_ocf_part_is_enabled, false);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_DISABLED);
+
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+
+ assert_int_equal(ocf_prepare_clines_hit(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_hit_test03(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Request needs repart, part has enough of a free space\n");
+ will_return(__wrap_ocf_part_is_enabled, true);
+ will_return(__wrap_ocf_engine_needs_repart, true);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_HAS_SPACE);
+
+ expect_function_call(__wrap_lock_clines);
+ will_return(__wrap_lock_clines, 0);
+
+ assert_int_equal(ocf_prepare_clines_hit(&req, NULL), 0);
+}
+
+static void ocf_prepare_clines_hit_test04(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Request needs repart, eviction fails\n");
+ will_return(__wrap_ocf_part_is_enabled, true);
+ will_return(__wrap_ocf_engine_needs_repart, true);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+
+ assert_int_equal(ocf_prepare_clines_hit(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_hit_test05(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Request needs repart, eviction passed, no lock\n");
+
+ will_return(__wrap_ocf_part_is_enabled, true);
+ will_return(__wrap_ocf_engine_needs_repart, true);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
+
+ expect_function_call(__wrap_lock_clines);
+ will_return(__wrap_lock_clines, OCF_ERR_NO_LOCK);
+
+ will_return(__wrap_ocf_part_is_enabled, true);
+
+ assert_int_equal(ocf_prepare_clines_hit(&req, NULL), OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_hit_test06(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Partition is disabled, but has some cachelines assigned.\n");
+ print_test_description("Trigger eviction and but don't lock cachelines\n");
+
+ will_return(__wrap_ocf_part_is_enabled, false);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
+
+ will_return(__wrap_ocf_part_is_enabled, false);
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+
+ assert_int_equal(ocf_prepare_clines_hit(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_prepare_clines_hit_test01),
+ cmocka_unit_test(ocf_prepare_clines_hit_test02),
+ cmocka_unit_test(ocf_prepare_clines_hit_test03),
+ cmocka_unit_test(ocf_prepare_clines_hit_test04),
+ cmocka_unit_test(ocf_prepare_clines_hit_test05),
+ cmocka_unit_test(ocf_prepare_clines_hit_test06)
+ };
+
+ print_message("Unit test for ocf_prepare_clines_hit\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c b/tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c
new file mode 100644
index 0000000..81fec82
--- /dev/null
+++ b/tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c
@@ -0,0 +1,263 @@
+/*
+ * src/engine/engine_common.c
+ * ocf_prepare_clines_miss
+ *
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ *
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "../ocf_priv.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../ocf_freelist.h"
+#include "engine_common.h"
+#include "engine_debug.h"
+#include "../utils/utils_cache_line.h"
+#include "../ocf_request.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_part.h"
+#include "../metadata/metadata.h"
+#include "../eviction/eviction.h"
+#include "../promotion/promotion.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#include "engine/engine_common.c/prepare_clines_miss_generated_wraps.c"
+
+void __wrap_ocf_req_hash_lock_upgrade(struct ocf_request *req)
+{
+}
+
+void __wrap_ocf_req_hash_unlock_wr(struct ocf_request *req)
+{
+}
+
+uint32_t __wrap_ocf_part_check_space(struct ocf_request *req,
+ uint32_t *to_evict)
+{
+ return mock();
+}
+
+int __wrap_lock_clines(struct ocf_request *req,
+ const struct ocf_engine_callbacks *engine_cbs)
+{
+ function_called();
+ return mock();
+}
+
+void __wrap_ocf_metadata_start_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+}
+
+void __wrap_ocf_metadata_end_exclusive_access(
+ struct ocf_metadata_lock *metadata_lock)
+{
+}
+
+int __wrap_space_managment_evict_do(struct ocf_cache *cache,
+ struct ocf_request *req, uint32_t evict_cline_no)
+{
+ function_called();
+ return mock();
+}
+
+bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part)
+{
+ return mock();
+}
+
+void __wrap_ocf_engine_map(struct ocf_request *req)
+{
+ function_called();
+}
+
+bool __wrap_ocf_req_test_mapping_error(struct ocf_request *req)
+{
+ return mock();
+}
+
+void __wrap_ocf_req_set_mapping_error(struct ocf_request *req)
+{
+ function_called();
+}
+
+static void ocf_prepare_clines_miss_test01(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is disabled and empty\n");
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_DISABLED);
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_miss_test02(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is disabled but has cachelines assigned.\n");
+ print_test_description("\tTrigger eviction and mark mapping error\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ expect_function_call(__wrap_space_managment_evict_do);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_MAPPED);
+ will_return(__wrap_ocf_part_is_enabled, false);
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_miss_test03(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is enabled but doesn't have enough space.\n");
+ print_test_description("\tEviction is ok and cachelines lock is acquired.\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ expect_function_call(__wrap_space_managment_evict_do);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_MAPPED);
+ will_return(__wrap_ocf_part_is_enabled, true);
+
+ expect_function_call(__wrap_ocf_engine_map);
+
+ will_return(__wrap_ocf_req_test_mapping_error, false);
+
+ will_return(__wrap_lock_clines, 0);
+ expect_function_call(__wrap_lock_clines);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), 0);
+}
+
+static void ocf_prepare_clines_miss_test04(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is enabled but doesn't have enough space.\n");
+ print_test_description("\tEviction failed\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ expect_function_call(__wrap_space_managment_evict_do);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_miss_test05(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is enabled but doesn't have enough space.\n");
+ print_test_description("Eviction is ok, but mapping failed.\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ expect_function_call(__wrap_space_managment_evict_do);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
+
+ will_return(__wrap_ocf_part_is_enabled, true);
+
+ expect_function_call(__wrap_ocf_engine_map);
+ will_return(__wrap_ocf_req_test_mapping_error, true);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_miss_test06(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is enabled but doesn't have enough space.\n");
+ print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ expect_function_call(__wrap_space_managment_evict_do);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
+
+ will_return(__wrap_ocf_part_is_enabled, true);
+
+ expect_function_call(__wrap_ocf_engine_map);
+ will_return(__wrap_ocf_req_test_mapping_error, false);
+
+ expect_function_call(__wrap_lock_clines);
+ will_return(__wrap_lock_clines, -OCF_ERR_NO_LOCK);
+
+ expect_function_call(__wrap_ocf_req_set_mapping_error);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
+}
+
+static void ocf_prepare_clines_miss_test07(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is enabled but doesn't have enough space.\n");
+ print_test_description("Eviction and mapping were ok, lock not acquired.\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+ will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL);
+
+ expect_function_call(__wrap_space_managment_evict_do);
+ will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
+
+ will_return(__wrap_ocf_part_is_enabled, true);
+
+ expect_function_call(__wrap_ocf_engine_map);
+ will_return(__wrap_ocf_req_test_mapping_error, false);
+
+ expect_function_call(__wrap_lock_clines);
+ will_return(__wrap_lock_clines, OCF_LOCK_NOT_ACQUIRED);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_NOT_ACQUIRED);
+}
+
+static void ocf_prepare_clines_miss_test08(void **state)
+{
+ struct ocf_request req = {};
+ print_test_description("Target part is enabled has enough space.\n");
+ print_test_description("\tMapping and cacheline lock are both ok\n");
+
+ will_return(__wrap_ocf_part_check_space, OCF_PART_HAS_SPACE);
+
+ expect_function_call(__wrap_ocf_engine_map);
+ will_return(__wrap_ocf_req_test_mapping_error, false);
+
+ expect_function_call(__wrap_lock_clines);
+ will_return(__wrap_lock_clines, OCF_LOCK_ACQUIRED);
+
+ assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_ACQUIRED);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_prepare_clines_miss_test01),
+ cmocka_unit_test(ocf_prepare_clines_miss_test02),
+ cmocka_unit_test(ocf_prepare_clines_miss_test03),
+ cmocka_unit_test(ocf_prepare_clines_miss_test04),
+ cmocka_unit_test(ocf_prepare_clines_miss_test05),
+ cmocka_unit_test(ocf_prepare_clines_miss_test06),
+ cmocka_unit_test(ocf_prepare_clines_miss_test07),
+ cmocka_unit_test(ocf_prepare_clines_miss_test08)
+ };
+
+ print_message("Unit test for ocf_prepare_clines_miss\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
diff --git a/tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c b/tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c
new file mode 100644
index 0000000..2980372
--- /dev/null
+++ b/tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c
@@ -0,0 +1,516 @@
+/*
+ * <tested_file_path>src/utils/utils_part.c</tested_file_path>
+ * <tested_function>ocf_part_evict_size</tested_function>
+ * <functions_to_leave>
+ * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
+ * ONE FUNCTION PER LINE
+ * </functions_to_leave>
+ */
+
+#undef static
+
+#undef inline
+
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+#include "print_desc.h"
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_request.h"
+#include "../metadata/metadata.h"
+#include "../engine/cache_engine.h"
+#include "../eviction/ops.h"
+#include "utils_part.h"
+
+#include "utils/utils_part.c/ocf_part_evict_size_generated_wraps.c"
+
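+/*
+ * Mocked dependencies: each __wrap_* helper below either returns the next
+ * value queued with will_return() (via mock()) or records that it was
+ * called so the test can verify it with expect_function_call().
+ */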
+uint32_t __wrap_ocf_part_get_max_size(ocf_cache_t cache,
+ struct ocf_user_part *target_part)
+{
+ return mock();
+}
+
+uint32_t __wrap_ocf_engine_repart_count(struct ocf_request *req)
+{
+ return mock();
+}
+
+uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
+{
+ return mock();
+}
+
+uint32_t __wrap_ocf_part_get_occupancy(struct ocf_user_part *target_part)
+{
+ return mock();
+}
+
+ocf_cache_line_t __wrap_ocf_freelist_num_free(ocf_freelist_t freelist)
+{
+ return mock();
+}
+
+void __wrap_ocf_req_set_part_evict(struct ocf_request *req)
+{
+ function_called();
+}
+
+void __wrap_ocf_req_clear_part_evict(struct ocf_request *req)
+{
+ function_called();
+}
+
+static void ocf_part_evict_size_test01(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 512;
+ uint32_t freelist_size = 500;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ print_test_description("Enough free space available");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ // Enough free cachelines to map a whole request
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ assert_int_equal(ocf_part_evict_size(&req), 0);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test02(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 960;
+ uint32_t freelist_size = 500;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t available_cachelines = max_part_size - part_occupied_cachelines;
+ uint32_t cachelines_to_evict = cachelines_to_map - available_cachelines;
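+ // Only 1024 - 960 = 64 lines fit in the part, so 320 - 64 = 256 lines
+ // are expected to be evicted from the target partition.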
+
+ print_test_description("Cache has enough free cachelines,"
+ " but target partition must be evicted");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test03(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 320;
+ uint32_t cachelines_to_map = 0;
+ uint32_t part_occupied_cachelines = 512;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_to_evict = 0;
+
+ print_test_description("Only repart (no mapping). Freelist is empty but "
+ "space in a target part is availabe,");
+ print_test_description("\tso no cachelines should be "
+ " evcited from cache");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test04(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 320;
+ uint32_t cachelines_to_map = 0;
+ uint32_t part_occupied_cachelines = 1100;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_debt = part_occupied_cachelines - max_part_size;
+ uint32_t cachelines_to_evict = cachelines_to_repart + cachelines_debt;
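+ // The part already exceeds its limit by 1100 - 1024 = 76 lines; these
+ // plus the 320 reparted lines are expected to be evicted from it.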
+
+ print_test_description("Only repart (no mapping). Freelist is empty and no"
+ " space in target part is availabe.");
+ print_test_description("\tEvict only from target partition");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test05(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 960;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t available_cachelines = max_part_size - part_occupied_cachelines;
+ uint32_t cachelines_to_evict = cachelines_to_map - available_cachelines;
+
+ print_test_description("Freelist is empty and no space in the target part "
+ "is available");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test06(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 320;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t available_cachelines = max_part_size - part_occupied_cachelines;
+ uint32_t cachelines_to_evict = cachelines_to_map;
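+ // The part itself has room (320 of 1024 lines used), so the part-evict
+ // flag is expected to be cleared and all 320 unmapped lines evicted
+ // from the cache as a whole.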
+
+ print_test_description("Freelist is empty but target part has enough space");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_clear_part_evict);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test07(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 1280;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t debt_cachelines = part_occupied_cachelines - max_part_size;
+ uint32_t cachelines_to_evict = cachelines_to_map + debt_cachelines;
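+ // Occupancy exceeds the limit by 1280 - 1024 = 256 lines; together
+ // with the 320 lines to map, 576 evictions are expected.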
+
+ print_test_description("Freelist is empty and part occupancy exceeded");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
+ assert_int_equal(ocf_part_evict_size(&req),
+ (part_occupied_cachelines - max_part_size) + cachelines_to_map);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test08(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 320;
+ uint32_t cachelines_to_map = 0;
+ uint32_t part_occupied_cachelines = 1280;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t debt_cachelines = part_occupied_cachelines - max_part_size;
+ uint32_t cachelines_to_evict = debt_cachelines + cachelines_to_repart;
+
+ print_test_description("Target part occupancy limit is exceeded during "
+ "repart");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
+ assert_int_equal(ocf_part_evict_size(&req),
+ (part_occupied_cachelines - max_part_size) + cachelines_to_repart);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test09(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 320;
+ uint32_t cachelines_to_map = 0;
+ uint32_t part_occupied_cachelines = 320;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_to_evict = 0;
+
+ print_test_description("Repart while target part has enough of available "
+ "space");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test10(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 320;
+ uint32_t freelist_size = 320;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_to_evict = 0;
+
+ print_test_description("Enough of available cachelines in target part, "
+ "freelist has exactly required number of free cachelines");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test11(void **state)
+{
+ uint32_t max_part_size = 1024;
+ uint32_t cachelines_to_repart = 320;
+ uint32_t cachelines_to_map = 0;
+ uint32_t part_occupied_cachelines = 384;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_to_evict = 0;
+
+ print_test_description("Number of cachelines to repart is equal to number "
+ "of cachelines available in the target partition");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test12(void **state)
+{
+ uint32_t max_part_size = 0;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 384;
+ uint32_t freelist_size = 0;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_to_evict =
+ part_occupied_cachelines + cachelines_to_map;
+
+ print_test_description("Freelist IS empty. Max occupancy set to 0, but "
+ "some cachelines are still assigned to traget part - evict them");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
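+ // With max size 0 the exact count is not asserted; the test only
+ // requires that at least all lines currently in the part are evicted.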
+ assert_true(ocf_part_evict_size(&req) >= part_occupied_cachelines);
+
+ test_free(req.cache);
+}
+
+static void ocf_part_evict_size_test13(void **state)
+{
+ uint32_t max_part_size = 0;
+ uint32_t cachelines_to_repart = 0;
+ uint32_t cachelines_to_map = 320;
+ uint32_t part_occupied_cachelines = 384;
+ uint32_t freelist_size = 1024;
+
+ struct ocf_request req;
+ req.cache = test_malloc(sizeof(struct ocf_cache));
+
+ uint32_t cachelines_to_evict =
+ part_occupied_cachelines + cachelines_to_map;
+
+ print_test_description("Freelist IS NOT empty. Max occupancy set to 0, but"
+ " some cachelines are still assigned to traget part - evict them");
+
+ will_return(__wrap_ocf_part_get_max_size, max_part_size);
+
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+ will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart);
+
+ will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines);
+
+ will_return(__wrap_ocf_freelist_num_free, freelist_size);
+ will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map);
+
+ expect_function_call(__wrap_ocf_req_set_part_evict);
+
+ assert_true(ocf_part_evict_size(&req) >= part_occupied_cachelines);
+
+ test_free(req.cache);
+}
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test(ocf_part_evict_size_test01),
+ cmocka_unit_test(ocf_part_evict_size_test02),
+ cmocka_unit_test(ocf_part_evict_size_test03),
+ cmocka_unit_test(ocf_part_evict_size_test04),
+ cmocka_unit_test(ocf_part_evict_size_test05),
+ cmocka_unit_test(ocf_part_evict_size_test06),
+ cmocka_unit_test(ocf_part_evict_size_test07),
+ cmocka_unit_test(ocf_part_evict_size_test08),
+ cmocka_unit_test(ocf_part_evict_size_test09),
+ cmocka_unit_test(ocf_part_evict_size_test10),
+ cmocka_unit_test(ocf_part_evict_size_test11),
+ cmocka_unit_test(ocf_part_evict_size_test12),
+ cmocka_unit_test(ocf_part_evict_size_test13)
+ };
+
+ print_message("Unit test for ocf_part_evict_size\n");
+
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}