From ac2effb83d4ec68a3d6e7b3836c811572c7ac2d9 Mon Sep 17 00:00:00 2001 From: Michal Rakowski Date: Mon, 4 May 2020 14:53:10 +0200 Subject: [PATCH 01/22] Fix whitespaces Signed-off-by: Michal Rakowski Signed-off-by: Michal Mielewczyk --- src/metadata/metadata_partition_structs.h | 34 +++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/metadata/metadata_partition_structs.h b/src/metadata/metadata_partition_structs.h index baa8c88..8e93a0a 100644 --- a/src/metadata/metadata_partition_structs.h +++ b/src/metadata/metadata_partition_structs.h @@ -11,26 +11,26 @@ #include "../eviction/eviction.h" struct ocf_user_part_config { - char name[OCF_IO_CLASS_NAME_MAX]; - uint32_t min_size; - uint32_t max_size; - int16_t priority; - ocf_cache_mode_t cache_mode; - struct { - uint8_t valid : 1; - uint8_t added : 1; - uint8_t eviction : 1; - /*!< This bits is setting during partition sorting, - * and means that can evict from this partition - */ - } flags; + char name[OCF_IO_CLASS_NAME_MAX]; + uint32_t min_size; + uint32_t max_size; + int16_t priority; + ocf_cache_mode_t cache_mode; + struct { + uint8_t valid : 1; + uint8_t added : 1; + uint8_t eviction : 1; + /*!< This bits is setting during partition sorting, + * and means that can evict from this partition + */ + } flags; }; struct ocf_user_part_runtime { - uint32_t curr_size; - uint32_t head; - struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS]; - struct cleaning_policy cleaning; + uint32_t curr_size; + uint32_t head; + struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS]; + struct cleaning_policy cleaning; }; /* Iterator state, visiting all eviction lists within a partition From c643a419771c99ce937875a9afcb14ef362eafbf Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 3 Dec 2020 05:02:23 -0500 Subject: [PATCH 02/22] Prevent adding ioclass with the same id twice Signed-off-by: Michal Mielewczyk --- src/mngt/ocf_mngt_io_class.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c index 481c061..003931b 100644 --- a/src/mngt/ocf_mngt_io_class.c +++ b/src/mngt/ocf_mngt_io_class.c @@ -31,6 +31,8 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache, uint32_t max_size, uint8_t priority, bool valid) { uint32_t size; + struct ocf_lst_entry *iter; + uint32_t iter_id; if (!name) return -OCF_ERR_INVAL; @@ -51,6 +53,14 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache, return -OCF_ERR_INVAL; } + for_each_lst(&cache->lst_part, iter, iter_id) { + if (iter_id == part_id) { + ocf_cache_log(cache, log_err, + "Part with id %hu already exists\n", part_id); + return -OCF_ERR_INVAL; + } + } + size = sizeof(cache->user_parts[part_id].config->name); if (env_strncpy(cache->user_parts[part_id].config->name, size, name, size)) return -OCF_ERR_INVAL; From e9d729007807e1b37484dcd4c553265630b2d673 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 29 Oct 2020 07:14:43 -0400 Subject: [PATCH 03/22] Extend ioclass management logging When setting ioclass, print info about it's max size Signed-off-by: Michal Mielewczyk --- src/mngt/ocf_mngt_io_class.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c index 003931b..5cf398d 100644 --- a/src/mngt/ocf_mngt_io_class.c +++ b/src/mngt/ocf_mngt_io_class.c @@ -136,17 +136,17 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, /* Try set partition size */ if 
(_ocf_mngt_set_partition_size(cache, part_id, min, max)) { ocf_cache_log(cache, log_info, - "Setting IO class size, id: %u, name: '%s' " - "[ ERROR ]\n", part_id, dest_part->config->name); + "Setting IO class size, id: %u, name: '%s', max size: %u" + " [ ERROR ]\n", part_id, dest_part->config->name, max); return -OCF_ERR_INVAL; } ocf_part_set_prio(cache, dest_part, prio); dest_part->config->cache_mode = cache_mode; ocf_cache_log(cache, log_info, - "Updating unclassified IO class, id: " - "%u [ OK ]\n", part_id); - + "Updating unclassified IO class, id: %u, name :'%s'," + "max size: %u [ OK ]\n", + part_id, dest_part->config->name, max); return 0; } @@ -160,23 +160,23 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, /* Try set partition size */ if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) { ocf_cache_log(cache, log_info, - "Setting IO class size, id: %u, name: '%s' " - "[ ERROR ]\n", part_id, dest_part->config->name); + "Setting IO class size, id: %u, name: '%s', max size %u" + "[ ERROR ]\n", part_id, dest_part->config->name, max); return -OCF_ERR_INVAL; } if (ocf_part_is_valid(dest_part)) { /* Updating existing */ ocf_cache_log(cache, log_info, "Updating existing IO " - "class, id: %u, name: '%s' [ OK ]\n", - part_id, dest_part->config->name); + "class, id: %u, name: '%s', max size %u [ OK ]\n", + part_id, dest_part->config->name, max); } else { /* Adding new */ ocf_part_set_valid(cache, part_id, true); ocf_cache_log(cache, log_info, "Adding new IO class, " - "id: %u, name: '%s' [ OK ]\n", part_id, - dest_part->config->name); + "id: %u, name: '%s', max size %u [ OK ]\n", part_id, + dest_part->config->name, max); } ocf_part_set_prio(cache, dest_part, prio); From 718dc743c8a72477ec3cad452009c48b0cf2f4ea Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 15 Oct 2020 18:19:13 -0400 Subject: [PATCH 04/22] Enable particular ioclass eviction If partition's occupancy limit is reached, cachelines should be evicted from request's target partition. Information whether particular partition eviction should be triggered is carried as a flag by request which triggered eviction. 
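For orientation only (this illustration is not part of the patch series): the decision carried by the new flag can be modelled in isolation. The sketch below uses made-up names and plain integers instead of OCF's request/partition structures; it only shows when per-partition eviction is selected instead of priority-ordered eviction across all partitions, mirroring the behaviour the following patches implement.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the flag described above: per-partition eviction is chosen
 * only when the request's target partition has no room left for the
 * cachelines the request needs; otherwise eviction (if any) follows the
 * regular partition priorities. */
static bool needs_part_eviction(uint32_t part_occupancy, uint32_t part_limit,
		uint32_t needed_cachelines, uint32_t *to_evict_from_part)
{
	uint32_t available = part_limit > part_occupancy ?
			part_limit - part_occupancy : 0;

	if (available >= needed_cachelines) {
		/* Partition still has headroom - no per-partition eviction */
		*to_evict_from_part = 0;
		return false;
	}

	/* Occupancy limit reached - evict the shortfall from the target
	 * partition itself. */
	*to_evict_from_part = needed_cachelines - available;
	return true;
}

int main(void)
{
	uint32_t to_evict;

	/* Partition limited to 1024 lines, 960 already used, request needs
	 * 320 -> 256 lines have to be evicted from this partition. */
	bool part_evict = needs_part_eviction(960, 1024, 320, &to_evict);

	printf("part_evict=%d to_evict=%u\n", part_evict, to_evict);
	return 0;
}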
Signed-off-by: Michal Mielewczyk --- src/eviction/eviction.c | 32 +++++++++++++++++++++++++++----- src/ocf_request.h | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c index 5a41935..bf73447 100644 --- a/src/eviction/eviction.c +++ b/src/eviction/eviction.c @@ -40,6 +40,22 @@ static uint32_t ocf_evict_calculate(struct ocf_user_part *part, return to_evict; } +static inline uint32_t ocf_evict_part_do(ocf_cache_t cache, + ocf_queue_t io_queue, const uint32_t evict_cline_no, + struct ocf_user_part *target_part) +{ + uint32_t to_evict = 0; + + if (!evp_lru_can_evict(cache)) + return 0; + + to_evict = ocf_evict_calculate(&cache->user_parts[target_part_id], + evict_cline_no); + + return ocf_eviction_need_space(cache, io_queue, + target_part, to_evict); +} + static inline uint32_t ocf_evict_do(ocf_cache_t cache, ocf_queue_t io_queue, const uint32_t evict_cline_no, struct ocf_user_part *target_part) @@ -109,12 +125,18 @@ int space_managment_evict_do(struct ocf_cache *cache, uint32_t free; struct ocf_user_part *req_part = &cache->user_parts[req->part_id]; - free = ocf_freelist_num_free(cache->freelist); - if (evict_cline_no <= free) - return LOOKUP_MAPPED; + if (ocf_req_part_evict(req)) { + evicted = ocf_evict_part_do(cache, req->io_queue, evict_cline_no, + req_part); + } else { + free = ocf_freelist_num_free(cache->freelist); + if (evict_cline_no <= free) + return LOOKUP_MAPPED; - evict_cline_no -= free; - evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part); + evict_cline_no -= free; + + evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part); + } if (evict_cline_no <= evicted) return LOOKUP_MAPPED; diff --git a/src/ocf_request.h b/src/ocf_request.h index 2d9c7f8..21885b2 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -191,6 +191,9 @@ struct ocf_request { uint8_t wi_second_pass : 1; /*!< Set after first pass of WI write is completed */ + uint8_t part_evict : 1; + /* !< Some cachelines from request's partition must be evicted */ + log_sid_t sid; /*!< Tracing sequence ID */ @@ -332,6 +335,40 @@ void ocf_req_clear_map(struct ocf_request *req); */ void ocf_req_hash(struct ocf_request *req); +/** + * @brief Request should trigger eviction from it's target partition + * + * @param req - OCF request + */ +static inline void ocf_req_set_part_evict(struct ocf_request *req) +{ + req->part_evict = true; +} + +/** + * @brief Request shouldn't trigger eviction from it's target partition + * + * @param req - OCF request + */ +static inline void ocf_req_clear_part_evict(struct ocf_request *req) +{ + req->part_evict = false; +} + +/** + * @brief Check wheter request shouldn't trigger eviction from it's target + * partition or any partition + * + * @param req - OCF request + * @return true - Eviciton should be triggered from request's target partition + * @return false - Eviction should be triggered with respect to eviction + * priority + */ +static inline bool ocf_req_part_evict(struct ocf_request *req) +{ + return req->part_evict; +} + int ocf_req_set_dirty(struct ocf_request *req); /** From e999de7232944828733a1d91994af5ee2ed6114a Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Fri, 6 Nov 2020 13:12:03 -0500 Subject: [PATCH 05/22] Don't roundup when evicting single part Signed-off-by: Michal Mielewczyk --- src/eviction/eviction.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c index 
bf73447..8ee5b59 100644 --- a/src/eviction/eviction.c +++ b/src/eviction/eviction.c @@ -21,7 +21,7 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = { }; static uint32_t ocf_evict_calculate(struct ocf_user_part *part, - uint32_t to_evict) + uint32_t to_evict, bool roundup) { if (part->runtime->curr_size <= part->config->min_size) { /* @@ -31,7 +31,7 @@ static uint32_t ocf_evict_calculate(struct ocf_user_part *part, return 0; } - if (to_evict < OCF_TO_EVICTION_MIN) + if (roundup && to_evict < OCF_TO_EVICTION_MIN) to_evict = OCF_TO_EVICTION_MIN; if (to_evict > (part->runtime->curr_size - part->config->min_size)) @@ -50,7 +50,7 @@ static inline uint32_t ocf_evict_part_do(ocf_cache_t cache, return 0; to_evict = ocf_evict_calculate(&cache->user_parts[target_part_id], - evict_cline_no); + evict_cline_no, false); return ocf_eviction_need_space(cache, io_queue, target_part, to_evict); @@ -92,7 +92,7 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache, goto out; } - to_evict = ocf_evict_calculate(part, evict_cline_no); + to_evict = ocf_evict_calculate(part, evict_cline_no, true); if (to_evict == 0) { /* No cache lines to evict for this partition */ continue; From 21e98a6dbc9ac12da1d76b43de7c93800c013e2e Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Wed, 4 Nov 2020 02:34:01 -0500 Subject: [PATCH 06/22] Evict request's target partition in regular order Instead of evicting the target partition as the last one, respect eviction priorities Signed-off-by: Michal Mielewczyk --- src/eviction/eviction.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c index 8ee5b59..51f2ec9 100644 --- a/src/eviction/eviction.c +++ b/src/eviction/eviction.c @@ -83,10 +83,6 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache, /* It seams that no more partition for eviction */ break; } - if (part_id == target_part->id) { - /* Omit targeted, evict from different first */ - continue; - } if (evicted >= evict_cline_no) { /* Evicted requested number of cache line, stop */ goto out; @@ -102,18 +98,6 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache, part, to_evict); } - if (!ocf_eviction_can_evict(cache)) - goto out; - - if (evicted < evict_cline_no) { - /* Now we can evict form targeted partition */ - to_evict = ocf_evict_calculate(target_part, evict_cline_no); - if (to_evict) { - evicted += ocf_eviction_need_space(cache, io_queue, - target_part, to_evict); - } - } - out: return evicted; } From 4f228317a11e89b1e8b17ed21c79d39dc226bc71 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 26 Nov 2020 18:24:29 -0500 Subject: [PATCH 07/22] Update docs for `space_managment_evict_do()` Signed-off-by: Michal Mielewczyk --- src/eviction/eviction.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eviction/eviction.h b/src/eviction/eviction.h index 4843d37..a385c68 100644 --- a/src/eviction/eviction.h +++ b/src/eviction/eviction.h @@ -58,11 +58,11 @@ struct eviction_policy_ops { extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max]; /* - * Deallocates space from low priority partitions. + * Deallocates space according to eviction priorities.
* - * Returns -1 on error - * or the destination partition ID for the free buffers - * (it matches label and is part of the object (#core_id) IO group) + * + * @returns: + * 'LOOKUP_HIT' if evicted enough cachelines to serve @req + * 'LOOKUP_MISS' otherwise */ int space_managment_evict_do(ocf_cache_t cache, struct ocf_request *req, uint32_t evict_cline_no); From 6ca57790571852f167f45c868fb0b03ef363c2b1 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 15 Oct 2020 22:05:20 -0400 Subject: [PATCH 08/22] Update `struct ocf_io_class_info` docs Signed-off-by: Michal Mielewczyk --- inc/ocf_io_class.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inc/ocf_io_class.h b/inc/ocf_io_class.h index c8cb05f..88ca597 100644 --- a/inc/ocf_io_class.h +++ b/inc/ocf_io_class.h @@ -40,8 +40,8 @@ struct ocf_io_class_info { uint32_t max_size; /*!< Maximum number of cache lines that might be assigned into - * this IO class. If current size reach maximum size no more - * allocation for this IO class takes place + * this IO class. If current size reaches maximum size then some + * of ioclass's cachelines are evicted. */ uint8_t eviction_policy_type; From 4329e617cf3dfaf7094b4f4f5754a8c2d58e38c1 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Fri, 6 Nov 2020 09:22:36 -0500 Subject: [PATCH 09/22] env: print format for 64-bit signed int Signed-off-by: Michal Mielewczyk --- env/posix/ocf_env.h | 1 + 1 file changed, 1 insertion(+) diff --git a/env/posix/ocf_env.h b/env/posix/ocf_env.h index bda31bd..b144104 100644 --- a/env/posix/ocf_env.h +++ b/env/posix/ocf_env.h @@ -46,6 +46,7 @@ #define min(a,b) MIN(a,b) #define ENV_PRIu64 "lu" +#define ENV_PRId64 "ld" typedef uint8_t u8; typedef uint16_t u16; From e26ca303995f9015ebaf03778a785f5084a07996 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 12 Nov 2020 02:55:08 -0500 Subject: [PATCH 10/22] Track explicit number of cachelines to be reparted Instead of redundantly calculating the number of cachelines to be reparted, keep this information in the request's info Signed-off-by: Michal Mielewczyk --- src/engine/engine_common.c | 1 + src/engine/engine_common.h | 26 ++++++++++++++++++++++++++ src/ocf_request.h | 3 ++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index 9e8b0ee..492ee28 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -128,6 +128,7 @@ void ocf_engine_update_req_info(struct ocf_cache *cache, * Need to move this cache line into other partition */ _entry->re_part = req->info.re_part = true; + req->info.re_part_no++; } break; diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h index 2557c64..8ec9d0c 100644 --- a/src/engine/engine_common.h +++ b/src/engine/engine_common.h @@ -47,6 +47,20 @@ static inline bool ocf_engine_is_hit(struct ocf_request *req) */ #define ocf_engine_is_miss(req) (!ocf_engine_is_hit(req)) +/** + * @brief Check if all the cache lines are assigned to a good partition + * + * @param req OCF request + * + * @retval true request's cache lines are assigned to a good partition + * @retval false some of the request's cache lines needs to be reassigned to + * a target partition + */ +static inline bool ocf_engine_needs_repart(struct ocf_request *req) +{ + return req->info.re_part_no > 0; +} + /** * @brief Check if all cache lines are mapped fully * @@ -98,6 +112,18 @@ static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req) { return req->core_line_count - (req->info.hit_no +
req->info.invalid_no); } +/** + * @brief Get number of cache lines to repart + * + * @param req OCF request + * + * @retval Number of cache lines to repart + */ +static inline uint32_t ocf_engine_repart_count(struct ocf_request *req) +{ + return req->info.re_part_no; +} + /** * @brief Get number of IOs to perform cache read or write * diff --git a/src/ocf_request.h b/src/ocf_request.h index 21885b2..4527901 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -13,9 +13,10 @@ struct ocf_req_allocator; struct ocf_req_info { - /* Number of hits, invalid, misses. */ + /* Number of hits, invalid, misses, reparts. */ unsigned int hit_no; unsigned int invalid_no; + unsigned int re_part_no; uint32_t dirty_all; /*!< Number of dirty line in request*/ From 9d80882b009fca1cbc34c5d1385ab474080fff45 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 12 Nov 2020 03:08:09 -0500 Subject: [PATCH 11/22] Remove `re_part` field from `struct ocf_req_info` Since the request carries explicit information about the number of cachelines to be reparted, there is no need to keep a boolean flag indicating that some of the request's cachelines are assigned to a wrong partition Signed-off-by: Michal Mielewczyk --- src/engine/engine_common.c | 2 +- src/engine/engine_fast.c | 2 +- src/engine/engine_pt.c | 2 +- src/engine/engine_rd.c | 2 +- src/engine/engine_wb.c | 2 +- src/engine/engine_wt.c | 2 +- src/ocf_request.h | 5 ----- 7 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index 492ee28..ba20e66 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -127,7 +127,7 @@ void ocf_engine_update_req_info(struct ocf_cache *cache, /* * Need to move this cache line into other partition */ - _entry->re_part = req->info.re_part = true; + _entry->re_part = true; req->info.re_part_no++; } diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c index b8b867f..8beb599 100644 --- a/src/engine/engine_fast.c +++ b/src/engine/engine_fast.c @@ -69,7 +69,7 @@ static int _ocf_read_fast_do(struct ocf_request *req) /* Get OCF request - increase reference counter */ ocf_req_get(req); - if (req->info.re_part) { + if (ocf_engine_needs_repart(req)) { OCF_DEBUG_RQ(req, "Re-Part"); ocf_req_hash_lock_wr(req); diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c index 5485fcf..9144d84 100644 --- a/src/engine/engine_pt.c +++ b/src/engine/engine_pt.c @@ -67,7 +67,7 @@ int ocf_read_pt_do(struct ocf_request *req) return 0; } - if (req->info.re_part) { + if (ocf_engine_needs_repart(req)) { OCF_DEBUG_RQ(req, "Re-Part"); ocf_req_hash_lock_wr(req); diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 566f83e..0d999eb 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -172,7 +172,7 @@ static int _ocf_read_generic_do(struct ocf_request *req) ocf_req_hash_unlock_rd(req); } - if (req->info.re_part) { + if (ocf_engine_needs_repart(req)) { OCF_DEBUG_RQ(req, "Re-Part"); ocf_req_hash_lock_wr(req); diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c index 305125e..d4c7041 100644 --- a/src/engine/engine_wb.c +++ b/src/engine/engine_wb.c @@ -121,7 +121,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req) * 3.
Then continue processing request (flush metadata) */ - if (req->info.re_part) { + if (ocf_engine_needs_repart(req)) { OCF_DEBUG_RQ(req, "Re-Part"); ocf_req_hash_lock_wr(req); diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c index 00bd303..040c25c 100644 --- a/src/engine/engine_wt.c +++ b/src/engine/engine_wt.c @@ -118,7 +118,7 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req) ocf_req_hash_unlock_wr(req); } - if (req->info.re_part) { + if (ocf_engine_needs_repart(req)) { OCF_DEBUG_RQ(req, "Re-Part"); ocf_req_hash_lock_wr(req); diff --git a/src/ocf_request.h b/src/ocf_request.h index 4527901..1603d38 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -33,11 +33,6 @@ struct ocf_req_info { uint32_t mapping_error : 1; /*!< Core lines in this request were not mapped into cache */ - uint32_t re_part : 1; - /*!< This bit indicate that in the request some cache lines - * has to be moved to another partition - */ - uint32_t core_error : 1; /*!< Error occured during I/O on core device */ From 600bd1d8590fe48207717547604f0ff46de1492f Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Fri, 13 Nov 2020 01:47:22 -0500 Subject: [PATCH 12/22] Access partition's metadata counters via functions Signed-off-by: Michal Mielewczyk --- src/utils/utils_part.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/utils/utils_part.h b/src/utils/utils_part.h index fcf63da..6f951f7 100644 --- a/src/utils/utils_part.h +++ b/src/utils/utils_part.h @@ -50,6 +50,17 @@ static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class) return PARTITION_DEFAULT; } +static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part) +{ + return part->runtime->curr_size; +} + +static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache, + ocf_part_id_t part_id) +{ + return cache->user_parts[part_id].config->max_size; +} + void ocf_part_move(struct ocf_request *req); #define for_each_part(cache, part, id) \ From 05f3c22dadf045ebe86915ab315379071818ea46 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Wed, 21 Oct 2020 06:42:12 -0400 Subject: [PATCH 13/22] Occupancy per ioclass utilities Functions to check space availability and to manage cachelines reservation Signed-off-by: Michal Mielewczyk --- src/utils/utils_part.c | 79 ++++++++++++++++++++++++++++++++++++++++++ src/utils/utils_part.h | 23 +++++++++++- 2 files changed, 101 insertions(+), 1 deletion(-) diff --git a/src/utils/utils_part.c b/src/utils/utils_part.c index 3da853b..7ee28d8 100644 --- a/src/utils/utils_part.c +++ b/src/utils/utils_part.c @@ -190,3 +190,82 @@ void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id, } } } + +static inline uint32_t ocf_part_evict_size(struct ocf_request *req) +{ + uint32_t needed_cache_lines, part_available, cache_lines_to_evict; + uint32_t part_occupancy, part_occupancy_debt; + struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id]; + uint32_t part_occupancy_limit = + ocf_part_get_max_size(req->cache, target_part); + + needed_cache_lines = ocf_engine_repart_count(req) + + ocf_engine_unmapped_count(req); + + part_occupancy = ocf_part_get_occupancy(target_part); + + if (part_occupancy_limit >= part_occupancy) { + part_available = part_occupancy_limit - part_occupancy; + part_occupancy_debt = 0; + } else { + /* Occupancy is greater than occupancy limit. 
Evict missing number of + * cachelines, but no more than single eviction limit */ + part_occupancy_debt = min((uint32_t)OCF_PENDING_EVICTION_LIMIT, + part_occupancy - part_occupancy_limit); + part_available = 0; + } + + if (ocf_freelist_num_free(req->cache->freelist) < + ocf_engine_unmapped_count(req)) { + /* Number of cachelines to insert greater than number of free + * cachelines */ + if (part_available >= needed_cache_lines) { + /* Cache is full, but target's part occupancy limit is not reached + */ + ocf_req_clear_part_evict(req); + cache_lines_to_evict = needed_cache_lines; + } else { + /* Cache is full and target part reached it's occupancy limit */ + ocf_req_set_part_evict(req); + cache_lines_to_evict = needed_cache_lines - part_available; + } + + } else if (part_available < needed_cache_lines) { + /* Enough of free cache lines, but partition reached it's occupancy + * limit */ + cache_lines_to_evict = needed_cache_lines - part_available; + ocf_req_set_part_evict(req); + + } else if (part_available >= needed_cache_lines) { + /* Enough free cachelines available and they can be assigned to target + * partition */ + cache_lines_to_evict = 0; + + } + + return cache_lines_to_evict + part_occupancy_debt; +} + +uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict) +{ + uint32_t ret = OCF_PART_IS_FULL; + uint32_t _to_evict; + struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id]; + + if (!ocf_part_is_enabled(target_part) && + ocf_part_get_occupancy(target_part) == 0) { + /* If partition is disabled, but has assigned cachelines, eviction has + * to be triggered */ + return OCF_PART_IS_DISABLED; + } + + _to_evict = ocf_part_evict_size(req); + + if (_to_evict == 0) + ret = OCF_PART_HAS_SPACE; + + if (to_evict) + *to_evict = _to_evict; + + return ret; +} diff --git a/src/utils/utils_part.h b/src/utils/utils_part.h index 6f951f7..f993061 100644 --- a/src/utils/utils_part.h +++ b/src/utils/utils_part.h @@ -8,6 +8,7 @@ #include "../ocf_request.h" #include "../engine/cache_engine.h" +#include "../engine/engine_common.h" #include "../metadata/metadata_partition.h" void ocf_part_init(struct ocf_cache *cache); @@ -72,7 +73,27 @@ static inline void ocf_part_sort(struct ocf_cache *cache) ocf_lst_sort(&cache->lst_part); } -static inline ocf_cache_mode_t ocf_part_get_cache_mode(struct ocf_cache *cache, +static inline bool ocf_part_is_enabled(struct ocf_user_part *part) +{ + return part->config->max_size != 0; +} + +#define OCF_PART_HAS_SPACE 0 +#define OCF_PART_IS_FULL 1 +#define OCF_PART_IS_DISABLED 2 +/** + * Check whether there is enough free cachelines to serve request. If partition + * occupancy limit is reached, `req->part_evict` is set to true. Otherwise + * flag is set to false and eviction from any partition should be triggered. 
+ * + * @return + * OCF_PART_HAS_SPACE when cachelines alloted successfully + * OCF_PART_IS_FULL when need to evict some cachelines to serve request + * OCF_PART_IS_DISABLED when caching for particular partition is disabled + */ +uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict); + +static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache, ocf_part_id_t part_id) { if (part_id < OCF_IO_CLASS_MAX) From 418303eee8f97745f393b9fbb3ef5f8b2bd847f6 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Mon, 16 Nov 2020 04:42:32 -0500 Subject: [PATCH 14/22] UT for `occupancy per ioclass` utils Signed-off-by: Michal Mielewczyk --- .../utils/utils_part.c/ocf_part_evict_size.c | 516 ++++++++++++++++++ 1 file changed, 516 insertions(+) create mode 100644 tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c diff --git a/tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c b/tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c new file mode 100644 index 0000000..2980372 --- /dev/null +++ b/tests/unit/tests/utils/utils_part.c/ocf_part_evict_size.c @@ -0,0 +1,516 @@ +/* + * src/utils/utils_part.c + * ocf_part_evict_size + * + * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE + * ONE FUNCTION PER LINE + * + */ + +#undef static + +#undef inline + + +#include +#include +#include +#include +#include "print_desc.h" + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "../ocf_request.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../eviction/ops.h" +#include "utils_part.h" + +#include "utils/utils_part.c/ocf_part_evict_size_generated_wraps.c" + +uint32_t __wrap_ocf_part_get_max_size(ocf_cache_t cache, + struct ocf_user_part *target_part) +{ + return mock(); +} + +uint32_t __wrap_ocf_engine_repart_count(struct ocf_request *req) +{ + return mock(); +} + +uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req) +{ + return mock(); +} + +uint32_t __wrap_ocf_part_get_occupancy(struct ocf_user_part *target_part) +{ + return mock(); +} + +ocf_cache_line_t __wrap_ocf_freelist_num_free(ocf_freelist_t freelist) +{ + return mock(); +} + +void __wrap_ocf_req_set_part_evict(struct ocf_request *req) +{ + function_called(); +} + +void __wrap_ocf_req_clear_part_evict(struct ocf_request *req) +{ + function_called(); +} + +static void ocf_part_evict_size_test01(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 512; + uint32_t freelist_size = 500; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + print_test_description("Enough free space available"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + // Enough free cachelines to map a whole request + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + assert_int_equal(ocf_part_evict_size(&req), 0); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test02(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 960; + uint32_t freelist_size = 500; + + struct ocf_request req; + req.cache = 
test_malloc(sizeof(struct ocf_cache)); + + uint32_t available_cachelines = max_part_size - part_occupied_cachelines; + uint32_t cachelines_to_evict = cachelines_to_map - available_cachelines; + + print_test_description("Cache has enough free cachelines," + " but target partition must be evicted"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test03(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 320; + uint32_t cachelines_to_map = 0; + uint32_t part_occupied_cachelines = 512; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_to_evict = 0; + + print_test_description("Only repart (no mapping). Freelist is empty but " + "space in a target part is availabe,"); + print_test_description("\tso no cachelines should be " + " evcited from cache"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test04(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 320; + uint32_t cachelines_to_map = 0; + uint32_t part_occupied_cachelines = 1100; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_debt = part_occupied_cachelines - max_part_size; + uint32_t cachelines_to_evict = cachelines_to_repart + cachelines_debt; + + print_test_description("Only repart (no mapping). 
Freelist is empty and no" + " space in target part is availabe."); + print_test_description("\tEvict only from target partition"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test05(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 960; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t available_cachelines = max_part_size - part_occupied_cachelines; + uint32_t cachelines_to_evict = cachelines_to_map - available_cachelines; + + print_test_description("Freelist is empty and no space in the target part " + "is available"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test06(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 320; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t available_cachelines = max_part_size - part_occupied_cachelines; + uint32_t cachelines_to_evict = cachelines_to_map; + + print_test_description("Freelist is empty but target part has enough space"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_clear_part_evict); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test07(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 1280; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t debt_cachelines = part_occupied_cachelines - max_part_size; + uint32_t cachelines_to_evict = cachelines_to_map + debt_cachelines; + + print_test_description("Freelist is empty and part occupancy exceeded"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + 
will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_int_equal(ocf_part_evict_size(&req), + (part_occupied_cachelines - max_part_size) + cachelines_to_map); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test08(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 320; + uint32_t cachelines_to_map = 0; + uint32_t part_occupied_cachelines = 1280; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t debt_cachelines = part_occupied_cachelines - max_part_size; + uint32_t cachelines_to_evict = debt_cachelines + cachelines_to_repart; + + print_test_description("Target part occupancy limit is exceeded during " + "repart"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_int_equal(ocf_part_evict_size(&req), + (part_occupied_cachelines - max_part_size) + cachelines_to_repart); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test09(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 320; + uint32_t cachelines_to_map = 0; + uint32_t part_occupied_cachelines = 320; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_to_evict = 0; + + print_test_description("Repart while target part has enough of available " + "space"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test10(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 320; + uint32_t freelist_size = 320; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_to_evict = 0; + + print_test_description("Enough of available cachelines in target part, " + "freelist has exactly required number of free cachelines"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, 
cachelines_to_map); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test11(void **state) +{ + uint32_t max_part_size = 1024; + uint32_t cachelines_to_repart = 320; + uint32_t cachelines_to_map = 0; + uint32_t part_occupied_cachelines = 384; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_to_evict = 0; + + print_test_description("Number of cachelines to repart is equal to number " + "of cachelines available in the target partition"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + assert_int_equal(ocf_part_evict_size(&req), cachelines_to_evict); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test12(void **state) +{ + uint32_t max_part_size = 0; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 384; + uint32_t freelist_size = 0; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_to_evict = + part_occupied_cachelines + cachelines_to_map; + + print_test_description("Freelist IS empty. Max occupancy set to 0, but " + "some cachelines are still assigned to traget part - evict them"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_true(ocf_part_evict_size(&req) >= part_occupied_cachelines); + + test_free(req.cache); +} + +static void ocf_part_evict_size_test13(void **state) +{ + uint32_t max_part_size = 0; + uint32_t cachelines_to_repart = 0; + uint32_t cachelines_to_map = 320; + uint32_t part_occupied_cachelines = 384; + uint32_t freelist_size = 1024; + + struct ocf_request req; + req.cache = test_malloc(sizeof(struct ocf_cache)); + + uint32_t cachelines_to_evict = + part_occupied_cachelines + cachelines_to_map; + + print_test_description("Freelist IS NOT empty. 
Max occupancy set to 0, but" + " some cachelines are still assigned to traget part - evict them"); + + will_return(__wrap_ocf_part_get_max_size, max_part_size); + + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + will_return(__wrap_ocf_engine_repart_count, cachelines_to_repart); + + will_return(__wrap_ocf_part_get_occupancy, part_occupied_cachelines); + + will_return(__wrap_ocf_freelist_num_free, freelist_size); + will_return(__wrap_ocf_engine_unmapped_count, cachelines_to_map); + + expect_function_call(__wrap_ocf_req_set_part_evict); + + assert_true(ocf_part_evict_size(&req) >= part_occupied_cachelines); + + test_free(req.cache); +} + +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(ocf_part_evict_size_test01), + cmocka_unit_test(ocf_part_evict_size_test02), + cmocka_unit_test(ocf_part_evict_size_test03), + cmocka_unit_test(ocf_part_evict_size_test04), + cmocka_unit_test(ocf_part_evict_size_test05), + cmocka_unit_test(ocf_part_evict_size_test06), + cmocka_unit_test(ocf_part_evict_size_test07), + cmocka_unit_test(ocf_part_evict_size_test08), + cmocka_unit_test(ocf_part_evict_size_test09), + cmocka_unit_test(ocf_part_evict_size_test10), + cmocka_unit_test(ocf_part_evict_size_test11), + cmocka_unit_test(ocf_part_evict_size_test12), + cmocka_unit_test(ocf_part_evict_size_test13) + }; + + print_message("Unit test for ocf_part_evict_size\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +} From 9e11a88f2e2d83879679cc3fd87d797f28f163c0 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Wed, 4 Nov 2020 08:17:26 -0500 Subject: [PATCH 15/22] Occupancy per ioclass Respect occpuancy limit set single ioclass Signed-off-by: Michal Mielewczyk --- src/engine/engine_common.c | 189 +++++++++++++++++++++++++++---------- src/engine/engine_fast.c | 26 +++-- 2 files changed, 154 insertions(+), 61 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index ba20e66..2f25137 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -14,6 +14,7 @@ #include "../utils/utils_cache_line.h" #include "../ocf_request.h" #include "../utils/utils_cleaner.h" +#include "../utils/utils_part.h" #include "../metadata/metadata.h" #include "../eviction/eviction.h" #include "../promotion/promotion.h" @@ -409,15 +410,6 @@ static void _ocf_engine_clean_end(void *private_data, int error) } } -static int ocf_engine_evict(struct ocf_request *req) -{ - if (!ocf_engine_unmapped_count(req)) - return 0; - - return space_managment_evict_do(req->cache, req, - ocf_engine_unmapped_count(req)); -} - static int lock_clines(struct ocf_request *req, const struct ocf_engine_callbacks *engine_cbs) { @@ -433,13 +425,139 @@ static int lock_clines(struct ocf_request *req, } } +static inline int ocf_prepare_clines_hit(struct ocf_request *req, + const struct ocf_engine_callbacks *engine_cbs) +{ + int lock_status = -OCF_ERR_NO_LOCK; + struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; + uint32_t clines_to_evict; + int res; + + /* Cachelines are mapped in correct partition */ + if (ocf_part_is_enabled(&req->cache->user_parts[req->part_id]) && + !ocf_engine_needs_repart(req)) { + lock_status = lock_clines(req, engine_cbs); + ocf_req_hash_unlock_rd(req); + return lock_status; + } + + res = ocf_part_check_space(req, &clines_to_evict); + + if (res == OCF_PART_HAS_SPACE) + lock_status = lock_clines(req, engine_cbs); + + /* Since target part is empty and disabled, request should be submited in + * pass-through */ + if (res == 
OCF_PART_IS_DISABLED) + ocf_req_set_mapping_error(req); + + ocf_req_hash_unlock_rd(req); + + if (res != OCF_PART_IS_FULL) + return lock_status; + + ocf_metadata_start_exclusive_access(metadata_lock); + ocf_part_check_space(req, &clines_to_evict); + + if (space_managment_evict_do(req->cache, req, clines_to_evict) == + LOOKUP_MISS) { + ocf_req_set_mapping_error(req); + goto unlock; + } + + if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) { + /* Target part is disabled but had some cachelines assigned. Submit + * request in pass-through after eviction has been made */ + ocf_req_set_mapping_error(req); + goto unlock; + } + + lock_status = lock_clines(req, engine_cbs); + +unlock: + ocf_metadata_end_exclusive_access(metadata_lock); + + return lock_status; +} + +static inline int ocf_prepare_clines_miss(struct ocf_request *req, + const struct ocf_engine_callbacks *engine_cbs) +{ + int lock_status = -OCF_ERR_NO_LOCK; + struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; + uint32_t clines_to_evict = 0; + int res; + + /* Mapping must be performed holding (at least) hash-bucket write lock */ + ocf_req_hash_lock_upgrade(req); + + /* Verify whether partition occupancy threshold is not reached yet or cache + * is not out of free cachelines */ + res = ocf_part_check_space(req, &clines_to_evict); + if (res == OCF_PART_IS_DISABLED) { + ocf_req_set_mapping_error(req); + ocf_req_hash_unlock_wr(req); + return lock_status; + } + + if (res == OCF_PART_HAS_SPACE) { + ocf_engine_map(req); + if (ocf_req_test_mapping_error(req)) { + goto eviction; + } + + lock_status = lock_clines(req, engine_cbs); + if (lock_status < 0) { + /* Mapping succeeded, but we failed to acquire cacheline lock. + * Don't try to evict, just return error to caller */ + ocf_req_set_mapping_error(req); + } + + ocf_req_hash_unlock_wr(req); + return lock_status; + } + +eviction: + ocf_req_hash_unlock_wr(req); + ocf_metadata_start_exclusive_access(metadata_lock); + + ocf_part_check_space(req, &clines_to_evict); + + if (space_managment_evict_do(req->cache, req, clines_to_evict) == + LOOKUP_MISS) { + ocf_req_set_mapping_error(req); + goto unlock; + } + + if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) { + /* Partition is disabled but it had cachelines assigned. 
Now, that they + * are evicted, don't try to map cachelines - we don't want to insert + * new cachelines - the request should be submited in pass through mode + * instead */ + ocf_req_set_mapping_error(req); + goto unlock; + } + + ocf_engine_map(req); + if (ocf_req_test_mapping_error(req)) + goto unlock; + + lock_status = lock_clines(req, engine_cbs); + if (lock_status < 0) + ocf_req_set_mapping_error(req); + +unlock: + ocf_metadata_end_exclusive_access(metadata_lock); + + return lock_status; +} + int ocf_engine_prepare_clines(struct ocf_request *req, const struct ocf_engine_callbacks *engine_cbs) { bool mapped; bool promote = true; - int lock = -ENOENT; - struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; + int lock = -OCF_ERR_NO_LOCK; /* Calculate hashes for hash-bucket locking */ ocf_req_hash(req); @@ -453,50 +571,19 @@ int ocf_engine_prepare_clines(struct ocf_request *req, ocf_engine_traverse(req); mapped = ocf_engine_is_mapped(req); - if (mapped) { - /* Request cachelines are already mapped, acquire cacheline - * lock */ - lock = lock_clines(req, engine_cbs); - } else { - /* check if request should promote cachelines */ - promote = ocf_promotion_req_should_promote( - req->cache->promotion_policy, req); - if (!promote) - req->info.mapping_error = 1; - } + if (mapped) + return ocf_prepare_clines_hit(req, engine_cbs); - if (mapped || !promote) { - /* Will not attempt mapping - release hash bucket lock */ + /* check if request should promote cachelines */ + promote = ocf_promotion_req_should_promote( + req->cache->promotion_policy, req); + if (!promote) { + req->info.mapping_error = 1; ocf_req_hash_unlock_rd(req); - } else { - /* Need to map (potentially evict) cachelines. Mapping must be - * performed holding (at least) hash-bucket write lock */ - ocf_req_hash_lock_upgrade(req); - ocf_engine_map(req); - if (!req->info.mapping_error) - lock = lock_clines(req, engine_cbs); - ocf_req_hash_unlock_wr(req); - - if (req->info.mapping_error) { - /* Not mapped - evict cachelines under global exclusive - * lock*/ - ocf_metadata_start_exclusive_access(metadata_lock); - - /* Now there is exclusive access for metadata. May - * traverse once again and evict cachelines if needed. 
- */ - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); - - if (!req->info.mapping_error) - lock = lock_clines(req, engine_cbs); - - ocf_metadata_end_exclusive_access(metadata_lock); - } + return lock; } - - return lock; + return ocf_prepare_clines_miss(req, engine_cbs); } static int _ocf_engine_clean_getter(struct ocf_cache *cache, diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c index 8beb599..23a828b 100644 --- a/src/engine/engine_fast.c +++ b/src/engine/engine_fast.c @@ -108,6 +108,7 @@ int ocf_read_fast(struct ocf_request *req) { bool hit; int lock = OCF_LOCK_NOT_ACQUIRED; + bool part_has_space = false; /* Get OCF request - increase reference counter */ ocf_req_get(req); @@ -124,14 +125,18 @@ int ocf_read_fast(struct ocf_request *req) ocf_engine_traverse(req); hit = ocf_engine_is_hit(req); - if (hit) { + + if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE) + part_has_space = true; + + if (hit && part_has_space) { ocf_io_start(&req->ioi.io); lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } ocf_req_hash_unlock_rd(req); - if (hit) { + if (hit && part_has_space) { OCF_DEBUG_RQ(req, "Fast path success"); if (lock >= 0) { @@ -154,10 +159,7 @@ int ocf_read_fast(struct ocf_request *req) /* Put OCF request - decrease reference counter */ ocf_req_put(req); - if (hit) - return OCF_FAST_PATH_YES; - else - return OCF_FAST_PATH_NO; + return (hit && part_has_space) ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO; } /* __ __ _ _ ______ _ _____ _ _ @@ -177,6 +179,7 @@ int ocf_write_fast(struct ocf_request *req) { bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; + int part_has_space = false; /* Get OCF request - increase reference counter */ ocf_req_get(req); @@ -193,14 +196,18 @@ int ocf_write_fast(struct ocf_request *req) ocf_engine_traverse(req); mapped = ocf_engine_is_mapped(req); - if (mapped) { + + if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE) + part_has_space = true; + + if (mapped && part_has_space) { ocf_io_start(&req->ioi.io); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } ocf_req_hash_unlock_rd(req); - if (mapped) { + if (mapped && part_has_space) { if (lock >= 0) { OCF_DEBUG_RQ(req, "Fast path success"); @@ -223,6 +230,5 @@ int ocf_write_fast(struct ocf_request *req) /* Put OCF request - decrease reference counter */ ocf_req_put(req); - return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO; - + return (mapped && part_has_space) ? 
OCF_FAST_PATH_YES : OCF_FAST_PATH_NO; } From 60680b15b2ecc6c946b28c33a5d3ba2727b3f361 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Mon, 7 Dec 2020 04:15:51 -0500 Subject: [PATCH 16/22] Accessors for `req->info.mapping_error` Signed-off-by: Michal Mielewczyk --- src/engine/engine_common.c | 10 +++++----- src/engine/engine_rd.c | 2 +- src/engine/engine_wb.c | 2 +- src/engine/engine_wt.c | 2 +- src/eviction/eviction.c | 2 +- src/ocf_request.h | 10 ++++++++++ 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index 2f25137..6eb5083 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -256,7 +256,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *req, ocf_cleaning_t clean_policy_type; if (!ocf_freelist_get_cache_line(cache->freelist, cache_line)) { - req->info.mapping_error = 1; + ocf_req_set_mapping_error(req); return; } @@ -336,7 +336,7 @@ static void ocf_engine_map(struct ocf_request *req) if (ocf_engine_unmapped_count(req) > ocf_freelist_num_free(cache->freelist)) { - req->info.mapping_error = 1; + ocf_req_set_mapping_error(req); return; } @@ -355,7 +355,7 @@ static void ocf_engine_map(struct ocf_request *req) ocf_engine_map_cache_line(req, entry->core_line, entry->hash, &entry->coll_idx); - if (req->info.mapping_error) { + if (ocf_req_test_mapping_error(req)) { /* * Eviction error (mapping error), need to * clean, return and do pass through @@ -377,7 +377,7 @@ static void ocf_engine_map(struct ocf_request *req) } - if (!req->info.mapping_error) { + if (!ocf_req_test_mapping_error(req)) { /* request has been inserted into cache - purge it from promotion * policy */ ocf_promotion_req_purge(cache->promotion_policy, req); @@ -578,7 +578,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req, promote = ocf_promotion_req_should_promote( req->cache->promotion_policy, req); if (!promote) { - req->info.mapping_error = 1; + ocf_req_set_mapping_error(req); ocf_req_hash_unlock_rd(req); return lock; } diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 0d999eb..b118262 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -243,7 +243,7 @@ int ocf_read_generic(struct ocf_request *req) lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks); - if (!req->info.mapping_error) { + if (!ocf_req_test_mapping_error(req)) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* Lock was not acquired, need to wait for resume */ diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c index d4c7041..a018969 100644 --- a/src/engine/engine_wb.c +++ b/src/engine/engine_wb.c @@ -189,7 +189,7 @@ int ocf_write_wb(struct ocf_request *req) lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks); - if (!req->info.mapping_error) { + if (!ocf_req_test_mapping_error(req)) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* WR lock was not acquired, need to wait for resume */ diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c index 040c25c..26d1163 100644 --- a/src/engine/engine_wt.c +++ b/src/engine/engine_wt.c @@ -183,7 +183,7 @@ int ocf_write_wt(struct ocf_request *req) lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks); - if (!req->info.mapping_error) { + if (!ocf_req_test_mapping_error(req)) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* WR lock was not acquired, need to wait for resume */ diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c index 51f2ec9..9c5a948 100644 --- a/src/eviction/eviction.c +++ 
b/src/eviction/eviction.c @@ -125,6 +125,6 @@ int space_managment_evict_do(struct ocf_cache *cache, if (evict_cline_no <= evicted) return LOOKUP_MAPPED; - req->info.mapping_error |= true; + ocf_req_set_mapping_error(req); return LOOKUP_MISS; } diff --git a/src/ocf_request.h b/src/ocf_request.h index 1603d38..08431dc 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -381,6 +381,16 @@ static inline void ocf_req_clear(struct ocf_request *req) env_atomic_set(&req->req_remaining, 0); } +static inline void ocf_req_set_mapping_error(struct ocf_request *req) +{ + req->info.mapping_error = true; +} + +static inline bool ocf_req_test_mapping_error(struct ocf_request *req) +{ + return req->info.mapping_error; +} + /** * @brief Return OCF request reference count * From 76148de3106772b89d3064be31d06d26435caec8 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Mon, 7 Dec 2020 04:13:02 -0500 Subject: [PATCH 17/22] Unit tests for mapping Signed-off-by: Michal Mielewczyk --- .../engine_common.c/prepare_clines_hit.c | 198 +++++++++++++ .../engine_common.c/prepare_clines_miss.c | 263 ++++++++++++++++++ 2 files changed, 461 insertions(+) create mode 100644 tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c create mode 100644 tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c diff --git a/tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c b/tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c new file mode 100644 index 0000000..d66bc8a --- /dev/null +++ b/tests/unit/tests/engine/engine_common.c/prepare_clines_hit.c @@ -0,0 +1,198 @@ +/* + * src/engine/engine_common.c + * ocf_prepare_clines_hit + * + * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE + * ONE FUNCTION PER LINE + * + */ + +#undef static + +#undef inline + + +#include +#include +#include +#include +#include "print_desc.h" + +#include "ocf/ocf.h" +#include "../ocf_priv.h" +#include "../ocf_cache_priv.h" +#include "../ocf_queue_priv.h" +#include "../ocf_freelist.h" +#include "engine_common.h" +#include "engine_debug.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_request.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_part.h" +#include "../metadata/metadata.h" +#include "../eviction/eviction.h" +#include "../promotion/promotion.h" +#include "../concurrency/ocf_concurrency.h" + +#include "engine/engine_common.c/prepare_clines_miss_generated_wraps.c" + +void __wrap_ocf_req_hash_unlock_rd(struct ocf_request *req) +{ +} + +uint32_t __wrap_ocf_part_check_space(struct ocf_request *req, + uint32_t *to_evict) +{ + return mock(); +} + +int __wrap_lock_clines(struct ocf_request *req, + const struct ocf_engine_callbacks *engine_cbs) +{ + function_called(); + return mock(); +} + +void __wrap_ocf_metadata_start_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ +} + +void __wrap_ocf_metadata_end_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ +} + +int __wrap_space_managment_evict_do(struct ocf_cache *cache, + struct ocf_request *req, uint32_t evict_cline_no) +{ + return mock(); +} + +bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part) +{ + return mock(); +} + +bool __wrap_ocf_engine_needs_repart(struct ocf_request *req) +{ + return mock(); +} + +void __wrap_ocf_req_set_mapping_error(struct ocf_request *req) +{ + function_called(); +} + +static void ocf_prepare_clines_hit_test01(void **state) +{ + struct ocf_request req = {}; + print_test_description("Request is hit and part is enabled\n"); + will_return(__wrap_ocf_part_is_enabled, true); 
+ will_return(__wrap_ocf_engine_needs_repart, false); + + will_return(__wrap_lock_clines, 0); + expect_function_call(__wrap_lock_clines); + + assert_int_equal(ocf_prepare_clines_hit(&req, NULL), 0); +} + +static void ocf_prepare_clines_hit_test02(void **state) +{ + struct ocf_request req = {}; + print_test_description("Request is hit but part is disabled - tigger eviction\n"); + will_return(__wrap_ocf_part_is_enabled, false); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_DISABLED); + + expect_function_call(__wrap_ocf_req_set_mapping_error); + + assert_int_equal(ocf_prepare_clines_hit(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_hit_test03(void **state) +{ + struct ocf_request req = {}; + print_test_description("Request needs repart, part has enough of a free space\n"); + will_return(__wrap_ocf_part_is_enabled, true); + will_return(__wrap_ocf_engine_needs_repart, true); + + will_return(__wrap_ocf_part_check_space, OCF_PART_HAS_SPACE); + + expect_function_call(__wrap_lock_clines); + will_return(__wrap_lock_clines, 0); + + assert_int_equal(ocf_prepare_clines_hit(&req, NULL), 0); +} + +static void ocf_prepare_clines_hit_test04(void **state) +{ + struct ocf_request req = {}; + print_test_description("Request needs repart, eviction fails\n"); + will_return(__wrap_ocf_part_is_enabled, true); + will_return(__wrap_ocf_engine_needs_repart, true); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_space_managment_evict_do, LOOKUP_MISS); + expect_function_call(__wrap_ocf_req_set_mapping_error); + + assert_int_equal(ocf_prepare_clines_hit(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_hit_test05(void **state) +{ + struct ocf_request req = {}; + print_test_description("Request needs repart, eviction passed, no lock\n"); + + will_return(__wrap_ocf_part_is_enabled, true); + will_return(__wrap_ocf_engine_needs_repart, true); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_space_managment_evict_do, LOOKUP_HIT); + + expect_function_call(__wrap_lock_clines); + will_return(__wrap_lock_clines, OCF_ERR_NO_LOCK); + + will_return(__wrap_ocf_part_is_enabled, true); + + assert_int_equal(ocf_prepare_clines_hit(&req, NULL), OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_hit_test06(void **state) +{ + struct ocf_request req = {}; + print_test_description("Partition is disabled, but has some cachelines assigned.\n"); + print_test_description("Trigger eviction and but don't lock cachelines\n"); + + will_return(__wrap_ocf_part_is_enabled, false); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_space_managment_evict_do, LOOKUP_HIT); + + will_return(__wrap_ocf_part_is_enabled, false); + expect_function_call(__wrap_ocf_req_set_mapping_error); + + assert_int_equal(ocf_prepare_clines_hit(&req, NULL), -OCF_ERR_NO_LOCK); +} + +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(ocf_prepare_clines_hit_test01), + cmocka_unit_test(ocf_prepare_clines_hit_test02), + cmocka_unit_test(ocf_prepare_clines_hit_test03), + cmocka_unit_test(ocf_prepare_clines_hit_test04), + cmocka_unit_test(ocf_prepare_clines_hit_test05), + cmocka_unit_test(ocf_prepare_clines_hit_test06) + }; + + print_message("Unit test for ocf_prepare_clines_hit\n"); + + return 
cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c b/tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c new file mode 100644 index 0000000..81fec82 --- /dev/null +++ b/tests/unit/tests/engine/engine_common.c/prepare_clines_miss.c @@ -0,0 +1,263 @@ +/* + * src/engine/engine_common.c + * ocf_prepare_clines_miss + * + * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE + * ONE FUNCTION PER LINE + * + */ + +#undef static + +#undef inline + + +#include +#include +#include +#include +#include "print_desc.h" + +#include "ocf/ocf.h" +#include "../ocf_priv.h" +#include "../ocf_cache_priv.h" +#include "../ocf_queue_priv.h" +#include "../ocf_freelist.h" +#include "engine_common.h" +#include "engine_debug.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_request.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_part.h" +#include "../metadata/metadata.h" +#include "../eviction/eviction.h" +#include "../promotion/promotion.h" +#include "../concurrency/ocf_concurrency.h" + +#include "engine/engine_common.c/prepare_clines_miss_generated_wraps.c" + +void __wrap_ocf_req_hash_lock_upgrade(struct ocf_request *req) +{ +} + +void __wrap_ocf_req_hash_unlock_wr(struct ocf_request *req) +{ +} + +uint32_t __wrap_ocf_part_check_space(struct ocf_request *req, + uint32_t *to_evict) +{ + return mock(); +} + +int __wrap_lock_clines(struct ocf_request *req, + const struct ocf_engine_callbacks *engine_cbs) +{ + function_called(); + return mock(); +} + +void __wrap_ocf_metadata_start_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ +} + +void __wrap_ocf_metadata_end_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ +} + +int __wrap_space_managment_evict_do(struct ocf_cache *cache, + struct ocf_request *req, uint32_t evict_cline_no) +{ + function_called(); + return mock(); +} + +bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part) +{ + return mock(); +} + +void __wrap_ocf_engine_map(struct ocf_request *req) +{ + function_called(); +} + +bool __wrap_ocf_req_test_mapping_error(struct ocf_request *req) +{ + return mock(); +} + +void __wrap_ocf_req_set_mapping_error(struct ocf_request *req) +{ + function_called(); +} + +static void ocf_prepare_clines_miss_test01(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is disabled and empty\n"); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_DISABLED); + expect_function_call(__wrap_ocf_req_set_mapping_error); + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_miss_test02(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is disabled but has cachelines assigned.\n"); + print_test_description("\tTrigger eviction and mark mapping error\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + expect_function_call(__wrap_space_managment_evict_do); + will_return(__wrap_space_managment_evict_do, LOOKUP_MAPPED); + will_return(__wrap_ocf_part_is_enabled, false); + expect_function_call(__wrap_ocf_req_set_mapping_error); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_miss_test03(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is enabled but doesn't have enough space.\n"); + print_test_description("\tEviction is ok and cachelines lock 
is acquired.\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + expect_function_call(__wrap_space_managment_evict_do); + will_return(__wrap_space_managment_evict_do, LOOKUP_MAPPED); + will_return(__wrap_ocf_part_is_enabled, true); + + expect_function_call(__wrap_ocf_engine_map); + + will_return(__wrap_ocf_req_test_mapping_error, false); + + will_return(__wrap_lock_clines, 0); + expect_function_call(__wrap_lock_clines); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), 0); +} + +static void ocf_prepare_clines_miss_test04(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is enabled but doesn't have enough space.\n"); + print_test_description("\tEviction failed\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + expect_function_call(__wrap_space_managment_evict_do); + will_return(__wrap_space_managment_evict_do, LOOKUP_MISS); + expect_function_call(__wrap_ocf_req_set_mapping_error); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_miss_test05(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is enabled but doesn't have enough space.\n"); + print_test_description("Eviction is ok, but mapping failed.\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + expect_function_call(__wrap_space_managment_evict_do); + will_return(__wrap_space_managment_evict_do, LOOKUP_HIT); + + will_return(__wrap_ocf_part_is_enabled, true); + + expect_function_call(__wrap_ocf_engine_map); + will_return(__wrap_ocf_req_test_mapping_error, true); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_miss_test06(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is enabled but doesn't have enough space.\n"); + print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + expect_function_call(__wrap_space_managment_evict_do); + will_return(__wrap_space_managment_evict_do, LOOKUP_HIT); + + will_return(__wrap_ocf_part_is_enabled, true); + + expect_function_call(__wrap_ocf_engine_map); + will_return(__wrap_ocf_req_test_mapping_error, false); + + expect_function_call(__wrap_lock_clines); + will_return(__wrap_lock_clines, -OCF_ERR_NO_LOCK); + + expect_function_call(__wrap_ocf_req_set_mapping_error); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK); +} + +static void ocf_prepare_clines_miss_test07(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is enabled but doesn't have enough space.\n"); + print_test_description("Eviction and mapping were ok, lock not acquired.\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + will_return(__wrap_ocf_part_check_space, OCF_PART_IS_FULL); + + expect_function_call(__wrap_space_managment_evict_do); + will_return(__wrap_space_managment_evict_do, LOOKUP_HIT); + + will_return(__wrap_ocf_part_is_enabled, true); + + expect_function_call(__wrap_ocf_engine_map); + will_return(__wrap_ocf_req_test_mapping_error, false); + + expect_function_call(__wrap_lock_clines); + 
will_return(__wrap_lock_clines, OCF_LOCK_NOT_ACQUIRED); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_NOT_ACQUIRED); +} + +static void ocf_prepare_clines_miss_test08(void **state) +{ + struct ocf_request req = {}; + print_test_description("Target part is enabled has enough space.\n"); + print_test_description("\tMapping and cacheline lock are both ok\n"); + + will_return(__wrap_ocf_part_check_space, OCF_PART_HAS_SPACE); + + expect_function_call(__wrap_ocf_engine_map); + will_return(__wrap_ocf_req_test_mapping_error, false); + + expect_function_call(__wrap_lock_clines); + will_return(__wrap_lock_clines, OCF_LOCK_ACQUIRED); + + assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_ACQUIRED); +} + +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(ocf_prepare_clines_miss_test01), + cmocka_unit_test(ocf_prepare_clines_miss_test02), + cmocka_unit_test(ocf_prepare_clines_miss_test03), + cmocka_unit_test(ocf_prepare_clines_miss_test04), + cmocka_unit_test(ocf_prepare_clines_miss_test05), + cmocka_unit_test(ocf_prepare_clines_miss_test06), + cmocka_unit_test(ocf_prepare_clines_miss_test07), + cmocka_unit_test(ocf_prepare_clines_miss_test08) + }; + + print_message("Unit test for ocf_prepare_clines_miss\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +} From bcfc821068ce4c47c7ed2b3b98171788d6e2c54c Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 26 Nov 2020 05:34:22 -0500 Subject: [PATCH 18/22] Don't calc free cachelines in per-ioclass stats Signed-off-by: Michal Mielewczyk --- src/ocf_stats.c | 11 +---------- src/ocf_stats_builder.c | 12 +++--------- 2 files changed, 4 insertions(+), 19 deletions(-) diff --git a/src/ocf_stats.c b/src/ocf_stats.c index 102c9fb..2f5e37f 100644 --- a/src/ocf_stats.c +++ b/src/ocf_stats.c @@ -276,10 +276,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id, struct ocf_stats_io_class *stats) { ocf_cache_t cache; - uint32_t cache_occupancy_total = 0; struct ocf_counters_part *part_stat; - ocf_core_t i_core; - ocf_core_id_t i_core_id; OCF_CHECK_NULL(core); OCF_CHECK_NULL(stats); @@ -292,11 +289,6 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id, if (!ocf_part_is_valid(&cache->user_parts[part_id])) return -OCF_ERR_IO_CLASS_NOT_EXIST; - for_each_core(cache, i_core, i_core_id) { - cache_occupancy_total += env_atomic_read( - &i_core->runtime_meta->cached_clines); - } - part_stat = &core->counters->part_counters[part_id]; stats->occupancy_clines = env_atomic_read(&core->runtime_meta-> @@ -304,8 +296,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id, stats->dirty_clines = env_atomic_read(&core->runtime_meta-> part_counters[part_id].dirty_clines); - stats->free_clines = cache->conf_meta->cachelines - - cache_occupancy_total; + stats->free_clines = 0; copy_req_stats(&stats->read_reqs, &part_stat->read_reqs); copy_req_stats(&stats->write_reqs, &part_stat->write_reqs); diff --git a/src/ocf_stats_builder.c b/src/ocf_stats_builder.c index 604928b..d418fe2 100644 --- a/src/ocf_stats_builder.c +++ b/src/ocf_stats_builder.c @@ -229,15 +229,9 @@ static void _ocf_stats_part_fill(ocf_cache_t cache, ocf_part_id_t part_id, _lines4k(stats->occupancy_clines, cache_line_size), _lines4k(cache_size, cache_line_size)); - if (part_id == PARTITION_DEFAULT) { - _set(&usage->free, - _lines4k(stats->free_clines, cache_line_size), - _lines4k(cache_size, cache_line_size)); - } else { - _set(&usage->free, - _lines4k(0, cache_line_size), - _lines4k(0, 
cache_line_size)); - } + _set(&usage->free, + _lines4k(stats->free_clines, cache_line_size), + _lines4k(cache_size, cache_line_size)); _set(&usage->clean, _lines4k(stats->occupancy_clines - stats->dirty_clines, From 0dc8b5811ce59e48141eea2d12ee1c437a513bde Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Mon, 30 Nov 2020 19:21:12 -0500 Subject: [PATCH 19/22] Store min and max ioclass size as percentage val Min and max values, keept as an explicit number of cachelines, are tightly coupled with particular cache. This might lead to errors and mismatches after reattaching cache of different size. To prevent those errors, min and max should be calculated dynamically. Signed-off-by: Michal Mielewczyk --- src/eviction/eviction.c | 21 +++++++++++++-------- src/metadata/metadata_partition.h | 2 +- src/mngt/ocf_mngt_io_class.c | 16 +++++++++------- src/utils/utils_part.h | 27 ++++++++++++++++++++++++--- 4 files changed, 47 insertions(+), 19 deletions(-) diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c index 9c5a948..a61e9df 100644 --- a/src/eviction/eviction.c +++ b/src/eviction/eviction.c @@ -20,10 +20,14 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = { }, }; -static uint32_t ocf_evict_calculate(struct ocf_user_part *part, - uint32_t to_evict, bool roundup) +static uint32_t ocf_evict_calculate(ocf_cache_t cache, + struct ocf_user_part *part, uint32_t to_evict, bool roundup) { - if (part->runtime->curr_size <= part->config->min_size) { + + uint32_t curr_part_size = ocf_part_get_occupancy(part); + uint32_t min_part_size = ocf_part_get_min_size(cache, part); + + if (curr_part_size <= min_part_size) { /* * Cannot evict from this partition because current size * is less than minimum size @@ -34,8 +38,8 @@ static uint32_t ocf_evict_calculate(struct ocf_user_part *part, if (roundup && to_evict < OCF_TO_EVICTION_MIN) to_evict = OCF_TO_EVICTION_MIN; - if (to_evict > (part->runtime->curr_size - part->config->min_size)) - to_evict = part->runtime->curr_size - part->config->min_size; + if (to_evict > (curr_part_size - min_part_size)) + to_evict = curr_part_size - min_part_size; return to_evict; } @@ -49,8 +53,8 @@ static inline uint32_t ocf_evict_part_do(ocf_cache_t cache, if (!evp_lru_can_evict(cache)) return 0; - to_evict = ocf_evict_calculate(&cache->user_parts[target_part_id], - evict_cline_no, false); + to_evict = ocf_evict_calculate(cache, target_part, evict_cline_no, + false); return ocf_eviction_need_space(cache, io_queue, target_part, to_evict); @@ -88,7 +92,8 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache, goto out; } - to_evict = ocf_evict_calculate(part, evict_cline_no, true); + to_evict = ocf_evict_calculate(cache, part, evict_cline_no, + true); if (to_evict == 0) { /* No cache lines to evict for this partition */ continue; diff --git a/src/metadata/metadata_partition.h b/src/metadata/metadata_partition.h index 0dd024f..272bad4 100644 --- a/src/metadata/metadata_partition.h +++ b/src/metadata/metadata_partition.h @@ -11,7 +11,7 @@ #define PARTITION_DEFAULT 0 #define PARTITION_INVALID ((ocf_part_id_t)-1) -#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1) +#define PARTITION_SIZE_MAX 100 static inline void ocf_metadata_get_partition_info( struct ocf_cache *cache, ocf_cache_line_t line, diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c index 5cf398d..a8ece82 100644 --- a/src/mngt/ocf_mngt_io_class.c +++ b/src/mngt/ocf_mngt_io_class.c @@ -43,6 +43,9 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache, if 
(cache->user_parts[part_id].config->flags.valid) return -OCF_ERR_INVAL; + if (min_size > max_size) + return -OCF_ERR_INVAL; + if (max_size > PARTITION_SIZE_MAX) return -OCF_ERR_INVAL; @@ -87,8 +90,7 @@ static int _ocf_mngt_set_partition_size(struct ocf_cache *cache, if (min > max) return -OCF_ERR_INVAL; - if (_ocf_mngt_count_parts_min_size(cache) + min - >= cache->device->collision_table_entries) { + if (_ocf_mngt_count_parts_min_size(cache) + min > PARTITION_SIZE_MAX) { /* Illegal configuration in which sum of all min_sizes exceeds * cache size. */ @@ -136,7 +138,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, /* Try set partition size */ if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) { ocf_cache_log(cache, log_info, - "Setting IO class size, id: %u, name: '%s', max size: %u" + "Setting IO class size, id: %u, name: '%s', max size: %u%%" " [ ERROR ]\n", part_id, dest_part->config->name, max); return -OCF_ERR_INVAL; } @@ -145,7 +147,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, ocf_cache_log(cache, log_info, "Updating unclassified IO class, id: %u, name :'%s'," - "max size: %u [ OK ]\n", + "max size: %u%% [ OK ]\n", part_id, dest_part->config->name, max); return 0; } @@ -160,7 +162,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, /* Try set partition size */ if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) { ocf_cache_log(cache, log_info, - "Setting IO class size, id: %u, name: '%s', max size %u" + "Setting IO class size, id: %u, name: '%s', max size %u%%" "[ ERROR ]\n", part_id, dest_part->config->name, max); return -OCF_ERR_INVAL; } @@ -168,14 +170,14 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache, if (ocf_part_is_valid(dest_part)) { /* Updating existing */ ocf_cache_log(cache, log_info, "Updating existing IO " - "class, id: %u, name: '%s', max size %u [ OK ]\n", + "class, id: %u, name: '%s', max size %u%% [ OK ]\n", part_id, dest_part->config->name, max); } else { /* Adding new */ ocf_part_set_valid(cache, part_id, true); ocf_cache_log(cache, log_info, "Adding new IO class, " - "id: %u, name: '%s', max size %u [ OK ]\n", part_id, + "id: %u, name: '%s', max size %u%% [ OK ]\n", part_id, dest_part->config->name, max); } diff --git a/src/utils/utils_part.h b/src/utils/utils_part.h index f993061..17c5fb2 100644 --- a/src/utils/utils_part.h +++ b/src/utils/utils_part.h @@ -56,10 +56,31 @@ static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part) return part->runtime->curr_size; } -static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache, - ocf_part_id_t part_id) +static inline uint32_t ocf_part_get_min_size(ocf_cache_t cache, + struct ocf_user_part *part) { - return cache->user_parts[part_id].config->max_size; + uint64_t ioclass_size; + + ioclass_size = part->config->min_size * cache->conf_meta->cachelines; + + ioclass_size /= 100; + + return (uint32_t)ioclass_size; +} + + +static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache, + struct ocf_user_part *part) +{ + uint64_t ioclass_size, max_size, cache_size; + + max_size = part->config->max_size; + cache_size = cache->conf_meta->cachelines; + + ioclass_size = max_size * cache_size; + ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100); + + return (uint32_t)ioclass_size; } void ocf_part_move(struct ocf_request *req); From 93318696cdb21079393062478360b04821bc9353 Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Thu, 3 Dec 2020 05:34:06 -0500 Subject: [PATCH 20/22] pyocf: ioclass api Signed-off-by: Michal Mielewczyk --- 
tests/functional/pyocf/types/cache.py | 106 ++++++++++++++++++++++++ tests/functional/pyocf/types/ioclass.py | 36 ++++++++ 2 files changed, 142 insertions(+) create mode 100644 tests/functional/pyocf/types/ioclass.py diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py index d067c0f..928054b 100644 --- a/tests/functional/pyocf/types/cache.py +++ b/tests/functional/pyocf/types/cache.py @@ -25,6 +25,7 @@ from ..ocf import OcfLib from .shared import ( Uuid, OcfError, + OcfErrorCode, CacheLineSize, CacheLines, OcfCompletion, @@ -34,6 +35,7 @@ from ..utils import Size, struct_to_dict from .core import Core from .queue import Queue from .stats.cache import CacheInfo +from .ioclass import IoClassesInfo, IoClassInfo from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats @@ -75,6 +77,9 @@ class ConfValidValues: promotion_nhit_trigger_threshold_range = range(0, 100) +CACHE_MODE_NONE = -1 + + class CacheMode(IntEnum): WT = 0 WB = 1 @@ -299,6 +304,93 @@ class Cache: if status: raise OcfError("Error setting cache seq cut off policy", status) + def get_partition_info(self, part_id: int): + ioclass_info = IoClassInfo() + self.read_lock() + + status = self.owner.lib.ocf_cache_io_class_get_info( + self.cache_handle, part_id, byref(ioclass_info) + ) + + self.read_unlock() + if status: + raise OcfError("Error retriving ioclass info", status) + + return { + "_name": ioclass_info._name.decode("ascii"), + "_cache_mode": ioclass_info._cache_mode, + "_priority": int(ioclass_info._priority), + "_curr_size": (ioclass_info._curr_size), + "_min_size": int(ioclass_info._min_size), + "_max_size": int(ioclass_info._max_size), + "_eviction_policy_type": int(ioclass_info._eviction_policy_type), + "_cleaning_policy_type": int(ioclass_info._cleaning_policy_type), + } + + def add_partition( + self, part_id: int, name: str, min_size: int, max_size: int, priority: int, valid: bool + ): + self.write_lock() + + _name = name.encode("ascii") + + status = self.owner.lib.ocf_mngt_add_partition_to_cache( + self.cache_handle, part_id, _name, min_size, max_size, priority, valid + ) + + self.write_unlock() + + if status: + raise OcfError("Error adding partition to cache", status) + + def configure_partition( + self, + part_id: int, + name: str, + min_size: int, + max_size: int, + priority: int, + cache_mode=CACHE_MODE_NONE, + ): + ioclasses_info = IoClassesInfo() + + self.read_lock() + + for i in range(IoClassesInfo.MAX_IO_CLASSES): + ioclass_info = IoClassInfo() + status = self.owner.lib.ocf_cache_io_class_get_info( + self.cache_handle, i, byref(ioclass_info) + ) + if status not in [0, -OcfErrorCode.OCF_ERR_IO_CLASS_NOT_EXIST]: + raise OcfError("Error retriving existing ioclass config", status) + ioclasses_info._config[i]._class_id = i + ioclasses_info._config[i]._name = ( + ioclass_info._name if len(ioclass_info._name) > 0 else 0 + ) + ioclasses_info._config[i]._prio = ioclass_info._priority + ioclasses_info._config[i]._cache_mode = ioclass_info._cache_mode + ioclasses_info._config[i]._min_size = ioclass_info._min_size + ioclasses_info._config[i]._max_size = ioclass_info._max_size + + self.read_unlock() + + ioclasses_info._config[part_id]._name = name.encode("ascii") + ioclasses_info._config[part_id]._cache_mode = int(cache_mode) + ioclasses_info._config[part_id]._prio = priority + ioclasses_info._config[part_id]._min_size = min_size + ioclasses_info._config[part_id]._max_size = max_size + + self.write_lock() + + status = self.owner.lib.ocf_mngt_cache_io_classes_configure( + 
self.cache_handle, byref(ioclasses_info)
+        )
+
+        self.write_unlock()
+
+        if status:
+            raise OcfError("Error configuring partitions in cache", status)
+
     def configure_device(
         self, device, force=False, perform_test=True, cache_line_size=None
     ):
@@ -605,3 +697,17 @@ lib.ocf_mngt_cache_cleaning_set_param.argtypes = [
     c_uint32,
 ]
 lib.ocf_mngt_cache_cleaning_set_param.restype = c_int
+lib.ocf_cache_io_class_get_info.restype = c_int
+lib.ocf_cache_io_class_get_info.argtypes = [c_void_p, c_uint32, c_void_p]
+lib.ocf_mngt_add_partition_to_cache.restype = c_int
+lib.ocf_mngt_add_partition_to_cache.argtypes = [
+    c_void_p,
+    c_uint16,
+    c_char_p,
+    c_uint32,
+    c_uint32,
+    c_uint8,
+    c_bool,
+]
+lib.ocf_mngt_cache_io_classes_configure.restype = c_int
+lib.ocf_mngt_cache_io_classes_configure.argtypes = [c_void_p, c_void_p]
diff --git a/tests/functional/pyocf/types/ioclass.py b/tests/functional/pyocf/types/ioclass.py
new file mode 100644
index 0000000..ca84ad9
--- /dev/null
+++ b/tests/functional/pyocf/types/ioclass.py
@@ -0,0 +1,36 @@
+#
+# Copyright(c) 2019-2020 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint8, c_uint32, c_int, c_int16, c_uint16, c_char, c_char_p, Structure
+
+
+class IoClassInfo(Structure):
+    MAX_IO_CLASS_NAME_SIZE = 1024
+    _fields_ = [
+        ("_name", c_char * MAX_IO_CLASS_NAME_SIZE),
+        ("_cache_mode", c_int),
+        ("_priority", c_int16),
+        ("_curr_size", c_uint32),
+        ("_min_size", c_uint32),
+        ("_max_size", c_uint32),
+        ("_eviction_policy_type", c_uint8),
+        ("_cleaning_policy_type", c_int),
+    ]
+
+
+class IoClassConfig(Structure):
+    _fields_ = [
+        ("_class_id", c_uint32),
+        ("_name", c_char_p),
+        ("_prio", c_uint16),
+        ("_cache_mode", c_int),
+        ("_min_size", c_uint32),
+        ("_max_size", c_uint32),
+    ]
+
+
+class IoClassesInfo(Structure):
+    MAX_IO_CLASSES = 33
+    _fields_ = [("_config", IoClassConfig * MAX_IO_CLASSES)]
From 5c2a5c5e794de1dfd68b07e184015fe36eefb649 Mon Sep 17 00:00:00 2001
From: Michal Mielewczyk
Date: Thu, 3 Dec 2020 05:41:08 -0500
Subject: [PATCH 21/22] pyocf: add cache detach api

Signed-off-by: Michal Mielewczyk
---
 tests/functional/pyocf/types/cache.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py
index 928054b..9644342
--- a/tests/functional/pyocf/types/cache.py
+++ b/tests/functional/pyocf/types/cache.py
@@ -431,6 +431,21 @@ class Cache:
         if c.results["error"]:
             raise OcfError("Attaching cache device failed", c.results["error"])
+    def detach_device(self):
+        self.write_lock()
+
+        c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+
+        self.owner.lib.ocf_mngt_cache_detach(
+            self.cache_handle, c, None
+        )
+
+        c.wait()
+        self.write_unlock()
+
+        if c.results["error"]:
+            raise OcfError("Detaching cache device failed", c.results["error"])
+
     def load_cache(self, device):
         self.configure_device(device)
         c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
From 2206b2fdef6590148689b0adaa41d81a67e32f0a Mon Sep 17 00:00:00 2001
From: Michal Mielewczyk
Date: Thu, 3 Dec 2020 07:20:47 -0500
Subject: [PATCH 22/22] pyocf: cache attach test

Signed-off-by: Michal Mielewczyk
---
 .../tests/management/test_attach_cache.py | 121 ++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100644 tests/functional/tests/management/test_attach_cache.py

diff --git a/tests/functional/tests/management/test_attach_cache.py b/tests/functional/tests/management/test_attach_cache.py
new file mode 100644
index 0000000..5ec8002
--- /dev/null
+++ b/tests/functional/tests/management/test_attach_cache.py
@@ -0,0 +1,121 @@
+#
+# Copyright(c) 2019-2020 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int, c_void_p, byref, c_uint32, memmove, cast
+from random import randrange
+from itertools import count
+
+import pytest
+
+from pyocf.ocf import OcfLib
+from pyocf.types.cache import (
+    Cache,
+    CacheMode,
+    MetadataLayout,
+    EvictionPolicy,
+    CleaningPolicy,
+)
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import (
+    OcfError,
+    OcfCompletion,
+    CacheLines,
+    CacheLineSize,
+    SeqCutOffPolicy,
+)
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", [CacheMode.WB, CacheMode.WT, CacheMode.WO])
+@pytest.mark.parametrize("new_cache_size", [25, 45])
+def test_attach_different_size(
+    pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize
+):
+    """Start cache and add a partition with limited occupancy. Fill the partition with
+    data, attach a cache device of a different size and trigger IO. Verify that the
+    occupancy threshold is respected with both the original and the new cache device.
+    """
+    cache_device = Volume(Size.from_MiB(35))
+    core_device = Volume(Size.from_MiB(100))
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    core = Core.using_device(core_device)
+    cache.add_core(core)
+
+    cache.configure_partition(
+        part_id=1, name="test_part", min_size=0, max_size=50, priority=1
+    )
+
+    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
+
+    cache_size = cache.get_stats()["conf"]["size"]
+
+    block_size = 4096
+    data = bytes(block_size)
+
+    for i in range(cache_size.blocks_4k):
+        io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
+
+    part_current_size = CacheLines(
+        cache.get_partition_info(part_id=1)["_curr_size"], cls
+    )
+
+    assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
+
+    cache.detach_device()
+    new_cache_device = Volume(Size.from_MiB(new_cache_size))
+    cache.attach_device(new_cache_device, force=True)
+
+    cache_size = cache.get_stats()["conf"]["size"]
+
+    for i in range(cache_size.blocks_4k):
+        io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
+
+    part_current_size = CacheLines(
+        cache.get_partition_info(part_id=1)["_curr_size"], cls
+    )
+
+    assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
+
+
+def io_to_exp_obj(core, address, size, data, offset, direction, target_ioclass, flags):
+    return _io(
+        core.new_io,
+        core.cache.get_default_queue(),
+        address,
+        size,
+        data,
+        offset,
+        direction,
+        target_ioclass,
+        flags,
+    )
+
+
+def _io(new_io, queue, address, size, data, offset, direction, target_ioclass, flags):
+    io = new_io(queue, address, size, direction, target_ioclass, flags)
+    if direction == IoDir.READ:
+        _data = Data.from_bytes(bytes(size))
+    else:
+        _data = Data.from_bytes(data, offset, size)
+    ret = __io(io, queue, address, size, _data, direction)
+    if not ret and direction == IoDir.READ:
+        memmove(cast(data, c_void_p).value + offset, _data.handle, size)
+    return ret
+
+
+def __io(io, queue, address, size, data, direction):
+    io.set_data(data, 0)
+    completion = OcfCompletion([("err", c_int)])
+    io.callback = completion.callback
+    io.submit()
+    completion.wait()
+    return int(completion.results["err"])
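
A note on the mapping unit tests in patch 17: the cmocka cases in prepare_clines_miss.c encode the expected control flow of ocf_prepare_clines_miss() purely through mock expectations (check the target partition's space, evict when it is full, map, then lock the cache lines). The Python sketch below is a rough model of that flow inferred from those expectations alone; the constant values and helper names are hypothetical stand-ins rather than OCF symbols, and the real C function also rechecks the partition via ocf_part_check_space() after eviction and marks the request's mapping error on lock failure, which this model omits.

# Rough model of the branch structure asserted by prepare_clines_miss.c.
# All names and constant values here are illustrative, not OCF symbols.
OCF_PART_HAS_SPACE, OCF_PART_IS_FULL, OCF_PART_IS_DISABLED = range(3)
LOOKUP_HIT, LOOKUP_MISS, LOOKUP_MAPPED = range(3)
OCF_ERR_NO_LOCK = 1


def prepare_clines_miss_model(check_space, evict, part_enabled, engine_map,
                              mapping_error, lock_clines):
    space = check_space()
    if space == OCF_PART_IS_DISABLED:
        return -OCF_ERR_NO_LOCK          # test01: part disabled and empty
    if space != OCF_PART_HAS_SPACE:
        if evict() == LOOKUP_MISS:
            return -OCF_ERR_NO_LOCK      # test04: eviction failed
        if not part_enabled():
            return -OCF_ERR_NO_LOCK      # test02: disabled but not empty
    engine_map()
    if mapping_error():
        return -OCF_ERR_NO_LOCK          # test05: mapping failed
    return lock_clines()                 # tests 03, 06, 07, 08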
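
Patch 19 changes io class min_size and max_size from absolute cache-line counts to percentages of the cache, with ocf_part_get_min_size() truncating and ocf_part_get_max_size() rounding up via OCF_DIV_ROUND_UP(). The following standalone Python restatement of that arithmetic, using made-up cache sizes, shows where the two rounding modes diverge; it illustrates the math only and is not OCF code.

def part_min_lines(min_pct, cache_lines):
    # ocf_part_get_min_size(): truncating integer division by 100
    return (min_pct * cache_lines) // 100


def part_max_lines(max_pct, cache_lines):
    # ocf_part_get_max_size(): OCF_DIV_ROUND_UP(), i.e. ceiling division by 100
    return -(-(max_pct * cache_lines) // 100)


# Hypothetical cache of 1000 cache lines with a 0%..50% io class:
assert part_min_lines(0, 1000) == 0
assert part_max_lines(50, 1000) == 500

# The rounding modes differ when the percentage does not divide evenly;
# max is rounded up, so a non-zero limit never degenerates to zero lines:
assert part_min_lines(33, 101) == 33
assert part_max_lines(33, 101) == 34
assert part_max_lines(1, 50) == 1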
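
The pyocf additions in patches 20 through 22 (add_partition, configure_partition, get_partition_info, detach_device) combine with the existing attach path in test_attach_different_size above. The condensed usage sketch below assumes the same pyocf types used by that test (Volume, Cache, Core, Size) and an already initialised pyocf context; the device sizes are arbitrary.

# Sketch only: assumes a pyocf context is already set up, as in the test above.
from pyocf.types.cache import Cache
from pyocf.types.core import Core
from pyocf.types.volume import Volume
from pyocf.utils import Size

cache = Cache.start_on_device(Volume(Size.from_MiB(35)))
core = Core.using_device(Volume(Size.from_MiB(100)))
cache.add_core(core)

# Cap io class 1 at 50% of the cache; after patch 19 min/max are percentages.
cache.configure_partition(part_id=1, name="test_part", min_size=0, max_size=50,
                          priority=1)

# Read the configuration back; the dict mirrors the ocf_io_class_info fields.
part = cache.get_partition_info(part_id=1)
print(part["_name"], part["_min_size"], part["_max_size"], part["_curr_size"])

# Switch to a different (hypothetical) cache device; the 50% cap is then
# evaluated against the new device's size on subsequent IO.
cache.detach_device()
cache.attach_device(Volume(Size.from_MiB(45)), force=True)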