From b611c0e67abd0b3179d1825952af6bc7dc9a3446 Mon Sep 17 00:00:00 2001
From: Michal Mielewczyk
Date: Fri, 8 Mar 2019 04:43:19 -0500
Subject: [PATCH 1/2] Calling completion function in nop cleaning policy.

Unlocking the cache and putting the queue are performed in the cleaning
completion, so all cleaning policies have to call the completion.

Signed-off-by: Michal Mielewczyk
---
 src/cleaning/cleaning.c | 12 ++++--------
 src/cleaning/cleaning.h | 2 ++
 src/cleaning/nop.c | 14 ++++++++++++++
 src/cleaning/nop.h | 15 +++++++++++++++
 4 files changed, 35 insertions(+), 8 deletions(-)
 create mode 100644 src/cleaning/nop.c
 create mode 100644 src/cleaning/nop.h

diff --git a/src/cleaning/cleaning.c b/src/cleaning/cleaning.c
index 5964f81..ff8ea80 100644
--- a/src/cleaning/cleaning.c
+++ b/src/cleaning/cleaning.c
@@ -5,6 +5,7 @@
 
 #include "cleaning.h"
 #include "alru.h"
+#include "nop.h"
 #include "acp.h"
 #include "../ocf_priv.h"
 #include "../ocf_cache_priv.h"
@@ -13,11 +14,10 @@
 #include "../metadata/metadata.h"
 #include "../ocf_queue_priv.h"
 
-#define SLEEP_TIME_MS (1000)
-
 struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
 	[ocf_cleaning_nop] = {
 		.name = "nop",
+		.perform_cleaning = cleaning_nop_perform_cleaning,
 	},
 	[ocf_cleaning_alru] = {
 		.setup = cleaning_policy_alru_setup,
@@ -110,7 +110,6 @@ static void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
 
 	cleaner->end(cleaner, interval);
 	ocf_queue_put(cleaner->io_queue);
-	cleaner->io_queue = NULL;
 }
 
 void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
@@ -150,9 +149,6 @@ void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
 	ocf_queue_get(queue);
 	cleaner->io_queue = queue;
 
-	/* Call cleaning. */
-	if (cleaning_policy_ops[clean_type].perform_cleaning) {
-		cleaning_policy_ops[clean_type].perform_cleaning(cache,
-				ocf_cleaner_run_complete);
-	}
+	cleaning_policy_ops[clean_type].perform_cleaning(cache,
+			ocf_cleaner_run_complete);
 }
diff --git a/src/cleaning/cleaning.h b/src/cleaning/cleaning.h
index bb9061e..e8dcf75 100644
--- a/src/cleaning/cleaning.h
+++ b/src/cleaning/cleaning.h
@@ -14,6 +14,8 @@
 #define CLEANING_POLICY_CONFIG_BYTES 256
 #define CLEANING_POLICY_TYPE_MAX 4
 
+#define SLEEP_TIME_MS (1000)
+
 struct ocf_request;
 
 struct cleaning_policy_config {
diff --git a/src/cleaning/nop.c b/src/cleaning/nop.c
new file mode 100644
index 0000000..bfa0fab
--- /dev/null
+++ b/src/cleaning/nop.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "nop.h"
+#include "../ocf_cache_priv.h"
+
+void cleaning_nop_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl)
+{
+	uint32_t interval = SLEEP_TIME_MS;
+	cmpl(&cache->cleaner, interval);
+}
diff --git a/src/cleaning/nop.h b/src/cleaning/nop.h
new file mode 100644
index 0000000..291483f
--- /dev/null
+++ b/src/cleaning/nop.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __LAYER_CLEANING_POLICY_NOP_H__
+
+#define __LAYER_CLEANING_POLICY_NOP_H__
+
+#include "cleaning.h"
+#include "nop_structs.h"
+
+void cleaning_nop_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl);
+
+#endif

From d06f376627559ad4528807651d013460b6cdb6b9 Mon Sep 17 00:00:00 2001
From: Michal Mielewczyk
Date: Fri, 8 Mar 2019 04:45:29 -0500
Subject: [PATCH 2/2] Alru cleaning policy improvement

Instead of calling flush separately for each IO class, it is called
after collecting the number of dirty cache lines defined by the user or
after iterating through all IO classes. Signed-off-by: Michal Mielewczyk --- src/cleaning/alru.c | 156 +++++++++--------- src/cleaning/nop.c | 2 +- src/cleaning/nop.h | 3 +- .../cleaning.c/ocf_cleaner_run_test.c | 7 +- 4 files changed, 83 insertions(+), 85 deletions(-) diff --git a/src/cleaning/alru.c b/src/cleaning/alru.c index 9ffaeb1..e04f98e 100644 --- a/src/cleaning/alru.c +++ b/src/cleaning/alru.c @@ -51,8 +51,7 @@ struct flush_merge_struct { struct alru_flush_ctx { struct ocf_cleaner_attribs attribs; - struct ocf_user_part *parts[OCF_IO_CLASS_MAX]; - int part_id; + bool flush_perfomed; uint32_t clines_no; ocf_cache_t cache; ocf_cleaner_end_t cmpl; @@ -704,113 +703,110 @@ static bool block_is_busy(struct ocf_cache *cache, return false; } -static int get_data_to_flush(struct flush_data *dst, uint32_t clines_no, - struct ocf_cache *cache, struct ocf_user_part *part) +static int get_data_to_flush(struct alru_flush_ctx *fctx) { + ocf_cache_t cache = fctx->cache; struct alru_cleaning_policy_config *config; struct cleaning_policy_meta policy; ocf_cache_line_t cache_line; - int to_flush = 0; + struct ocf_user_part *parts[OCF_IO_CLASS_MAX]; uint32_t last_access; + int to_flush = 0; + int part_id = OCF_IO_CLASS_ID_MAX; config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; - cache_line = part->runtime->cleaning.policy.alru.lru_tail; + get_parts_sorted(parts, cache); - last_access = compute_timestamp(config); + while (part_id >= OCF_IO_CLASS_ID_MIN) { + cache_line = + parts[part_id]->runtime->cleaning.policy.alru.lru_tail; - OCF_DEBUG_PARAM(cache, "Last access=%u, timestamp=%u rel=%d", - last_access, policy.meta.alru.timestamp, - policy.meta.alru.timestamp < last_access); + last_access = compute_timestamp(config); - while (to_flush < clines_no && - more_blocks_to_flush(cache, cache_line, last_access)) { - if (!block_is_busy(cache, cache_line)) { - get_block_to_flush(&dst[to_flush], cache_line, cache); - to_flush++; + OCF_DEBUG_PARAM(cache, "Last access=%u, timestamp=%u rel=%d", + last_access, policy.meta.alru.timestamp, + policy.meta.alru.timestamp < last_access); + + while (more_blocks_to_flush(cache, cache_line, last_access)) { + if (to_flush >= fctx->clines_no) + goto end; + + if (!block_is_busy(cache, cache_line)) { + get_block_to_flush(&fctx->flush_data[to_flush], cache_line, + cache); + to_flush++; + } + + ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); + cache_line = policy.meta.alru.lru_prev; } - - ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); - cache_line = policy.meta.alru.lru_prev; + part_id--; } +end: OCF_DEBUG_PARAM(cache, "Collected items_to_clean=%u", to_flush); return to_flush; } -static bool alru_do_clean(ocf_cache_t cache, struct alru_flush_ctx *fctx) -{ - struct ocf_user_part *part = fctx->parts[fctx->part_id]; - int to_clean; - - if (!is_cleanup_possible(cache)) - return false; - - if (OCF_METADATA_LOCK_WR_TRY()) - return false; - - OCF_REALLOC(&fctx->flush_data, sizeof(fctx->flush_data[0]), - fctx->clines_no, &fctx->flush_data_limit); - if (!fctx->flush_data) { - OCF_METADATA_UNLOCK_WR(); - ocf_cache_log(cache, log_warn, "No memory to allocate flush " - "data for ALRU cleaning policy"); - return false; - } - - to_clean = get_data_to_flush(fctx->flush_data, fctx->clines_no, - cache, part); - if (to_clean > 0) { - fctx->clines_no -= to_clean; - ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, - to_clean, &fctx->attribs); - } else { - /* Update timestamp only if there are no items to be cleaned */ - 
cache->device->runtime_meta->cleaning_thread_access = - env_ticks_to_secs(env_get_tick_count()); - } - - OCF_METADATA_UNLOCK_WR(); - - return to_clean > 0; -} - -static void alru_clean(void *priv, int error) +static void alru_clean_complete(void *priv, int err) { struct alru_cleaning_policy_config *config; struct alru_flush_ctx *fctx = priv; ocf_cache_t cache = fctx->cache; int interval; - while (fctx->clines_no > 0 && --fctx->part_id >= 0) { - /* - * The alru_do_clean() function returns true when submitting - * flush request for the io class succeeded. In such case we - * return and wait until flush finishes - then this function - * will be called as completion callback and iteration over - * io classes will continue. - * - * If the processed io class contains nothing to clean, the - * alru_do_clean() function returns false, and then we try to - * clean another io class until we reach last io class or until - * requested number of cache lines will be flushed - then we - * call the completion and finish. - */ - if (alru_do_clean(cache, fctx)) - return; - } - OCF_REALLOC_DEINIT(&fctx->flush_data, &fctx->flush_data_limit); config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; - interval = (fctx->clines_no > 0) ? - config->thread_wakeup_time * 1000 : 0; + interval = fctx->flush_perfomed ? 0 : config->thread_wakeup_time * 1000; fctx->cmpl(&fctx->cache->cleaner, interval); } +static void alru_clean(struct alru_flush_ctx *fctx) +{ + ocf_cache_t cache = fctx->cache; + int to_clean; + + if (!is_cleanup_possible(cache)) { + alru_clean_complete(fctx, 0); + return; + } + + if (OCF_METADATA_LOCK_WR_TRY()) { + alru_clean_complete(fctx, 0); + return; + } + + OCF_REALLOC(&fctx->flush_data, sizeof(fctx->flush_data[0]), + fctx->clines_no, &fctx->flush_data_limit); + if (!fctx->flush_data) { + ocf_cache_log(cache, log_warn, "No memory to allocate flush " + "data for ALRU cleaning policy"); + goto end; + } + + to_clean = get_data_to_flush(fctx); + if (to_clean > 0) { + fctx->flush_perfomed = true; + ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, to_clean, + &fctx->attribs); + OCF_METADATA_UNLOCK_WR(); + return; + } + + /* Update timestamp only if there are no items to be cleaned */ + cache->device->runtime_meta->cleaning_thread_access = + env_ticks_to_secs(env_get_tick_count()); + +end: + OCF_METADATA_UNLOCK_WR(); + alru_clean_complete(fctx, 0); +} + void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl) { struct alru_flush_ctx *fctx = cache->cleaner.cleaning_policy_context; @@ -821,7 +817,7 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl) OCF_REALLOC_INIT(&fctx->flush_data, &fctx->flush_data_limit); fctx->attribs.cmpl_context = fctx; - fctx->attribs.cmpl_fn = alru_clean; + fctx->attribs.cmpl_fn = alru_clean_complete; fctx->attribs.cache_line_lock = true; fctx->attribs.do_sort = true; fctx->attribs.io_queue = cache->cleaner.io_queue; @@ -829,9 +825,7 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl) fctx->clines_no = config->flush_max_buffers; fctx->cache = cache; fctx->cmpl = cmpl; + fctx->flush_perfomed = false; - get_parts_sorted(fctx->parts, cache); - fctx->part_id = OCF_IO_CLASS_MAX; - - alru_clean(fctx, 0); + alru_clean(fctx); } diff --git a/src/cleaning/nop.c b/src/cleaning/nop.c index bfa0fab..0462141 100644 --- a/src/cleaning/nop.c +++ b/src/cleaning/nop.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2012-2018 Intel Corporation + * Copyright(c) 2012-2019 Intel Corporation * 
SPDX-License-Identifier: BSD-3-Clause-Clear */ diff --git a/src/cleaning/nop.h b/src/cleaning/nop.h index 291483f..d055acb 100644 --- a/src/cleaning/nop.h +++ b/src/cleaning/nop.h @@ -1,10 +1,9 @@ /* - * Copyright(c) 2012-2018 Intel Corporation + * Copyright(c) 2012-2019 Intel Corporation * SPDX-License-Identifier: BSD-3-Clause-Clear */ #ifndef __LAYER_CLEANING_POLICY_NOP_H__ - #define __LAYER_CLEANING_POLICY_NOP_H__ #include "cleaning.h" diff --git a/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c b/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c index 674f24b..0058cc1 100644 --- a/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c +++ b/tests/unit/tests/cleaning/cleaning.c/ocf_cleaner_run_test.c @@ -40,6 +40,11 @@ * in tested source file. */ +int __wrap_cleaning_nop_perform_cleaning(struct ocf_cache *cache, + ocf_cleaner_end_t cmpl) +{ +} + void __wrap_cleaning_policy_alru_setup(struct ocf_cache *cache) {} @@ -59,7 +64,7 @@ int __wrap_cleaning_policy_acp_initialize(struct ocf_cache *cache, void __wrap_cleaning_policy_acp_deinitialize(struct ocf_cache *cache){} int __wrap_cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache, - uint32_t io_queue){} + ocf_cleaner_end_t cmpl){} void __wrap_cleaning_policy_acp_init_cache_block(struct ocf_cache *cache, uint32_t cache_line){}
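
The first patch establishes a contract: ocf_cleaner_run() now invokes perform_cleaning() unconditionally, and only the completion it passes in (ocf_cleaner_run_complete) releases the queue reference taken with ocf_queue_get(), so a policy that never calls the completion would leak that reference. The standalone C sketch below models that contract with stand-in types; queue_model, cleaner_model and the *_model functions are illustrative names only, not OCF symbols, and the sketch is not part of the patches.

/*
 * Minimal model of the "every cleaning policy must call the completion"
 * contract. All *_model names are stand-ins, not OCF code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct queue_model { int refcount; };
struct cleaner_model { struct queue_model *io_queue; };

typedef void (*cleaner_end_model_t)(struct cleaner_model *c, uint32_t interval_ms);

/* Plays the role of ocf_cleaner_run_complete(): report the interval,
 * then drop the queue reference (ocf_queue_put() in the real code). */
static void cleaner_run_complete_model(struct cleaner_model *c, uint32_t interval_ms)
{
	printf("cleaner done, next wakeup in %u ms\n", (unsigned)interval_ms);
	c->io_queue->refcount--;
}

/* A nop-style policy: nothing to flush, but the completion still runs,
 * asking to be woken up again after a fixed back-off (SLEEP_TIME_MS). */
static void nop_perform_cleaning_model(struct cleaner_model *c, cleaner_end_model_t cmpl)
{
	cmpl(c, 1000);
}

int main(void)
{
	struct queue_model q = { .refcount = 1 };
	struct cleaner_model cleaner = { .io_queue = &q };

	q.refcount++;			/* ocf_queue_get() in ocf_cleaner_run() */
	nop_perform_cleaning_model(&cleaner, cleaner_run_complete_model);
	assert(q.refcount == 1);	/* the completion returned the reference */
	return 0;
}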
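
The second patch replaces the per-IO-class flush round trips with one collection pass bounded by a single budget (fctx->clines_no, taken from config->flush_max_buffers), after which exactly one ocf_cleaner_do_flush_data_async() request is submitted. The sketch below models only that control flow; PART_COUNT, BUDGET and the flushable[] array are made-up stand-ins for the per-partition ALRU lists, not OCF data structures.

/*
 * Minimal model of the reworked ALRU collection step: gather dirty lines
 * across all IO classes against one shared budget, then flush once.
 */
#include <stdio.h>

#define PART_COUNT	3
#define BUDGET		4	/* stand-in for config->flush_max_buffers */

/* Number of flushable (old enough, not busy) lines per IO class. */
static const int flushable[PART_COUNT] = { 1, 3, 2 };

int main(void)
{
	int collected = 0;

	/* Walk the sorted partition list from the end, the way the patched
	 * get_data_to_flush() iterates part_id, sharing one budget. */
	for (int part = PART_COUNT - 1; part >= 0 && collected < BUDGET; part--) {
		for (int i = 0; i < flushable[part] && collected < BUDGET; i++)
			collected++;	/* get_block_to_flush() in the real code */
	}

	/* One flush request for everything collected, instead of one per class. */
	printf("submitting a single flush of %d cache lines\n", collected);
	return 0;
}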