From 1a234744ef0d85bbf0dc1cb132a9bb2dbdab51aa Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 25 Mar 2019 22:10:22 -0400 Subject: [PATCH 1/3] Fix flush cache error handling Signed-off-by: Adam Rutkowski --- src/mngt/ocf_mngt_flush.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index b586f09..8842715 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -445,7 +445,7 @@ static void _ocf_mngt_flush_container( finish: env_atomic_cmpxchg(&context->fcs.error, 0, error); - end(fc); + end(context); } void _ocf_flush_container_complete(void *ctx) From 7003d38b44dc9526c84b7c64c9f04723235f4df4 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 25 Mar 2019 18:45:53 -0400 Subject: [PATCH 2/3] Encapsulate request reference counting logic in utils_refcnt In order to synchronize management operations with I/O OCF maintains in-flight request counters. For example such ref counters are used during ocf_mngt_detach to drain requests accessing cache metadata (cache requests counter) and in ocf_mngt_flush where we wait for outstanding requests sent in write back mode (dirty requests counter). Typically I/O threads increment cache/dirty counter when creating request and decrement counter on request completion. Management thread sets atomic variable to signal the start of management operation. I/O threads react to this by changing I/O requests mode so that the cache/dirty reference counter is not incremented. As a result reference counter keeps getting decremented. Management thread waits for the counter to drop to 0 and proceeds with management operation with assumption that no cache/dirty requests are in progress. This patch introduces a handy utility for requests reference counting logic. ocf_refcnt_inc / dec are used to increment/ decrement counter. 
ocf_refcnt_freeze() makes subsequent ocf_refcnt_inc() calls to return false, indicating that counter cannot be incremented at this moment. ocf_refcnt_register_zero_cb can be used to asynchronously wait for counter to drop to 0. Signed-off-by: Adam Rutkowski --- src/utils/utils_refcnt.c | 53 ++++++++++++++++++++++++++++++++++++++++ src/utils/utils_refcnt.h | 43 ++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 src/utils/utils_refcnt.c create mode 100644 src/utils/utils_refcnt.h diff --git a/src/utils/utils_refcnt.c b/src/utils/utils_refcnt.c new file mode 100644 index 0000000..045e952 --- /dev/null +++ b/src/utils/utils_refcnt.c @@ -0,0 +1,53 @@ +/* + * Copyright(c) 2019 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "../utils/utils_refcnt.h" + +void ocf_refcnt_dec(struct ocf_refcnt *rc) +{ + int val = env_atomic_dec_return(&rc->counter); + ENV_BUG_ON(val < 0); + + if (!val && env_atomic_cmpxchg(&rc->callback, 1, 0)) + rc->cb(rc->priv); +} + +bool ocf_refcnt_inc(struct ocf_refcnt *rc) +{ + if (!env_atomic_read(&rc->freeze)) { + env_atomic_inc(&rc->counter); + if (!env_atomic_read(&rc->freeze)) + return true; + else + ocf_refcnt_dec(rc); + } + + return false; +} + + +void ocf_refcnt_freeze(struct ocf_refcnt *rc) +{ + env_atomic_inc(&rc->freeze); +} + +void ocf_refcnt_register_zero_cb(struct ocf_refcnt *rc, ocf_refcnt_cb_t cb, + void *priv) +{ + ENV_BUG_ON(!env_atomic_read(&rc->freeze)); + ENV_BUG_ON(env_atomic_read(&rc->callback)); + + env_atomic_inc(&rc->counter); + rc->cb = cb; + rc->priv = priv; + env_atomic_set(&rc->callback, 1); + ocf_refcnt_dec(rc); +} + +void ocf_refcnt_unfreeze(struct ocf_refcnt *rc) +{ + int val = env_atomic_dec_return(&rc->freeze); + ENV_BUG_ON(val < 0); +} diff --git a/src/utils/utils_refcnt.h b/src/utils/utils_refcnt.h new file mode 100644 index 0000000..5df2714 --- /dev/null +++ b/src/utils/utils_refcnt.h @@ -0,0 +1,43 @@ +/* + * Copyright(c) 2019 Intel Corporation + 
* SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_REFCNT_H__ +#define __OCF_REFCNT_H__ + +#include "ocf_env.h" + +typedef void (*ocf_refcnt_cb_t)(void *priv); + +struct ocf_refcnt +{ + env_atomic counter; + env_atomic freeze; + env_atomic callback; + ocf_refcnt_cb_t cb; + void *priv; +}; + +/* Try to increment counter. Returns true if successful, false if frozen */ +bool ocf_refcnt_inc(struct ocf_refcnt *rc); + +/* Decrement reference counter */ +void ocf_refcnt_dec(struct ocf_refcnt *rc); + +/* Disallow incrementing of underlying counter - attempts to increment counter + * will be failing until ocf_refcnt_unfreeze is called. + * It's ok to call freeze multiple times, in which case counter is frozen + * until all freeze calls are offset by a corresponding unfreeze.*/ +void ocf_refcnt_freeze(struct ocf_refcnt *rc); + +/* Cancel the effect of single ocf_refcnt_freeze call */ +void ocf_refcnt_unfreeze(struct ocf_refcnt *rc); + +/* Register callback to be called when reference counter drops to 0. + * Must be called after counter is frozen. + * Cannot be called until previously registered callback had fired. 
*/ +void ocf_refcnt_register_zero_cb(struct ocf_refcnt *rc, ocf_refcnt_cb_t cb, + void *priv); + +#endif // __OCF_REFCNT_H__ From 54374639b8684f5987bb9ca5b64bc281b0173dd4 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 25 Mar 2019 19:26:31 -0400 Subject: [PATCH 3/3] Asynchronous wait for dirty requests in flush Signed-off-by: Adam Rutkowski --- src/engine/cache_engine.c | 5 +---- src/mngt/ocf_mngt_cache.c | 6 +++--- src/mngt/ocf_mngt_flush.c | 23 +++++++++++---------- src/ocf_cache_priv.h | 7 ++----- src/ocf_core.c | 42 ++++++++++----------------------------- 5 files changed, 30 insertions(+), 53 deletions(-) diff --git a/src/engine/cache_engine.c b/src/engine/cache_engine.c index 8512e17..6a23e96 100644 --- a/src/engine/cache_engine.c +++ b/src/engine/cache_engine.c @@ -20,6 +20,7 @@ #include "engine_d2c.h" #include "engine_ops.h" #include "../utils/utils_part.h" +#include "../utils/utils_refcnt.h" #include "../utils/utils_req.h" #include "../metadata/metadata.h" #include "../eviction/eviction.h" @@ -231,10 +232,6 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache, if (ocf_fallback_pt_is_on(cache)) mode = ocf_cache_mode_pt; - if (mode == ocf_cache_mode_wb && - env_atomic_read(&cache->flush_started)) - mode = ocf_cache_mode_wt; - return mode; } diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index ba62bfe..35ee562 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -17,6 +17,7 @@ #include "../utils/utils_io.h" #include "../utils/utils_cache_line.h" #include "../utils/utils_pipeline.h" +#include "../utils/utils_refcnt.h" #include "../ocf_utils.h" #include "../concurrency/ocf_concurrency.h" #include "../eviction/ops.h" @@ -1584,7 +1585,6 @@ static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline, context->flags.cleaner_started = true; } - env_waitqueue_init(&cache->pending_dirty_wq); env_waitqueue_init(&cache->pending_cache_wq); env_atomic_set(&cache->attached, 1); @@ -2419,7 +2419,7 @@ static 
void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline, struct ocf_mngt_cache_detach_context *context = priv; ocf_cache_t cache = context->cache; - ENV_BUG_ON(env_atomic_dec_return(&cache->flush_started) < 0); + ocf_refcnt_unfreeze(&cache->dirty); if (!error) { ocf_cache_log(cache, log_info, "Successfully detached\n"); @@ -2479,7 +2479,7 @@ void ocf_mngt_cache_detach(ocf_cache_t cache, context->cache = cache; /* prevent dirty io */ - env_atomic_inc(&cache->flush_started); + ocf_refcnt_freeze(&cache->dirty); ocf_pipeline_next(pipeline); } diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index 8842715..8ba52a0 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -14,6 +14,7 @@ #include "../utils/utils_cache_line.h" #include "../utils/utils_part.h" #include "../utils/utils_pipeline.h" +#include "../utils/utils_refcnt.h" #include "../utils/utils_req.h" #include "../ocf_def_priv.h" @@ -77,30 +78,31 @@ struct ocf_mngt_cache_flush_context struct flush_containers_context fcs; }; +static void _ocf_mngt_begin_flush_complete(void *priv) +{ + struct ocf_mngt_cache_flush_context *context = priv; + ocf_pipeline_next(context->pipeline); +} + static void _ocf_mngt_begin_flush(ocf_pipeline_t pipeline, void *priv, ocf_pipeline_arg_t arg) { struct ocf_mngt_cache_flush_context *context = priv; ocf_cache_t cache = context->cache; - /* FIXME: need mechanism for async waiting for outstanding flushed to + /* FIXME: need mechanism for async waiting for outstanding flushes to * finish */ env_mutex_lock(&cache->flush_mutex); - env_atomic_inc(&cache->flush_started); + ocf_refcnt_freeze(&cache->dirty); - /* FIXME: remove waitqueue from async begin */ - env_waitqueue_wait(cache->pending_dirty_wq, - !env_atomic_read(&cache->pending_dirty_requests)); - - cache->flushing_interrupted = 0; - - ocf_pipeline_next(context->pipeline); + ocf_refcnt_register_zero_cb(&cache->dirty, + _ocf_mngt_begin_flush_complete, context); } static void 
_ocf_mngt_end_flush(ocf_cache_t cache) { - ENV_BUG_ON(env_atomic_dec_return(&cache->flush_started) < 0); + ocf_refcnt_unfreeze(&cache->dirty); env_mutex_unlock(&cache->flush_mutex); } @@ -598,6 +600,7 @@ static void _ocf_mngt_cache_flush(ocf_pipeline_t pipeline, void *priv, ocf_pipeline_arg_t arg) { struct ocf_mngt_cache_flush_context *context = priv; + context->cache->flushing_interrupted = 0; _ocf_mngt_flush_all_cores(context, _ocf_mngt_flush_all_cores_complete); } diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h index 99bc000..333ad3e 100644 --- a/src/ocf_cache_priv.h +++ b/src/ocf_cache_priv.h @@ -14,6 +14,7 @@ #include "metadata/metadata_partition_structs.h" #include "metadata/metadata_updater_priv.h" #include "utils/utils_list.h" +#include "utils/utils_refcnt.h" #include "ocf_stats_priv.h" #include "cleaning/cleaning.h" #include "ocf_logger_priv.h" @@ -172,8 +173,7 @@ struct ocf_cache { env_atomic pending_cache_requests; env_waitqueue pending_cache_wq; - env_atomic pending_dirty_requests; - env_waitqueue pending_dirty_wq; + struct ocf_refcnt dirty; uint32_t fallback_pt_error_threshold; env_atomic fallback_pt_error_counter; @@ -195,9 +195,6 @@ struct ocf_cache { env_atomic flush_in_progress; - /* Prevent dirty requests. May be incremented recursively */ - env_atomic flush_started; - /* 1 if cache device attached, 0 otherwise */ env_atomic attached; diff --git a/src/ocf_core.c b/src/ocf_core.c index c32bb09..f5eefcb 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -153,30 +153,21 @@ static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume) return core_volume->core; } -static inline void inc_dirty_req_counter(struct ocf_core_io *core_io, - ocf_cache_t cache) +static inline int ocf_io_set_dirty(ocf_cache_t cache, + struct ocf_core_io *core_io) { - core_io->dirty = 1; - env_atomic_inc(&cache->pending_dirty_requests); + core_io->dirty = ocf_refcnt_inc(&cache->dirty); + return core_io->dirty ? 
0 : -EBUSY; } static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io, ocf_cache_t cache) { - int pending_dirty_req_count; - if (!core_io->dirty) return; - pending_dirty_req_count = - env_atomic_dec_return(&cache->pending_dirty_requests); - - ENV_BUG_ON(pending_dirty_req_count < 0); - core_io->dirty = 0; - - if (!pending_dirty_req_count) - env_waitqueue_wake_up(&cache->pending_dirty_wq); + ocf_refcnt_dec(&cache->dirty); } static inline int ocf_core_validate_io(struct ocf_io *io) @@ -258,15 +249,9 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode) if (cache_mode == ocf_cache_mode_none) req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); - - if (req_cache_mode == ocf_req_cache_mode_wb) { - inc_dirty_req_counter(core_io, cache); - - //Double cache mode check prevents sending WB request - //while flushing is performed. - req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); - if (req_cache_mode != ocf_req_cache_mode_wb) - dec_counter_if_req_was_dirty(core_io, cache); + if (req_cache_mode == ocf_req_cache_mode_wb && + ocf_io_set_dirty(cache, core_io)) { + req_cache_mode = ocf_req_cache_mode_wt; } core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes, @@ -332,14 +317,9 @@ int ocf_core_submit_io_fast(struct ocf_io *io) } req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); - if (req_cache_mode == ocf_req_cache_mode_wb) { - inc_dirty_req_counter(core_io, cache); - - //Double cache mode check prevents sending WB request - //while flushing is performed. - req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); - if (req_cache_mode != ocf_req_cache_mode_wb) - dec_counter_if_req_was_dirty(core_io, cache); + if (req_cache_mode == ocf_req_cache_mode_wb && + ocf_io_set_dirty(cache, core_io)) { + req_cache_mode = ocf_req_cache_mode_wt; } switch (req_cache_mode) {