Asynchronous wait for dirty requests in flush

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
Adam Rutkowski 2019-03-25 19:26:31 -04:00
parent 7003d38b44
commit 54374639b8
5 changed files with 30 additions and 53 deletions

View File

@ -20,6 +20,7 @@
#include "engine_d2c.h"
#include "engine_ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_refcnt.h"
#include "../utils/utils_req.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
@ -231,10 +232,6 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
if (ocf_fallback_pt_is_on(cache))
mode = ocf_cache_mode_pt;
if (mode == ocf_cache_mode_wb &&
env_atomic_read(&cache->flush_started))
mode = ocf_cache_mode_wt;
return mode;
}

View File

@ -17,6 +17,7 @@
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../ocf_utils.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
@ -1584,7 +1585,6 @@ static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
context->flags.cleaner_started = true;
}
env_waitqueue_init(&cache->pending_dirty_wq);
env_waitqueue_init(&cache->pending_cache_wq);
env_atomic_set(&cache->attached, 1);
@ -2419,7 +2419,7 @@ static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline,
struct ocf_mngt_cache_detach_context *context = priv;
ocf_cache_t cache = context->cache;
ENV_BUG_ON(env_atomic_dec_return(&cache->flush_started) < 0);
ocf_refcnt_unfreeze(&cache->dirty);
if (!error) {
ocf_cache_log(cache, log_info, "Successfully detached\n");
@ -2479,7 +2479,7 @@ void ocf_mngt_cache_detach(ocf_cache_t cache,
context->cache = cache;
/* prevent dirty io */
env_atomic_inc(&cache->flush_started);
ocf_refcnt_freeze(&cache->dirty);
ocf_pipeline_next(pipeline);
}

View File

@ -14,6 +14,7 @@
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../utils/utils_req.h"
#include "../ocf_def_priv.h"
@ -77,30 +78,31 @@ struct ocf_mngt_cache_flush_context
struct flush_containers_context fcs;
};
/*
 * Zero-refcount completion callback for the "dirty" refcnt, registered in
 * _ocf_mngt_begin_flush() via ocf_refcnt_register_zero_cb(). Fired once all
 * outstanding dirty requests have drained; advances the flush pipeline to
 * its next step.
 */
static void _ocf_mngt_begin_flush_complete(void *priv)
{
	struct ocf_mngt_cache_flush_context *ctx = priv;

	ocf_pipeline_next(ctx->pipeline);
}
/*
 * NOTE(review): this span is a unified-diff hunk whose +/- markers were
 * stripped by the commit-page rendering, so REMOVED (pre-change) and ADDED
 * (post-change) lines appear interleaved below. The commit replaces the
 * synchronous waitqueue-based wait for pending dirty requests with an
 * asynchronous refcnt zero-callback. Per-line old/new attribution is marked;
 * confirm against the original patch before treating this as compilable code.
 *
 * Begin a cache flush: block new dirty (WB) requests, then continue the
 * pipeline once outstanding dirty requests have drained.
 */
static void _ocf_mngt_begin_flush(ocf_pipeline_t pipeline, void *priv,
ocf_pipeline_arg_t arg)
{
struct ocf_mngt_cache_flush_context *context = priv;
ocf_cache_t cache = context->cache;
/* removed (old, typo "flushed"): */
/* FIXME: need mechanism for async waiting for outstanding flushed to
/* added (new, typo fixed to "flushes"): */
/* FIXME: need mechanism for async waiting for outstanding flushes to
 * finish */
env_mutex_lock(&cache->flush_mutex);
/* removed: old mechanism bumped flush_started to divert WB traffic to WT */
env_atomic_inc(&cache->flush_started);
/* added: freezing the refcnt makes ocf_refcnt_inc() fail for new dirty IO */
ocf_refcnt_freeze(&cache->dirty);
/* removed: synchronous wait on the pending-dirty waitqueue (old mechanism) */
/* FIXME: remove waitqueue from async begin */
env_waitqueue_wait(cache->pending_dirty_wq,
!env_atomic_read(&cache->pending_dirty_requests));
/* removed here; the new code resets this flag in _ocf_mngt_cache_flush() */
cache->flushing_interrupted = 0;
/* removed: synchronous pipeline advance (old mechanism) */
ocf_pipeline_next(context->pipeline);
/* added: async continuation — pipeline advances from the zero callback */
ocf_refcnt_register_zero_cb(&cache->dirty,
_ocf_mngt_begin_flush_complete, context);
}
/*
 * NOTE(review): diff hunk with +/- markers stripped — the ENV_BUG_ON line is
 * the REMOVED (old) counterpart of the ocf_refcnt_unfreeze line ADDED below.
 *
 * End a cache flush: re-allow dirty (WB) requests and release the flush lock
 * taken in _ocf_mngt_begin_flush().
 */
static void _ocf_mngt_end_flush(ocf_cache_t cache)
{
/* removed: old mechanism dropped the flush_started atomic */
ENV_BUG_ON(env_atomic_dec_return(&cache->flush_started) < 0);
/* added: unfreeze lets ocf_refcnt_inc() succeed again for new dirty IO */
ocf_refcnt_unfreeze(&cache->dirty);
env_mutex_unlock(&cache->flush_mutex);
}
@ -598,6 +600,7 @@ static void _ocf_mngt_cache_flush(ocf_pipeline_t pipeline, void *priv,
ocf_pipeline_arg_t arg)
{
struct ocf_mngt_cache_flush_context *context = priv;
context->cache->flushing_interrupted = 0;
_ocf_mngt_flush_all_cores(context, _ocf_mngt_flush_all_cores_complete);
}

View File

@ -14,6 +14,7 @@
#include "metadata/metadata_partition_structs.h"
#include "metadata/metadata_updater_priv.h"
#include "utils/utils_list.h"
#include "utils/utils_refcnt.h"
#include "ocf_stats_priv.h"
#include "cleaning/cleaning.h"
#include "ocf_logger_priv.h"
@ -172,8 +173,7 @@ struct ocf_cache {
env_atomic pending_cache_requests;
env_waitqueue pending_cache_wq;
env_atomic pending_dirty_requests;
env_waitqueue pending_dirty_wq;
struct ocf_refcnt dirty;
uint32_t fallback_pt_error_threshold;
env_atomic fallback_pt_error_counter;
@ -195,9 +195,6 @@ struct ocf_cache {
env_atomic flush_in_progress;
/* Prevent dirty requests. May be incremented recursively */
env_atomic flush_started;
/* 1 if cache device attached, 0 otherwise */
env_atomic attached;

View File

@ -153,30 +153,21 @@ static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
return core_volume->core;
}
/*
 * NOTE(review): diff hunk with +/- markers stripped — the first two lines are
 * the REMOVED old signature (inc_dirty_req_counter), the next two the ADDED
 * new one (ocf_io_set_dirty); the bodies are likewise interleaved old/new.
 *
 * New semantics: try to mark the IO dirty by taking a "dirty" refcount.
 * Returns 0 on success, -EBUSY if the refcnt is frozen (flush in progress),
 * in which case the caller falls back to write-through.
 */
static inline void inc_dirty_req_counter(struct ocf_core_io *core_io,
ocf_cache_t cache)
static inline int ocf_io_set_dirty(ocf_cache_t cache,
struct ocf_core_io *core_io)
{
/* removed: old code unconditionally marked dirty and bumped the counter */
core_io->dirty = 1;
env_atomic_inc(&cache->pending_dirty_requests);
/* added: ocf_refcnt_inc() fails (returns 0) while the refcnt is frozen */
core_io->dirty = ocf_refcnt_inc(&cache->dirty);
return core_io->dirty ? 0 : -EBUSY;
}
/*
 * NOTE(review): diff hunk with +/- markers stripped — the counter/waitqueue
 * body lines are REMOVED (old); the single ocf_refcnt_dec line is ADDED (new).
 *
 * Release the dirty reference taken in ocf_io_set_dirty(), if this IO held
 * one; no-op otherwise.
 */
static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
ocf_cache_t cache)
{
/* removed: old counter bookkeeping */
int pending_dirty_req_count;
if (!core_io->dirty)
return;
/* removed: decrement atomic counter and sanity-check it never goes negative */
pending_dirty_req_count =
env_atomic_dec_return(&cache->pending_dirty_requests);
ENV_BUG_ON(pending_dirty_req_count < 0);
core_io->dirty = 0;
/* removed: wake the flush thread waiting on the (now deleted) waitqueue */
if (!pending_dirty_req_count)
env_waitqueue_wake_up(&cache->pending_dirty_wq);
/* added: dropping to zero triggers the registered zero callback instead */
ocf_refcnt_dec(&cache->dirty);
}
static inline int ocf_core_validate_io(struct ocf_io *io)
@ -258,15 +249,9 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
if (cache_mode == ocf_cache_mode_none)
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode == ocf_req_cache_mode_wb) {
inc_dirty_req_counter(core_io, cache);
//Double cache mode check prevents sending WB request
//while flushing is performed.
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode != ocf_req_cache_mode_wb)
dec_counter_if_req_was_dirty(core_io, cache);
if (req_cache_mode == ocf_req_cache_mode_wb &&
ocf_io_set_dirty(cache, core_io)) {
req_cache_mode = ocf_req_cache_mode_wt;
}
core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
@ -332,14 +317,9 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
}
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode == ocf_req_cache_mode_wb) {
inc_dirty_req_counter(core_io, cache);
//Double cache mode check prevents sending WB request
//while flushing is performed.
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode != ocf_req_cache_mode_wb)
dec_counter_if_req_was_dirty(core_io, cache);
if (req_cache_mode == ocf_req_cache_mode_wb &&
ocf_io_set_dirty(cache, core_io)) {
req_cache_mode = ocf_req_cache_mode_wt;
}
switch (req_cache_mode) {