Merge pull request #87 from arutk/prv-async_flush

Asynchronous flush and purge - part 2
Michał Wysoczański, 2019-03-26 12:34:24 +01:00 (committed by GitHub)
commit bf9b85150f
7 changed files with 127 additions and 54 deletions


@@ -20,6 +20,7 @@
 #include "engine_d2c.h"
 #include "engine_ops.h"
 #include "../utils/utils_part.h"
+#include "../utils/utils_refcnt.h"
 #include "../utils/utils_req.h"
 #include "../metadata/metadata.h"
 #include "../eviction/eviction.h"
@@ -231,10 +232,6 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
 	if (ocf_fallback_pt_is_on(cache))
 		mode = ocf_cache_mode_pt;
 
-	if (mode == ocf_cache_mode_wb &&
-			env_atomic_read(&cache->flush_started))
-		mode = ocf_cache_mode_wt;
-
 	return mode;
 }
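
The deleted check was the old write-back demotion path: every call to ocf_get_effective_cache_mode() silently returned write-through while flush_started was non-zero. After this change the demotion is decided at submission time instead, where tagging a request dirty and testing for a running flush collapse into one atomic step: if cache->dirty is frozen, the increment fails and the caller falls back to write-through, as in this condensed view of the core IO hunk further down:

	if (req_cache_mode == ocf_req_cache_mode_wb &&
			ocf_io_set_dirty(cache, core_io)) {
		req_cache_mode = ocf_req_cache_mode_wt;
	}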


@@ -17,6 +17,7 @@
 #include "../utils/utils_io.h"
 #include "../utils/utils_cache_line.h"
 #include "../utils/utils_pipeline.h"
+#include "../utils/utils_refcnt.h"
 #include "../ocf_utils.h"
 #include "../concurrency/ocf_concurrency.h"
 #include "../eviction/ops.h"
@@ -1584,7 +1585,6 @@ static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
 		context->flags.cleaner_started = true;
 	}
 
-	env_waitqueue_init(&cache->pending_dirty_wq);
 	env_waitqueue_init(&cache->pending_cache_wq);
 
 	env_atomic_set(&cache->attached, 1);
@@ -2419,7 +2419,7 @@ static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline,
 	struct ocf_mngt_cache_detach_context *context = priv;
 	ocf_cache_t cache = context->cache;
 
-	ENV_BUG_ON(env_atomic_dec_return(&cache->flush_started) < 0);
+	ocf_refcnt_unfreeze(&cache->dirty);
 
 	if (!error) {
 		ocf_cache_log(cache, log_info, "Successfully detached\n");
@@ -2479,7 +2479,7 @@ void ocf_mngt_cache_detach(ocf_cache_t cache,
 	context->cache = cache;
 
 	/* prevent dirty io */
-	env_atomic_inc(&cache->flush_started);
+	ocf_refcnt_freeze(&cache->dirty);
 
 	ocf_pipeline_next(pipeline);
 }
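
Detach reuses the same primitive: the recursive flush_started increment becomes a freeze of cache->dirty, and the decrement in the finish step becomes the matching unfreeze, so dirty IO is rejected for exactly the lifetime of the detach pipeline. Freezes nest, which is what the removed "may be incremented recursively" comment used to promise; a minimal sketch of that guarantee (illustrative, not from the diff):

	ocf_refcnt_freeze(&cache->dirty);	/* detach begins */
	ocf_refcnt_freeze(&cache->dirty);	/* concurrent flush begins */
	ocf_refcnt_unfreeze(&cache->dirty);	/* flush ends - still frozen */
	ocf_refcnt_unfreeze(&cache->dirty);	/* detach ends - dirty IO admitted again */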


@@ -14,6 +14,7 @@
 #include "../utils/utils_cache_line.h"
 #include "../utils/utils_part.h"
 #include "../utils/utils_pipeline.h"
+#include "../utils/utils_refcnt.h"
 #include "../utils/utils_req.h"
 #include "../ocf_def_priv.h"
@@ -77,30 +78,31 @@ struct ocf_mngt_cache_flush_context
 	struct flush_containers_context fcs;
 };
 
+static void _ocf_mngt_begin_flush_complete(void *priv)
+{
+	struct ocf_mngt_cache_flush_context *context = priv;
+
+	ocf_pipeline_next(context->pipeline);
+}
+
 static void _ocf_mngt_begin_flush(ocf_pipeline_t pipeline, void *priv,
 		ocf_pipeline_arg_t arg)
 {
 	struct ocf_mngt_cache_flush_context *context = priv;
 	ocf_cache_t cache = context->cache;
 
-	/* FIXME: need mechanism for async waiting for outstanding flushed to
+	/* FIXME: need mechanism for async waiting for outstanding flushes to
 	 * finish */
 	env_mutex_lock(&cache->flush_mutex);
 
-	env_atomic_inc(&cache->flush_started);
+	ocf_refcnt_freeze(&cache->dirty);
 
-	/* FIXME: remove waitqueue from async begin */
-	env_waitqueue_wait(cache->pending_dirty_wq,
-			!env_atomic_read(&cache->pending_dirty_requests));
-
-	cache->flushing_interrupted = 0;
-
-	ocf_pipeline_next(context->pipeline);
+	ocf_refcnt_register_zero_cb(&cache->dirty,
+			_ocf_mngt_begin_flush_complete, context);
 }
 
 static void _ocf_mngt_end_flush(ocf_cache_t cache)
 {
-	ENV_BUG_ON(env_atomic_dec_return(&cache->flush_started) < 0);
+	ocf_refcnt_unfreeze(&cache->dirty);
 
 	env_mutex_unlock(&cache->flush_mutex);
 }
@@ -445,7 +447,7 @@ static void _ocf_mngt_flush_container(
 finish:
 	env_atomic_cmpxchg(&context->fcs.error, 0, error);
-	end(fc);
+	end(context);
 }
 
 void _ocf_flush_container_complete(void *ctx)
@@ -598,6 +600,7 @@ static void _ocf_mngt_cache_flush(ocf_pipeline_t pipeline, void *priv,
 		ocf_pipeline_arg_t arg)
 {
 	struct ocf_mngt_cache_flush_context *context = priv;
 
+	context->cache->flushing_interrupted = 0;
 	_ocf_mngt_flush_all_cores(context, _ocf_mngt_flush_all_cores_complete);
 }
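
This hunk is the heart of the asynchronous conversion. The old begin step blocked the calling thread on pending_dirty_wq until the last dirty request drained; the new one freezes cache->dirty, registers a zero-callback and returns immediately, so the pipeline resumes from the callback once the final in-flight reference is dropped. The flushing_interrupted reset cannot stay in a begin step that no longer waits, so it moves into _ocf_mngt_cache_flush, the step that actually starts flushing. The resulting control flow, condensed from the hunks above:

	_ocf_mngt_begin_flush()			/* pipeline step, returns at once */
		env_mutex_lock(&cache->flush_mutex);
		ocf_refcnt_freeze(&cache->dirty);	/* no new dirty IO admitted */
		ocf_refcnt_register_zero_cb(&cache->dirty,
				_ocf_mngt_begin_flush_complete, context);

	/* ...each completing dirty request calls ocf_refcnt_dec()... */

	_ocf_mngt_begin_flush_complete()	/* invoked when the counter hits 0 */
		ocf_pipeline_next(context->pipeline);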


@@ -14,6 +14,7 @@
 #include "metadata/metadata_partition_structs.h"
 #include "metadata/metadata_updater_priv.h"
 #include "utils/utils_list.h"
+#include "utils/utils_refcnt.h"
 #include "ocf_stats_priv.h"
 #include "cleaning/cleaning.h"
 #include "ocf_logger_priv.h"
@@ -172,8 +173,7 @@ struct ocf_cache {
 	env_atomic pending_cache_requests;
 	env_waitqueue pending_cache_wq;
 
-	env_atomic pending_dirty_requests;
-	env_waitqueue pending_dirty_wq;
+	struct ocf_refcnt dirty;
 
 	uint32_t fallback_pt_error_threshold;
 	env_atomic fallback_pt_error_counter;
@@ -195,9 +195,6 @@ struct ocf_cache {
 	env_atomic flush_in_progress;
 
-	/* Prevent dirty requests. May be incremented recursively */
-	env_atomic flush_started;
-
 	/* 1 if cache device attached, 0 otherwise */
 	env_atomic attached;


@@ -153,30 +153,21 @@ static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
 	return core_volume->core;
 }
 
-static inline void inc_dirty_req_counter(struct ocf_core_io *core_io,
-		ocf_cache_t cache)
+static inline int ocf_io_set_dirty(ocf_cache_t cache,
+		struct ocf_core_io *core_io)
 {
-	core_io->dirty = 1;
-	env_atomic_inc(&cache->pending_dirty_requests);
+	core_io->dirty = ocf_refcnt_inc(&cache->dirty);
+	return core_io->dirty ? 0 : -EBUSY;
 }
 
 static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
 		ocf_cache_t cache)
 {
-	int pending_dirty_req_count;
-
 	if (!core_io->dirty)
 		return;
 
-	pending_dirty_req_count =
-		env_atomic_dec_return(&cache->pending_dirty_requests);
-
-	ENV_BUG_ON(pending_dirty_req_count < 0);
-
 	core_io->dirty = 0;
-
-	if (!pending_dirty_req_count)
-		env_waitqueue_wake_up(&cache->pending_dirty_wq);
+	ocf_refcnt_dec(&cache->dirty);
 }
 
 static inline int ocf_core_validate_io(struct ocf_io *io)
@@ -258,15 +249,9 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
 	if (cache_mode == ocf_cache_mode_none)
 		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
 
-	if (req_cache_mode == ocf_req_cache_mode_wb) {
-		inc_dirty_req_counter(core_io, cache);
-
-		//Double cache mode check prevents sending WB request
-		//while flushing is performed.
-		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
-		if (req_cache_mode != ocf_req_cache_mode_wb)
-			dec_counter_if_req_was_dirty(core_io, cache);
+	if (req_cache_mode == ocf_req_cache_mode_wb &&
+			ocf_io_set_dirty(cache, core_io)) {
+		req_cache_mode = ocf_req_cache_mode_wt;
 	}
 
 	core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
@@ -332,14 +317,9 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
 	}
 
 	req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
-	if (req_cache_mode == ocf_req_cache_mode_wb) {
-		inc_dirty_req_counter(core_io, cache);
-
-		//Double cache mode check prevents sending WB request
-		//while flushing is performed.
-		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
-		if (req_cache_mode != ocf_req_cache_mode_wb)
-			dec_counter_if_req_was_dirty(core_io, cache);
+
+	if (req_cache_mode == ocf_req_cache_mode_wb &&
+			ocf_io_set_dirty(cache, core_io)) {
+		req_cache_mode = ocf_req_cache_mode_wt;
 	}
 
 	switch (req_cache_mode) {
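
Both submission paths lose the "double cache mode check". The old sequence was racy by construction: between reading the effective cache mode and incrementing pending_dirty_requests there was a window in which a flush could start and sample the counter, and the re-read only narrowed that window. With the reference counter, admission and accounting are a single operation; the interleaving the old code had to guard against, sketched as a comment:

	/*
	 * old scheme (hence the double check):
	 *   submitter: mode = effective_mode();	sees WB, flush not started yet
	 *   flusher:   flush_started++; waits for pending_dirty_requests == 0
	 *   submitter: pending_dirty_requests++;	dirty IO slips past the flush
	 *
	 * new scheme: once ocf_refcnt_freeze() has run, ocf_refcnt_inc() fails,
	 * so ocf_io_set_dirty() returns -EBUSY and the request is demoted to
	 * write-through with no window in between.
	 */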

src/utils/utils_refcnt.c (new file, 53 lines)

@@ -0,0 +1,53 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "../utils/utils_refcnt.h"
+
+void ocf_refcnt_dec(struct ocf_refcnt *rc)
+{
+	int val = env_atomic_dec_return(&rc->counter);
+
+	ENV_BUG_ON(val < 0);
+
+	if (!val && env_atomic_cmpxchg(&rc->callback, 1, 0))
+		rc->cb(rc->priv);
+}
+
+bool ocf_refcnt_inc(struct ocf_refcnt *rc)
+{
+	if (!env_atomic_read(&rc->freeze)) {
+		env_atomic_inc(&rc->counter);
+		if (!env_atomic_read(&rc->freeze))
+			return true;
+		else
+			ocf_refcnt_dec(rc);
+	}
+
+	return false;
+}
+
+void ocf_refcnt_freeze(struct ocf_refcnt *rc)
+{
+	env_atomic_inc(&rc->freeze);
+}
+
+void ocf_refcnt_register_zero_cb(struct ocf_refcnt *rc, ocf_refcnt_cb_t cb,
+		void *priv)
+{
+	ENV_BUG_ON(!env_atomic_read(&rc->freeze));
+	ENV_BUG_ON(env_atomic_read(&rc->callback));
+
+	env_atomic_inc(&rc->counter);
+
+	rc->cb = cb;
+	rc->priv = priv;
+	env_atomic_set(&rc->callback, 1);
+
+	ocf_refcnt_dec(rc);
+}
+
+void ocf_refcnt_unfreeze(struct ocf_refcnt *rc)
+{
+	int val = env_atomic_dec_return(&rc->freeze);
+
+	ENV_BUG_ON(val < 0);
+}
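
Two details above are easy to miss: ocf_refcnt_inc() re-reads freeze after incrementing and backs the increment out if a freeze raced in (that dec may itself fire the zero-callback), and ocf_refcnt_register_zero_cb() holds a guard reference while installing the callback, so an already-drained counter fires it immediately from the final dec; the cmpxchg on the callback flag guarantees the callback runs exactly once. A self-contained C11 analogue of the same logic (illustrative only; OCF uses its env_atomic wrappers, not <stdatomic.h>):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct refcnt {
		atomic_int counter, freeze, callback;
		void (*cb)(void *priv);
		void *priv;
	};

	static void refcnt_dec(struct refcnt *rc)
	{
		int claimed = 1;

		/* the cmpxchg claims the pending callback, so it fires exactly
		 * once even if several threads race to drop the last reference */
		if (atomic_fetch_sub(&rc->counter, 1) == 1 &&
				atomic_compare_exchange_strong(&rc->callback,
						&claimed, 0))
			rc->cb(rc->priv);
	}

	static bool refcnt_inc(struct refcnt *rc)
	{
		if (atomic_load(&rc->freeze))
			return false;

		atomic_fetch_add(&rc->counter, 1);
		if (!atomic_load(&rc->freeze))
			return true;

		refcnt_dec(rc);		/* a freeze raced in: back the increment out */
		return false;
	}

	static void refcnt_register_zero_cb(struct refcnt *rc,
			void (*cb)(void *), void *priv)
	{
		atomic_fetch_add(&rc->counter, 1);	/* guard: keep counter above 0 */
		rc->cb = cb;
		rc->priv = priv;
		atomic_store(&rc->callback, 1);
		refcnt_dec(rc);		/* fires cb now if nothing else holds a ref */
	}

	static void drained(void *priv)
	{
		puts(priv);
	}

	int main(void)
	{
		struct refcnt rc = { 0 };

		refcnt_inc(&rc);			/* an in-flight dirty request */
		atomic_fetch_add(&rc.freeze, 1);	/* freeze: new refcnt_inc() fails */
		refcnt_register_zero_cb(&rc, drained, "all dirty IO drained");
		refcnt_dec(&rc);			/* request completes: cb fires */

		return 0;
	}

Compiled with any C11 compiler, this prints "all dirty IO drained" only after the last reference is dropped.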

src/utils/utils_refcnt.h (new file, 43 lines)

@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_REFCNT_H__
+#define __OCF_REFCNT_H__
+
+#include "ocf_env.h"
+
+typedef void (*ocf_refcnt_cb_t)(void *priv);
+
+struct ocf_refcnt
+{
+	env_atomic counter;
+	env_atomic freeze;
+	env_atomic callback;
+	ocf_refcnt_cb_t cb;
+	void *priv;
+};
+
+/* Try to increment the counter. Returns true if successful, false if frozen */
+bool ocf_refcnt_inc(struct ocf_refcnt *rc);
+
+/* Decrement the reference counter */
+void ocf_refcnt_dec(struct ocf_refcnt *rc);
+
+/* Disallow incrementing of the underlying counter - attempts to increment
+ * will fail until ocf_refcnt_unfreeze is called.
+ * It is OK to call freeze multiple times, in which case the counter stays
+ * frozen until every freeze call is offset by a corresponding unfreeze. */
+void ocf_refcnt_freeze(struct ocf_refcnt *rc);
+
+/* Cancel the effect of a single ocf_refcnt_freeze call */
+void ocf_refcnt_unfreeze(struct ocf_refcnt *rc);
+
+/* Register a callback to be called when the reference counter drops to 0.
+ * Must be called after the counter is frozen.
+ * Cannot be called again until the previously registered callback has fired. */
+void ocf_refcnt_register_zero_cb(struct ocf_refcnt *rc, ocf_refcnt_cb_t cb,
+		void *priv);
+
+#endif // __OCF_REFCNT_H__
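
Taken together, the whole diff converges on one idiom around this header. A usage sketch (consumer-side names such as on_drained and context are illustrative, not part of the diff):

	/* hot path: admit or demote a write-back request */
	if (ocf_refcnt_inc(&cache->dirty)) {
		/* accounted as dirty; on completion: */
		ocf_refcnt_dec(&cache->dirty);
	} else {
		/* counter frozen: flush or detach in progress, go write-through */
	}

	/* management path: drain dirty IO without blocking the caller */
	ocf_refcnt_freeze(&cache->dirty);
	ocf_refcnt_register_zero_cb(&cache->dirty, on_drained, context);
	/* ...and when the operation completes: */
	ocf_refcnt_unfreeze(&cache->dirty);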