Handle flush core interruption.
When flushing a core, acquiring the write lock is not necessary, so it was replaced with a read lock wherever possible. If the flush operation is interrupted by a signal, not only is the wait for its completion interrupted, but the OCF flush operation itself is also stopped.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
parent 3e033df9c6
commit a0937758b7
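The change boils down to waiting interruptibly for the OCF flush completion and, when a signal breaks that wait, telling OCF to abort the flush as well. Below is a minimal sketch of that pattern, not the module code from this commit: the names example_flush_ctx, example_flush_done and example_flush_core are hypothetical and error handling is simplified, while ocf_mngt_core_flush(), ocf_mngt_cache_flush_interrupt(), ocf_core_get_cache() and the kernel completion API are the calls that appear in the diff.

/* Hypothetical sketch of the interruptible-flush pattern; not the patch itself. */
#include <linux/completion.h>
#include <linux/errno.h>

#include "ocf/ocf.h"            /* ocf_core_t, ocf_cache_t (OCF library headers) */
#include "ocf/ocf_mngt.h"       /* ocf_mngt_core_flush(), ocf_mngt_cache_flush_interrupt() */

struct example_flush_ctx {
        struct completion cmpl;
        int result;
};

/* OCF calls this from its own context when the flush finishes or is aborted. */
static void example_flush_done(ocf_core_t core, void *priv, int error)
{
        struct example_flush_ctx *ctx = priv;

        ctx->result = error;
        complete(&ctx->cmpl);
}

static int example_flush_core(ocf_core_t core)
{
        struct example_flush_ctx ctx;
        ocf_cache_t cache = ocf_core_get_cache(core);

        init_completion(&ctx.cmpl);
        ctx.result = 0;

        ocf_mngt_core_flush(core, example_flush_done, &ctx);

        /* Interruptible wait: a pending signal makes it return early. */
        if (wait_for_completion_interruptible(&ctx.cmpl)) {
                /* Ask OCF to stop the in-flight flush too, then wait
                 * (uninterruptibly) for the completion callback so the
                 * on-stack context is not used after we return. */
                ocf_mngt_cache_flush_interrupt(cache);
                wait_for_completion(&ctx.cmpl);
                return -EINTR;
        }

        return ctx.result;
}

The patch itself appears to heap-allocate the context instead (note the kfree() guarded by -KCAS_ERR_WAITING_INTERRUPTED), so the interrupted caller can return immediately and the completion side decides who frees it; the sketch waits uninterruptibly after the interrupt only to keep the on-stack context safe.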
@@ -280,6 +280,10 @@ static void _cache_mngt_core_flush_complete(ocf_core_t core, void *priv,
 {
         struct _cache_mngt_async_context *context = priv;
         int result;
+        ocf_cache_t cache = ocf_core_get_cache(core);
+
+        if (context->compl_func)
+                context->compl_func(cache);
 
         result = _cache_mngt_async_callee_set_result(context, error);
 
@@ -287,7 +291,8 @@ static void _cache_mngt_core_flush_complete(ocf_core_t core, void *priv,
         kfree(context);
 }
 
-static int _cache_mngt_core_flush_sync(ocf_core_t core, bool interruption)
+static int _cache_mngt_core_flush_sync(ocf_core_t core, bool interruption,
+                void (*compl)(ocf_cache_t cache))
 {
         int result;
         struct _cache_mngt_async_context *context;
@@ -299,6 +304,7 @@ static int _cache_mngt_core_flush_sync(ocf_core_t core, bool interruption)
                 return -ENOMEM;
 
         _cache_mngt_async_context_init(context);
+        context->compl_func = compl;
         atomic_set(&cache_priv->flush_interrupt_enabled, interruption);
 
         ocf_mngt_core_flush(core, _cache_mngt_core_flush_complete, context);
@@ -308,6 +314,41 @@ static int _cache_mngt_core_flush_sync(ocf_core_t core, bool interruption)
 
         if (result != -KCAS_ERR_WAITING_INTERRUPTED)
                 kfree(context);
+        else if (result == -KCAS_ERR_WAITING_INTERRUPTED && interruption)
+                ocf_mngt_cache_flush_interrupt(cache);
+
+        atomic_set(&cache_priv->flush_interrupt_enabled, 1);
+
+        return result;
+}
+
+static void _cache_mngt_core_flush_uninterruptible_complete(ocf_core_t core,
+                void *priv, int error)
+{
+        struct _cache_mngt_sync_context *context = priv;
+
+        *context->result = error;
+        complete(&context->cmpl);
+}
+
+/*
+ * Since wait_for_completion() is used, hang tasks may occure if flush would
+ * take long time.
+ */
+static int _cache_mngt_core_flush_uninterruptible(ocf_core_t core)
+{
+        int result;
+        struct _cache_mngt_sync_context context;
+        ocf_cache_t cache = ocf_core_get_cache(core);
+        struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
+
+        init_completion(&context.cmpl);
+        context.result = &result;
+        atomic_set(&cache_priv->flush_interrupt_enabled, 0);
+
+        ocf_mngt_core_flush(core, _cache_mngt_core_flush_uninterruptible_complete,
+                        &context);
+        wait_for_completion(&context.cmpl);
+
         atomic_set(&cache_priv->flush_interrupt_enabled, 1);
 
@@ -507,6 +548,12 @@ static void mark_core_id_free(uint64_t *bitmap, uint16_t core_id)
         clear_bit(core_id, (unsigned long *)bitmap);
 }
 
+static void _cache_read_unlock_put_cmpl(ocf_cache_t cache)
+{
+        ocf_mngt_cache_read_unlock(cache);
+        ocf_mngt_cache_put(cache);
+}
+
 int cache_mngt_flush_object(const char *cache_name, size_t cache_name_len,
                         const char *core_name, size_t core_name_len)
 {
@@ -519,28 +566,23 @@ int cache_mngt_flush_object(const char *cache_name, size_t cache_name_len,
         if (result)
                 return result;
 
-        result = _cache_mngt_lock_sync(cache);
+        result = _cache_mngt_read_lock_sync(cache);
         if (result) {
                 ocf_mngt_cache_put(cache);
                 return result;
         }
 
         result = ocf_core_get_by_name(cache, core_name, core_name_len, &core);
-        if (result)
-                goto out;
-
-        result = _cache_mngt_core_flush_sync(core, true);
-
-out:
-        ocf_mngt_cache_unlock(cache);
-        ocf_mngt_cache_put(cache);
-        return result;
-}
-
-static void _cache_read_unlock_put_cmpl(ocf_cache_t cache)
-{
-        ocf_mngt_cache_read_unlock(cache);
-        ocf_mngt_cache_put(cache);
-}
+        if (result) {
+                ocf_mngt_cache_read_unlock(cache);
+                ocf_mngt_cache_put(cache);
+                return result;
+        }
+
+        result = _cache_mngt_core_flush_sync(core, true,
+                        _cache_read_unlock_put_cmpl);
+
+        return result;
+}
 
 int cache_mngt_flush_device(const char *cache_name, size_t name_len)
@@ -1117,8 +1159,11 @@ int _cache_mngt_remove_core_prepare(ocf_cache_t cache, ocf_core_t core,
         if (!cmd->force_no_flush) {
                 if (core_active) {
                         /* Flush core */
-                        flush_result = _cache_mngt_core_flush_sync(core,
-                                        flush_interruptible);
+                        if (flush_interruptible)
+                                flush_result = _cache_mngt_core_flush_sync(core,
+                                        flush_interruptible, _cache_read_unlock_put_cmpl);
+                        else
+                                flush_result = _cache_mngt_core_flush_uninterruptible(core);
                 } else if (!ocf_mngt_core_is_dirty(core)) {
                         /* Clean core is always "flushed" */
                         flush_result = 0;
@@ -1163,20 +1208,28 @@ int cache_mngt_remove_core_from_cache(struct kcas_remove_core *cmd)
         if (!cmd->force_no_flush) {
                 /* First check state and flush data (if requested by user)
                    under read lock */
-                result = _cache_mngt_read_lock_sync(cache);
+                /* Getting cache twice is workaround to make flush error handling easier
+                   and avoid dealing with synchronizing issues */
+                result = ocf_mngt_cache_get(cache);
                 if (result)
                         goto put;
+                result = _cache_mngt_read_lock_sync(cache);
+                if (result) {
+                        ocf_mngt_cache_put(cache);
+                        goto put;
+                }
 
                 result = get_core_by_id(cache, cmd->core_id, &core);
-                if (result < 0)
-                        goto rd_unlock;
+                if (result < 0) {
+                        ocf_mngt_cache_unlock(cache);
+                        ocf_mngt_cache_put(cache);
+                        goto put;
+                }
 
                 result = _cache_mngt_remove_core_prepare(cache, core, cmd,
                                 false);
                 if (result)
-                        goto rd_unlock;
-
-                ocf_mngt_cache_read_unlock(cache);
+                        goto put;
         }
 
         /* Acquire write lock */
@@ -1232,11 +1285,6 @@ unlock:
 put:
         ocf_mngt_cache_put(cache);
         return result;
-
-rd_unlock:
-        ocf_mngt_cache_read_unlock(cache);
-        ocf_mngt_cache_put(cache);
-        return result;
 }
 
 int cache_mngt_reset_stats(const char *cache_name, size_t cache_name_len,