From 55d53867cc2b8559390ed685680f482e742ab203 Mon Sep 17 00:00:00 2001
From: Michal Mielewczyk
Date: Thu, 12 Mar 2020 15:48:07 -0400
Subject: [PATCH] Complete cache stop from kthread

To enable stopping the management queue from completion context, the last
queue_put() has to be called from a separate thread. After the cache is
fully deinitialized, the thread calls module_put_and_exit() to prevent a
scenario in which module code is still being executed while there are no
references to the module left.

Signed-off-by: Michal Mielewczyk
---
 modules/cas_cache/layer_cache_management.c | 81 +++++++++++++---------
 1 file changed, 48 insertions(+), 33 deletions(-)

diff --git a/modules/cas_cache/layer_cache_management.c b/modules/cas_cache/layer_cache_management.c
index 9e61b49..9b6824c 100644
--- a/modules/cas_cache/layer_cache_management.c
+++ b/modules/cas_cache/layer_cache_management.c
@@ -362,33 +362,45 @@ static void _cache_mngt_cache_priv_deinit(ocf_cache_t cache)
 	vfree(cache_priv);
 }
 
-static int exit_instance_finish(ocf_cache_t cache, int error)
+struct _cache_mngt_stop_context {
+	struct _cache_mngt_async_context async;
+	int error;
+	ocf_cache_t cache;
+	struct work_struct work;
+};
+
+static int exit_instance_finish(void *data)
 {
 	struct cache_priv *cache_priv;
+	struct _cache_mngt_stop_context *ctx = data;
 	ocf_queue_t mngt_queue;
 	bool flush_status;
+	int result = 0;
 
-	flush_status = ocf_mngt_cache_is_dirty(cache);
-	cache_priv = ocf_cache_get_priv(cache);
+	flush_status = ocf_mngt_cache_is_dirty(ctx->cache);
+	cache_priv = ocf_cache_get_priv(ctx->cache);
 	mngt_queue = cache_priv->mngt_queue;
 
-	if (error && error != -OCF_ERR_WRITE_CACHE)
-		BUG_ON(error);
+	if (ctx->error && ctx->error != -OCF_ERR_WRITE_CACHE)
+		BUG_ON(ctx->error);
 
-	if (!error && flush_status)
-		error = -KCAS_ERR_STOPPED_DIRTY;
+	if (!ctx->error && flush_status)
+		result = -KCAS_ERR_STOPPED_DIRTY;
 
-	module_put(THIS_MODULE);
-
-	cas_cls_deinit(cache);
+	cas_cls_deinit(ctx->cache);
 
 	vfree(cache_priv);
 
-	ocf_mngt_cache_unlock(cache);
-	ocf_mngt_cache_put(cache);
+	ocf_mngt_cache_unlock(ctx->cache);
+	ocf_mngt_cache_put(ctx->cache);
 	ocf_queue_put(mngt_queue);
 
-	return error;
+	result = _cache_mngt_async_callee_set_result(&ctx->async, result);
+
+	if (result == -KCAS_ERR_WAITING_INTERRUPTED)
+		kfree(ctx);
+
+	module_put_and_exit(0);
 }
 
 struct _cache_mngt_attach_context {
@@ -406,12 +418,11 @@ struct _cache_mngt_attach_context {
 };
 
-static void cache_start_rollback(struct work_struct *work)
+static int cache_start_rollback(void *data)
 {
 	struct cache_priv *cache_priv;
 	ocf_queue_t mngt_queue = NULL;
-	struct _cache_mngt_attach_context *ctx =
-		container_of(work, struct _cache_mngt_attach_context, work);
+	struct _cache_mngt_attach_context *ctx = data;
 	ocf_cache_t cache = ctx->cache;
 	int result;
 
@@ -429,58 +440,64 @@
 	if (mngt_queue)
 		ocf_queue_put(mngt_queue);
 
-	module_put(THIS_MODULE);
-
 	result = _cache_mngt_async_callee_set_result(&ctx->async,
 			ctx->ocf_start_error);
 
 	if (result == -KCAS_ERR_WAITING_INTERRUPTED)
 		kfree(ctx);
+
+	module_put_and_exit(0);
+
+	return 0;
 }
 
 static void _cache_mngt_cache_stop_rollback_complete(ocf_cache_t cache,
 		void *priv, int error)
 {
 	struct _cache_mngt_attach_context *ctx = priv;
+	struct task_struct *thread;
 
 	if (error == -OCF_ERR_WRITE_CACHE)
 		printk(KERN_WARNING "Cannot save cache state\n");
 	else
 		BUG_ON(error);
 
-	INIT_WORK(&ctx->work, cache_start_rollback);
-	schedule_work(&ctx->work);
+	thread = kthread_run(cache_start_rollback, ctx,
+			"cas_cache_rollback_complete");
+	BUG_ON(IS_ERR(thread));
 }
 
 static void _cache_mngt_cache_stop_complete(ocf_cache_t cache, void *priv,
 		int error)
 {
-	struct _cache_mngt_async_context *context = priv;
-	int result = exit_instance_finish(cache, error);
+	struct _cache_mngt_stop_context *context = priv;
+	struct task_struct *thread;
 
+	context->error = error;
 
-	result = _cache_mngt_async_callee_set_result(context, result);
-
-	if (result == -KCAS_ERR_WAITING_INTERRUPTED)
-		kfree(context);
+	thread = kthread_run(exit_instance_finish, context,
+			"cas_cache_stop_complete");
+	BUG_ON(IS_ERR(thread));
 }
 
 static int _cache_mngt_cache_stop_sync(ocf_cache_t cache)
 {
-	struct _cache_mngt_async_context *context;
+	struct _cache_mngt_stop_context *context;
 	int result = 0;
 
 	context = env_malloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return -ENOMEM;
 
-	_cache_mngt_async_context_init(context);
+	_cache_mngt_async_context_init(&context->async);
+	context->error = 0;
+	context->cache = cache;
 
 	ocf_mngt_cache_stop(cache, _cache_mngt_cache_stop_complete, context);
-	result = wait_for_completion_interruptible(&context->cmpl);
+	result = wait_for_completion_interruptible(&context->async.cmpl);
 
-	result = _cache_mngt_async_caller_set_result(context, result);
+	result = _cache_mngt_async_caller_set_result(&context->async, result);
 
-	if (context->result != -KCAS_ERR_WAITING_INTERRUPTED)
+	if (result != -KCAS_ERR_WAITING_INTERRUPTED)
 		kfree(context);
 
 	return result;
@@ -1786,7 +1803,6 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
 {
 	struct _cache_mngt_attach_context *context;
 	ocf_cache_t cache;
-	ocf_queue_t mngt_queue = NULL;
 	struct cache_priv *cache_priv;
 	int result = 0, rollback_result = 0;
 	bool load = (cmd && cmd->init_cache == CACHE_INIT_LOAD);
@@ -1832,7 +1848,6 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
 		goto err;
 
 	cache_priv = ocf_cache_get_priv(cache);
-	mngt_queue = cache_priv->mngt_queue;
 
 	if (load) {
 		ocf_mngt_cache_load(cache, device_cfg,
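
For reference, the hand-off pattern the patch relies on can be reduced to the minimal sketch below. It is not part of the patch or of the CAS code; all demo_* names are illustrative. The idea is the same in both stop and rollback paths: the OCF completion callback runs in a context that the management queue keeps alive, so the final teardown (including the last queue put and the final module_put()) is deferred to a short-lived kernel thread started with kthread_run(), and the thread finishes with module_put_and_exit() so no module code executes after the reference count drops.

/*
 * Minimal sketch of the completion-to-kthread hand-off (assumed names,
 * kernel <= 5.16 where module_put_and_exit() exists).
 */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_stop_context {
	int error;	/* error reported by the completion callback */
};

static int demo_finish(void *data)
{
	struct demo_stop_context *ctx = data;

	/*
	 * Running in a fresh thread: it is now safe to drop the last
	 * references (e.g. the management queue) that the completion
	 * context itself depended on.
	 */
	kfree(ctx);

	/*
	 * Balance the module reference taken before the stop was started
	 * and exit the thread; no module code runs past this call.
	 */
	module_put_and_exit(0);

	return 0;	/* unreachable; satisfies the kthread prototype */
}

/*
 * Completion callback: must not tear down the queue from its own context,
 * so it only records the result and spawns the thread that will do it.
 */
static void demo_stop_complete(void *priv, int error)
{
	struct demo_stop_context *ctx = priv;
	struct task_struct *thread;

	ctx->error = error;
	thread = kthread_run(demo_finish, ctx, "demo_stop_complete");
	BUG_ON(IS_ERR(thread));
}

This only works if a module reference is already held when the stop is initiated (as it is in cas_cache, taken at cache start); module_put_and_exit() in the thread then releases it as the very last action, which is what removes the window where the refcount reaches zero while completion code is still on the stack.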