Merge pull request #847 from mmichal10/pre-refcnt-fixes

Refactors & fixes
Robert Baldyga, 2024-10-14 17:31:54 +02:00 (committed by GitHub)
commit e5a2042e0d
7 changed files with 110 additions and 59 deletions

View File

@@ -22,15 +22,6 @@ struct cache_priv {
 	ocf_queue_t io_queue;
 };

-/*
- * Helper function for error handling.
- */
-void error(char *msg)
-{
-	printf("ERROR: %s", msg);
-	exit(1);
-}
-
 /*
  * Queue ops providing interface for running queue thread in asynchronous
  * way. Optional synchronous kick callback is not provided. The stop()
@@ -318,8 +309,10 @@ void perform_workload(ocf_core_t core)
 	/* Allocate data buffer and fill it with example data */
 	data1 = ctx_data_alloc(1);
-	if (!data1)
-		error("Unable to allocate data1\n");
+	if (!data1) {
+		printf("Error: Unable to allocate data1\n");
+		return;
+	}
 	strcpy(data1->ptr, "This is some test data");

 	/* Prepare and submit write IO to the core */
 	submit_io(core, data1, 0, 512, OCF_WRITE, complete_write);
@@ -332,8 +325,10 @@ void perform_workload(ocf_core_t core)
 	/* Allocate data buffer for read */
 	data2 = ctx_data_alloc(1);
-	if (!data2)
-		error("Unable to allocate data2\n");
+	if (!data2) {
+		printf("Error: Unable to allocate data2\n");
+		return;
+	}

 	/* Prepare and submit read IO to the core */
 	submit_io(core, data2, 0, 512, OCF_READ, complete_read);
 	/* After read completes, complete_read() callback will be called,
@@ -360,21 +355,29 @@ int main(int argc, char *argv[])
 	/* Initialize completion semaphore */
 	ret = sem_init(&context.sem, 0, 0);
-	if (ret)
-		error("Unable to initialize completion semaphore\n");
+	if (ret) {
+		printf("Error: Unable to initialize completion semaphore\n");
+		goto sem_err;
+	}

 	context.error = &ret;

 	/* Initialize OCF context */
-	if (ctx_init(&ctx))
-		error("Unable to initialize context\n");
+	if (ctx_init(&ctx)) {
+		printf("Error: Unable to initialize context\n");
+		goto ctx_err;
+	}

 	/* Start cache */
-	if (initialize_cache(ctx, &cache1))
-		error("Unable to start cache\n");
+	if (initialize_cache(ctx, &cache1)) {
+		printf("Error: Unable to start cache\n");
+		goto cache_err;
+	}

 	/* Add core */
-	if (initialize_core(cache1, &core1))
-		error("Unable to add core\n");
+	if (initialize_core(cache1, &core1)) {
+		printf("Error: Unable to add core\n");
+		goto core_err;
+	}

 	/* Do some actual io operations */
 	perform_workload(core1);
@@ -382,27 +385,31 @@ int main(int argc, char *argv[])
 	/* Remove core from cache */
 	ocf_mngt_cache_remove_core(core1, remove_core_complete, &context);
 	sem_wait(&context.sem);
-	if (ret)
-		error("Unable to remove core\n");
+	if (ret) {
+		printf("Error: Unable to remove core\n");
+		goto core_err;
+	}

 	/* Stop cache */
 	ocf_mngt_cache_stop(cache1, simple_complete, &context);
 	sem_wait(&context.sem);
-	if (ret)
-		error("Unable to stop cache\n");
+	if (ret) {
+		printf("Error: Unable to stop cache\n");
+	}

+core_err:
 	cache_priv = ocf_cache_get_priv(cache1);

 	/* Put the management queue */
 	ocf_queue_put(cache_priv->mngt_queue);

 	free(cache_priv);
-
+cache_err:
 	/* Deinitialize context */
 	ctx_cleanup(ctx);
-
+ctx_err:
 	/* Destroy completion semaphore */
 	sem_destroy(&context.sem);
-
-	return 0;
+sem_err:
+	return ret;
 }
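
Note: the example now reports failures with printf() and unwinds through goto labels instead of calling an error() helper that exit()s, and main() propagates ret instead of always returning 0. A minimal standalone sketch of the same ladder, with hypothetical init_a()/init_b()/cleanup_a() standing in for the semaphore, context, cache and core setup (not part of the example itself):

#include <stdio.h>

static int init_a(void) { return 0; }	/* pretend setup step A */
static int init_b(void) { return -1; }	/* pretend setup step B that fails */
static void cleanup_a(void) { printf("cleanup A\n"); }

int main(void)
{
	int ret;

	ret = init_a();
	if (ret) {
		printf("Error: step A failed\n");
		goto a_err;
	}

	ret = init_b();
	if (ret) {
		printf("Error: step B failed\n");
		goto b_err;
	}

	/* ... actual work would go here ... */

b_err:
	cleanup_a();	/* labels double as the normal teardown path */
a_err:
	return ret;
}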

View File

@@ -139,12 +139,17 @@ static ocf_req_cb ocf_cache_mode_to_engine_cb(
 bool ocf_fallback_pt_is_on(ocf_cache_t cache)
 {
-	ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
+	int threshold = cache->fallback_pt_error_threshold;
+	int counter;

-	return (cache->fallback_pt_error_threshold !=
-			OCF_CACHE_FALLBACK_PT_INACTIVE &&
-			env_atomic_read(&cache->fallback_pt_error_counter) >=
-			cache->fallback_pt_error_threshold);
+	if (threshold == OCF_CACHE_FALLBACK_PT_INACTIVE)
+		return false;
+
+	counter = env_atomic_read(&cache->fallback_pt_error_counter);
+	ENV_BUG_ON(counter < 0);
+
+	return counter >= threshold;
 }

 void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
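
Note: the rewritten check tests the threshold before touching the error counter, so the ENV_BUG_ON() sanity check only runs when fallback pass-through is actually armed and the atomic counter is read once. A rough standalone equivalent, with plain ints and assert() standing in for env_atomic_read()/ENV_BUG_ON() and an illustrative inactive value (not the OCF constant):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define FALLBACK_PT_INACTIVE 0		/* illustrative placeholder */

static bool fallback_pt_is_on(int threshold, int counter)
{
	if (threshold == FALLBACK_PT_INACTIVE)
		return false;		/* feature disabled, counter irrelevant */

	assert(counter >= 0);		/* counter must never go negative */
	return counter >= threshold;
}

int main(void)
{
	printf("%d\n", fallback_pt_is_on(0, 5));	/* 0: inactive */
	printf("%d\n", fallback_pt_is_on(3, 5));	/* 1: threshold reached */
	return 0;
}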

View File

@@ -463,7 +463,6 @@ static inline void ocf_metadata_config_init(ocf_cache_t cache, size_t size)
 static void ocf_metadata_deinit_fixed_size(struct ocf_cache *cache)
 {
-	int result = 0;
 	uint32_t i;
 	struct ocf_metadata_ctrl *ctrl = (struct ocf_metadata_ctrl *)
@@ -481,9 +480,6 @@ static void ocf_metadata_deinit_fixed_size(struct ocf_cache *cache)
 	env_vfree(ctrl);
 	cache->metadata.priv = NULL;
-
-	if (result)
-		ENV_BUG();
 }
static struct ocf_metadata_ctrl *ocf_metadata_ctrl_init( static struct ocf_metadata_ctrl *ocf_metadata_ctrl_init(

View File

@@ -66,9 +66,6 @@ struct ocf_cache_mngt_init_params {
 		bool metadata_inited : 1;
 			/*!< Metadata is inited to valid state */

-		bool added_to_list : 1;
-			/*!< Cache is added to context list */
-
 		bool cache_locked : 1;
 			/*!< Cache has been locked */
 	} flags;
@@ -826,33 +823,59 @@ static void _ocf_mngt_load_metadata(ocf_pipeline_t pipeline,
  * @brief allocate memory for new cache, add it to cache queue, set initial
  * values and running state
  */
-static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)
+static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params,
+		char *new_cache_name)
 {
 	ocf_cache_t cache = env_vzalloc(sizeof(*cache));
 	int result;

-	if (!cache)
+	if (!cache) {
+		ocf_log(params->ctx, log_err, "Failed to allocate cache %s\n",
+				new_cache_name);
 		return -OCF_ERR_NO_MEM;
+	}

 	if (ocf_mngt_cache_lock_init(cache)) {
+		ocf_log(params->ctx, log_err,
+				"Failed to allocate cache %s lock\n",
+				new_cache_name);
 		result = -OCF_ERR_NO_MEM;
 		goto alloc_err;
 	}

 	/* Lock cache during setup - this trylock should always succeed */
-	ENV_BUG_ON(ocf_mngt_cache_trylock(cache));
+	result = ocf_mngt_cache_trylock(cache);
+	if (result) {
+		ocf_log(params->ctx, log_crit,
+				"Failed to lock the newly created cache %s\n",
+				new_cache_name);
+		goto lock_init_err;
+	}

 	if (env_mutex_init(&cache->flush_mutex)) {
+		ocf_log(params->ctx, log_err,
+				"Failed to allocate cache %s flush lock\n",
+				new_cache_name);
 		result = -OCF_ERR_NO_MEM;
 		goto lock_err;
 	}

 	INIT_LIST_HEAD(&cache->io_queues);
 	result = env_spinlock_init(&cache->io_queues_lock);
-	if (result)
+	if (result) {
+		ocf_log(params->ctx, log_err,
+				"Failed to allocate cache %s queue lock\n",
+				new_cache_name);
 		goto mutex_err;
+	}

-	ENV_BUG_ON(!ocf_refcnt_inc(&cache->refcnt.cache));
+	result = !ocf_refcnt_inc(&cache->refcnt.cache);
+	if (result) {
+		ocf_log(params->ctx, log_crit,
+				"Failed to increment %s refcnt\n",
+				new_cache_name);
+		goto cache_refcnt_inc_err;
+	}

 	/* start with freezed metadata ref counter to indicate detached device*/
 	ocf_refcnt_freeze(&cache->refcnt.metadata);
@@ -869,10 +892,13 @@ static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)

 	return 0;

+cache_refcnt_inc_err:
+	env_spinlock_destroy(&cache->io_queues_lock);
 mutex_err:
 	env_mutex_destroy(&cache->flush_mutex);
 lock_err:
 	ocf_mngt_cache_unlock(cache);
+lock_init_err:
 	ocf_mngt_cache_lock_deinit(cache);
 alloc_err:
 	env_vfree(cache);
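
Note: every early exit in _ocf_mngt_init_new_cache() now logs the failure and unwinds through a label, and the two new labels (lock_init_err, cache_refcnt_inc_err) keep the release order the exact reverse of the acquisition order. A generic sketch of that acquire-in-order / release-in-reverse layout, with made-up resources rather than the OCF objects:

#include <stdlib.h>

struct thing { void *res1, *res2, *res3; };

/* Acquire three resources; on failure release, in reverse order, exactly
 * the ones acquired before the failing step. */
static int thing_init(struct thing *t)
{
	t->res1 = malloc(16);
	if (!t->res1)
		return -1;

	t->res2 = malloc(16);
	if (!t->res2)
		goto res1_err;

	t->res3 = malloc(16);
	if (!t->res3)
		goto res2_err;

	return 0;

res2_err:
	free(t->res2);
res1_err:
	free(t->res1);
	return -1;
}

int main(void)
{
	struct thing t;

	if (thing_init(&t))
		return 1;

	free(t.res3);
	free(t.res2);
	free(t.res1);
	return 0;
}
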
@@ -950,7 +976,7 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param
 	ocf_log(param->ctx, log_info, "Inserting cache %s\n", cfg->name);

-	ret = _ocf_mngt_init_new_cache(param);
+	ret = _ocf_mngt_init_new_cache(param, cfg->name);
 	if (ret)
 		goto out;
@@ -1442,20 +1468,15 @@ static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
 	env_mutex_destroy(&cache->flush_mutex);

-	if (params->flags.cache_locked)
-		ocf_mngt_cache_unlock(cache);
-
 	ocf_mngt_cache_lock_deinit(cache);

 	if (params->flags.metadata_inited)
 		ocf_metadata_deinit(cache);

-	if (!params->flags.added_to_list)
-		return;
-
-	env_rmutex_lock(&ctx->lock);
-	list_del(&cache->list);
 	env_vfree(cache);
-	env_rmutex_unlock(&ctx->lock);
 }

 static void _ocf_mngt_cache_init(ocf_cache_t cache,
@@ -1493,6 +1514,8 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
 	params.metadata.promotion_policy = cfg->promotion_policy;
 	params.locked = cfg->locked;

+	ocf_ctx_get(ctx);
+
 	result = env_rmutex_lock_interruptible(&ctx->lock);
 	if (result)
 		goto _cache_mngt_init_instance_ERROR;
@@ -1501,6 +1524,8 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
 	result = _ocf_mngt_init_prepare_cache(&params, cfg);
 	if (result) {
 		env_rmutex_unlock(&ctx->lock);
+		ocf_log(ctx, log_err, "Failed to prepare cache %s\n",
+				cfg->name);
 		goto _cache_mngt_init_instance_ERROR;
 	}
@@ -1514,6 +1539,8 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
 	result = ocf_metadata_init(tmp_cache, params.metadata.line_size);
 	if (result) {
 		env_rmutex_unlock(&ctx->lock);
+		ocf_log(ctx, log_err, "Failed to initialize cache %s "
+				"metadata\n", cfg->name);
 		result = -OCF_ERR_NO_MEM;
 		goto _cache_mngt_init_instance_ERROR;
 	}
@@ -1521,20 +1548,19 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
 	result = ocf_cache_set_name(tmp_cache, cfg->name, OCF_CACHE_NAME_SIZE);
 	if (result) {
+		ocf_log(ctx, log_err, "Failed to set cache %s name\n",
+				cfg->name);
 		env_rmutex_unlock(&ctx->lock);
 		goto _cache_mngt_init_instance_ERROR;
 	}

 	list_add_tail(&tmp_cache->list, &ctx->caches);
-	params.flags.added_to_list = true;
 	env_rmutex_unlock(&ctx->lock);

 	ocf_cache_log(tmp_cache, log_debug, "Metadata initialized\n");
 	_ocf_mngt_cache_init(tmp_cache, &params);

-	ocf_ctx_get(ctx);
-
 	if (!params.locked) {
 		/* User did not request to lock cache instance after creation -
 		   unlock it here since we have acquired the lock to
@@ -1550,6 +1576,7 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
 _cache_mngt_init_instance_ERROR:
 	_ocf_mngt_init_handle_error(ctx, &params);
 	*cache = NULL;
+	ocf_ctx_put(ctx);
 	return result;
 }
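
Note: in _ocf_mngt_cache_start() the context reference is now taken up front with ocf_ctx_get() and balanced by ocf_ctx_put() on the error path, and the added_to_list flag is gone: the cache is linked into ctx->caches only once the remaining failure points are behind it, so the error handler no longer has to re-take ctx->lock and list_del() it. A small sketch of that publish-only-when-fully-built ordering, with a made-up registry list rather than the OCF types:

#include <stdlib.h>
#include <pthread.h>

struct node { struct node *next; int value; };

static struct node *registry;			/* global list of live objects */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static int node_create(int value)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;

	n->value = value;			/* all fallible setup happens first */

	/* Publish only a fully built node; an error path above never has to
	 * lock the registry and unlink anything. */
	pthread_mutex_lock(&registry_lock);
	n->next = registry;
	registry = n;
	pthread_mutex_unlock(&registry_lock);

	return 0;
}

int main(void)
{
	return node_create(42) ? 1 : 0;
}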

View File

@@ -270,8 +270,10 @@ static int _ocf_mngt_cache_trylock(ocf_cache_t cache,
 		return -OCF_ERR_CACHE_NOT_EXIST;

 	result = trylock_fn(&cache->lock);
-	if (result)
+	if (result) {
+		ocf_mngt_cache_put(cache);
 		return result;
+	}

 	if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
 		/* Cache already stopping, do not allow any operation */
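
Note: this hunk fixes a reference leak: the cache reference taken earlier in _ocf_mngt_cache_trylock() is now dropped with ocf_mngt_cache_put() when trylock_fn() fails. A minimal sketch of the same get/trylock/put balance, using pthread_mutex_trylock() and made-up obj_get()/obj_put() helpers in place of the OCF calls:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int refcnt; pthread_mutex_t lock; };

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }
static void obj_put(struct obj *o) { atomic_fetch_sub(&o->refcnt, 1); }

/* Pin the object, then try to take its lock; if the trylock fails the
 * reference has to be dropped again or it leaks. */
static int obj_trylock(struct obj *o)
{
	int result;

	obj_get(o);

	result = pthread_mutex_trylock(&o->lock);
	if (result) {
		obj_put(o);		/* undo the get on failure */
		return result;
	}

	return 0;			/* caller now holds reference + lock */
}

int main(void)
{
	struct obj o = { .refcnt = 0, .lock = PTHREAD_MUTEX_INITIALIZER };

	if (obj_trylock(&o) == 0)
		pthread_mutex_unlock(&o.lock);

	printf("refcnt: %d\n", atomic_load(&o.refcnt));
	return 0;
}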

View File

@@ -37,6 +37,7 @@ static int _ocf_pipeline_run_step(struct ocf_request *req)
 	while (true) {
 		step = &pipeline->properties->steps[pipeline->next_step];

+		ocf_cache_log(req->cache, log_debug, "PL STEP: %s\n", step->name);
 		switch (step->type) {
 		case ocf_pipeline_step_single:
 			pipeline->next_step++;

View File

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2019-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -70,6 +71,7 @@ typedef bool (*ocf_pipeline_cond_step_predicate_t)(ocf_pipeline_t pipeline,
 		void *priv, ocf_pipeline_arg_t arg);

 struct ocf_pipeline_step {
+	char *name;
 	enum ocf_pipeline_step_type type;
 	ocf_pipeline_step_hndl_t hndl;
 	ocf_pipeline_cond_step_predicate_t pred;
@@ -79,14 +81,19 @@ struct ocf_pipeline_step {
 	};
 };

+#define xstr(a) str(a)
+#define str(a) #a
+
 #define OCF_PL_STEP(_hndl) \
 	{ \
+		.name = xstr(_hndl), \
 		.type = ocf_pipeline_step_single, \
 		.hndl = _hndl, \
 	}

 #define OCF_PL_STEP_ARG_INT(_hndl, _int) \
 	{ \
+		.name = xstr(_hndl), \
 		.type = ocf_pipeline_step_single, \
 		.hndl = _hndl, \
 		.arg = { \
@@ -97,6 +104,7 @@ struct ocf_pipeline_step {

 #define OCF_PL_STEP_ARG_PTR(_hndl, _ptr) \
 	{ \
+		.name = xstr(_hndl), \
 		.type = ocf_pipeline_step_single, \
 		.hndl = _hndl, \
 		.arg = { \
@@ -107,6 +115,7 @@ struct ocf_pipeline_step {

 #define OCF_PL_STEP_FOREACH(_hndl, _args) \
 	{ \
+		.name = xstr(_hndl), \
 		.type = ocf_pipeline_step_foreach, \
 		.hndl = _hndl, \
 		.args = _args, \
@@ -114,11 +123,13 @@ struct ocf_pipeline_step {

 #define OCF_PL_STEP_TERMINATOR() \
 	{ \
+		.name = "<TERMINATOR>", \
 		.type = ocf_pipeline_step_terminator, \
 	}

 #define OCF_PL_STEP_COND(_pred, _hndl) \
 	{ \
+		.name = xstr(_hndl), \
 		.pred = _pred, \
 		.type = ocf_pipeline_step_conditional, \
 		.hndl = _hndl, \
@@ -126,6 +137,7 @@ struct ocf_pipeline_step {

 #define OCF_PL_STEP_COND_ARG_INT(_pred, _hndl, _int) \
 	{ \
+		.name = xstr(_hndl), \
 		.pred = _pred, \
 		.type = ocf_pipeline_step_conditional, \
 		.hndl = _hndl, \
@@ -137,6 +149,7 @@ struct ocf_pipeline_step {

 #define OCF_PL_STEP_COND_ARG_PTR(_pred, _hndl, _ptr) \
 	{ \
+		.name = xstr(_hndl), \
 		.pred = _pred, \
 		.type = ocf_pipeline_step_conditional, \
 		.hndl = _hndl, \
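
Note: every pipeline step macro now fills in a .name equal to its handler identifier via the classic two-level stringification pair xstr()/str(); the extra xstr() level forces macro arguments to be expanded before '#' turns them into a string literal, and _ocf_pipeline_run_step() logs that name before dispatching each step. A tiny demo of why both levels are needed (the identifiers below are made up):

#include <stdio.h>

#define xstr(a) str(a)
#define str(a) #a

#define MY_STEP_HNDL my_real_handler

int main(void)
{
	printf("%s\n", str(MY_STEP_HNDL));	/* prints "MY_STEP_HNDL" */
	printf("%s\n", xstr(MY_STEP_HNDL));	/* prints "my_real_handler" */
	return 0;
}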