cleaning: rename recovery to populate

The function not only recovers cleaning policy metadata but is also used to
initialize data structures, so a more generic name is more accurate.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
This commit is contained in:
Michal Mielewczyk 2022-09-15 09:48:02 +02:00
parent 8faf74169a
commit c0e99e1f79
7 changed files with 53 additions and 53 deletions

View File

@ -334,9 +334,9 @@ int cleaning_policy_acp_initialize(ocf_cache_t cache, int init_metadata)
return 0;
}
#define OCF_ACP_RECOVERY_SHARDS_CNT 32
#define OCF_ACP_POPULATE_SHARDS_CNT 32
struct ocf_acp_recovery_context {
struct ocf_acp_populate_context {
ocf_cache_t cache;
struct {
@ -344,16 +344,16 @@ struct ocf_acp_recovery_context {
struct {
struct list_head chunk_list;
} bucket[ACP_MAX_BUCKETS];
} shard[OCF_ACP_RECOVERY_SHARDS_CNT];
} shard[OCF_ACP_POPULATE_SHARDS_CNT];
ocf_cleaning_recovery_end_t cmpl;
ocf_cleaning_populate_end_t cmpl;
void *priv;
};
static int ocf_acp_recovery_handle(ocf_parallelize_t parallelize,
static int ocf_acp_populate_handle(ocf_parallelize_t parallelize,
void *priv, unsigned shard_id, unsigned shards_cnt)
{
struct ocf_acp_recovery_context *context = priv;
struct ocf_acp_populate_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_cache_line_t entries = cache->device->collision_table_entries;
ocf_cache_line_t cline, portion;
@ -390,7 +390,7 @@ static int ocf_acp_recovery_handle(ocf_parallelize_t parallelize,
return 0;
}
static void ocf_acp_recovery_chunk(struct ocf_acp_recovery_context *context,
static void ocf_acp_populate_chunk(struct ocf_acp_populate_context *context,
struct acp_chunk_info *chunk)
{
ocf_cache_t cache = context->cache;
@ -400,7 +400,7 @@ static void ocf_acp_recovery_chunk(struct ocf_acp_recovery_context *context,
uint8_t bucket_id;
chunk->num_dirty = 0;
for (shard_id = 0; shard_id < OCF_ACP_RECOVERY_SHARDS_CNT; shard_id++) {
for (shard_id = 0; shard_id < OCF_ACP_POPULATE_SHARDS_CNT; shard_id++) {
chunk->num_dirty += context->shard[shard_id]
.chunk[chunk->core_id][chunk->chunk_id];
}
@ -417,10 +417,10 @@ static void ocf_acp_recovery_chunk(struct ocf_acp_recovery_context *context,
list_move_tail(&chunk->list, &bucket->chunk_list);
}
static void ocf_acp_recovery_finish(ocf_parallelize_t parallelize,
static void ocf_acp_populate_finish(ocf_parallelize_t parallelize,
void *priv, int error)
{
struct ocf_acp_recovery_context *context = priv;
struct ocf_acp_populate_context *context = priv;
ocf_cache_t cache = context->cache;
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
ocf_core_id_t core_id;
@ -435,7 +435,7 @@ static void ocf_acp_recovery_finish(ocf_parallelize_t parallelize,
num_chunks = OCF_DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
for (chunk_id = 0; chunk_id < num_chunks; chunk_id++) {
ocf_acp_recovery_chunk(context,
ocf_acp_populate_chunk(context,
&acp->chunk_info[core_id][chunk_id]);
OCF_COND_RESCHED_DEFAULT(step);
}
@ -455,14 +455,14 @@ static void ocf_acp_recovery_finish(ocf_parallelize_t parallelize,
ocf_parallelize_destroy(parallelize);
}
void cleaning_policy_acp_recovery(ocf_cache_t cache,
ocf_cleaning_recovery_end_t cmpl, void *priv)
void cleaning_policy_acp_populate(ocf_cache_t cache,
ocf_cleaning_populate_end_t cmpl, void *priv)
{
struct ocf_acp_recovery_context *context;
struct ocf_acp_populate_context *context;
ocf_parallelize_t parallelize;
ocf_core_id_t core_id;
ocf_core_t core;
unsigned shards_cnt = OCF_ACP_RECOVERY_SHARDS_CNT;
unsigned shards_cnt = OCF_ACP_POPULATE_SHARDS_CNT;
unsigned shard_id;
uint64_t core_size;
uint64_t num_chunks;
@ -470,8 +470,8 @@ void cleaning_policy_acp_recovery(ocf_cache_t cache,
int result;
result = ocf_parallelize_create(&parallelize, cache,
OCF_ACP_RECOVERY_SHARDS_CNT, sizeof(*context),
ocf_acp_recovery_handle, ocf_acp_recovery_finish);
OCF_ACP_POPULATE_SHARDS_CNT, sizeof(*context),
ocf_acp_populate_handle, ocf_acp_populate_finish);
if (result) {
cmpl(priv, result);
return;

View File

@ -1,5 +1,5 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2012-2022 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __LAYER_CLEANING_POLICY_AGGRESSIVE_H__
@ -12,8 +12,8 @@ void cleaning_policy_acp_setup(ocf_cache_t cache);
int cleaning_policy_acp_initialize(ocf_cache_t cache, int init_metadata);
void cleaning_policy_acp_recovery(ocf_cache_t cache,
ocf_cleaning_recovery_end_t cmpl, void *priv);
void cleaning_policy_acp_populate(ocf_cache_t cache,
ocf_cleaning_populate_end_t cmpl, void *priv);
void cleaning_policy_acp_deinitialize(ocf_cache_t cache);

View File

@ -445,22 +445,22 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
return 0;
}
#define OCF_ALRU_RECOVERY_SHARDS_CNT 32
#define OCF_ALRU_POPULATE_SHARDS_CNT 32
struct ocf_alru_recovery_context {
struct ocf_alru_populate_context {
ocf_cache_t cache;
struct {
struct {
ocf_cache_line_t head;
ocf_cache_line_t tail;
} part[OCF_USER_IO_CLASS_MAX];
} shard[OCF_ALRU_RECOVERY_SHARDS_CNT] __attribute__((aligned(64)));
} shard[OCF_ALRU_POPULATE_SHARDS_CNT] __attribute__((aligned(64)));
ocf_cleaning_recovery_end_t cmpl;
ocf_cleaning_populate_end_t cmpl;
void *priv;
};
static void add_alru_head_recovery(struct ocf_alru_recovery_context *context,
static void add_alru_head_populate(struct ocf_alru_populate_context *context,
unsigned shard_id, ocf_core_id_t part_id,
ocf_cache_line_t cline)
{
@ -499,10 +499,10 @@ static void add_alru_head_recovery(struct ocf_alru_recovery_context *context,
}
}
static int ocf_alru_recovery_handle(ocf_parallelize_t parallelize,
static int ocf_alru_populate_handle(ocf_parallelize_t parallelize,
void *priv, unsigned shard_id, unsigned shards_cnt)
{
struct ocf_alru_recovery_context *context = priv;
struct ocf_alru_populate_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_cache_line_t entries = cache->device->collision_table_entries;
ocf_cache_line_t terminator = entries;
@ -537,7 +537,7 @@ static int ocf_alru_recovery_handle(ocf_parallelize_t parallelize,
if (!metadata_test_dirty(cache, cline)) {
cleaning_policy_alru_init_cache_block(cache, cline);
} else {
add_alru_head_recovery(context, shard_id,
add_alru_head_populate(context, shard_id,
part_id, cline);
++part_size[part_id];
}
@ -551,10 +551,10 @@ static int ocf_alru_recovery_handle(ocf_parallelize_t parallelize,
return 0;
}
static void ocf_alru_recovery_finish(ocf_parallelize_t parallelize,
static void ocf_alru_populate_finish(ocf_parallelize_t parallelize,
void *priv, int error)
{
struct ocf_alru_recovery_context *context = priv;
struct ocf_alru_populate_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_part_id_t part_id;
ocf_cache_line_t head, tail;
@ -564,7 +564,7 @@ static void ocf_alru_recovery_finish(ocf_parallelize_t parallelize,
goto end;
for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++) {
for (shard = 0; shard < OCF_ALRU_RECOVERY_SHARDS_CNT; shard++) {
for (shard = 0; shard < OCF_ALRU_POPULATE_SHARDS_CNT; shard++) {
head = context->shard[shard].part[part_id].head;
tail = context->shard[shard].part[part_id].tail;
@ -580,10 +580,10 @@ end:
ocf_parallelize_destroy(parallelize);
}
void cleaning_policy_alru_recovery(ocf_cache_t cache,
ocf_cleaning_recovery_end_t cmpl, void *priv)
void cleaning_policy_alru_populate(ocf_cache_t cache,
ocf_cleaning_populate_end_t cmpl, void *priv)
{
struct ocf_alru_recovery_context *context;
struct ocf_alru_populate_context *context;
ocf_parallelize_t parallelize;
struct alru_cleaning_policy *part_alru;
struct ocf_user_part *user_part;
@ -591,8 +591,8 @@ void cleaning_policy_alru_recovery(ocf_cache_t cache,
int result;
result = ocf_parallelize_create(&parallelize, cache,
OCF_ALRU_RECOVERY_SHARDS_CNT, sizeof(*context),
ocf_alru_recovery_handle, ocf_alru_recovery_finish);
OCF_ALRU_POPULATE_SHARDS_CNT, sizeof(*context),
ocf_alru_populate_handle, ocf_alru_populate_finish);
if (result) {
cmpl(priv, result);
return;

View File

@ -1,5 +1,5 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2012-2022 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __LAYER_CLEANING_POLICY_ALRU_H__
@ -11,8 +11,8 @@
void cleaning_policy_alru_setup(ocf_cache_t cache);
int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata);
void cleaning_policy_alru_recovery(ocf_cache_t cache,
ocf_cleaning_recovery_end_t cmpl, void *priv);
void cleaning_policy_alru_populate(ocf_cache_t cache,
ocf_cleaning_populate_end_t cmpl, void *priv);
void cleaning_policy_alru_deinitialize(ocf_cache_t cache);
void cleaning_policy_alru_init_cache_block(ocf_cache_t cache,
uint32_t cache_line);

View File

@ -1,5 +1,5 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2012-2022 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -54,6 +54,6 @@ void ocf_kick_cleaner(ocf_cache_t cache);
void ocf_stop_cleaner(ocf_cache_t cache);
typedef void (*ocf_cleaning_recovery_end_t)(void *priv, int error);
typedef void (*ocf_cleaning_populate_end_t)(void *priv, int error);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2012-2022 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -14,8 +14,8 @@
struct cleaning_policy_ops {
void (*setup)(ocf_cache_t cache);
int (*initialize)(ocf_cache_t cache, int init_metadata);
void (*recovery)(ocf_cache_t cache,
ocf_cleaning_recovery_end_t cmpl, void *priv);
void (*populate)(ocf_cache_t cache,
ocf_cleaning_populate_end_t cmpl, void *priv);
void (*deinitialize)(ocf_cache_t cache);
int (*add_core)(ocf_cache_t cache, ocf_core_id_t core_id);
void (*remove_core)(ocf_cache_t cache, ocf_core_id_t core_id);
@ -45,7 +45,7 @@ static struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
.purge_range = cleaning_policy_alru_purge_range,
.set_hot_cache_line = cleaning_policy_alru_set_hot_cache_line,
.initialize = cleaning_policy_alru_initialize,
.recovery = cleaning_policy_alru_recovery,
.populate = cleaning_policy_alru_populate,
.deinitialize = cleaning_policy_alru_deinitialize,
.set_cleaning_param = cleaning_policy_alru_set_cleaning_param,
.get_cleaning_param = cleaning_policy_alru_get_cleaning_param,
@ -59,7 +59,7 @@ static struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
.purge_range = cleaning_policy_acp_purge_range,
.set_hot_cache_line = cleaning_policy_acp_set_hot_cache_line,
.initialize = cleaning_policy_acp_initialize,
.recovery = cleaning_policy_acp_recovery,
.populate = cleaning_policy_acp_populate,
.deinitialize = cleaning_policy_acp_deinitialize,
.set_cleaning_param = cleaning_policy_acp_set_cleaning_param,
.get_cleaning_param = cleaning_policy_acp_get_cleaning_param,
@ -91,18 +91,18 @@ static inline int ocf_cleaning_initialize(ocf_cache_t cache,
return cleaning_policy_ops[policy].initialize(cache, init_metadata);
}
static inline void ocf_cleaning_recovery(ocf_cache_t cache,
static inline void ocf_cleaning_populate(ocf_cache_t cache,
ocf_cleaning_t policy,
ocf_cleaning_recovery_end_t cmpl, void *priv)
ocf_cleaning_populate_end_t cmpl, void *priv)
{
ENV_BUG_ON(policy >= ocf_cleaning_max);
if (unlikely(!cleaning_policy_ops[policy].recovery)) {
if (unlikely(!cleaning_policy_ops[policy].populate)) {
cmpl(priv, 0);
return;
}
cleaning_policy_ops[policy].recovery(cache, cmpl, priv);
cleaning_policy_ops[policy].populate(cache, cmpl, priv);
}
static inline void ocf_cleaning_deinitialize(ocf_cache_t cache)

View File

@ -709,7 +709,7 @@ static void _ocf_mngt_load_rebuild_metadata(ocf_pipeline_t pipeline,
ocf_pipeline_next(pipeline);
}
static void _ocf_mngt_cleaning_recovery_complete(void *priv, int error)
static void _ocf_mngt_cleaning_populate_complete(void *priv, int error)
{
struct ocf_cache_attach_context *context = priv;
@ -729,8 +729,8 @@ static void _ocf_mngt_load_init_cleaning(ocf_pipeline_t pipeline,
OCF_PL_NEXT_ON_SUCCESS_RET(pipeline, result);
}
ocf_cleaning_recovery(cache, cache->cleaner.policy,
_ocf_mngt_cleaning_recovery_complete, context);
ocf_cleaning_populate(cache, cache->cleaner.policy,
_ocf_mngt_cleaning_populate_complete, context);
}
static void _ocf_mngt_init_metadata_complete(void *priv, int error)