Associate core metadata with core object

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga 2019-05-24 17:56:21 +02:00
parent 8f681e28c5
commit 711de86bff
19 changed files with 334 additions and 385 deletions
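
In short: per-core configuration and runtime metadata move out of the cache-wide arrays (cache->core_conf_meta[core_id], cache->core_runtime_meta[core_id]) into pointers carried by each struct ocf_core (core->conf_meta, core->runtime_meta), and open-coded core loops are replaced by the for_each_core()/for_each_core_all() iterators. A minimal before/after sketch of the access pattern, condensed from the diff below (illustrative only):

    /* Before: per-core metadata indexed through the cache object. */
    uint64_t core_size = cache->core_conf_meta[core_id].length;
    env_atomic_read(&cache->core_runtime_meta[core_id].dirty_clines);

    /* After: each core object carries pointers to its own metadata. */
    ocf_core_t core = ocf_cache_get_core(cache, core_id);
    uint64_t core_size = core->conf_meta->length;
    env_atomic_read(&core->runtime_meta->dirty_clines);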

View File

@@ -14,7 +14,6 @@
#include "../engine/engine_common.h"
#include "../concurrency/ocf_cache_concurrency.h"
#include "cleaning_priv.h"
#include "../utils/utils_core.h"
#define OCF_ACP_DEBUG 0
@@ -193,20 +192,23 @@ static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
static void _acp_remove_cores(struct ocf_cache *cache)
{
int i;
ocf_core_t core;
ocf_core_id_t core_id;
for_each_core(cache, i)
cleaning_policy_acp_remove_core(cache, i);
for_each_core(cache, core, core_id)
cleaning_policy_acp_remove_core(cache, core_id);
}
static int _acp_load_cores(struct ocf_cache *cache)
{
int i;
ocf_core_t core;
ocf_core_id_t core_id;
int err = 0;
for_each_core(cache, i) {
OCF_DEBUG_PARAM(cache, "loading core %i\n", i);
err = cleaning_policy_acp_add_core(cache, i);
for_each_core(cache, core, core_id) {
OCF_DEBUG_PARAM(cache, "loading core %i\n", core_id);
err = cleaning_policy_acp_add_core(cache, core_id);
if (err)
break;
}
@@ -680,7 +682,8 @@ void cleaning_policy_acp_remove_core(ocf_cache_t cache,
int cleaning_policy_acp_add_core(ocf_cache_t cache,
ocf_core_id_t core_id)
{
uint64_t core_size = cache->core_conf_meta[core_id].length;
ocf_core_t core = ocf_cache_get_core(cache, core_id);
uint64_t core_size = core->conf_meta->length;
uint64_t num_chunks = OCF_DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
int i;

View File

@@ -89,17 +89,15 @@ ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c)
static int _ocf_cleaner_run_check_dirty_inactive(ocf_cache_t cache)
{
int i;
ocf_core_t core;
ocf_core_id_t core_id;
if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state))
return 0;
for (i = 0; i < OCF_CORE_MAX; ++i) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
if (cache->core[i].opened && env_atomic_read(&(cache->
core_runtime_meta[i].dirty_clines))) {
for_each_core(cache, core, core_id) {
if (core->opened && env_atomic_read(
&core->runtime_meta->dirty_clines)) {
return 0;
}
}

View File

@@ -503,6 +503,10 @@ int ocf_metadata_hash_init(struct ocf_cache *cache,
struct ocf_metadata *metadata = &cache->metadata;
struct ocf_cache_line_settings *settings =
(struct ocf_cache_line_settings *)&metadata->settings;
struct ocf_core_meta_config *core_meta_config;
struct ocf_core_meta_runtime *core_meta_runtime;
ocf_core_t core;
ocf_core_id_t core_id;
uint32_t i = 0;
int result = 0;
@@ -525,23 +529,28 @@
if (result) {
ocf_metadata_hash_deinit(cache);
} else {
cache->conf_meta = METADATA_MEM_POOL(ctrl,
metadata_segment_sb_config);
/* Set core metadata */
cache->core_conf_meta = METADATA_MEM_POOL(ctrl,
metadata_segment_core_config);
cache->core_runtime_meta = METADATA_MEM_POOL(ctrl,
metadata_segment_core_runtime);
env_spinlock_init(&cache->metadata.lock.eviction);
env_rwlock_init(&cache->metadata.lock.status);
env_rwsem_init(&cache->metadata.lock.collision);
return result;
}
return result;
cache->conf_meta = METADATA_MEM_POOL(ctrl,
metadata_segment_sb_config);
/* Set core metadata */
core_meta_config = METADATA_MEM_POOL(ctrl,
metadata_segment_core_config);
core_meta_runtime = METADATA_MEM_POOL(ctrl,
metadata_segment_core_runtime);
for_each_core_all(cache, core, core_id) {
core->conf_meta = &core_meta_config[core_id];
core->runtime_meta = &core_meta_runtime[core_id];
}
env_spinlock_init(&cache->metadata.lock.eviction);
env_rwlock_init(&cache->metadata.lock.status);
env_rwsem_init(&cache->metadata.lock.collision);
return 0;
}
/* metadata segment data + iterators */
@@ -1276,24 +1285,23 @@ static void ocf_medatata_hash_load_superblock_post(ocf_pipeline_t pipeline,
ocf_cache_t cache = context->cache;
struct ocf_metadata_uuid *muuid;
struct ocf_volume_uuid uuid;
uint32_t i;
ocf_volume_type_t volume_type;
ocf_core_t core;
ocf_core_id_t core_id;
ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
for (i = 0; i < OCF_CORE_MAX; i++) {
if (!cache->core_conf_meta[i].added)
continue;
muuid = ocf_metadata_get_core_uuid(cache, i);
for_each_core(cache, core, core_id) {
muuid = ocf_metadata_get_core_uuid(cache, core_id);
uuid.data = muuid->data;
uuid.size = muuid->size;
volume_type = ocf_ctx_get_volume_type(cache->owner,
core->conf_meta->type);
/* Initialize core volume */
ocf_volume_init(&cache->core[i].volume,
ocf_ctx_get_volume_type(cache->owner,
cache->core_conf_meta[i].type),
&uuid, false);
ocf_volume_init(&core->volume, volume_type, &uuid, false);
}
/* Restore all dynamics items */
@@ -1401,12 +1409,13 @@ static void ocf_medatata_hash_flush_superblock_prepare(ocf_pipeline_t pipeline,
{
struct ocf_metadata_hash_context *context = priv;
ocf_cache_t cache = context->cache;
uint32_t i;
ocf_core_t core;
ocf_core_id_t core_id;
/* Synchronize core object types */
for (i = 0; i < OCF_CORE_MAX; i++) {
cache->core_conf_meta[i].type = ocf_ctx_get_volume_type_id(
cache->owner, cache->core[i].volume.type);
for_each_core(cache, core, core_id) {
core->conf_meta->type = ocf_ctx_get_volume_type_id(
cache->owner, core->volume.type);
}
ocf_pipeline_next(pipeline);
@@ -1800,10 +1809,11 @@ static void ocf_metadata_hash_load_all(ocf_cache_t cache,
ocf_pipeline_next(pipeline);
}
static void _recovery_rebuild_cline_metadata(struct ocf_cache *cache,
static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t cache_line)
{
ocf_core_t core = ocf_cache_get_core(cache, core_id);
ocf_part_id_t part_id;
ocf_cache_line_t hash_index;
@@ -1821,17 +1831,16 @@ static void _recovery_rebuild_cline_metadata(struct ocf_cache *cache,
ocf_eviction_set_hot_cache_line(cache, cache_line);
env_atomic_inc(&cache->core_runtime_meta[core_id].cached_clines);
env_atomic_inc(&cache->core_runtime_meta[core_id].
env_atomic_inc(&core->runtime_meta->cached_clines);
env_atomic_inc(&core->runtime_meta->
part_counters[part_id].cached_clines);
if (metadata_test_dirty(cache, cache_line)) {
env_atomic_inc(&cache->core_runtime_meta[core_id].
dirty_clines);
env_atomic_inc(&cache->core_runtime_meta[core_id].
env_atomic_inc(&core->runtime_meta->dirty_clines);
env_atomic_inc(&core->runtime_meta->
part_counters[part_id].dirty_clines);
env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id].
dirty_since, 0, env_get_tick_count());
env_atomic64_cmpxchg(&core->runtime_meta->dirty_since,
0, env_get_tick_count());
}
}
@@ -1962,16 +1971,18 @@ static void _ocf_metadata_hash_load_recovery_legacy(ocf_cache_t cache,
static ocf_core_id_t _ocf_metadata_hash_find_core_by_seq(
struct ocf_cache *cache, ocf_seq_no_t seq_no)
{
ocf_core_id_t i;
ocf_core_t core;
ocf_core_id_t core_id;
if (seq_no == OCF_SEQ_NO_INVALID)
return OCF_CORE_ID_INVALID;
for (i = OCF_CORE_ID_MIN; i <= OCF_CORE_ID_MAX; i++)
if (cache->core_conf_meta[i].seq_no == seq_no)
for_each_core_all(cache, core, core_id) {
if (core->conf_meta->seq_no == seq_no)
break;
}
return i;
return core_id;
}
static void ocf_metadata_hash_load_atomic_metadata_complete(

View File

@@ -282,21 +282,19 @@ static void __init_metadata_version(ocf_cache_t cache)
static void __reset_stats(ocf_cache_t cache)
{
int core_id;
ocf_core_t core;
ocf_core_id_t core_id;
ocf_part_id_t i;
for (core_id = 0; core_id < OCF_CORE_MAX; core_id++) {
env_atomic_set(&cache->core_runtime_meta[core_id].
cached_clines, 0);
env_atomic_set(&cache->core_runtime_meta[core_id].
dirty_clines, 0);
env_atomic64_set(&cache->core_runtime_meta[core_id].
dirty_since, 0);
for_each_core_all(cache, core, core_id) {
env_atomic_set(&core->runtime_meta->cached_clines, 0);
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
env_atomic_set(&cache->core_runtime_meta[core_id].
env_atomic_set(&core->runtime_meta->
part_counters[i].cached_clines, 0);
env_atomic_set(&cache->core_runtime_meta[core_id].
env_atomic_set(&core->runtime_meta->
part_counters[i].dirty_clines, 0);
}
}
@@ -365,7 +363,9 @@ static int _ocf_mngt_init_instance_add_cores(
ocf_cache_t cache = context->cache;
/* FIXME: This is a temporary hack. Remove after storing the name in meta. */
char core_name[OCF_CORE_NAME_SIZE];
int ret = -1, i;
ocf_core_t core;
ocf_core_id_t core_id;
int ret = -1;
uint64_t hd_lines = 0;
OCF_ASSERT_PLUGGED(cache);
@@ -381,17 +381,13 @@ static int _ocf_mngt_init_instance_add_cores(
cache->conf_meta->core_count = 0;
/* Check in metadata which cores were added into cache */
for (i = 0; i < OCF_CORE_MAX; i++) {
for_each_core(cache, core, core_id) {
ocf_volume_t tvolume = NULL;
ocf_core_t core = &cache->core[i];
if (!cache->core_conf_meta[i].added)
continue;
if (!cache->core[i].volume.type)
if (!core->volume.type)
goto err;
ret = snprintf(core_name, sizeof(core_name), "core%d", i);
ret = snprintf(core_name, sizeof(core_name), "core%d", core_id);
if (ret < 0 || ret >= sizeof(core_name))
goto err;
@@ -411,22 +407,23 @@ static int _ocf_mngt_init_instance_add_cores(
core->opened = true;
ocf_cache_log(cache, log_info,
"Attached core %u from pool\n", i);
"Attached core %u from pool\n",
core_id);
} else if (context->cfg.open_cores) {
ret = ocf_volume_open(&core->volume, NULL);
if (ret == -OCF_ERR_NOT_OPEN_EXC) {
ocf_cache_log(cache, log_warn,
"Cannot open core %u. "
"Cache is busy", i);
"Cache is busy", core_id);
} else if (ret) {
ocf_cache_log(cache, log_warn,
"Cannot open core %u", i);
"Cannot open core %u", core_id);
} else {
core->opened = true;
}
}
env_bit_set(i, cache->conf_meta->valid_core_bitmap);
env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
cache->conf_meta->core_count++;
core->volume.cache = cache;
@@ -444,13 +441,12 @@ static int _ocf_mngt_init_instance_add_cores(
cache->ocf_core_inactive_count++;
ocf_cache_log(cache, log_warn,
"Cannot find core %u in pool"
", core added as inactive\n", i);
", core added as inactive\n", core_id);
continue;
}
hd_lines = ocf_bytes_2_lines(cache,
ocf_volume_get_length(
&cache->core[i].volume));
ocf_volume_get_length(&core->volume));
if (hd_lines) {
ocf_cache_log(cache, log_info,
@@ -1884,19 +1880,18 @@ static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline,
{
struct ocf_mngt_cache_stop_context *context = priv;
ocf_cache_t cache = context->cache;
int i, j, no;
no = cache->conf_meta->core_count;
ocf_core_t core;
ocf_core_id_t core_id;
int no = cache->conf_meta->core_count;
/* All exported objects removed, cleaning up rest. */
for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
cache_mng_core_remove_from_cache(cache, i);
for_each_core(cache, core, core_id) {
cache_mng_core_remove_from_cache(core);
if (context->cache_attached)
cache_mng_core_remove_from_cleaning_pol(cache, i);
cache_mng_core_close(cache, i);
j++;
cache_mng_core_remove_from_cleaning_pol(core);
cache_mng_core_close(core);
if (--no == 0)
break;
}
ENV_BUG_ON(cache->conf_meta->core_count != 0);
@@ -2114,6 +2109,19 @@ void ocf_mngt_cache_save(ocf_cache_t cache,
ocf_mngt_cache_save_flush_sb_complete, context);
}
static void _cache_mng_update_initial_dirty_clines(ocf_cache_t cache)
{
ocf_core_t core;
ocf_core_id_t core_id;
for_each_core(cache, core, core_id) {
env_atomic_set(&core->runtime_meta->initial_dirty_clines,
env_atomic_read(&core->runtime_meta->
dirty_clines));
}
}
static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
{
ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode;
@@ -2130,18 +2138,8 @@ static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
cache->conf_meta->cache_mode = mode;
if (ocf_cache_mode_wb == mode_old) {
int i;
for (i = 0; i != OCF_CORE_MAX; ++i) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
env_atomic_set(&cache->core_runtime_meta[i].
initial_dirty_clines,
env_atomic_read(&cache->
core_runtime_meta[i].dirty_clines));
}
}
if (mode_old == ocf_cache_mode_wb)
_cache_mng_update_initial_dirty_clines(cache);
ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
"successful\n", ocf_get_io_iface_name(mode_old),
@@ -2300,17 +2298,16 @@ static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline,
{
struct ocf_mngt_cache_detach_context *context = priv;
ocf_cache_t cache = context->cache;
int i, j, no;
no = cache->conf_meta->core_count;
ocf_core_t core;
ocf_core_id_t core_id;
int no = cache->conf_meta->core_count;
/* remove cacheline metadata and cleaning policy meta for all cores */
for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
cache_mng_core_deinit_attached_meta(cache, i);
cache_mng_core_remove_from_cleaning_pol(cache, i);
j++;
for_each_core(cache, core, core_id) {
cache_mng_core_deinit_attached_meta(core);
cache_mng_core_remove_from_cleaning_pol(core);
if (--no == 0)
break;
}
ocf_pipeline_next(context->pipeline);

View File

@@ -17,21 +17,22 @@
#include "../engine/engine_common.h"
/* Close if opened */
int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id)
int cache_mng_core_close(ocf_core_t core)
{
if (!cache->core[core_id].opened)
if (!core->opened)
return -OCF_ERR_CORE_IN_INACTIVE_STATE;
ocf_volume_close(&cache->core[core_id].volume);
cache->core[core_id].opened = false;
ocf_volume_close(&core->volume);
core->opened = false;
return 0;
}
/* Remove core from cleaning policy */
void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
int core_id)
void cache_mng_core_remove_from_cleaning_pol(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_core_id_t core_id = ocf_core_get_id(core);
ocf_cleaning_t clean_pol_type;
OCF_METADATA_LOCK_WR();
@@ -48,16 +49,15 @@ void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
}
/* Deinitialize core metadata in attached metadata */
void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id)
void cache_mng_core_deinit_attached_meta(ocf_core_t core)
{
int retry = 1;
uint64_t core_size = 0;
ocf_cleaning_t clean_pol_type;
ocf_volume_t core;
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_core_id_t core_id = ocf_core_get_id(core);
core = &cache->core[core_id].volume;
core_size = ocf_volume_get_length(core);
core_size = ocf_volume_get_length(&core->volume);
if (!core_size)
core_size = ~0ULL;
@@ -88,31 +88,34 @@ void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id)
}
/* Mark core as removed in metadata */
void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id)
void cache_mng_core_remove_from_meta(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
OCF_METADATA_LOCK_WR();
/* Mark in metadata that this core was removed from the cache */
cache->core_conf_meta[core_id].added = false;
core->conf_meta->added = false;
/* Clear UUID of core */
ocf_mngt_core_clear_uuid_metadata(&cache->core[core_id]);
cache->core_conf_meta[core_id].seq_no = OCF_SEQ_NO_INVALID;
ocf_mngt_core_clear_uuid_metadata(core);
core->conf_meta->seq_no = OCF_SEQ_NO_INVALID;
OCF_METADATA_UNLOCK_WR();
}
/* Deinit in-memory structures related to this core */
void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id)
void cache_mng_core_remove_from_cache(ocf_core_t core)
{
env_free(cache->core[core_id].counters);
cache->core[core_id].counters = NULL;
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_core_id_t core_id = ocf_core_get_id(core);
env_free(core->counters);
core->counters = NULL;
env_bit_clear(core_id, cache->conf_meta->valid_core_bitmap);
if (!cache->core[core_id].opened &&
--cache->ocf_core_inactive_count == 0) {
if (!core->opened && --cache->ocf_core_inactive_count == 0)
env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
}
cache->conf_meta->core_count--;
}
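
The cache_mng_core_* helpers above all follow the same convention introduced by this commit: take only ocf_core_t and recover the cache and core id through accessors where still needed. A minimal sketch of that convention (the function name below is hypothetical, not part of this commit):

    /* Hypothetical helper in the style of the cache_mng_core_* functions:
     * accept the core object and derive everything else from it. */
    static void example_core_helper(ocf_core_t core)
    {
        ocf_cache_t cache = ocf_core_get_cache(core);
        ocf_core_id_t core_id = ocf_core_get_id(core);

        /* ... operate on core->conf_meta / core->runtime_meta ... */
        (void)cache;
        (void)core_id;
    }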

View File

@@ -7,16 +7,15 @@
#ifndef __OCF_MNGT_COMMON_H__
#define __OCF_MNGT_COMMON_H__
int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id);
int cache_mng_core_close(ocf_core_t core);
void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id);
void cache_mng_core_remove_from_meta(ocf_core_t core);
void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id);
void cache_mng_core_remove_from_cache(ocf_core_t core);
void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id);
void cache_mng_core_deinit_attached_meta(ocf_core_t core);
void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
int core_id);
void cache_mng_core_remove_from_cleaning_pol(ocf_core_t core);
int _ocf_cleaning_thread(void *priv);

View File

@@ -150,7 +150,7 @@ static void _ocf_mngt_cache_add_core_handle_error(
if (context->flags.counters_allocated) {
env_bit_clear(cfg->core_id,
cache->conf_meta->valid_core_bitmap);
cache->core_conf_meta[cfg->core_id].added = false;
core->conf_meta->added = false;
core->opened = false;
env_free(core->counters);
@@ -199,7 +199,7 @@ static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
uint64_t length;
int result = 0;
core = &cache->core[cfg->core_id];
core = ocf_cache_get_core(cache, cfg->core_id);
context->core = core;
volume = &core->volume;
@@ -242,7 +242,7 @@ static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
if (!length)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CORE_NOT_AVAIL);
cache->core_conf_meta[cfg->core_id].length = length;
core->conf_meta->length = length;
clean_type = cache->conf_meta->cleaning_policy_type;
if (ocf_cache_is_device_attached(cache) &&
@@ -265,30 +265,25 @@ static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
/* When adding new core to cache, reset all core/cache statistics */
ocf_core_stats_initialize(core);
env_atomic_set(&cache->core_runtime_meta[cfg->core_id].
cached_clines, 0);
env_atomic_set(&cache->core_runtime_meta[cfg->core_id].
dirty_clines, 0);
env_atomic64_set(&cache->core_runtime_meta[cfg->core_id].
dirty_since, 0);
env_atomic_set(&core->runtime_meta->cached_clines, 0);
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
/* Mark in metadata that this core was added to the cache */
env_bit_set(cfg->core_id, cache->conf_meta->valid_core_bitmap);
cache->core_conf_meta[cfg->core_id].added = true;
core->conf_meta->added = true;
core->opened = true;
/* Set default sequential cutoff parameters */
cache->core_conf_meta[cfg->core_id].seq_cutoff_policy =
ocf_seq_cutoff_policy_default;
cache->core_conf_meta[cfg->core_id].seq_cutoff_threshold =
cfg->seq_cutoff_threshold;
core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;
/* Add core sequence number for atomic metadata matching */
core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
if (core_sequence_no == OCF_SEQ_NO_INVALID)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_TOO_MANY_CORES);
cache->core_conf_meta[cfg->core_id].seq_no = core_sequence_no;
core->conf_meta->seq_no = core_sequence_no;
/* Update super-block with core device addition */
ocf_metadata_flush_superblock(cache,
@@ -661,7 +656,6 @@ static void _ocf_mngt_cache_remove_core(ocf_pipeline_t pipeline, void *priv,
struct ocf_mngt_cache_remove_core_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_core_t core = context->core;
ocf_core_id_t core_id = ocf_core_get_id(core);
ocf_core_log(core, log_debug, "Removing core\n");
@@ -669,12 +663,12 @@ static void _ocf_mngt_cache_remove_core(ocf_pipeline_t pipeline, void *priv,
/* Deinit everything*/
if (ocf_cache_is_device_attached(cache)) {
cache_mng_core_deinit_attached_meta(cache, core_id);
cache_mng_core_remove_from_cleaning_pol(cache, core_id);
cache_mng_core_deinit_attached_meta(core);
cache_mng_core_remove_from_cleaning_pol(core);
}
cache_mng_core_remove_from_meta(cache, core_id);
cache_mng_core_remove_from_cache(cache, core_id);
cache_mng_core_close(cache, core_id);
cache_mng_core_remove_from_meta(core);
cache_mng_core_remove_from_cache(core);
cache_mng_core_close(core);
/* Update super-block with core device removal */
ocf_metadata_flush_superblock(cache,
@@ -760,12 +754,11 @@ static void _ocf_mngt_cache_detach_core(ocf_pipeline_t pipeline,
struct ocf_mngt_cache_remove_core_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_core_t core = context->core;
ocf_core_id_t core_id = ocf_core_get_id(core);
int status;
ocf_core_log(core, log_debug, "Detaching core\n");
status = cache_mng_core_close(cache, core_id);
status = cache_mng_core_close(core);
if (status)
OCF_PL_FINISH_RET(pipeline, status);
@@ -893,19 +886,13 @@ int ocf_mngt_core_set_uuid(ocf_core_t core, const struct ocf_volume_uuid *uuid)
int ocf_mngt_core_set_user_metadata(ocf_core_t core, void *data, size_t size)
{
ocf_cache_t cache;
uint32_t core_id;
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(data);
cache = ocf_core_get_cache(core);
core_id = ocf_core_get_id(core);
if (size > OCF_CORE_USER_DATA_SIZE)
return -EINVAL;
env_memcpy(cache->core_conf_meta[core_id].user_data,
env_memcpy(core->conf_meta->user_data,
OCF_CORE_USER_DATA_SIZE, data, size);
return 0;
@@ -913,18 +900,13 @@ int ocf_mngt_core_set_user_metadata(ocf_core_t core, void *data, size_t size)
int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
{
uint32_t core_id;
ocf_cache_t cache;
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(data);
core_id = ocf_core_get_id(core);
cache = ocf_core_get_cache(core);
if (size > sizeof(cache->core_conf_meta[core_id].user_data))
if (size > sizeof(core->conf_meta->user_data))
return -EINVAL;
env_memcpy(data, size, cache->core_conf_meta[core_id].user_data,
env_memcpy(data, size, core->conf_meta->user_data,
OCF_CORE_USER_DATA_SIZE);
return 0;
@@ -933,10 +915,7 @@ int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
static int _cache_mng_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
{
uint32_t threshold = *(uint32_t*) cntx;
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_core_id_t core_id = ocf_core_get_id(core);
uint32_t threshold_old = cache->core_conf_meta[core_id].
seq_cutoff_threshold;
uint32_t threshold_old = core->conf_meta->seq_cutoff_threshold;
if (threshold_old == threshold) {
ocf_core_log(core, log_info,
@@ -944,7 +923,7 @@ static int _cache_mng_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
"already set\n", threshold);
return 0;
}
cache->core_conf_meta[core_id].seq_cutoff_threshold = threshold;
core->conf_meta->seq_cutoff_threshold = threshold;
ocf_core_log(core, log_info, "Changing sequential cutoff "
"threshold from %u to %u bytes successful\n",
@@ -997,9 +976,7 @@ static const char *_cache_mng_seq_cutoff_policy_get_name(
static int _cache_mng_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
{
ocf_seq_cutoff_policy policy = *(ocf_seq_cutoff_policy*) cntx;
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_core_id_t core_id = ocf_core_get_id(core);
uint32_t policy_old = cache->core_conf_meta[core_id].seq_cutoff_policy;
uint32_t policy_old = core->conf_meta->seq_cutoff_policy;
if (policy_old == policy) {
ocf_core_log(core, log_info,
@@ -1014,7 +991,7 @@ static int _cache_mng_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
return -OCF_ERR_INVAL;
}
cache->core_conf_meta[core_id].seq_cutoff_policy = policy;
core->conf_meta->seq_cutoff_policy = policy;
ocf_core_log(core, log_info,
"Changing sequential cutoff policy from %s to %s\n",

View File

@@ -107,18 +107,14 @@ static void _ocf_mngt_end_flush(ocf_cache_t cache)
bool ocf_mngt_cache_is_dirty(ocf_cache_t cache)
{
uint32_t i;
ocf_core_t core;
ocf_core_id_t core_id;
OCF_CHECK_NULL(cache);
for (i = 0; i < OCF_CORE_MAX; ++i) {
if (!cache->core_conf_meta[i].added)
continue;
if (env_atomic_read(&(cache->core_runtime_meta[i].
dirty_clines))) {
for_each_core(cache, core, core_id) {
if (env_atomic_read(&core->runtime_meta->dirty_clines))
return true;
}
}
return false;
@@ -133,16 +129,16 @@ bool ocf_mngt_cache_is_dirty(ocf_cache_t cache)
* NOTE:
* Table is not sorted.
*/
static int _ocf_mngt_get_sectors(struct ocf_cache *cache, int core_id,
static int _ocf_mngt_get_sectors(ocf_cache_t cache, ocf_core_id_t core_id,
struct flush_data **tbl, uint32_t *num)
{
ocf_core_t core = ocf_cache_get_core(cache, core_id);
uint64_t core_line;
ocf_core_id_t i_core_id;
struct flush_data *p;
uint32_t i, j, dirty = 0;
dirty = env_atomic_read(&cache->core_runtime_meta[core_id].
dirty_clines);
dirty = env_atomic_read(&core->runtime_meta->dirty_clines);
if (!dirty) {
*num = 0;
*tbl = NULL;
@@ -202,7 +198,8 @@ static int _ocf_mngt_get_flush_containers(ocf_cache_t cache,
uint32_t num;
uint64_t core_line;
ocf_core_id_t core_id;
uint32_t i, j, dirty = 0;
ocf_core_t core;
uint32_t i, j = 0, dirty = 0;
int step = 0;
/*
@@ -226,16 +223,13 @@
return -OCF_ERR_NO_MEM;
}
for (i = 0, j = 0; i < OCF_CORE_MAX; i++) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
fc[j].core_id = i;
core_revmap[i] = j;
for_each_core(cache, core, core_id) {
fc[j].core_id = core_id;
core_revmap[core_id] = j;
/* Check for dirty blocks */
fc[j].count = env_atomic_read(&cache->
core_runtime_meta[i].dirty_clines);
fc[j].count = env_atomic_read(
&core->runtime_meta->dirty_clines);
dirty += fc[j].count;
if (fc[j].count) {
@@ -600,7 +594,7 @@ static void _ocf_mngt_flush_finish(ocf_pipeline_t pipeline, void *priv,
{
struct ocf_mngt_cache_flush_context *context = priv;
ocf_cache_t cache = context->cache;
int64_t core_id;
ocf_core_t core = context->core;
if (!error) {
switch(context->op) {
@@ -610,33 +604,32 @@
break;
case flush_core:
case purge_core:
core_id = ocf_core_get_id(context->core);
ENV_BUG_ON(env_atomic_read(&cache->core_runtime_meta
[core_id].dirty_clines));
ENV_BUG_ON(env_atomic_read(
&core->runtime_meta->dirty_clines));
break;
}
}
_ocf_mngt_end_flush(context->cache);
_ocf_mngt_end_flush(cache);
switch (context->op) {
case flush_cache:
context->cmpl.flush_cache(context->cache, context->priv, error);
context->cmpl.flush_cache(cache, context->priv, error);
break;
case flush_core:
context->cmpl.flush_core(context->core, context->priv, error);
context->cmpl.flush_core(core, context->priv, error);
break;
case purge_cache:
context->cmpl.purge_cache(context->cache, context->priv, error);
context->cmpl.purge_cache(cache, context->priv, error);
break;
case purge_core:
context->cmpl.purge_core(context->core, context->priv, error);
context->cmpl.purge_core(core, context->priv, error);
break;
default:
ENV_BUG();
}
ocf_pipeline_destroy(context->pipeline);
ocf_pipeline_destroy(pipeline);
}
static struct ocf_pipeline_properties _ocf_mngt_cache_flush_pipeline_properties = {

View File

@@ -69,7 +69,6 @@ static uint32_t _calc_dirty_for(uint64_t dirty_since)
int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
{
uint32_t i;
uint32_t cache_occupancy_total = 0;
uint32_t dirty_blocks_total = 0;
uint32_t initial_dirty_blocks_total = 0;
@@ -80,6 +79,8 @@ int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
uint64_t core_dirty_since;
uint32_t dirty_blocks_inactive = 0;
uint32_t cache_occupancy_inactive = 0;
ocf_core_t core;
ocf_core_id_t core_id;
OCF_CHECK_NULL(cache);
@@ -101,50 +102,44 @@ int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
/* iterate through all possibly valid core objects, as the list of
 * valid objects may not be contiguous
 */
for (i = 0; i != OCF_CORE_MAX; ++i) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
for_each_core(cache, core, core_id) {
/* If the current dirty block count exceeds the saved initial
 * dirty block count, update the latter
*/
curr_dirty_cnt = env_atomic_read(&cache->
core_runtime_meta[i].dirty_clines);
init_dirty_cnt = env_atomic_read(&cache->
core_runtime_meta[i].initial_dirty_clines);
if (init_dirty_cnt &&
(curr_dirty_cnt > init_dirty_cnt)) {
curr_dirty_cnt = env_atomic_read(
&core->runtime_meta->dirty_clines);
init_dirty_cnt = env_atomic_read(
&core->runtime_meta->initial_dirty_clines);
if (init_dirty_cnt && (curr_dirty_cnt > init_dirty_cnt)) {
env_atomic_set(
&cache->core_runtime_meta[i].
initial_dirty_clines,
env_atomic_read(&cache->
core_runtime_meta[i].dirty_clines));
&core->runtime_meta->initial_dirty_clines,
env_atomic_read(
&core->runtime_meta->dirty_clines));
}
cache_occupancy_total += env_atomic_read(&cache->
core_runtime_meta[i].cached_clines);
cache_occupancy_total += env_atomic_read(
&core->runtime_meta->cached_clines);
dirty_blocks_total += env_atomic_read(&(cache->
core_runtime_meta[i].dirty_clines));
initial_dirty_blocks_total += env_atomic_read(&(cache->
core_runtime_meta[i].initial_dirty_clines));
dirty_blocks_total += env_atomic_read(
&core->runtime_meta->dirty_clines);
initial_dirty_blocks_total += env_atomic_read(
&core->runtime_meta->initial_dirty_clines);
if (!cache->core[i].opened) {
cache_occupancy_inactive += env_atomic_read(&cache->
core_runtime_meta[i].cached_clines);
if (!core->opened) {
cache_occupancy_inactive += env_atomic_read(
&core->runtime_meta->cached_clines);
dirty_blocks_inactive += env_atomic_read(&(cache->
core_runtime_meta[i].dirty_clines));
dirty_blocks_inactive += env_atomic_read(
&core->runtime_meta->dirty_clines);
}
core_dirty_since = env_atomic64_read(&cache->
core_runtime_meta[i].dirty_since);
core_dirty_since = env_atomic64_read(
&core->runtime_meta->dirty_since);
if (core_dirty_since) {
dirty_since = (dirty_since ?
OCF_MIN(dirty_since, core_dirty_since) :
core_dirty_since);
}
flushed_total += env_atomic_read(
&cache->core[i].flushed);
flushed_total += env_atomic_read(&core->flushed);
}
info->dirty = dirty_blocks_total;

View File

@@ -37,55 +37,6 @@ struct ocf_trace {
env_atomic64 trace_seq_ref;
};
struct ocf_metadata_uuid {
uint32_t size;
uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];
} __packed;
#define OCF_CORE_USER_DATA_SIZE 64
struct ocf_core_meta_config {
uint8_t type;
/* This bit means that the object was added to the cache */
uint32_t added : 1;
/* Core sequence number used to correlate cache lines with cores
* when recovering from atomic device */
ocf_seq_no_t seq_no;
/* Sequential cutoff threshold (in bytes) */
uint32_t seq_cutoff_threshold;
/* Sequential cutoff policy */
ocf_seq_cutoff_policy seq_cutoff_policy;
/* core object size in bytes */
uint64_t length;
uint8_t user_data[OCF_CORE_USER_DATA_SIZE];
};
struct ocf_core_meta_runtime {
/* Number of blocks from this object that are currently cached
 * on the caching device.
*/
env_atomic cached_clines;
env_atomic dirty_clines;
env_atomic initial_dirty_clines;
env_atomic64 dirty_since;
struct {
/* clines within lru list (?) */
env_atomic cached_clines;
/* dirty clines assigned to this specific partition within
* cache device
*/
env_atomic dirty_clines;
} part_counters[OCF_IO_CLASS_MAX];
};
/**
* @brief Initialization mode of cache instance
*/
@@ -193,8 +144,6 @@ struct ocf_cache {
uint16_t ocf_core_inactive_count;
struct ocf_core core[OCF_CORE_MAX];
struct ocf_core_meta_config *core_conf_meta;
struct ocf_core_meta_runtime *core_runtime_meta;
env_atomic flush_in_progress;
@@ -224,6 +173,12 @@ struct ocf_cache {
void *priv;
};
static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache,
ocf_core_id_t core_id)
{
return &cache->core[core_id];
}
#define ocf_cache_log_prefix(cache, lvl, prefix, fmt, ...) \
ocf_log_prefix(ocf_cache_get_ctx(cache), lvl, "%s" prefix, \
fmt, ocf_cache_get_name(cache), ##__VA_ARGS__)

View File

@@ -98,18 +98,12 @@ int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
{
uint32_t core_id = ocf_core_get_id(core);
ocf_cache_t cache = ocf_core_get_cache(core);
return cache->core_conf_meta[core_id].seq_cutoff_threshold;
return core->conf_meta->seq_cutoff_threshold;
}
ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
{
uint32_t core_id = ocf_core_get_id(core);
ocf_cache_t cache = ocf_core_get_cache(core);
return cache->core_conf_meta[core_id].seq_cutoff_policy;
return core->conf_meta->seq_cutoff_policy;
}
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,

View File

@@ -32,12 +32,65 @@ struct ocf_core_io {
/*!< Timestamp */
};
struct ocf_metadata_uuid {
uint32_t size;
uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];
} __packed;
#define OCF_CORE_USER_DATA_SIZE 64
struct ocf_core_meta_config {
uint8_t type;
/* This bit means that the object was added to the cache */
uint32_t added : 1;
/* Core sequence number used to correlate cache lines with cores
* when recovering from atomic device */
ocf_seq_no_t seq_no;
/* Sequential cutoff threshold (in bytes) */
uint32_t seq_cutoff_threshold;
/* Sequential cutoff policy */
ocf_seq_cutoff_policy seq_cutoff_policy;
/* core object size in bytes */
uint64_t length;
uint8_t user_data[OCF_CORE_USER_DATA_SIZE];
};
struct ocf_core_meta_runtime {
/* Number of blocks from this object that are currently cached
 * on the caching device.
*/
env_atomic cached_clines;
env_atomic dirty_clines;
env_atomic initial_dirty_clines;
env_atomic64 dirty_since;
struct {
/* clines within lru list (?) */
env_atomic cached_clines;
/* dirty clines assigned to this specific partition within
* cache device
*/
env_atomic dirty_clines;
} part_counters[OCF_IO_CLASS_MAX];
};
struct ocf_core {
char name[OCF_CORE_NAME_SIZE];
struct ocf_volume front_volume;
struct ocf_volume volume;
struct ocf_core_meta_config *conf_meta;
struct ocf_core_meta_runtime *runtime_meta;
struct {
uint64_t last;
uint64_t bytes;
@@ -58,4 +111,11 @@ int ocf_core_volume_type_init(ocf_ctx_t ctx);
void ocf_core_volume_type_deinit(ocf_ctx_t ctx);
#define for_each_core_all(_cache, _core, _id) \
for (_id = 0; _core = &_cache->core[_id], _id < OCF_CORE_MAX; _id++)
#define for_each_core(_cache, _core, _id) \
for_each_core_all(_cache, _core, _id) \
if (_core->conf_meta->added)
#endif /* __OCF_CORE_PRIV_H__ */
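
For reference, a minimal usage sketch of the new iterators (the caller below is hypothetical): for_each_core_all() visits every core slot up to OCF_CORE_MAX, while for_each_core() additionally filters on conf_meta->added, so its body runs only for cores actually added to the cache.

    /* Hypothetical caller: count the cores added to a cache. */
    static uint32_t example_count_added_cores(ocf_cache_t cache)
    {
        ocf_core_t core;
        ocf_core_id_t core_id;
        uint32_t count = 0;

        for_each_core(cache, core, core_id)
            count++;

        return count;
    }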

View File

@@ -40,11 +40,13 @@ int ocf_metadata_get_atomic_entry(ocf_cache_t cache,
ocf_cache_line_t line = ocf_atomic_addr2line(cache, addr);
uint8_t pos = ocf_atomic_addr2pos(cache, addr);
ocf_core_id_t core_id = OCF_CORE_MAX;
ocf_core_t core;
uint64_t core_line = 0;
ocf_metadata_get_core_info(cache, line, &core_id, &core_line);
core = ocf_cache_get_core(cache, core_id);
entry->core_seq_no = cache->core_conf_meta[core_id].seq_no;
entry->core_seq_no = core->conf_meta->seq_no;
entry->core_line = core_line;
entry->valid = metadata_test_valid_one(cache, line, pos);

View File

@@ -9,7 +9,6 @@
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_cache_line.h"
#include "utils/utils_core.h"
#ifdef OCF_DEBUG_STATS
static void ocf_stats_debug_init(struct ocf_counters_debug *stats)
@@ -165,10 +164,10 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
struct ocf_stats_io_class *stats)
{
ocf_cache_t cache;
uint32_t i;
uint32_t cache_occupancy_total = 0;
struct ocf_counters_part *part_stat;
ocf_core_id_t core_id;
ocf_core_t i_core;
ocf_core_id_t i_core_id;
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(stats);
@@ -176,25 +175,22 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
if (part_id < OCF_IO_CLASS_ID_MIN || part_id > OCF_IO_CLASS_ID_MAX)
return -OCF_ERR_INVAL;
core_id = ocf_core_get_id(core);
cache = ocf_core_get_cache(core);
if (!ocf_part_is_valid(&cache->user_parts[part_id]))
return -OCF_ERR_IO_CLASS_NOT_EXIST;
for_each_core(cache, i) {
for_each_core(cache, i_core, i_core_id) {
cache_occupancy_total += env_atomic_read(
&cache->core_runtime_meta[i].cached_clines);
&i_core->runtime_meta->cached_clines);
}
part_stat = &core->counters->part_counters[part_id];
stats->occupancy_clines = env_atomic_read(&cache->
core_runtime_meta[core_id].part_counters[part_id].
cached_clines);
stats->dirty_clines = env_atomic_read(&cache->
core_runtime_meta[core_id].part_counters[part_id].
dirty_clines);
stats->occupancy_clines = env_atomic_read(&core->runtime_meta->
part_counters[part_id].cached_clines);
stats->dirty_clines = env_atomic_read(&core->runtime_meta->
part_counters[part_id].dirty_clines);
stats->free_clines = cache->conf_meta->cachelines -
cache_occupancy_total;
@@ -242,7 +238,7 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
stats->seq_cutoff_policy = ocf_core_get_seq_cutoff_policy(core);
env_atomic_read(&cache->core_runtime_meta[core_id].cached_clines);
env_atomic_read(&core->runtime_meta->cached_clines);
copy_block_stats(&stats->core_volume, &core_stats->core_blocks);
copy_block_stats(&stats->cache_volume, &core_stats->cache_blocks);
@@ -267,18 +263,16 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
accum_block_stats(&stats->core, &curr->blocks);
stats->cache_occupancy += env_atomic_read(&cache->
core_runtime_meta[core_id].part_counters[i].
cached_clines);
stats->dirty += env_atomic_read(&cache->
core_runtime_meta[core_id].part_counters[i].
dirty_clines);
stats->cache_occupancy += env_atomic_read(&core->runtime_meta->
part_counters[i].cached_clines);
stats->dirty += env_atomic_read(&core->runtime_meta->
part_counters[i].dirty_clines);
}
stats->flushed = env_atomic_read(&core->flushed);
stats->dirty_for = _calc_dirty_for(
env_atomic64_read(&cache->core_runtime_meta[core_id].dirty_since));
env_atomic64_read(&core->runtime_meta->dirty_since));
return 0;
}

View File

@@ -47,15 +47,11 @@ static uint64_t _bytes4k(uint64_t bytes)
static uint64_t _get_cache_occupancy(ocf_cache_t cache)
{
uint64_t result = 0;
uint32_t i;
ocf_core_t core;
ocf_core_id_t core_id;
for (i = 0; i != OCF_CORE_MAX; ++i) {
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
result += env_atomic_read(
&cache->core_runtime_meta[i].cached_clines);
}
for_each_core(cache, core, core_id)
result += env_atomic_read(&core->runtime_meta->cached_clines);
return result;
}

View File

@@ -22,6 +22,7 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, ocf_cache_line_t line,
ocf_core_id_t core_id, ocf_part_id_t part_id)
{
ocf_core_t core = ocf_cache_get_core(cache, core_id);
bool is_valid;
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
@@ -31,9 +32,8 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
/*
* Update the cached line count for that core object
*/
env_atomic_dec(&cache->core_runtime_meta[core_id].
cached_clines);
env_atomic_dec(&cache->core_runtime_meta[core_id].
env_atomic_dec(&core->runtime_meta->cached_clines);
env_atomic_dec(&core->runtime_meta->
part_counters[part_id].cached_clines);
}
@@ -81,19 +81,15 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
ocf_core_id_t core_id = ocf_core_get_id(req->core);
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
if (metadata_set_valid_sec_changed(cache, line, start_bit, end_bit)) {
/*
* Update the cached line count for that core object
*/
env_atomic_inc(&cache->core_runtime_meta[core_id].
cached_clines);
env_atomic_inc(&cache->core_runtime_meta[core_id].
env_atomic_inc(&req->core->runtime_meta->cached_clines);
env_atomic_inc(&req->core->runtime_meta->
part_counters[part_id].cached_clines);
}
}
@@ -101,32 +97,29 @@ void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
ocf_core_id_t core_id = ocf_core_get_id(req->core);
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
if (metadata_clear_dirty_sec_changed(cache, line, start_bit, end_bit)) {
/*
* Update the dirty cache line count for that
* core object
*/
if (env_atomic_dec_and_test(&cache->core_runtime_meta[core_id].
if (env_atomic_dec_and_test(&req->core->runtime_meta->
dirty_clines)) {
/*
* If this is the last dirty cline, reset the dirty
* timestamp
*/
env_atomic64_set(&cache->core_runtime_meta[core_id].
env_atomic64_set(&req->core->runtime_meta->
dirty_since, 0);
}
/*
* decrement dirty clines statistic for given cline
*/
env_atomic_dec(&cache->core_runtime_meta[core_id].
env_atomic_dec(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].clean_cline))
@@ -141,30 +134,27 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
ocf_core_id_t core_id = ocf_core_get_id(req->core);
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
if (metadata_set_dirty_sec_changed(cache, line, start_bit, end_bit)) {
/*
* If this is the first dirty cline, set the dirty timestamp
*/
env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id].
dirty_since, 0, env_get_tick_count());
env_atomic64_cmpxchg(&req->core->runtime_meta->dirty_since,
0, env_get_tick_count());
/*
* Update the dirty cache line count for that
* core object
*/
env_atomic_inc(&cache->core_runtime_meta[core_id].dirty_clines);
env_atomic_inc(&req->core->runtime_meta->dirty_clines);
/*
* increment dirty clines statistic for given cline
*/
env_atomic_inc(&cache->core_runtime_meta[core_id].
env_atomic_inc(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].dirty_cline))

View File

@@ -1,13 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_CORE_H__
#define __UTILS_CORE_H__
#define for_each_core(cache, iter) \
for (iter = 0; iter < OCF_CORE_MAX; iter++) \
if (cache->core_conf_meta[iter].added)
#endif /* __UTILS_CORE_H__ */

View File

@@ -95,7 +95,6 @@ void ocf_part_move(struct ocf_request *req)
ocf_part_id_t id_old, id_new;
uint32_t i;
ocf_cleaning_t type = cache->conf_meta->cleaning_policy_type;
ocf_core_id_t core_id = ocf_core_get_id(req->core);
ENV_BUG_ON(type >= ocf_cleaning_max);
@@ -158,15 +157,15 @@ void ocf_part_move(struct ocf_request *req)
cleaning_policy_ops[type].
set_hot_cache_line(cache, line);
env_atomic_inc(&cache->core_runtime_meta[core_id].
env_atomic_inc(&req->core->runtime_meta->
part_counters[id_new].dirty_clines);
env_atomic_dec(&cache->core_runtime_meta[core_id].
env_atomic_dec(&req->core->runtime_meta->
part_counters[id_old].dirty_clines);
}
env_atomic_inc(&cache->core_runtime_meta[core_id].
env_atomic_inc(&req->core->runtime_meta->
part_counters[id_new].cached_clines);
env_atomic_dec(&cache->core_runtime_meta[core_id].
env_atomic_dec(&req->core->runtime_meta->
part_counters[id_old].cached_clines);
/* DONE */

View File

@@ -76,8 +76,6 @@ int __wrap_ocf_metadata_flush_superblock(struct ocf_cache *cache)
bool __wrap_env_bit_test(int nr, const volatile unsigned long *addr)
{
function_called();
return mock();
}
void __wrap_env_atomic_set(env_atomic *a, int i)
@@ -267,6 +265,11 @@ void __wrap_ocf_mngt_cache_save_finish(
{
}
void _cache_mng_update_initial_dirty_clines(ocf_cache_t cache)
{
function_called();
}
static void _cache_mng_set_cache_mode_test01(void **state)
{
ocf_cache_mode_t mode_old = -20;
@@ -348,14 +351,7 @@ static void _cache_mng_set_cache_mode_test03(void **state)
expect_function_call(__wrap_ocf_cache_mode_is_valid);
will_return(__wrap_ocf_cache_mode_is_valid, 1);
for(i = 0; i != OCF_CORE_MAX; ++i) {
expect_function_call(__wrap_env_bit_test);
will_return(__wrap_env_bit_test, 1);
expect_function_call(__wrap_env_atomic_read);
will_return(__wrap_env_atomic_read, 1);
expect_function_call(__wrap_env_atomic_set);
}
expect_function_call(_cache_mng_update_initial_dirty_clines);
expect_function_call(__wrap_ocf_log_raw);
will_return(__wrap_ocf_log_raw, 0);