Merge pull request #171 from robertbaldyga/core-metadata-core-object

Associate core metadata with core object
commit 548ca5e5ad
Michal Rakowski, 2019-05-28 15:41:48 +02:00, committed by GitHub
19 changed files with 334 additions and 385 deletions
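The theme of the change, as the hunks below show, is to stop addressing per-core metadata through cache-wide arrays (cache->core_conf_meta[core_id], cache->core_runtime_meta[core_id]) and instead hang conf_meta/runtime_meta pointers off each struct ocf_core. A minimal before/after sketch of the access pattern, assuming only the OCF internal types visible in this diff (len_old and len_new are placeholder names, not OCF identifiers):

    /* before: core metadata indexed through the cache object */
    uint64_t len_old = cache->core_conf_meta[core_id].length;
    env_atomic_inc(&cache->core_runtime_meta[core_id].cached_clines);

    /* after: each core object carries pointers to its own metadata entries */
    ocf_core_t core = ocf_cache_get_core(cache, core_id);
    uint64_t len_new = core->conf_meta->length;
    env_atomic_inc(&core->runtime_meta->cached_clines);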

---- changed file ----

@@ -14,7 +14,6 @@
 #include "../engine/engine_common.h"
 #include "../concurrency/ocf_cache_concurrency.h"
 #include "cleaning_priv.h"
-#include "../utils/utils_core.h"

 #define OCF_ACP_DEBUG 0

@@ -193,20 +192,23 @@ static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
 static void _acp_remove_cores(struct ocf_cache *cache)
 {
-    int i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

-    for_each_core(cache, i)
-        cleaning_policy_acp_remove_core(cache, i);
+    for_each_core(cache, core, core_id)
+        cleaning_policy_acp_remove_core(cache, core_id);
 }

 static int _acp_load_cores(struct ocf_cache *cache)
 {
-    int i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;
     int err = 0;

-    for_each_core(cache, i) {
-        OCF_DEBUG_PARAM(cache, "loading core %i\n", i);
-        err = cleaning_policy_acp_add_core(cache, i);
+    for_each_core(cache, core, core_id) {
+        OCF_DEBUG_PARAM(cache, "loading core %i\n", core_id);
+        err = cleaning_policy_acp_add_core(cache, core_id);
         if (err)
             break;
     }

@@ -680,7 +682,8 @@ void cleaning_policy_acp_remove_core(ocf_cache_t cache,
 int cleaning_policy_acp_add_core(ocf_cache_t cache,
         ocf_core_id_t core_id)
 {
-    uint64_t core_size = cache->core_conf_meta[core_id].length;
+    ocf_core_t core = ocf_cache_get_core(cache, core_id);
+    uint64_t core_size = core->conf_meta->length;
     uint64_t num_chunks = OCF_DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
     struct acp_context *acp = _acp_get_ctx_from_cache(cache);
     int i;

---- changed file ----

@@ -89,17 +89,15 @@ ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c)
 static int _ocf_cleaner_run_check_dirty_inactive(ocf_cache_t cache)
 {
-    int i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

     if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state))
         return 0;

-    for (i = 0; i < OCF_CORE_MAX; ++i) {
-        if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-            continue;
-        if (cache->core[i].opened && env_atomic_read(&(cache->
-                core_runtime_meta[i].dirty_clines))) {
+    for_each_core(cache, core, core_id) {
+        if (core->opened && env_atomic_read(
+                &core->runtime_meta->dirty_clines)) {
             return 0;
         }
     }

---- changed file ----

@@ -503,6 +503,10 @@ int ocf_metadata_hash_init(struct ocf_cache *cache,
     struct ocf_metadata *metadata = &cache->metadata;
     struct ocf_cache_line_settings *settings =
         (struct ocf_cache_line_settings *)&metadata->settings;
+    struct ocf_core_meta_config *core_meta_config;
+    struct ocf_core_meta_runtime *core_meta_runtime;
+    ocf_core_t core;
+    ocf_core_id_t core_id;
     uint32_t i = 0;
     int result = 0;

@@ -525,23 +529,28 @@
     if (result) {
         ocf_metadata_hash_deinit(cache);
-    } else {
-        cache->conf_meta = METADATA_MEM_POOL(ctrl,
-                metadata_segment_sb_config);
-
-        /* Set core metadata */
-        cache->core_conf_meta = METADATA_MEM_POOL(ctrl,
-                metadata_segment_core_config);
-
-        cache->core_runtime_meta = METADATA_MEM_POOL(ctrl,
-                metadata_segment_core_runtime);
-
-        env_spinlock_init(&cache->metadata.lock.eviction);
-        env_rwlock_init(&cache->metadata.lock.status);
-        env_rwsem_init(&cache->metadata.lock.collision);
+        return result;
     }

-    return result;
+    cache->conf_meta = METADATA_MEM_POOL(ctrl,
+            metadata_segment_sb_config);
+
+    /* Set core metadata */
+    core_meta_config = METADATA_MEM_POOL(ctrl,
+            metadata_segment_core_config);
+    core_meta_runtime = METADATA_MEM_POOL(ctrl,
+            metadata_segment_core_runtime);
+
+    for_each_core_all(cache, core, core_id) {
+        core->conf_meta = &core_meta_config[core_id];
+        core->runtime_meta = &core_meta_runtime[core_id];
+    }
+
+    env_spinlock_init(&cache->metadata.lock.eviction);
+    env_rwlock_init(&cache->metadata.lock.status);
+    env_rwsem_init(&cache->metadata.lock.collision);
+
+    return 0;
 }

 /* metadata segment data + iterators */

@@ -1276,24 +1285,23 @@ static void ocf_medatata_hash_load_superblock_post(ocf_pipeline_t pipeline,
     ocf_cache_t cache = context->cache;
     struct ocf_metadata_uuid *muuid;
     struct ocf_volume_uuid uuid;
-    uint32_t i;
+    ocf_volume_type_t volume_type;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

     ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
     sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);

-    for (i = 0; i < OCF_CORE_MAX; i++) {
-        if (!cache->core_conf_meta[i].added)
-            continue;
-
-        muuid = ocf_metadata_get_core_uuid(cache, i);
+    for_each_core(cache, core, core_id) {
+        muuid = ocf_metadata_get_core_uuid(cache, core_id);
         uuid.data = muuid->data;
         uuid.size = muuid->size;

+        volume_type = ocf_ctx_get_volume_type(cache->owner,
+                core->conf_meta->type);
+
         /* Initialize core volume */
-        ocf_volume_init(&cache->core[i].volume,
-                ocf_ctx_get_volume_type(cache->owner,
-                    cache->core_conf_meta[i].type),
-                &uuid, false);
+        ocf_volume_init(&core->volume, volume_type, &uuid, false);
     }

     /* Restore all dynamics items */

@@ -1401,12 +1409,13 @@ static void ocf_medatata_hash_flush_superblock_prepare(ocf_pipeline_t pipeline,
 {
     struct ocf_metadata_hash_context *context = priv;
     ocf_cache_t cache = context->cache;
-    uint32_t i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

     /* Synchronize core objects types */
-    for (i = 0; i < OCF_CORE_MAX; i++) {
-        cache->core_conf_meta[i].type = ocf_ctx_get_volume_type_id(
-                cache->owner, cache->core[i].volume.type);
+    for_each_core(cache, core, core_id) {
+        core->conf_meta->type = ocf_ctx_get_volume_type_id(
+                cache->owner, core->volume.type);
     }

     ocf_pipeline_next(pipeline);

@@ -1800,10 +1809,11 @@ static void ocf_metadata_hash_load_all(ocf_cache_t cache,
     ocf_pipeline_next(pipeline);
 }

-static void _recovery_rebuild_cline_metadata(struct ocf_cache *cache,
+static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
         ocf_core_id_t core_id, uint64_t core_line,
         ocf_cache_line_t cache_line)
 {
+    ocf_core_t core = ocf_cache_get_core(cache, core_id);
     ocf_part_id_t part_id;
     ocf_cache_line_t hash_index;

@@ -1821,17 +1831,16 @@ static void _recovery_rebuild_cline_metadata(struct ocf_cache *cache,
     ocf_eviction_set_hot_cache_line(cache, cache_line);

-    env_atomic_inc(&cache->core_runtime_meta[core_id].cached_clines);
-    env_atomic_inc(&cache->core_runtime_meta[core_id].
+    env_atomic_inc(&core->runtime_meta->cached_clines);
+    env_atomic_inc(&core->runtime_meta->
             part_counters[part_id].cached_clines);

     if (metadata_test_dirty(cache, cache_line)) {
-        env_atomic_inc(&cache->core_runtime_meta[core_id].
-                dirty_clines);
-        env_atomic_inc(&cache->core_runtime_meta[core_id].
+        env_atomic_inc(&core->runtime_meta->dirty_clines);
+        env_atomic_inc(&core->runtime_meta->
                 part_counters[part_id].dirty_clines);
-        env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id].
-                dirty_since, 0, env_get_tick_count());
+        env_atomic64_cmpxchg(&core->runtime_meta->dirty_since,
+                0, env_get_tick_count());
     }
 }

@@ -1962,16 +1971,18 @@ static void _ocf_metadata_hash_load_recovery_legacy(ocf_cache_t cache,
 static ocf_core_id_t _ocf_metadata_hash_find_core_by_seq(
         struct ocf_cache *cache, ocf_seq_no_t seq_no)
 {
-    ocf_core_id_t i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

     if (seq_no == OCF_SEQ_NO_INVALID)
         return OCF_CORE_ID_INVALID;

-    for (i = OCF_CORE_ID_MIN; i <= OCF_CORE_ID_MAX; i++)
-        if (cache->core_conf_meta[i].seq_no == seq_no)
-            break;
+    for_each_core_all(cache, core, core_id) {
+        if (core->conf_meta->seq_no == seq_no)
+            break;
+    }

-    return i;
+    return core_id;
 }

 static void ocf_metadata_hash_load_atomic_metadata_complete(

---- changed file ----

@@ -282,21 +282,19 @@ static void __init_metadata_version(ocf_cache_t cache)
 static void __reset_stats(ocf_cache_t cache)
 {
-    int core_id;
+    ocf_core_t core;
+    ocf_core_id_t core_id;
     ocf_part_id_t i;

-    for (core_id = 0; core_id < OCF_CORE_MAX; core_id++) {
-        env_atomic_set(&cache->core_runtime_meta[core_id].
-                cached_clines, 0);
-        env_atomic_set(&cache->core_runtime_meta[core_id].
-                dirty_clines, 0);
-        env_atomic64_set(&cache->core_runtime_meta[core_id].
-                dirty_since, 0);
+    for_each_core_all(cache, core, core_id) {
+        env_atomic_set(&core->runtime_meta->cached_clines, 0);
+        env_atomic_set(&core->runtime_meta->dirty_clines, 0);
+        env_atomic64_set(&core->runtime_meta->dirty_since, 0);

         for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
-            env_atomic_set(&cache->core_runtime_meta[core_id].
+            env_atomic_set(&core->runtime_meta->
                     part_counters[i].cached_clines, 0);
-            env_atomic_set(&cache->core_runtime_meta[core_id].
+            env_atomic_set(&core->runtime_meta->
                     part_counters[i].dirty_clines, 0);
         }
     }

@@ -365,7 +363,9 @@ static int _ocf_mngt_init_instance_add_cores(
     ocf_cache_t cache = context->cache;
     /* FIXME: This is temporary hack. Remove after storing name it meta. */
     char core_name[OCF_CORE_NAME_SIZE];
-    int ret = -1, i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;
+    int ret = -1;
     uint64_t hd_lines = 0;

     OCF_ASSERT_PLUGGED(cache);

@@ -381,17 +381,13 @@
     cache->conf_meta->core_count = 0;

     /* Check in metadata which cores were added into cache */
-    for (i = 0; i < OCF_CORE_MAX; i++) {
+    for_each_core(cache, core, core_id) {
         ocf_volume_t tvolume = NULL;
-        ocf_core_t core = &cache->core[i];
-
-        if (!cache->core_conf_meta[i].added)
-            continue;

-        if (!cache->core[i].volume.type)
+        if (!core->volume.type)
             goto err;

-        ret = snprintf(core_name, sizeof(core_name), "core%d", i);
+        ret = snprintf(core_name, sizeof(core_name), "core%d", core_id);
         if (ret < 0 || ret >= sizeof(core_name))
             goto err;

@@ -411,22 +407,23 @@
             core->opened = true;
             ocf_cache_log(cache, log_info,
-                    "Attached core %u from pool\n", i);
+                    "Attached core %u from pool\n",
+                    core_id);
         } else if (context->cfg.open_cores) {
             ret = ocf_volume_open(&core->volume, NULL);
             if (ret == -OCF_ERR_NOT_OPEN_EXC) {
                 ocf_cache_log(cache, log_warn,
                         "Cannot open core %u. "
-                        "Cache is busy", i);
+                        "Cache is busy", core_id);
             } else if (ret) {
                 ocf_cache_log(cache, log_warn,
-                        "Cannot open core %u", i);
+                        "Cannot open core %u", core_id);
             } else {
                 core->opened = true;
             }
         }

-        env_bit_set(i, cache->conf_meta->valid_core_bitmap);
+        env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
         cache->conf_meta->core_count++;
         core->volume.cache = cache;

@@ -444,13 +441,12 @@ static int _ocf_mngt_init_instance_add_cores(
             cache->ocf_core_inactive_count++;
             ocf_cache_log(cache, log_warn,
                     "Cannot find core %u in pool"
-                    ", core added as inactive\n", i);
+                    ", core added as inactive\n", core_id);
             continue;
         }

         hd_lines = ocf_bytes_2_lines(cache,
-                ocf_volume_get_length(
-                    &cache->core[i].volume));
+                ocf_volume_get_length(&core->volume));

         if (hd_lines) {
             ocf_cache_log(cache, log_info,

@@ -1884,19 +1880,18 @@ static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline,
 {
     struct ocf_mngt_cache_stop_context *context = priv;
     ocf_cache_t cache = context->cache;
-    int i, j, no;
-
-    no = cache->conf_meta->core_count;
+    ocf_core_t core;
+    ocf_core_id_t core_id;
+    int no = cache->conf_meta->core_count;

     /* All exported objects removed, cleaning up rest. */
-    for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
-        if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-            continue;
-
-        cache_mng_core_remove_from_cache(cache, i);
+    for_each_core(cache, core, core_id) {
+        cache_mng_core_remove_from_cache(core);
         if (context->cache_attached)
-            cache_mng_core_remove_from_cleaning_pol(cache, i);
-        cache_mng_core_close(cache, i);
-        j++;
+            cache_mng_core_remove_from_cleaning_pol(core);
+        cache_mng_core_close(core);
+        if (--no == 0)
+            break;
     }

     ENV_BUG_ON(cache->conf_meta->core_count != 0);

@@ -2114,6 +2109,19 @@ void ocf_mngt_cache_save(ocf_cache_t cache,
             ocf_mngt_cache_save_flush_sb_complete, context);
 }

+static void _cache_mng_update_initial_dirty_clines(ocf_cache_t cache)
+{
+    ocf_core_t core;
+    ocf_core_id_t core_id;
+
+    for_each_core(cache, core, core_id) {
+        env_atomic_set(&core->runtime_meta->initial_dirty_clines,
+                env_atomic_read(&core->runtime_meta->
+                        dirty_clines));
+    }
+}
+
 static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
 {
     ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode;

@@ -2130,18 +2138,8 @@ static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
     cache->conf_meta->cache_mode = mode;

-    if (ocf_cache_mode_wb == mode_old) {
-        int i;
-
-        for (i = 0; i != OCF_CORE_MAX; ++i) {
-            if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-                continue;
-            env_atomic_set(&cache->core_runtime_meta[i].
-                    initial_dirty_clines,
-                    env_atomic_read(&cache->
-                            core_runtime_meta[i].dirty_clines));
-        }
-    }
+    if (mode_old == ocf_cache_mode_wb)
+        _cache_mng_update_initial_dirty_clines(cache);

     ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
             "successful\n", ocf_get_io_iface_name(mode_old),

@@ -2300,17 +2298,16 @@ static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline,
 {
     struct ocf_mngt_cache_detach_context *context = priv;
     ocf_cache_t cache = context->cache;
-    int i, j, no;
-
-    no = cache->conf_meta->core_count;
+    ocf_core_t core;
+    ocf_core_id_t core_id;
+    int no = cache->conf_meta->core_count;

     /* remove cacheline metadata and cleaning policy meta for all cores */
-    for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
-        if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-            continue;
-        cache_mng_core_deinit_attached_meta(cache, i);
-        cache_mng_core_remove_from_cleaning_pol(cache, i);
-        j++;
+    for_each_core(cache, core, core_id) {
+        cache_mng_core_deinit_attached_meta(core);
+        cache_mng_core_remove_from_cleaning_pol(core);
+        if (--no == 0)
+            break;
     }

     ocf_pipeline_next(context->pipeline);

---- changed file ----

@@ -17,21 +17,22 @@
 #include "../engine/engine_common.h"

 /* Close if opened */
-int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id)
+int cache_mng_core_close(ocf_core_t core)
 {
-    if (!cache->core[core_id].opened)
+    if (!core->opened)
         return -OCF_ERR_CORE_IN_INACTIVE_STATE;

-    ocf_volume_close(&cache->core[core_id].volume);
-    cache->core[core_id].opened = false;
+    ocf_volume_close(&core->volume);
+    core->opened = false;

     return 0;
 }

 /* Remove core from cleaning policy */
-void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
-        int core_id)
+void cache_mng_core_remove_from_cleaning_pol(ocf_core_t core)
 {
+    ocf_cache_t cache = ocf_core_get_cache(core);
+    ocf_core_id_t core_id = ocf_core_get_id(core);
     ocf_cleaning_t clean_pol_type;

     OCF_METADATA_LOCK_WR();

@@ -48,16 +49,15 @@ void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
 }

 /* Deinitialize core metadata in attached metadata */
-void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id)
+void cache_mng_core_deinit_attached_meta(ocf_core_t core)
 {
     int retry = 1;
     uint64_t core_size = 0;
     ocf_cleaning_t clean_pol_type;
-    ocf_volume_t core;
+    ocf_cache_t cache = ocf_core_get_cache(core);
+    ocf_core_id_t core_id = ocf_core_get_id(core);

-    core = &cache->core[core_id].volume;
-    core_size = ocf_volume_get_length(core);
+    core_size = ocf_volume_get_length(&core->volume);
     if (!core_size)
         core_size = ~0ULL;

@@ -88,31 +88,34 @@ void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id)
 }

 /* Mark core as removed in metadata */
-void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id)
+void cache_mng_core_remove_from_meta(ocf_core_t core)
 {
+    ocf_cache_t cache = ocf_core_get_cache(core);
+
     OCF_METADATA_LOCK_WR();

     /* In metadata mark data this core was removed from cache */
-    cache->core_conf_meta[core_id].added = false;
+    core->conf_meta->added = false;

     /* Clear UUID of core */
-    ocf_mngt_core_clear_uuid_metadata(&cache->core[core_id]);
-    cache->core_conf_meta[core_id].seq_no = OCF_SEQ_NO_INVALID;
+    ocf_mngt_core_clear_uuid_metadata(core);
+    core->conf_meta->seq_no = OCF_SEQ_NO_INVALID;

     OCF_METADATA_UNLOCK_WR();
 }

 /* Deinit in-memory structures related to this core */
-void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id)
+void cache_mng_core_remove_from_cache(ocf_core_t core)
 {
-    env_free(cache->core[core_id].counters);
-    cache->core[core_id].counters = NULL;
+    ocf_cache_t cache = ocf_core_get_cache(core);
+    ocf_core_id_t core_id = ocf_core_get_id(core);
+
+    env_free(core->counters);
+    core->counters = NULL;
     env_bit_clear(core_id, cache->conf_meta->valid_core_bitmap);

-    if (!cache->core[core_id].opened &&
-            --cache->ocf_core_inactive_count == 0) {
+    if (!core->opened && --cache->ocf_core_inactive_count == 0)
         env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
-    }

     cache->conf_meta->core_count--;
 }

---- changed file ----

@@ -7,16 +7,15 @@
 #ifndef __OCF_MNGT_COMMON_H__
 #define __OCF_MNGT_COMMON_H__

-int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id);
+int cache_mng_core_close(ocf_core_t core);

-void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id);
+void cache_mng_core_remove_from_meta(ocf_core_t core);

-void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id);
+void cache_mng_core_remove_from_cache(ocf_core_t core);

-void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id);
+void cache_mng_core_deinit_attached_meta(ocf_core_t core);

-void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
-        int core_id);
+void cache_mng_core_remove_from_cleaning_pol(ocf_core_t core);

 int _ocf_cleaning_thread(void *priv);

---- changed file ----

@@ -150,7 +150,7 @@ static void _ocf_mngt_cache_add_core_handle_error(
     if (context->flags.counters_allocated) {
         env_bit_clear(cfg->core_id,
                 cache->conf_meta->valid_core_bitmap);
-        cache->core_conf_meta[cfg->core_id].added = false;
+        core->conf_meta->added = false;
         core->opened = false;

         env_free(core->counters);

@@ -199,7 +199,7 @@ static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
     uint64_t length;
     int result = 0;

-    core = &cache->core[cfg->core_id];
+    core = ocf_cache_get_core(cache, cfg->core_id);
     context->core = core;

     volume = &core->volume;

@@ -242,7 +242,7 @@ static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
     if (!length)
         OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CORE_NOT_AVAIL);

-    cache->core_conf_meta[cfg->core_id].length = length;
+    core->conf_meta->length = length;

     clean_type = cache->conf_meta->cleaning_policy_type;
     if (ocf_cache_is_device_attached(cache) &&

@@ -265,30 +265,25 @@ static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
     /* When adding new core to cache, reset all core/cache statistics */
     ocf_core_stats_initialize(core);
-    env_atomic_set(&cache->core_runtime_meta[cfg->core_id].
-            cached_clines, 0);
-    env_atomic_set(&cache->core_runtime_meta[cfg->core_id].
-            dirty_clines, 0);
-    env_atomic64_set(&cache->core_runtime_meta[cfg->core_id].
-            dirty_since, 0);
+    env_atomic_set(&core->runtime_meta->cached_clines, 0);
+    env_atomic_set(&core->runtime_meta->dirty_clines, 0);
+    env_atomic64_set(&core->runtime_meta->dirty_since, 0);

     /* In metadata mark data this core was added into cache */
     env_bit_set(cfg->core_id, cache->conf_meta->valid_core_bitmap);
-    cache->core_conf_meta[cfg->core_id].added = true;
+    core->conf_meta->added = true;
     core->opened = true;

     /* Set default cache parameters for sequential */
-    cache->core_conf_meta[cfg->core_id].seq_cutoff_policy =
-            ocf_seq_cutoff_policy_default;
-    cache->core_conf_meta[cfg->core_id].seq_cutoff_threshold =
-            cfg->seq_cutoff_threshold;
+    core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
+    core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;

     /* Add core sequence number for atomic metadata matching */
     core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
     if (core_sequence_no == OCF_SEQ_NO_INVALID)
         OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_TOO_MANY_CORES);

-    cache->core_conf_meta[cfg->core_id].seq_no = core_sequence_no;
+    core->conf_meta->seq_no = core_sequence_no;

     /* Update super-block with core device addition */
     ocf_metadata_flush_superblock(cache,

@@ -661,7 +656,6 @@ static void _ocf_mngt_cache_remove_core(ocf_pipeline_t pipeline, void *priv,
     struct ocf_mngt_cache_remove_core_context *context = priv;
     ocf_cache_t cache = context->cache;
     ocf_core_t core = context->core;
-    ocf_core_id_t core_id = ocf_core_get_id(core);

     ocf_core_log(core, log_debug, "Removing core\n");

@@ -669,12 +663,12 @@
     /* Deinit everything*/
     if (ocf_cache_is_device_attached(cache)) {
-        cache_mng_core_deinit_attached_meta(cache, core_id);
-        cache_mng_core_remove_from_cleaning_pol(cache, core_id);
+        cache_mng_core_deinit_attached_meta(core);
+        cache_mng_core_remove_from_cleaning_pol(core);
     }
-    cache_mng_core_remove_from_meta(cache, core_id);
-    cache_mng_core_remove_from_cache(cache, core_id);
-    cache_mng_core_close(cache, core_id);
+    cache_mng_core_remove_from_meta(core);
+    cache_mng_core_remove_from_cache(core);
+    cache_mng_core_close(core);

     /* Update super-block with core device removal */
     ocf_metadata_flush_superblock(cache,

@@ -760,12 +754,11 @@ static void _ocf_mngt_cache_detach_core(ocf_pipeline_t pipeline,
     struct ocf_mngt_cache_remove_core_context *context = priv;
     ocf_cache_t cache = context->cache;
     ocf_core_t core = context->core;
-    ocf_core_id_t core_id = ocf_core_get_id(core);
     int status;

     ocf_core_log(core, log_debug, "Detaching core\n");

-    status = cache_mng_core_close(cache, core_id);
+    status = cache_mng_core_close(core);
     if (status)
         OCF_PL_FINISH_RET(pipeline, status);

@@ -893,19 +886,13 @@ int ocf_mngt_core_set_uuid(ocf_core_t core, const struct ocf_volume_uuid *uuid)
 int ocf_mngt_core_set_user_metadata(ocf_core_t core, void *data, size_t size)
 {
-    ocf_cache_t cache;
-    uint32_t core_id;
-
     OCF_CHECK_NULL(core);
     OCF_CHECK_NULL(data);

-    cache = ocf_core_get_cache(core);
-    core_id = ocf_core_get_id(core);
-
     if (size > OCF_CORE_USER_DATA_SIZE)
         return -EINVAL;

-    env_memcpy(cache->core_conf_meta[core_id].user_data,
+    env_memcpy(core->conf_meta->user_data,
             OCF_CORE_USER_DATA_SIZE, data, size);

     return 0;

@@ -913,18 +900,13 @@ int ocf_mngt_core_set_user_metadata(ocf_core_t core, void *data, size_t size)
 int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
 {
-    uint32_t core_id;
-    ocf_cache_t cache;
-
     OCF_CHECK_NULL(core);
+    OCF_CHECK_NULL(data);

-    core_id = ocf_core_get_id(core);
-    cache = ocf_core_get_cache(core);
-
-    if (size > sizeof(cache->core_conf_meta[core_id].user_data))
+    if (size > sizeof(core->conf_meta->user_data))
         return -EINVAL;

-    env_memcpy(data, size, cache->core_conf_meta[core_id].user_data,
+    env_memcpy(data, size, core->conf_meta->user_data,
             OCF_CORE_USER_DATA_SIZE);

     return 0;

@@ -933,10 +915,7 @@ int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
 static int _cache_mng_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
 {
     uint32_t threshold = *(uint32_t*) cntx;
-    ocf_cache_t cache = ocf_core_get_cache(core);
-    ocf_core_id_t core_id = ocf_core_get_id(core);
-    uint32_t threshold_old = cache->core_conf_meta[core_id].
-            seq_cutoff_threshold;
+    uint32_t threshold_old = core->conf_meta->seq_cutoff_threshold;

     if (threshold_old == threshold) {
         ocf_core_log(core, log_info,

@@ -944,7 +923,7 @@ static int _cache_mng_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
                 "already set\n", threshold);
         return 0;
     }

-    cache->core_conf_meta[core_id].seq_cutoff_threshold = threshold;
+    core->conf_meta->seq_cutoff_threshold = threshold;

     ocf_core_log(core, log_info, "Changing sequential cutoff "
             "threshold from %u to %u bytes successful\n",

@@ -997,9 +976,7 @@ static const char *_cache_mng_seq_cutoff_policy_get_name(
 static int _cache_mng_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
 {
     ocf_seq_cutoff_policy policy = *(ocf_seq_cutoff_policy*) cntx;
-    ocf_cache_t cache = ocf_core_get_cache(core);
-    ocf_core_id_t core_id = ocf_core_get_id(core);
-    uint32_t policy_old = cache->core_conf_meta[core_id].seq_cutoff_policy;
+    uint32_t policy_old = core->conf_meta->seq_cutoff_policy;

     if (policy_old == policy) {
         ocf_core_log(core, log_info,

@@ -1014,7 +991,7 @@ static int _cache_mng_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
         return -OCF_ERR_INVAL;
     }

-    cache->core_conf_meta[core_id].seq_cutoff_policy = policy;
+    core->conf_meta->seq_cutoff_policy = policy;

     ocf_core_log(core, log_info,
             "Changing sequential cutoff policy from %s to %s\n",

---- changed file ----

@@ -107,19 +107,15 @@ static void _ocf_mngt_end_flush(ocf_cache_t cache)
 bool ocf_mngt_cache_is_dirty(ocf_cache_t cache)
 {
-    uint32_t i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

     OCF_CHECK_NULL(cache);

-    for (i = 0; i < OCF_CORE_MAX; ++i) {
-        if (!cache->core_conf_meta[i].added)
-            continue;
-
-        if (env_atomic_read(&(cache->core_runtime_meta[i].
-                dirty_clines))) {
+    for_each_core(cache, core, core_id) {
+        if (env_atomic_read(&core->runtime_meta->dirty_clines))
             return true;
-        }
     }

     return false;
 }

@@ -133,16 +129,16 @@ bool ocf_mngt_cache_is_dirty(ocf_cache_t cache)
  * NOTE:
  * Table is not sorted.
  */
-static int _ocf_mngt_get_sectors(struct ocf_cache *cache, int core_id,
+static int _ocf_mngt_get_sectors(ocf_cache_t cache, ocf_core_id_t core_id,
         struct flush_data **tbl, uint32_t *num)
 {
+    ocf_core_t core = ocf_cache_get_core(cache, core_id);
     uint64_t core_line;
     ocf_core_id_t i_core_id;
     struct flush_data *p;
     uint32_t i, j, dirty = 0;

-    dirty = env_atomic_read(&cache->core_runtime_meta[core_id].
-            dirty_clines);
+    dirty = env_atomic_read(&core->runtime_meta->dirty_clines);
     if (!dirty) {
         *num = 0;
         *tbl = NULL;

@@ -202,7 +198,8 @@ static int _ocf_mngt_get_flush_containers(ocf_cache_t cache,
     uint32_t num;
     uint64_t core_line;
     ocf_core_id_t core_id;
-    uint32_t i, j, dirty = 0;
+    ocf_core_t core;
+    uint32_t i, j = 0, dirty = 0;
     int step = 0;

     /*

@@ -226,16 +223,13 @@
         return -OCF_ERR_NO_MEM;
     }

-    for (i = 0, j = 0; i < OCF_CORE_MAX; i++) {
-        if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-            continue;
-
-        fc[j].core_id = i;
-        core_revmap[i] = j;
+    for_each_core(cache, core, core_id) {
+        fc[j].core_id = core_id;
+        core_revmap[core_id] = j;

         /* Check for dirty blocks */
-        fc[j].count = env_atomic_read(&cache->
-                core_runtime_meta[i].dirty_clines);
+        fc[j].count = env_atomic_read(
+                &core->runtime_meta->dirty_clines);
         dirty += fc[j].count;

         if (fc[j].count) {

@@ -600,7 +594,7 @@ static void _ocf_mngt_flush_finish(ocf_pipeline_t pipeline, void *priv,
 {
     struct ocf_mngt_cache_flush_context *context = priv;
     ocf_cache_t cache = context->cache;
-    int64_t core_id;
+    ocf_core_t core = context->core;

     if (!error) {
         switch(context->op) {

@@ -610,33 +604,32 @@
             break;
         case flush_core:
         case purge_core:
-            core_id = ocf_core_get_id(context->core);
-            ENV_BUG_ON(env_atomic_read(&cache->core_runtime_meta
-                    [core_id].dirty_clines));
+            ENV_BUG_ON(env_atomic_read(
+                    &core->runtime_meta->dirty_clines));
             break;
         }
     }

-    _ocf_mngt_end_flush(context->cache);
+    _ocf_mngt_end_flush(cache);

     switch (context->op) {
     case flush_cache:
-        context->cmpl.flush_cache(context->cache, context->priv, error);
+        context->cmpl.flush_cache(cache, context->priv, error);
         break;
     case flush_core:
-        context->cmpl.flush_core(context->core, context->priv, error);
+        context->cmpl.flush_core(core, context->priv, error);
         break;
     case purge_cache:
-        context->cmpl.purge_cache(context->cache, context->priv, error);
+        context->cmpl.purge_cache(cache, context->priv, error);
        break;
     case purge_core:
-        context->cmpl.purge_core(context->core, context->priv, error);
+        context->cmpl.purge_core(core, context->priv, error);
         break;
     default:
         ENV_BUG();
     }

-    ocf_pipeline_destroy(context->pipeline);
+    ocf_pipeline_destroy(pipeline);
 }

 static struct ocf_pipeline_properties _ocf_mngt_cache_flush_pipeline_properties = {

---- changed file ----

@@ -69,7 +69,6 @@ static uint32_t _calc_dirty_for(uint64_t dirty_since)
 int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
 {
-    uint32_t i;
     uint32_t cache_occupancy_total = 0;
     uint32_t dirty_blocks_total = 0;
     uint32_t initial_dirty_blocks_total = 0;

@@ -80,6 +79,8 @@ int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
     uint64_t core_dirty_since;
     uint32_t dirty_blocks_inactive = 0;
     uint32_t cache_occupancy_inactive = 0;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

     OCF_CHECK_NULL(cache);

@@ -101,50 +102,44 @@
     /* iterate through all possibly valid core objcts, as list of
      * valid objects may be not continuous
      */
-    for (i = 0; i != OCF_CORE_MAX; ++i) {
-        if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-            continue;
-
+    for_each_core(cache, core, core_id) {
         /* If current dirty blocks exceeds saved initial dirty
          * blocks then update the latter
          */
-        curr_dirty_cnt = env_atomic_read(&cache->
-                core_runtime_meta[i].dirty_clines);
-        init_dirty_cnt = env_atomic_read(&cache->
-                core_runtime_meta[i].initial_dirty_clines);
-        if (init_dirty_cnt &&
-                (curr_dirty_cnt > init_dirty_cnt)) {
+        curr_dirty_cnt = env_atomic_read(
+                &core->runtime_meta->dirty_clines);
+        init_dirty_cnt = env_atomic_read(
+                &core->runtime_meta->initial_dirty_clines);
+        if (init_dirty_cnt && (curr_dirty_cnt > init_dirty_cnt)) {
             env_atomic_set(
-                &cache->core_runtime_meta[i].
-                    initial_dirty_clines,
-                env_atomic_read(&cache->
-                    core_runtime_meta[i].dirty_clines));
+                &core->runtime_meta->initial_dirty_clines,
+                env_atomic_read(
+                    &core->runtime_meta->dirty_clines));
         }
-        cache_occupancy_total += env_atomic_read(&cache->
-                core_runtime_meta[i].cached_clines);
+        cache_occupancy_total += env_atomic_read(
+                &core->runtime_meta->cached_clines);

-        dirty_blocks_total += env_atomic_read(&(cache->
-                core_runtime_meta[i].dirty_clines));
-        initial_dirty_blocks_total += env_atomic_read(&(cache->
-                core_runtime_meta[i].initial_dirty_clines));
+        dirty_blocks_total += env_atomic_read(
+                &core->runtime_meta->dirty_clines);
+        initial_dirty_blocks_total += env_atomic_read(
+                &core->runtime_meta->initial_dirty_clines);

-        if (!cache->core[i].opened) {
-            cache_occupancy_inactive += env_atomic_read(&cache->
-                    core_runtime_meta[i].cached_clines);
+        if (!core->opened) {
+            cache_occupancy_inactive += env_atomic_read(
+                    &core->runtime_meta->cached_clines);

-            dirty_blocks_inactive += env_atomic_read(&(cache->
-                    core_runtime_meta[i].dirty_clines));
+            dirty_blocks_inactive += env_atomic_read(
+                    &core->runtime_meta->dirty_clines);
         }
-        core_dirty_since = env_atomic64_read(&cache->
-                core_runtime_meta[i].dirty_since);
+        core_dirty_since = env_atomic64_read(
+                &core->runtime_meta->dirty_since);
         if (core_dirty_since) {
             dirty_since = (dirty_since ?
                 OCF_MIN(dirty_since, core_dirty_since) :
                 core_dirty_since);
         }

-        flushed_total += env_atomic_read(
-                &cache->core[i].flushed);
+        flushed_total += env_atomic_read(&core->flushed);
     }

     info->dirty = dirty_blocks_total;

---- changed file ----

@@ -37,55 +37,6 @@ struct ocf_trace {
     env_atomic64 trace_seq_ref;
 };

-struct ocf_metadata_uuid {
-    uint32_t size;
-    uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];
-} __packed;
-
-#define OCF_CORE_USER_DATA_SIZE 64
-
-struct ocf_core_meta_config {
-    uint8_t type;
-
-    /* This bit means that object was added into cache */
-    uint32_t added : 1;
-
-    /* Core sequence number used to correlate cache lines with cores
-     * when recovering from atomic device */
-    ocf_seq_no_t seq_no;
-
-    /* Sequential cutoff threshold (in bytes) */
-    uint32_t seq_cutoff_threshold;
-
-    /* Sequential cutoff policy */
-    ocf_seq_cutoff_policy seq_cutoff_policy;
-
-    /* core object size in bytes */
-    uint64_t length;
-
-    uint8_t user_data[OCF_CORE_USER_DATA_SIZE];
-};
-
-struct ocf_core_meta_runtime {
-    /* Number of blocks from that objects that currently are cached
-     * on the caching device.
-     */
-    env_atomic cached_clines;
-    env_atomic dirty_clines;
-    env_atomic initial_dirty_clines;
-
-    env_atomic64 dirty_since;
-
-    struct {
-        /* clines within lru list (?) */
-        env_atomic cached_clines;
-
-        /* dirty clines assigned to this specific partition within
-         * cache device
-         */
-        env_atomic dirty_clines;
-    } part_counters[OCF_IO_CLASS_MAX];
-};
-
 /**
  * @brief Initialization mode of cache instance
  */

@@ -193,8 +144,6 @@ struct ocf_cache {
     uint16_t ocf_core_inactive_count;
     struct ocf_core core[OCF_CORE_MAX];

-    struct ocf_core_meta_config *core_conf_meta;
-    struct ocf_core_meta_runtime *core_runtime_meta;

     env_atomic flush_in_progress;

@@ -224,6 +173,12 @@ struct ocf_cache {
     void *priv;
 };

+static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache,
+        ocf_core_id_t core_id)
+{
+    return &cache->core[core_id];
+}
+
 #define ocf_cache_log_prefix(cache, lvl, prefix, fmt, ...) \
     ocf_log_prefix(ocf_cache_get_ctx(cache), lvl, "%s" prefix, \
         fmt, ocf_cache_get_name(cache), ##__VA_ARGS__)
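The ocf_cache_get_core() helper added above is a bare array lookup with no range or validity check, so callers are expected to pass a core id they already trust; __set_cache_line_invalid() later in this diff, for instance, keeps its ENV_BUG_ON(core_id >= OCF_CORE_MAX) guard next to the new lookup. A guarded wrapper under that assumption (the wrapper name is hypothetical, not part of OCF):

    static inline ocf_core_t get_core_checked(ocf_cache_t cache,
            ocf_core_id_t core_id)
    {
        /* same range guard the diff keeps in __set_cache_line_invalid() */
        ENV_BUG_ON(core_id >= OCF_CORE_MAX);
        return ocf_cache_get_core(cache, core_id);
    }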

---- changed file ----

@@ -98,18 +98,12 @@ int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
 uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
 {
-    uint32_t core_id = ocf_core_get_id(core);
-    ocf_cache_t cache = ocf_core_get_cache(core);
-
-    return cache->core_conf_meta[core_id].seq_cutoff_threshold;
+    return core->conf_meta->seq_cutoff_threshold;
 }

 ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
 {
-    uint32_t core_id = ocf_core_get_id(core);
-    ocf_cache_t cache = ocf_core_get_cache(core);
-
-    return cache->core_conf_meta[core_id].seq_cutoff_policy;
+    return core->conf_meta->seq_cutoff_policy;
 }

 int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,

---- changed file ----

@@ -32,12 +32,65 @@ struct ocf_core_io {
     /*!< Timestamp */
 };

+struct ocf_metadata_uuid {
+    uint32_t size;
+    uint8_t data[OCF_VOLUME_UUID_MAX_SIZE];
+} __packed;
+
+#define OCF_CORE_USER_DATA_SIZE 64
+
+struct ocf_core_meta_config {
+    uint8_t type;
+
+    /* This bit means that object was added into cache */
+    uint32_t added : 1;
+
+    /* Core sequence number used to correlate cache lines with cores
+     * when recovering from atomic device */
+    ocf_seq_no_t seq_no;
+
+    /* Sequential cutoff threshold (in bytes) */
+    uint32_t seq_cutoff_threshold;
+
+    /* Sequential cutoff policy */
+    ocf_seq_cutoff_policy seq_cutoff_policy;
+
+    /* core object size in bytes */
+    uint64_t length;
+
+    uint8_t user_data[OCF_CORE_USER_DATA_SIZE];
+};
+
+struct ocf_core_meta_runtime {
+    /* Number of blocks from that objects that currently are cached
+     * on the caching device.
+     */
+    env_atomic cached_clines;
+    env_atomic dirty_clines;
+    env_atomic initial_dirty_clines;
+
+    env_atomic64 dirty_since;
+
+    struct {
+        /* clines within lru list (?) */
+        env_atomic cached_clines;
+
+        /* dirty clines assigned to this specific partition within
+         * cache device
+         */
+        env_atomic dirty_clines;
+    } part_counters[OCF_IO_CLASS_MAX];
+};
+
 struct ocf_core {
     char name[OCF_CORE_NAME_SIZE];

     struct ocf_volume front_volume;
     struct ocf_volume volume;

+    struct ocf_core_meta_config *conf_meta;
+    struct ocf_core_meta_runtime *runtime_meta;
+
     struct {
         uint64_t last;
         uint64_t bytes;

@@ -58,4 +111,11 @@ int ocf_core_volume_type_init(ocf_ctx_t ctx);

 void ocf_core_volume_type_deinit(ocf_ctx_t ctx);

+#define for_each_core_all(_cache, _core, _id) \
+    for (_id = 0; _core = &cache->core[_id], _id < OCF_CORE_MAX; _id++)
+
+#define for_each_core(_cache, _core, _id) \
+    for_each_core_all(_cache, _core, _id) \
+        if (core->conf_meta->added)
+
 #endif /* __OCF_CORE_PRIV_H__ */
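The two iteration macros above replace the old two-argument for_each_core(cache, iter) from the deleted utils_core.h shown near the end of this diff: for_each_core_all() walks every slot of cache->core[], while for_each_core() skips entries whose conf_meta->added bit is clear. Note that, as transcribed in this hunk, the macro bodies refer to `cache` and `core` directly rather than to the `_cache`/`_core` parameters, so callers effectively have to use those variable names. A short usage sketch under that assumption (count_added_cores is illustrative only, not part of OCF):

    static int count_added_cores(ocf_cache_t cache)
    {
        ocf_core_t core;
        ocf_core_id_t core_id;
        int count = 0;

        /* visits only cores whose config metadata marks them as added */
        for_each_core(cache, core, core_id)
            count++;

        return count;
    }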

---- changed file ----

@@ -40,11 +40,13 @@ int ocf_metadata_get_atomic_entry(ocf_cache_t cache,
     ocf_cache_line_t line = ocf_atomic_addr2line(cache, addr);
     uint8_t pos = ocf_atomic_addr2pos(cache, addr);
     ocf_core_id_t core_id = OCF_CORE_MAX;
+    ocf_core_t core;
     uint64_t core_line = 0;

     ocf_metadata_get_core_info(cache, line, &core_id, &core_line);
+    core = ocf_cache_get_core(cache, core_id);

-    entry->core_seq_no = cache->core_conf_meta[core_id].seq_no;
+    entry->core_seq_no = core->conf_meta->seq_no;
     entry->core_line = core_line;

     entry->valid = metadata_test_valid_one(cache, line, pos);

---- changed file ----

@@ -9,7 +9,6 @@
 #include "engine/cache_engine.h"
 #include "utils/utils_part.h"
 #include "utils/utils_cache_line.h"
-#include "utils/utils_core.h"

 #ifdef OCF_DEBUG_STATS
 static void ocf_stats_debug_init(struct ocf_counters_debug *stats)

@@ -165,10 +164,10 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
         struct ocf_stats_io_class *stats)
 {
     ocf_cache_t cache;
-    uint32_t i;
     uint32_t cache_occupancy_total = 0;
     struct ocf_counters_part *part_stat;
-    ocf_core_id_t core_id;
+    ocf_core_t i_core;
+    ocf_core_id_t i_core_id;

     OCF_CHECK_NULL(core);
     OCF_CHECK_NULL(stats);

@@ -176,25 +175,22 @@
     if (part_id < OCF_IO_CLASS_ID_MIN || part_id > OCF_IO_CLASS_ID_MAX)
         return -OCF_ERR_INVAL;

-    core_id = ocf_core_get_id(core);
     cache = ocf_core_get_cache(core);

     if (!ocf_part_is_valid(&cache->user_parts[part_id]))
         return -OCF_ERR_IO_CLASS_NOT_EXIST;

-    for_each_core(cache, i) {
+    for_each_core(cache, i_core, i_core_id) {
         cache_occupancy_total += env_atomic_read(
-                &cache->core_runtime_meta[i].cached_clines);
+                &i_core->runtime_meta->cached_clines);
     }

     part_stat = &core->counters->part_counters[part_id];

-    stats->occupancy_clines = env_atomic_read(&cache->
-            core_runtime_meta[core_id].part_counters[part_id].
-            cached_clines);
-    stats->dirty_clines = env_atomic_read(&cache->
-            core_runtime_meta[core_id].part_counters[part_id].
-            dirty_clines);
+    stats->occupancy_clines = env_atomic_read(&core->runtime_meta->
+            part_counters[part_id].cached_clines);
+    stats->dirty_clines = env_atomic_read(&core->runtime_meta->
+            part_counters[part_id].dirty_clines);

     stats->free_clines = cache->conf_meta->cachelines -
             cache_occupancy_total;

@@ -242,7 +238,7 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
     stats->seq_cutoff_policy = ocf_core_get_seq_cutoff_policy(core);

-    env_atomic_read(&cache->core_runtime_meta[core_id].cached_clines);
+    env_atomic_read(&core->runtime_meta->cached_clines);

     copy_block_stats(&stats->core_volume, &core_stats->core_blocks);
     copy_block_stats(&stats->cache_volume, &core_stats->cache_blocks);

@@ -267,18 +263,16 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
         accum_block_stats(&stats->core, &curr->blocks);

-        stats->cache_occupancy += env_atomic_read(&cache->
-                core_runtime_meta[core_id].part_counters[i].
-                cached_clines);
-        stats->dirty += env_atomic_read(&cache->
-                core_runtime_meta[core_id].part_counters[i].
-                dirty_clines);
+        stats->cache_occupancy += env_atomic_read(&core->runtime_meta->
+                part_counters[i].cached_clines);
+        stats->dirty += env_atomic_read(&core->runtime_meta->
+                part_counters[i].dirty_clines);
     }

     stats->flushed = env_atomic_read(&core->flushed);

     stats->dirty_for = _calc_dirty_for(
-        env_atomic64_read(&cache->core_runtime_meta[core_id].dirty_since));
+        env_atomic64_read(&core->runtime_meta->dirty_since));

     return 0;
 }

---- changed file ----

@@ -47,15 +47,11 @@ static uint64_t _bytes4k(uint64_t bytes)
 static uint64_t _get_cache_occupancy(ocf_cache_t cache)
 {
     uint64_t result = 0;
-    uint32_t i;
+    ocf_core_t core;
+    ocf_core_id_t core_id;

-    for (i = 0; i != OCF_CORE_MAX; ++i) {
-        if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
-            continue;
-
-        result += env_atomic_read(
-                &cache->core_runtime_meta[i].cached_clines);
-    }
+    for_each_core(cache, core, core_id)
+        result += env_atomic_read(&core->runtime_meta->cached_clines);

     return result;
 }

---- changed file ----

@@ -22,6 +22,7 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
         uint8_t end_bit, ocf_cache_line_t line,
         ocf_core_id_t core_id, ocf_part_id_t part_id)
 {
+    ocf_core_t core = ocf_cache_get_core(cache, core_id);
     bool is_valid;

     ENV_BUG_ON(core_id >= OCF_CORE_MAX);

@@ -31,9 +32,8 @@
         /*
          * Update the number of cached data for that core object
          */
-        env_atomic_dec(&cache->core_runtime_meta[core_id].
-                cached_clines);
-        env_atomic_dec(&cache->core_runtime_meta[core_id].
+        env_atomic_dec(&core->runtime_meta->cached_clines);
+        env_atomic_dec(&core->runtime_meta->
                 part_counters[part_id].cached_clines);
     }

@@ -81,19 +81,15 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
 void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
         uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
 {
-    ocf_core_id_t core_id = ocf_core_get_id(req->core);
     ocf_cache_line_t line = req->map[map_idx].coll_idx;
     ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);

-    ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
-
     if (metadata_set_valid_sec_changed(cache, line, start_bit, end_bit)) {
         /*
          * Update the number of cached data for that core object
          */
-        env_atomic_inc(&cache->core_runtime_meta[core_id].
-                cached_clines);
-        env_atomic_inc(&cache->core_runtime_meta[core_id].
+        env_atomic_inc(&req->core->runtime_meta->cached_clines);
+        env_atomic_inc(&req->core->runtime_meta->
                 part_counters[part_id].cached_clines);
     }
 }

@@ -101,32 +97,29 @@ void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
 void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
         uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
 {
-    ocf_core_id_t core_id = ocf_core_get_id(req->core);
     ocf_cache_line_t line = req->map[map_idx].coll_idx;
     ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
     uint8_t evp_type = cache->conf_meta->eviction_policy_type;

-    ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
-
     if (metadata_clear_dirty_sec_changed(cache, line, start_bit, end_bit)) {
         /*
          * Update the number of dirty cached data for that
          * core object
          */
-        if (env_atomic_dec_and_test(&cache->core_runtime_meta[core_id].
+        if (env_atomic_dec_and_test(&req->core->runtime_meta->
                 dirty_clines)) {
             /*
              * If this is last dirty cline reset dirty
              * timestamp
              */
-            env_atomic64_set(&cache->core_runtime_meta[core_id].
+            env_atomic64_set(&req->core->runtime_meta->
                     dirty_since, 0);
         }

         /*
          * decrement dirty clines statistic for given cline
          */
-        env_atomic_dec(&cache->core_runtime_meta[core_id].
+        env_atomic_dec(&req->core->runtime_meta->
                 part_counters[part_id].dirty_clines);

         if (likely(evict_policy_ops[evp_type].clean_cline))

@@ -141,30 +134,27 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
 void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
         uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
 {
-    ocf_core_id_t core_id = ocf_core_get_id(req->core);
     ocf_cache_line_t line = req->map[map_idx].coll_idx;
     ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
     uint8_t evp_type = cache->conf_meta->eviction_policy_type;

-    ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
-
     if (metadata_set_dirty_sec_changed(cache, line, start_bit, end_bit)) {
         /*
          * If this is first dirty cline set dirty timestamp
          */
-        env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id].
-                dirty_since, 0, env_get_tick_count());
+        env_atomic64_cmpxchg(&req->core->runtime_meta->dirty_since,
+                0, env_get_tick_count());

         /*
          * Update the number of dirty cached data for that
         * core object
         */
-        env_atomic_inc(&cache->core_runtime_meta[core_id].dirty_clines);
+        env_atomic_inc(&req->core->runtime_meta->dirty_clines);

         /*
          * increment dirty clines statistic for given cline
          */
-        env_atomic_inc(&cache->core_runtime_meta[core_id].
+        env_atomic_inc(&req->core->runtime_meta->
                 part_counters[part_id].dirty_clines);

         if (likely(evict_policy_ops[evp_type].dirty_cline))

---- changed file ----

@@ -1,13 +0,0 @@
-/*
- * Copyright(c) 2012-2018 Intel Corporation
- * SPDX-License-Identifier: BSD-3-Clause-Clear
- */
-
-#ifndef __UTILS_CORE_H__
-#define __UTILS_CORE_H__
-
-#define for_each_core(cache, iter) \
-    for (iter = 0; iter < OCF_CORE_MAX; iter++) \
-        if (cache->core_conf_meta[iter].added)
-
-#endif /* __UTILS_CORE_H__ */

---- changed file ----

@@ -95,7 +95,6 @@ void ocf_part_move(struct ocf_request *req)
     ocf_part_id_t id_old, id_new;
     uint32_t i;
     ocf_cleaning_t type = cache->conf_meta->cleaning_policy_type;
-    ocf_core_id_t core_id = ocf_core_get_id(req->core);

     ENV_BUG_ON(type >= ocf_cleaning_max);

@@ -158,15 +157,15 @@
             cleaning_policy_ops[type].
                 set_hot_cache_line(cache, line);

-            env_atomic_inc(&cache->core_runtime_meta[core_id].
+            env_atomic_inc(&req->core->runtime_meta->
                     part_counters[id_new].dirty_clines);
-            env_atomic_dec(&cache->core_runtime_meta[core_id].
+            env_atomic_dec(&req->core->runtime_meta->
                     part_counters[id_old].dirty_clines);
         }

-        env_atomic_inc(&cache->core_runtime_meta[core_id].
+        env_atomic_inc(&req->core->runtime_meta->
                 part_counters[id_new].cached_clines);
-        env_atomic_dec(&cache->core_runtime_meta[core_id].
+        env_atomic_dec(&req->core->runtime_meta->
                 part_counters[id_old].cached_clines);

         /* DONE */

---- changed file ----

@@ -76,8 +76,6 @@ int __wrap_ocf_metadata_flush_superblock(struct ocf_cache *cache)
 bool __wrap_env_bit_test(int nr, const volatile unsigned long *addr)
 {
-    function_called();
-    return mock();
 }

 void __wrap_env_atomic_set(env_atomic *a, int i)

@@ -267,6 +265,11 @@ void __wrap_ocf_mngt_cache_save_finish(
 {
 }

+void _cache_mng_update_initial_dirty_clines(ocf_cache_t cache)
+{
+    function_called();
+}
+
 static void _cache_mng_set_cache_mode_test01(void **state)
 {
     ocf_cache_mode_t mode_old = -20;

@@ -348,14 +351,7 @@ static void _cache_mng_set_cache_mode_test03(void **state)
     expect_function_call(__wrap_ocf_cache_mode_is_valid);
     will_return(__wrap_ocf_cache_mode_is_valid, 1);

-    for(i = 0; i != OCF_CORE_MAX; ++i) {
-        expect_function_call(__wrap_env_bit_test);
-        will_return(__wrap_env_bit_test, 1);
-        expect_function_call(__wrap_env_atomic_read);
-        will_return(__wrap_env_atomic_read, 1);
-        expect_function_call(__wrap_env_atomic_set);
-    }
+    expect_function_call(_cache_mng_update_initial_dirty_clines);

     expect_function_call(__wrap_ocf_log_raw);
     will_return(__wrap_ocf_log_raw, 0);