Remove runtime recovery in standby mode

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
Robert Baldyga 2022-01-12 06:11:14 +01:00
parent 76684ed8a9
commit 805ea14529
4 changed files with 3 additions and 266 deletions

View File

@@ -24,203 +24,6 @@
#define MAX_PASSIVE_IO_SIZE (32*MiB)
/* Return a cacheline from the default partition to the freelist, wiping
 * all of its per-line metadata on the way. Called for lines whose updated
 * on-disk metadata no longer describes a dirty mapping (see
 * handle_previously_valid()). */
static inline void _reset_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
	/* The cacheline used to be dirty but it is not anymore so it needs to be
	   moved to a clean lru list */
	ocf_lru_clean_cline(cache, &cache->user_parts[PARTITION_DEFAULT].part,
			cline);

	/* Reset valid/dirty status bits before detaching the line */
	metadata_init_status_bits(cache, cline);

	/* Detach from the current LRU list, then hand the line over to the
	 * freelist partition */
	ocf_lru_rm_cline(cache, cline);
	ocf_metadata_set_partition_id(cache, cline, PARTITION_FREELIST);

	/* OCF_CORE_MAX/ULLONG_MAX mark the core mapping as invalid */
	ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX);
}
/* Detach @cline from the freelist LRU list it hashes to, taking the
 * per-line LRU lock for the duration of the unlink. */
static inline void remove_from_freelist(ocf_cache_t cache,
		ocf_cache_line_t cline)
{
	/* Lines are spread across OCF_NUM_LRU_LISTS freelist lists by their
	 * index; 'true' selects the free-lines flavour of the list. */
	struct ocf_lru_list *free_list = ocf_lru_get_list(&cache->free,
			cline % OCF_NUM_LRU_LISTS, true);

	OCF_METADATA_LRU_WR_LOCK(cline);
	ocf_lru_remove_locked(cache, free_list, cline);
	OCF_METADATA_LRU_WR_UNLOCK(cline);
}
/* Detach @cline from the default user partition's LRU list and decrement
 * the partition's occupancy counter accordingly. */
static inline void remove_from_default(ocf_cache_t cache,
		ocf_cache_line_t cline)
{
	/* 'false' selects the user-partition flavour of the LRU list */
	struct ocf_lru_list *part_list = ocf_lru_get_list(
			&cache->user_parts[PARTITION_DEFAULT].part,
			cline % OCF_NUM_LRU_LISTS, false);

	OCF_METADATA_LRU_WR_LOCK(cline);
	ocf_lru_remove_locked(cache, part_list, cline);
	OCF_METADATA_LRU_WR_UNLOCK(cline);

	/* The default partition just lost one cacheline */
	env_atomic_dec(&cache->user_parts[PARTITION_DEFAULT]
			.part.runtime->curr_size);
}
/* Reconcile a cacheline that the local metadata had on the freelist with
 * its freshly written on-disk metadata. If the new metadata describes a
 * dirty, mapped line, move it to the default partition and rebuild its
 * runtime metadata; otherwise keep it on the freelist with defaults. */
static void handle_previously_invalid(ocf_cache_t cache,
		ocf_cache_line_t cline)
{
	ocf_core_id_t core_id;
	uint64_t core_line;
	uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);

	/* Pio lock provides exclusive access to the collision page thus either
	   mapping or status bits can't be changed by a concurrent thread */

	ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);

	if (metadata_test_dirty(cache, cline) && core_id < OCF_CORE_MAX) {
		/* A dirty line must have at least one valid sector */
		ENV_BUG_ON(!metadata_test_valid_any(cache, cline));

		/* Moving cline from the freelist to the default partition */
		remove_from_freelist(cache, cline);

		/* Take hash-bucket and LRU locks before rebuilding the
		 * collision/LRU state for the new mapping */
		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
				core_line);
		OCF_METADATA_LRU_WR_LOCK(cline);
		ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
		OCF_METADATA_LRU_WR_UNLOCK(cline);
		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
				core_line);

		/* Register the (newly dirty) line with the cleaning policy */
		ocf_cleaning_init_cache_block(cache, cline);
		ocf_cleaning_set_hot_cache_line(cache, cline);
	} else {
		/* Cline stays on the freelist */

		/* To prevent random values in the metadata fill it with the defaults */
		metadata_init_status_bits(cache, cline);
		ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX);
	}
}
/* Reconcile a cacheline that the local metadata had mapped (default
 * partition) with its freshly written on-disk metadata. If the new
 * metadata still describes a dirty, mapped line, rebuild its runtime
 * metadata in place; otherwise return it to the freelist. */
static void handle_previously_valid(ocf_cache_t cache,
		ocf_cache_line_t cline)
{
	ocf_core_id_t core_id;
	uint64_t core_line;
	uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);

	/* Pio lock provides exclusive access to the collision page thus either
	   mapping or status bits can't be changed by a concurrent thread */

	ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);

	if (metadata_test_dirty(cache, cline) && core_id < OCF_CORE_MAX) {
		/* Cline stays on the default partition */

		/* A dirty line must have at least one valid sector */
		ENV_BUG_ON(!metadata_test_valid_any(cache, cline));

		/* Unlink first; ocf_cline_rebuild_metadata() re-adds the line
		 * with the (possibly new) core mapping */
		remove_from_default(cache, cline);

		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
				core_line);
		OCF_METADATA_LRU_WR_LOCK(cline);
		ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
		OCF_METADATA_LRU_WR_UNLOCK(cline);
		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
				core_line);

		ocf_cleaning_set_hot_cache_line(cache, cline);
	} else {
		/* Moving cline from the default partition to the freelist */
		ocf_cleaning_purge_cache_block(cache, cline);
		_reset_cline(cache, cline);
	}
}
/* Walk @count cachelines starting at @start and reconcile each one's
 * list membership with its just-updated on-disk metadata. A line must be
 * on either the freelist or the default partition; anything else means
 * corrupted metadata and is fatal. */
static inline void update_list_segment(ocf_cache_t cache,
		ocf_cache_line_t start, ocf_cache_line_t count)
{
	ocf_cache_line_t last = start + count;
	ocf_cache_line_t cline;

	for (cline = start; cline < last; cline++) {
		ocf_part_id_t part_id;

		/* Normalize inconsistent status bits before dispatching */
		metadata_clear_dirty_if_invalid(cache, cline);
		metadata_clear_valid_if_clean(cache, cline);

		part_id = ocf_metadata_get_partition_id(cache, cline);
		if (part_id == PARTITION_FREELIST) {
			handle_previously_invalid(cache, cline);
		} else if (part_id == PARTITION_DEFAULT) {
			handle_previously_valid(cache, cline);
		} else {
			ocf_cache_log(cache, log_crit, "Passive update: invalid "
					"part id for cacheline %u: %hu\n", cline, part_id);
			ENV_BUG();
		}
	}
}
/* Drop one cacheline from @core's accounting: both the core-wide and the
 * default-partition occupancy and dirty counters go down by one. None of
 * them may ever become negative. */
static void _dec_core_stats(ocf_core_t core)
{
	ocf_part_id_t part = PARTITION_DEFAULT;

	ENV_BUG_ON(env_atomic_dec_return(
			&core->runtime_meta->cached_clines) < 0);
	ENV_BUG_ON(env_atomic_dec_return(
			&core->runtime_meta->part_counters[part].cached_clines) < 0);
	ENV_BUG_ON(env_atomic_dec_return(
			&core->runtime_meta->dirty_clines) < 0);
	ENV_BUG_ON(env_atomic_dec_return(
			&core->runtime_meta->part_counters[part].dirty_clines) < 0);
}
/* Before the collision section is overwritten with incoming metadata,
 * drop the stale mapping info for @count cachelines starting at @start:
 * fix up core stats and unlink each mapped line from the collision/hash
 * structures under the hash-bucket protection lock. */
static void cleanup_old_mapping(ocf_cache_t cache, ocf_cache_line_t start,
		ocf_cache_line_t count)
{
	uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
	ocf_cache_line_t last = start + count;
	ocf_cache_line_t cline;

	for (cline = start; cline < last; cline++) {
		ocf_core_id_t core_id;
		uint64_t core_line;
		ocf_core_t core;

		ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
		ENV_BUG_ON(core_id > OCF_CORE_ID_INVALID);

		/* Unmapped lines have no core object - nothing to clean up */
		core = ocf_cache_get_core(cache, core_id);
		if (core == NULL)
			continue;

		/* A mapped line must live on the default partition */
		ENV_BUG_ON(ocf_metadata_get_partition_id(cache, cline) !=
				PARTITION_DEFAULT);

		_dec_core_stats(core);

		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx,
				core_id, core_line);
		ocf_metadata_remove_from_collision(cache, cline,
				PARTITION_DEFAULT);
		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx,
				core_id, core_line);
	}
}
static int passive_io_resume(struct ocf_request *req)
{
ocf_cache_t cache = req->cache;
@@ -231,7 +34,6 @@ static int passive_io_resume(struct ocf_request *req)
uint64_t io_pages_count = BYTES_TO_PAGES(io->bytes);
uint64_t io_end_page = io_start_page + io_pages_count - 1;
ocf_end_io_t io_cmpl = req->master_io_req;
ocf_cache_line_t cache_etries = ocf_metadata_collision_table_entries(cache);
enum ocf_metadata_segment_id update_segments[] = {
metadata_segment_sb_config,
metadata_segment_collision,
@@ -239,7 +41,6 @@ static int passive_io_resume(struct ocf_request *req)
int i;
for (i = 0; i < ARRAY_SIZE(update_segments); i++) {
ocf_cache_line_t cache_line_count, cache_line_range_start;
enum ocf_metadata_segment_id seg = update_segments[i];
struct ocf_metadata_raw *raw = &(ctrl->raw_desc[seg]);
uint64_t raw_start_page = raw->ssd_pages_offset;
@@ -256,33 +57,9 @@ static int passive_io_resume(struct ocf_request *req)
overlap_page = overlap_start - raw_start_page;
overlap_count = overlap_end - overlap_start + 1;
if (seg == metadata_segment_collision) {
/* The range of cachelines with potentially updated collision
section */
cache_line_range_start = overlap_page * raw->entries_in_page;
cache_line_count = raw->entries_in_page * overlap_count;
/* The last page of collision section may contain fewer entries than
entries_in_page */
cache_line_count = OCF_MIN(cache_etries - cache_line_range_start,
cache_line_count);
/* The collision is not updated yet but the range of affected
cachelines is already known. Remove the old mapping related info
from the metadata */
cleanup_old_mapping(cache, cache_line_range_start,
cache_line_count);
}
ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
PAGES_TO_BYTES(overlap_start_data));
ocf_metadata_raw_update(cache, raw, data, overlap_page, overlap_count);
if (seg != metadata_segment_collision)
continue;
update_list_segment(cache, cache_line_range_start,
cache_line_count);
}
ocf_pio_async_unlock(req->cache->standby.concurrency, req);

View File

@@ -2319,8 +2319,6 @@ struct ocf_pipeline_properties _ocf_mngt_cache_standby_load_pipeline_properties
OCF_PL_STEP(_ocf_mngt_load_superblock),
OCF_PL_STEP(_ocf_mngt_load_metadata_recovery),
OCF_PL_STEP(_ocf_mngt_init_cleaner),
OCF_PL_STEP(_ocf_mngt_standby_init_structures_load),
OCF_PL_STEP(_ocf_mngt_load_init_cleaning),
OCF_PL_STEP(_ocf_mngt_standby_preapre_mempool),
OCF_PL_STEP(_ocf_mngt_standby_init_pio_concurrency),
OCF_PL_STEP(_ocf_mngt_load_rebuild_metadata),
@@ -2545,6 +2543,9 @@ struct ocf_pipeline_properties _ocf_mngt_cache_activate_pipeline_properties = {
OCF_PL_STEP(_ocf_mngt_test_volume),
OCF_PL_STEP(_ocf_mngt_init_promotion),
OCF_PL_STEP(_ocf_mngt_load_add_cores),
OCF_PL_STEP(_ocf_mngt_standby_init_structures_load),
OCF_PL_STEP(_ocf_mngt_load_rebuild_metadata),
OCF_PL_STEP(_ocf_mngt_load_init_cleaning),
OCF_PL_STEP(_ocf_mngt_attach_shutdown_status),
OCF_PL_STEP(_ocf_mngt_attach_post_init),
OCF_PL_STEP_TERMINATOR(),

View File

@@ -167,40 +167,3 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
ocf_cleaning_set_hot_cache_line(cache, line);
}
/* Rebuild the runtime metadata for a single mapped cacheline: partition
 * membership, collision/hash entry, LRU placement, and per-core occupancy
 * and dirty counters. Callers in this codebase invoke this while holding
 * the hash-bucket protection lock and OCF_METADATA_LRU_WR_LOCK for
 * @cache_line. */
void ocf_cline_rebuild_metadata(ocf_cache_t cache,
		ocf_core_id_t core_id, uint64_t core_line,
		ocf_cache_line_t cache_line)
{
	ocf_core_t core = ocf_cache_get_core(cache, core_id);
	ocf_part_id_t part_id;
	ocf_cache_line_t hash_index;
	struct ocf_part_runtime *part;

	/* Rebuilt lines always land in the default partition */
	part_id = PARTITION_DEFAULT;
	part = cache->user_parts[part_id].part.runtime;

	ocf_metadata_set_partition_id(cache, cache_line, part_id);
	env_atomic_inc(&part->curr_size);

	/* Re-insert the line into the hash/collision structures under the
	 * bucket computed from its (core_line, core_id) mapping */
	hash_index = ocf_metadata_hash_func(cache, core_line, core_id);
	ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
			cache_line);

	ocf_lru_init_cline(cache, cache_line);
	ocf_lru_add(cache, cache_line);

	/* Account for the line in both core-wide and per-partition stats */
	env_atomic_inc(&core->runtime_meta->cached_clines);
	env_atomic_inc(&core->runtime_meta->
			part_counters[part_id].cached_clines);

	if (metadata_test_dirty(cache, cache_line)) {
		env_atomic_inc(&core->runtime_meta->dirty_clines);
		env_atomic_inc(&core->runtime_meta->
				part_counters[part_id].dirty_clines);

		/* Record when the core first became dirty; cmpxchg against 0
		 * keeps an already-set timestamp from being overwritten */
		if (!env_atomic64_read(&core->runtime_meta->dirty_since))
			env_atomic64_cmpxchg(&core->runtime_meta->dirty_since, 0,
					env_ticks_to_secs(env_get_tick_count()));
	}
}

View File

@@ -123,10 +123,6 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);
void ocf_cline_rebuild_metadata(ocf_cache_t cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t cache_line);
/**
* @brief Remove cache line from cleaning policy
*