Lru populate unsafe

The unsafe mode is useful if the metadata of added cores is incomplete.

Such a scenario is possible when starting a cache in standby mode from
partially valid metadata.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
This commit is contained in:
Michal Mielewczyk 2021-11-16 10:31:27 +01:00
parent 4deaa1e133
commit 8f58add152
3 changed files with 21 additions and 9 deletions

View File

@ -210,12 +210,19 @@ static void __init_parts_attached(ocf_cache_t cache)
ocf_lru_init(cache, &cache->free);
}
/*
 * Put every collision table entry on the freelist, without subtracting the
 * recorded cache occupancy. Intended for caches whose metadata may be
 * incomplete (e.g. standby mode started from partially valid metadata),
 * hence the populate is invoked in non-safe mode (safe == false).
 *
 * NOTE(review): the stale pre-rename signature line
 * "static void __populate_free(ocf_cache_t cache)" was diff residue
 * (a header with no body) and has been removed.
 */
static void __populate_free_unsafe(ocf_cache_t cache)
{
	uint64_t free_clines = ocf_metadata_collision_table_entries(cache);

	ocf_lru_populate(cache, free_clines, false);
}
/*
 * Put exactly the unmapped cache lines on the freelist: the total number of
 * collision table entries minus the current cache occupancy. Requires
 * complete metadata, hence the populate is invoked in safe mode
 * (safe == true), which bugs out on any inconsistency.
 *
 * NOTE(review): the diff rendering left both the old call
 * "ocf_lru_populate(cache, free_clines);" and the new three-argument call
 * in place; only the post-change call is kept here.
 */
static void __populate_free_safe(ocf_cache_t cache)
{
	uint64_t free_clines = ocf_metadata_collision_table_entries(cache) -
			ocf_get_cache_occupancy(cache);

	ocf_lru_populate(cache, free_clines, true);
}
static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
@ -307,7 +314,7 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache)
ocf_metadata_init_hash_table(cache);
ocf_metadata_init_collision(cache);
__init_parts_attached(cache);
__populate_free(cache);
__populate_free_safe(cache);
result = __init_cleaning_policy(cache);
if (result) {
@ -536,7 +543,7 @@ static void _ocf_mngt_load_post_metadata_load(ocf_pipeline_t pipeline,
if (context->metadata.shutdown_status != ocf_metadata_clean_shutdown) {
_ocf_mngt_recovery_rebuild_metadata(cache);
__populate_free(cache);
__populate_free_safe(cache);
}
cleaning_policy = cache->conf_meta->cleaning_policy_type;

View File

@ -891,7 +891,8 @@ static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
}
/* put invalid cachelines on freelist partition lru list */
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines,
bool safe)
{
ocf_cache_line_t phys, cline;
ocf_cache_line_t collision_table_entries =
@ -905,7 +906,10 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
for (i = 0; i < num_free_clines; i++) {
/* find first invalid cacheline */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys == collision_table_entries);
if (safe)
ENV_BUG_ON(phys == collision_table_entries);
else if (phys == collision_table_entries)
break;
cline = ocf_metadata_map_phy2lg(cache, phys);
++phys;
@ -921,9 +925,9 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
/* we should have reached the last invalid cache line */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys != collision_table_entries);
ENV_BUG_ON(safe && phys != collision_table_entries);
env_atomic_set(&cache->free.runtime->curr_size, num_free_clines);
env_atomic_set(&cache->free.runtime->curr_size, i);
}
static bool _is_cache_line_acting(struct ocf_cache *cache,

View File

@ -31,7 +31,8 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_upart, struct ocf_part *dst_upart);
uint32_t ocf_lru_num_free(ocf_cache_t cache);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines,
bool safe);
struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
uint32_t lru_idx, bool clean);
void ocf_lru_remove_locked(ocf_cache_t cache, struct ocf_lru_list *list,