Remove "metadata_layout" parameter of the cache

This feature is replaced with LRU list shuffling.

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Author: Robert Baldyga
Date: 2022-03-07 17:20:03 +01:00
parent 9a956f59cd
commit d5b2c65a39
20 changed files with 33 additions and 277 deletions
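For background: the removed striping layout spread consecutive cache line allocations across metadata pages by translating logical line numbers to physical ones (see the removed ocf_metadata_map_lg2phy_striping below). LRU list shuffling achieves a comparable spread at eviction-list initialization time instead, which makes the per-line translation unnecessary. The following standalone C sketch illustrates only the general idea of populating a list in a page-spreading order; the strided visiting scheme and all constants are illustrative assumptions, not OCF's actual shuffling algorithm.

#include <stdio.h>

#define NUM_LINES      11u	/* hypothetical collision table size */
#define LINES_PER_PAGE  3u	/* hypothetical lines per metadata page */

int main(void)
{
	unsigned start, line;

	/* Visit line numbers with a page-sized stride
	 * (0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8) so that lines handed out
	 * consecutively from the resulting list sit on different
	 * metadata pages. */
	for (start = 0; start < LINES_PER_PAGE; start++)
		for (line = start; line < NUM_LINES; line += LINES_PER_PAGE)
			printf("append cache line %u to the LRU list\n", line);

	return 0;
}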

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -634,23 +634,11 @@ static void ocf_metadata_flush_unlock_collision_page(
 			page);
 }
 
-static void ocf_metadata_init_layout(struct ocf_cache *cache,
-		ocf_metadata_layout_t layout)
-{
-	ENV_BUG_ON(layout >= ocf_metadata_layout_max || layout < 0);
-
-	/* Initialize metadata location interface*/
-	if (cache->metadata.is_volatile)
-		layout = ocf_metadata_layout_seq;
-	cache->metadata.layout = layout;
-}
-
 /*
  * Initialize hash metadata interface
  */
 int ocf_metadata_init_variable_size(struct ocf_cache *cache,
-		uint64_t device_size, ocf_cache_line_size_t line_size,
-		ocf_metadata_layout_t layout)
+		uint64_t device_size, ocf_cache_line_size_t line_size)
 {
 	int result = 0;
 	uint32_t i = 0;
@@ -683,8 +671,6 @@ int ocf_metadata_init_variable_size(struct ocf_cache *cache,
 	ctrl->mapping_size = ocf_metadata_status_sizeof(line_size)
 			+ sizeof(struct ocf_metadata_map);
 
-	ocf_metadata_init_layout(cache, layout);
-
 	/* Initial setup of dynamic size RAW containers */
 	for (i = metadata_segment_variable_size_start;
 			i < metadata_segment_max; i++) {
@@ -1268,7 +1254,6 @@ static int ocf_metadata_load_atomic_metadata_drain(void *priv,
 	ctx_data_rd_check(cache->owner, &meta, data, sizeof(meta));
 
 	line = (sector_addr + i) / ocf_line_sectors(cache);
-	line = ocf_metadata_map_phy2lg(cache, line);
 	pos = (sector_addr + i) % ocf_line_sectors(cache);
 	core_seq_no = meta.core_seq_no;
 	core_line = meta.core_line;
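A quick worked example of the arithmetic above, with hypothetical geometry (4 KiB cache lines, i.e. 8 sectors of 512 B, standing in for ocf_line_sectors(cache)): metadata sector 25 resolves to cache line 3, sector offset 1. After this change the quotient is used as the line number directly, without the removed phy2lg translation.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t line_sectors = 8;	/* assumed ocf_line_sectors(cache): 4096 / 512 */
	uint64_t sector_addr = 25;	/* hypothetical sector in the data area */

	uint64_t line = sector_addr / line_sectors;	/* == 3 */
	uint64_t pos = sector_addr % line_sectors;	/* == 1 */

	printf("sector %llu -> line %llu, offset %llu\n",
			(unsigned long long)sector_addr,
			(unsigned long long)line,
			(unsigned long long)pos);
	return 0;
}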
@@ -1631,7 +1616,6 @@ static void ocf_metadata_load_properties_cmpl(
 		OCF_CMPL_RET(priv, result, NULL);
 
 	properties.line_size = superblock->line_size;
-	properties.layout = superblock->metadata_layout;
 	properties.cache_mode = superblock->cache_mode;
 	properties.shutdown_status = superblock->clean_shutdown;
 	properties.dirty_flushed = superblock->dirty_flushed;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -44,8 +44,7 @@ int ocf_metadata_init(struct ocf_cache *cache,
  * @return 0 - Operation success otherwise failure
  */
 int ocf_metadata_init_variable_size(struct ocf_cache *cache,
-		uint64_t device_size, ocf_cache_line_size_t cache_line_size,
-		ocf_metadata_layout_t layout);
+		uint64_t device_size, ocf_cache_line_size_t cache_line_size);
 
 /**
  * @brief Initialize collision table
@@ -199,7 +198,6 @@ void ocf_metadata_set_hash(struct ocf_cache *cache,
 struct ocf_metadata_load_properties {
 	enum ocf_metadata_shutdown_status shutdown_status;
 	uint8_t dirty_flushed;
-	ocf_metadata_layout_t layout;
 	ocf_cache_mode_t cache_mode;
 	ocf_cache_line_size_t line_size;
 	char *cache_name;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,143 +8,6 @@
 #include "metadata_internal.h"
 #include "../utils/utils_cache_line.h"
 
-static ocf_cache_line_t ocf_metadata_map_lg2phy_seq(
-		struct ocf_cache *cache, ocf_cache_line_t coll_idx)
-{
-	return coll_idx;
-}
-
-static ocf_cache_line_t ocf_metadata_map_phy2lg_seq(
-		struct ocf_cache *cache, ocf_cache_line_t cache_line)
-{
-	return cache_line;
-}
-
-/**
- * This function is mapping collision index to appropriate cache line
- * (logical cache line to physical one mapping).
- *
- * It is necessary because we want to generate sequential workload with
- * data to cache device.
- *	Our collision list, for example, looks:
- *			 0 3 6 9
- *			 1 4 7 10
- *			 2 5 8
- * All collision index in each column is on the same page
- * on cache device. We don't want send request x times to the same
- * page. To don't do it we use collision index by row, but in this
- * case we can't use collision index directly as cache line,
- * because we will generate non sequential workload (we will write
- * pages: 0 -> 3 -> 6 ...). To map collision index in correct way
- * we use this function.
- *
- * After use this function, collision index in the above array
- * corresponds with below cache line:
- *			 0 1  2  3
- *			 4 5  6  7
- *			 8 9 10
- *
- * @param cache - cache instance
- * @param idx - index in collision list
- * @return mapped cache line
- */
-static ocf_cache_line_t ocf_metadata_map_lg2phy_striping(
-		struct ocf_cache *cache, ocf_cache_line_t coll_idx)
-{
-	ocf_cache_line_t cache_line = 0, offset = 0;
-	struct ocf_metadata_ctrl *ctrl =
-		(struct ocf_metadata_ctrl *) cache->metadata.priv;
-	unsigned int entries_in_page =
-		ctrl->raw_desc[metadata_segment_collision].entries_in_page;
-	unsigned int pages =
-		ctrl->raw_desc[metadata_segment_collision].ssd_pages;
-	ocf_cache_line_t collision_table_entries =
-		cache->device->collision_table_entries;
-	ocf_cache_line_t delta =
-		(entries_in_page * pages) - collision_table_entries;
-	unsigned int row = coll_idx % entries_in_page;
-
-	if (row > entries_in_page - delta)
-		offset = row - (entries_in_page - delta);
-	else
-		offset = 0;
-
-	cache_line = (row * pages) + (coll_idx / entries_in_page) - offset;
-
-	return cache_line;
-}
-
-/**
- * @brief Map physical cache line on cache device to logical one
- * @note This function is the inverse of map_coll_idx_to_cache_line
- *
- * @param cache Cache instance
- * @param phy Physical cache line of cache device
- * @return Logical cache line
- */
-static ocf_cache_line_t ocf_metadata_map_phy2lg_striping(
-		struct ocf_cache *cache, ocf_cache_line_t cache_line)
-{
-	ocf_cache_line_t coll_idx = 0;
-	struct ocf_metadata_ctrl *ctrl =
-		(struct ocf_metadata_ctrl *) cache->metadata.priv;
-	struct ocf_metadata_raw *raw =
-		&ctrl->raw_desc[metadata_segment_collision];
-	unsigned int pages = raw->ssd_pages;
-	unsigned int entries_in_page = raw->entries_in_page;
-	unsigned int entries_in_last_page = raw->entries % entries_in_page ?:
-			entries_in_page;
-	unsigned int row = 0, coll = 0;
-	unsigned int last = entries_in_last_page * pages;
-
-	if (cache_line < last) {
-		row = cache_line % pages;
-		coll = cache_line / pages;
-	} else {
-		cache_line -= last;
-		row = cache_line % (pages - 1);
-		coll = cache_line / (pages - 1) + entries_in_last_page;
-	}
-
-	coll_idx = (row * entries_in_page) + coll;
-
-	return coll_idx;
-}
-
-ocf_cache_line_t ocf_metadata_map_lg2phy(
-		struct ocf_cache *cache, ocf_cache_line_t coll_idx)
-{
-	switch (cache->metadata.layout) {
-	case ocf_metadata_layout_striping:
-		return ocf_metadata_map_lg2phy_striping(
-				cache, coll_idx);
-	case ocf_metadata_layout_seq:
-		return ocf_metadata_map_lg2phy_seq(
-				cache, coll_idx);
-	default:
-		ENV_BUG();
-		return 0;
-	}
-}
-
-ocf_cache_line_t ocf_metadata_map_phy2lg(
-		struct ocf_cache *cache, ocf_cache_line_t cache_line)
-{
-	switch (cache->metadata.layout) {
-	case ocf_metadata_layout_striping:
-		return ocf_metadata_map_phy2lg_striping(
-				cache, cache_line);
-	case ocf_metadata_layout_seq:
-		return ocf_metadata_map_phy2lg_seq(
-				cache, cache_line);
-	default:
-		ENV_BUG();
-		return 0;
-	}
-}
-
 void ocf_metadata_set_collision_info(struct ocf_cache *cache,
 		ocf_cache_line_t line, ocf_cache_line_t next,
 		ocf_cache_line_t prev)
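The removed striping translation is easiest to follow with the geometry from the comment above: 4 collision pages, 3 entries per page, 11 collision table entries. Below is a standalone sketch with those values hard-coded (the constants are assumptions taken from the example; the arithmetic mirrors the removed functions). It prints the 0 -> 0, 1 -> 4, 2 -> 8, 3 -> 1, ... mapping from the comment and asserts that phy2lg inverts lg2phy.

#include <assert.h>
#include <stdio.h>

#define PAGES            4u	/* assumed: collision table pages */
#define ENTRIES_PER_PAGE 3u	/* assumed: collision entries per page */
#define ENTRIES         11u	/* assumed: collision table entries */

static unsigned lg2phy_striping(unsigned coll_idx)
{
	unsigned delta = PAGES * ENTRIES_PER_PAGE - ENTRIES;
	unsigned row = coll_idx % ENTRIES_PER_PAGE;
	unsigned offset = row > ENTRIES_PER_PAGE - delta ?
			row - (ENTRIES_PER_PAGE - delta) : 0;

	return row * PAGES + coll_idx / ENTRIES_PER_PAGE - offset;
}

static unsigned phy2lg_striping(unsigned cache_line)
{
	unsigned entries_in_last_page = ENTRIES % ENTRIES_PER_PAGE ?
			ENTRIES % ENTRIES_PER_PAGE : ENTRIES_PER_PAGE;
	unsigned last = entries_in_last_page * PAGES;
	unsigned row, coll;

	if (cache_line < last) {
		row = cache_line % PAGES;
		coll = cache_line / PAGES;
	} else {
		cache_line -= last;
		row = cache_line % (PAGES - 1);
		coll = cache_line / (PAGES - 1) + entries_in_last_page;
	}

	return row * ENTRIES_PER_PAGE + coll;
}

int main(void)
{
	unsigned coll_idx;

	for (coll_idx = 0; coll_idx < ENTRIES; coll_idx++) {
		unsigned phy = lg2phy_striping(coll_idx);

		printf("coll_idx %2u -> cache_line %2u\n", coll_idx, phy);
		assert(phy2lg_striping(phy) == coll_idx);	/* round trip */
	}

	return 0;
}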

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -34,12 +34,6 @@ struct ocf_metadata_map {
 	/*!< Entry status structure e.g. valid, dirty...*/
 } __attribute__((packed));
 
-ocf_cache_line_t ocf_metadata_map_lg2phy(
-		struct ocf_cache *cache, ocf_cache_line_t coll_idx);
-
-ocf_cache_line_t ocf_metadata_map_phy2lg(
-		struct ocf_cache *cache, ocf_cache_line_t cache_line);
-
 void ocf_metadata_set_collision_info(
 		struct ocf_cache *cache, ocf_cache_line_t line,
 		ocf_cache_line_t next, ocf_cache_line_t prev);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -109,7 +109,7 @@ void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 static inline void _raw_atomic_add_page(struct ocf_cache *cache,
 		uint32_t *clines_tab, uint64_t line, int *idx)
 {
-	clines_tab[*idx] = ocf_metadata_map_lg2phy(cache, line);
+	clines_tab[*idx] = line;
 	(*idx)++;
 }
@@ -122,7 +122,7 @@ static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
 	uint64_t start_addr;
 	int result = 0;
 
-	start_addr = ocf_metadata_map_lg2phy(cache, map->coll_idx);
+	start_addr = map->coll_idx;
 	start_addr *= ocf_line_size(cache);
 	start_addr += cache->device->metadata_offset;
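With the translation gone, the collision index is itself the physical line number, so the flush target address above becomes a direct linear computation. A worked example with hypothetical values standing in for ocf_line_size(cache) and cache->device->metadata_offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t line_size = 4096;		/* assumed ocf_line_size(cache) */
	uint64_t metadata_offset = 1u << 20;	/* assumed start of data region: 1 MiB */
	uint64_t coll_idx = 10;			/* hypothetical collision index */

	/* start_addr = coll_idx * line_size + metadata_offset == 1089536 */
	uint64_t start_addr = coll_idx * line_size + metadata_offset;

	printf("line %llu flushes at byte %llu\n",
			(unsigned long long)coll_idx,
			(unsigned long long)start_addr);
	return 0;
}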

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -60,9 +60,6 @@ struct ocf_metadata_lock
 /**
  * @brief Metadata control structure
  */
 struct ocf_metadata {
-	ocf_metadata_layout_t layout;
-	/*!< Per-cacheline metadata layout */
-
 	void *priv;
 	/*!< Private data of metadata service interface */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2020-2021 Intel Corporation
+ * Copyright(c) 2020-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -167,11 +167,6 @@ int ocf_metadata_validate_superblock(ocf_ctx_t ctx,
 		return -OCF_ERR_INVAL;
 	}
 
-	if ((unsigned)superblock->metadata_layout >= ocf_metadata_layout_max) {
-		ocf_log_invalid_superblock("metadata layout");
-		return -OCF_ERR_INVAL;
-	}
-
 	if (superblock->core_count > OCF_CORE_MAX) {
 		ocf_log_invalid_superblock("core count");
 		return -OCF_ERR_INVAL;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2012-2022 Intel Corporation
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -40,7 +40,6 @@ struct ocf_superblock_config {
 	uint32_t valid_parts_no;
 	ocf_cache_line_size_t line_size;
 
-	ocf_metadata_layout_t metadata_layout;
 	uint32_t core_count;
 
 	unsigned long valid_core_bitmap[(OCF_CORE_MAX /