Splitting metadata implementation to match header files

Moving the metadata implementation out of the obsolete metadata_hash.c
into .c files corresponding to the header files that declare the
functions. This requires adding a shared header for the metadata
implementation, metadata_internal.h. Some metadata header files did
not have a corresponding .c file; in those cases one is added in
this commit.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Adam Rutkowski 2020-11-23 22:35:52 -06:00
parent 02405e989d
commit b074d77797
13 changed files with 2904 additions and 2845 deletions

File diff suppressed because it is too large

View File

@ -0,0 +1,23 @@
/*
 * Copyright(c) 2020 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_cleaning_policy.h"
#include "metadata_internal.h"
/*
* Cleaning policy - Get
*/
struct cleaning_policy_meta *
ocf_metadata_get_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_metadata_ctrl *ctrl
= (struct ocf_metadata_ctrl *) cache->metadata.priv;
return ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_cleaning]), line);
}

View File

@ -5,8 +5,225 @@
#include "ocf/ocf.h" #include "ocf/ocf.h"
#include "metadata.h" #include "metadata.h"
#include "metadata_internal.h"
#include "../utils/utils_cache_line.h" #include "../utils/utils_cache_line.h"
static ocf_cache_line_t ocf_metadata_map_lg2phy_seq(
struct ocf_cache *cache, ocf_cache_line_t coll_idx)
{
return coll_idx;
}
static ocf_cache_line_t ocf_metadata_map_phy2lg_seq(
struct ocf_cache *cache, ocf_cache_line_t cache_line)
{
return cache_line;
}
/**
 * This function maps a collision index to the corresponding cache line
 * (logical-to-physical cache line mapping).
 *
 * It is needed to generate a sequential workload towards the cache device.
 * Our collision list, for example, looks like this:
 * 0 3 6 9
 * 1 4 7 10
 * 2 5 8
 * All collision indexes in a single column lie on the same page of the
 * cache device. We do not want to send a request x times to the same page,
 * so we walk the collision indexes by row. In that case, however, the
 * collision index cannot be used directly as the cache line, because that
 * would produce a non-sequential workload (writing pages 0 -> 3 -> 6 ...).
 * This function performs the required remapping.
 *
 * After applying this function, the collision indexes in the array above
 * correspond to the cache lines below:
 * 0 1 2 3
 * 4 5 6 7
 * 8 9 10
 *
 * @param cache - cache instance
 * @param coll_idx - index in the collision list
 * @return mapped cache line
 */
static ocf_cache_line_t ocf_metadata_map_lg2phy_striping(
struct ocf_cache *cache, ocf_cache_line_t coll_idx)
{
ocf_cache_line_t cache_line = 0, offset = 0;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
unsigned int entries_in_page =
ctrl->raw_desc[metadata_segment_collision].entries_in_page;
unsigned int pages =
ctrl->raw_desc[metadata_segment_collision].ssd_pages;
ocf_cache_line_t collision_table_entries =
cache->device->collision_table_entries;
ocf_cache_line_t delta =
(entries_in_page * pages) - collision_table_entries;
unsigned int row = coll_idx % entries_in_page;
if (row > entries_in_page - delta)
offset = row - (entries_in_page - delta);
else
offset = 0;
cache_line = (row * pages) + (coll_idx / entries_in_page) - offset;
return cache_line;
}
/**
* @brief Map physical cache line on cache device to logical one
 * @note This function is the inverse of ocf_metadata_map_lg2phy_striping
*
* @param cache Cache instance
* @param phy Physical cache line of cache device
* @return Logical cache line
*/
static ocf_cache_line_t ocf_metadata_map_phy2lg_striping(
struct ocf_cache *cache, ocf_cache_line_t cache_line)
{
ocf_cache_line_t coll_idx = 0;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
struct ocf_metadata_raw *raw =
&ctrl->raw_desc[metadata_segment_collision];
unsigned int pages = raw->ssd_pages;
unsigned int entries_in_page = raw->entries_in_page;
unsigned int entries_in_last_page = raw->entries % entries_in_page ?:
entries_in_page;
unsigned int row = 0, coll = 0;
unsigned int last = entries_in_last_page * pages;
if (cache_line < last) {
row = cache_line % pages;
coll = cache_line / pages;
} else {
cache_line -= last;
row = cache_line % (pages - 1);
coll = cache_line / (pages - 1) + entries_in_last_page;
}
coll_idx = (row * entries_in_page) + coll;
return coll_idx;
}
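To make the striping arithmetic above concrete, here is a minimal, self-contained sketch (an editor's illustration, not part of this commit) that reproduces the forward mapping for a hypothetical geometry of 3 entries per page, 4 pages and 11 collision table entries - the same shape as the arrays in the comment above:

#include <assert.h>

/* Editor's sketch: mirrors ocf_metadata_map_lg2phy_striping() for a fixed,
 * made-up geometry; it does not touch any OCF structures. */
static unsigned demo_lg2phy_striping(unsigned coll_idx)
{
	const unsigned entries_in_page = 3;
	const unsigned pages = 4;
	const unsigned collision_table_entries = 11;
	/* unused slots on the last page: 3 * 4 - 11 = 1 */
	const unsigned delta = entries_in_page * pages - collision_table_entries;
	unsigned row = coll_idx % entries_in_page;
	unsigned offset = (row > entries_in_page - delta) ?
			row - (entries_in_page - delta) : 0;

	return row * pages + coll_idx / entries_in_page - offset;
}

int main(void)
{
	/* Collision index 7 sits in row 1, column 2 of the arrays in the
	 * comment above, so it maps to cache line 6. */
	assert(demo_lg2phy_striping(7) == 6);
	/* Walking the first row (0, 3, 6, 9) yields sequential lines 0..3. */
	assert(demo_lg2phy_striping(0) == 0 && demo_lg2phy_striping(3) == 1);
	assert(demo_lg2phy_striping(6) == 2 && demo_lg2phy_striping(9) == 3);
	return 0;
}

Feeding cache line 6 back through the striping phy2lg formula (row = 6 % 4 = 2, coll = 6 / 4 = 1, so coll_idx = 2 * 3 + 1 = 7) recovers the original collision index, confirming the two helpers are inverses for this geometry.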
ocf_cache_line_t ocf_metadata_map_lg2phy(
struct ocf_cache *cache, ocf_cache_line_t coll_idx)
{
switch (cache->metadata.layout) {
case ocf_metadata_layout_striping:
return ocf_metadata_map_lg2phy_striping(
cache, coll_idx);
case ocf_metadata_layout_seq:
return ocf_metadata_map_lg2phy_seq(
cache, coll_idx);
default:
ENV_BUG();
return 0;
}
}
ocf_cache_line_t ocf_metadata_map_phy2lg(
struct ocf_cache *cache, ocf_cache_line_t cache_line)
{
switch (cache->metadata.layout) {
case ocf_metadata_layout_striping:
return ocf_metadata_map_phy2lg_striping(
cache, cache_line);
case ocf_metadata_layout_seq:
return ocf_metadata_map_phy2lg_seq(
cache, cache_line);
default:
ENV_BUG();
return 0;
}
}
void ocf_metadata_set_collision_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next,
ocf_cache_line_t prev)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
info->next_col = next;
info->prev_col = prev;
} else {
ocf_metadata_error(cache);
}
}
void ocf_metadata_set_collision_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->next_col = next;
else
ocf_metadata_error(cache);
}
void ocf_metadata_set_collision_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->prev_col = prev;
else
ocf_metadata_error(cache);
}
void ocf_metadata_get_collision_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t *next,
ocf_cache_line_t *prev)
{
const struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
ENV_BUG_ON(NULL == next && NULL == prev);
info = ocf_metadata_raw_rd_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
if (next)
*next = info->next_col;
if (prev)
*prev = info->prev_col;
} else {
ocf_metadata_error(cache);
if (next)
*next = cache->device->collision_table_entries;
if (prev)
*prev = cache->device->collision_table_entries;
}
}
/*
 *
 */
@ -86,3 +303,29 @@ void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_metadata_set_core_info(cache, line,
OCF_CORE_MAX, ULLONG_MAX);
}
void ocf_metadata_start_collision_shared_access(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
struct ocf_metadata_raw *raw =
&ctrl->raw_desc[metadata_segment_collision];
uint32_t page = ocf_metadata_raw_page(raw, line);
ocf_collision_start_shared_access(&cache->metadata.lock, page);
}
void ocf_metadata_end_collision_shared_access(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
struct ocf_metadata_raw *raw =
&ctrl->raw_desc[metadata_segment_collision];
uint32_t page = ocf_metadata_raw_page(raw, line);
ocf_collision_end_shared_access(&cache->metadata.lock, page);
}

View File

@ -85,10 +85,10 @@ void ocf_metadata_add_to_collision(struct ocf_cache *cache,
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id);
void ocf_metadata_start_collision_shared_access(
struct ocf_cache *cache, ocf_cache_line_t line);
void ocf_metadata_end_collision_shared_access(
struct ocf_cache *cache, ocf_cache_line_t line);
#endif /* METADATA_COLLISION_H_ */

View File

@ -0,0 +1,88 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "metadata.h"
#include "metadata_core.h"
#include "metadata_internal.h"
#include "metadata_raw.h"
void ocf_metadata_get_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t *core_id,
uint64_t *core_sector)
{
const struct ocf_metadata_map *collision;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
collision = ocf_metadata_raw_rd_access(cache,
&(ctrl->raw_desc[metadata_segment_collision]), line);
if (collision) {
if (core_id)
*core_id = collision->core_id;
if (core_sector)
*core_sector = collision->core_line;
} else {
ocf_metadata_error(cache);
if (core_id)
*core_id = OCF_CORE_MAX;
if (core_sector)
*core_sector = ULLONG_MAX;
}
}
void ocf_metadata_set_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t core_id,
uint64_t core_sector)
{
struct ocf_metadata_map *collision;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
collision = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_collision]), line);
if (collision) {
collision->core_id = core_id;
collision->core_line = core_sector;
} else {
ocf_metadata_error(cache);
}
}
ocf_core_id_t ocf_metadata_get_core_id(struct ocf_cache *cache,
ocf_cache_line_t line)
{
const struct ocf_metadata_map *collision;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
collision = ocf_metadata_raw_rd_access(cache,
&(ctrl->raw_desc[metadata_segment_collision]), line);
if (collision)
return collision->core_id;
ocf_metadata_error(cache);
return OCF_CORE_MAX;
}
struct ocf_metadata_uuid *ocf_metadata_get_core_uuid(
struct ocf_cache *cache, ocf_core_id_t core_id)
{
struct ocf_metadata_uuid *muuid;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
muuid = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_core_uuid]), core_id);
if (!muuid)
ocf_metadata_error(cache);
return muuid;
}

View File

@ -6,6 +6,8 @@
#ifndef __METADATA_CORE_H__
#define __METADATA_CORE_H__
#include <ocf/ocf_types.h>
void ocf_metadata_get_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t *core_id,
uint64_t *core_sector);

View File

@ -0,0 +1,25 @@
/*
 * Copyright(c) 2020 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_eviction_policy.h"
#include "metadata_internal.h"
/*
* Eviction policy - Get
*/
union eviction_policy_meta *
ocf_metadata_get_eviction_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_metadata_ctrl *ctrl
= (struct ocf_metadata_ctrl *) cache->metadata.priv;
return ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_eviction]), line);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,38 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_INTERNAL_H__
#define __METADATA_INTERNAL_H__
#include <ocf/ocf_def.h>
#include "../ocf_cache_priv.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#define METADATA_MEM_POOL(ctrl, section) ctrl->raw_desc[section].mem_pool
/*
* Metadata control structure
*/
struct ocf_metadata_ctrl {
ocf_cache_line_t cachelines;
ocf_cache_line_t start_page;
ocf_cache_line_t count_pages;
uint32_t device_lines;
size_t mapping_size;
struct ocf_metadata_raw raw_desc[metadata_segment_max];
};
struct ocf_metadata_context {
ocf_metadata_end_t cmpl;
void *priv;
ocf_pipeline_t pipeline;
ocf_cache_t cache;
struct ocf_metadata_raw segment_copy[metadata_segment_fixed_size_max];
};
extern const char * const ocf_metadata_segment_names[];
#endif

View File

@ -5,8 +5,89 @@
#include "ocf/ocf.h" #include "ocf/ocf.h"
#include "metadata.h" #include "metadata.h"
#include "metadata_internal.h"
#include "../utils/utils_part.h" #include "../utils/utils_part.h"
void ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
{
const struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_rd_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
if (part_id)
*part_id = info->partition_id;
if (next_line)
*next_line = info->partition_next;
if (prev_line)
*prev_line = info->partition_prev;
} else {
ocf_metadata_error(cache);
if (part_id)
*part_id = PARTITION_DEFAULT;
if (next_line)
*next_line = cache->device->collision_table_entries;
if (prev_line)
*prev_line = cache->device->collision_table_entries;
}
}
void ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->partition_next = next_line;
else
ocf_metadata_error(cache);
}
void ocf_metadata_set_partition_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->partition_prev = prev_line;
else
ocf_metadata_error(cache);
}
void ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
info->partition_id = part_id;
info->partition_next = next_line;
info->partition_prev = prev_line;
} else {
ocf_metadata_error(cache);
}
}
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void update_partition_head(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)

View File

@ -0,0 +1,107 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata_internal.h"
#include "metadata_superblock.h"
static void ocf_metadata_generic_complete(void *priv, int error)
{
struct ocf_metadata_context *context = priv;
OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
}
static void ocf_metadata_check_crc_skip(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg, bool skip_on_dirty_shutdown)
{
struct ocf_metadata_context *context = priv;
int segment = ocf_pipeline_arg_get_int(arg);
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
ocf_cache_t cache = context->cache;
uint32_t crc;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
if (!sb_config->clean_shutdown && skip_on_dirty_shutdown)
OCF_PL_NEXT_RET(pipeline);
crc = ocf_metadata_raw_checksum(cache, &(ctrl->raw_desc[segment]));
if (crc != sb_config->checksum[segment]) {
/* Checksum does not match */
if (!sb_config->clean_shutdown) {
ocf_cache_log(cache, log_warn,
"Loading %s WARNING, invalid checksum",
ocf_metadata_segment_names[segment]);
} else {
ocf_cache_log(cache, log_err,
"Loading %s ERROR, invalid checksum",
ocf_metadata_segment_names[segment]);
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
}
}
ocf_pipeline_next(pipeline);
}
void ocf_metadata_check_crc(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
ocf_metadata_check_crc_skip(pipeline, priv, arg, false);
}
void ocf_metadata_check_crc_if_clean(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
ocf_metadata_check_crc_skip(pipeline, priv, arg, true);
}
void ocf_metadata_calculate_crc(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
int segment = ocf_pipeline_arg_get_int(arg);
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
ocf_cache_t cache = context->cache;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
sb_config->checksum[segment] = ocf_metadata_raw_checksum(cache,
&(ctrl->raw_desc[segment]));
ocf_pipeline_next(pipeline);
}
void ocf_metadata_flush_segment(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
int segment = ocf_pipeline_arg_get_int(arg);
struct ocf_metadata_ctrl *ctrl;
ocf_cache_t cache = context->cache;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
ocf_metadata_raw_flush_all(cache, &ctrl->raw_desc[segment],
ocf_metadata_generic_complete, context);
}
void ocf_metadata_load_segment(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
int segment = ocf_pipeline_arg_get_int(arg);
struct ocf_metadata_ctrl *ctrl;
ocf_cache_t cache = context->cache;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
ocf_metadata_raw_load_all(cache, &ctrl->raw_desc[segment],
ocf_metadata_generic_complete, context);
}

View File

@ -0,0 +1,27 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_SEGMENT_OPS_H__
#define __METADATA_SEGMENT_OPS_H__
#include "metadata_raw.h"
#include <ocf/ocf_def.h>
void ocf_metadata_check_crc_if_clean(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg);
void ocf_metadata_check_crc(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg);
void ocf_metadata_calculate_crc(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg);
void ocf_metadata_flush_segment(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg);
void ocf_metadata_load_segment(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg);
#endif

View File

@ -0,0 +1,424 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_core.h"
#include "metadata_internal.h"
#include "metadata_segment_ops.h"
#include "metadata_superblock.h"
#include "../ocf_priv.h"
#include "../utils/utils_io.h"
#define OCF_METADATA_SUPERBLOCK_DEBUG 0
#if 1 == OCF_METADATA_SUPERBLOCK_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Superblock] %s\n", \
__func__)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Superblock] %s - " \
format"\n", __func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/**
 * @brief Super Block - Set Shutdown Status
 *
 * @param cache - cache instance
 * @param shutdown_status - status to be assigned to the cache
 * @param cmpl - completion callback called once the superblock flush finishes
 * @param priv - completion context passed back to cmpl
 */
void ocf_metadata_set_shutdown_status(ocf_cache_t cache,
enum ocf_metadata_shutdown_status shutdown_status,
ocf_metadata_end_t cmpl, void *priv)
{
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *superblock;
OCF_DEBUG_TRACE(cache);
/*
* Get metadata hash service control structure
*/
ctrl = (struct ocf_metadata_ctrl *) cache->metadata.priv;
/*
* Get super block
*/
superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
/* Set shutdown status */
superblock->clean_shutdown = shutdown_status;
superblock->magic_number = CACHE_MAGIC_NUMBER;
/* Flush superblock */
ocf_metadata_flush_superblock(cache, cmpl, priv);
}
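The setter above completes asynchronously: it only schedules a superblock flush, and the result is delivered through the ocf_metadata_end_t completion. A minimal caller sketch follows; the completion function and the dirty-shutdown enumerator name are illustrative assumptions, not part of this diff:

/* Editor's sketch, not from this commit: the completion signature matches
 * how cmpl is invoked elsewhere in this file (cmpl(priv, error)); the
 * shutdown-status enumerator name is assumed. */
static void shutdown_status_cmpl(void *priv, int error)
{
	/* error == 0 means the superblock carrying the new status was flushed */
	(void)priv;
	(void)error;
}

static void mark_cache_dirty_example(ocf_cache_t cache)
{
	ocf_metadata_set_shutdown_status(cache, ocf_metadata_dirty_shutdown,
			shutdown_status_cmpl, NULL);
}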
static void ocf_metadata_store_segment(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
int segment = ocf_pipeline_arg_get_int(arg);
struct ocf_metadata_ctrl *ctrl;
ocf_cache_t cache = context->cache;
int error;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
context->segment_copy[segment].mem_pool =
env_malloc(ctrl->raw_desc[segment].mem_pool_limit, ENV_MEM_NORMAL);
if (!context->segment_copy[segment].mem_pool)
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_NO_MEM);
error = env_memcpy(context->segment_copy[segment].mem_pool,
ctrl->raw_desc[segment].mem_pool_limit, METADATA_MEM_POOL(ctrl, segment),
ctrl->raw_desc[segment].mem_pool_limit);
if (error) {
env_free(context->segment_copy[segment].mem_pool);
context->segment_copy[segment].mem_pool = NULL;
OCF_PL_FINISH_RET(pipeline, error);
}
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_check_crc_sb_config(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
ocf_cache_t cache = context->cache;
int segment = metadata_segment_sb_config;
uint32_t crc;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
crc = env_crc32(0, (void *)sb_config,
offsetof(struct ocf_superblock_config, checksum));
if (crc != sb_config->checksum[segment]) {
/* Checksum does not match */
ocf_cache_log(cache, log_err,
"Loading %s ERROR, invalid checksum",
ocf_metadata_segment_names[segment]);
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
}
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_load_superblock_post(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
ocf_cache_t cache = context->cache;
struct ocf_metadata_uuid *muuid;
struct ocf_volume_uuid uuid;
ocf_volume_type_t volume_type;
ocf_core_t core;
ocf_core_id_t core_id;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
for_each_core_metadata(cache, core, core_id) {
muuid = ocf_metadata_get_core_uuid(cache, core_id);
uuid.data = muuid->data;
uuid.size = muuid->size;
volume_type = ocf_ctx_get_volume_type(cache->owner,
core->conf_meta->type);
/* Initialize core volume */
ocf_volume_init(&core->volume, volume_type, &uuid, false);
core->has_volume = true;
}
/* Restore all dynamics items */
if (sb_config->core_count > OCF_CORE_MAX) {
ocf_cache_log(cache, log_err,
"Loading cache state ERROR, invalid cores count\n");
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
}
if (sb_config->valid_parts_no > OCF_IO_CLASS_MAX) {
ocf_cache_log(cache, log_err,
"Loading cache state ERROR, invalid partition count\n");
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
}
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_load_sb_restore(
struct ocf_metadata_context *context)
{
ocf_cache_t cache = context->cache;
struct ocf_metadata_ctrl *ctrl;
int segment, error;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
for (segment = metadata_segment_sb_config;
segment < metadata_segment_fixed_size_max; segment++) {
if (!context->segment_copy[segment].mem_pool)
continue;
error = env_memcpy(METADATA_MEM_POOL(ctrl, segment),
ctrl->raw_desc[segment].mem_pool_limit,
context->segment_copy[segment].mem_pool,
ctrl->raw_desc[segment].mem_pool_limit);
ENV_BUG_ON(error);
}
}
static void ocf_metadata_load_superblock_finish(ocf_pipeline_t pipeline,
void *priv, int error)
{
struct ocf_metadata_context *context = priv;
ocf_cache_t cache = context->cache;
int segment;
if (error) {
ocf_cache_log(cache, log_err, "Metadata read FAILURE\n");
ocf_metadata_error(cache);
ocf_metadata_load_sb_restore(context);
}
for (segment = metadata_segment_sb_config;
segment < metadata_segment_fixed_size_max; segment++) {
if (context->segment_copy[segment].mem_pool)
env_free(context->segment_copy[segment].mem_pool);
}
context->cmpl(context->priv, error);
ocf_pipeline_destroy(pipeline);
}
struct ocf_pipeline_arg ocf_metadata_load_sb_store_segment_args[] = {
OCF_PL_ARG_INT(metadata_segment_sb_config),
OCF_PL_ARG_INT(metadata_segment_sb_runtime),
OCF_PL_ARG_INT(metadata_segment_part_config),
OCF_PL_ARG_INT(metadata_segment_part_runtime),
OCF_PL_ARG_INT(metadata_segment_core_config),
OCF_PL_ARG_TERMINATOR(),
};
struct ocf_pipeline_arg ocf_metadata_load_sb_load_segment_args[] = {
OCF_PL_ARG_INT(metadata_segment_sb_config),
OCF_PL_ARG_INT(metadata_segment_sb_runtime),
OCF_PL_ARG_INT(metadata_segment_part_config),
OCF_PL_ARG_INT(metadata_segment_part_runtime),
OCF_PL_ARG_INT(metadata_segment_core_config),
OCF_PL_ARG_INT(metadata_segment_core_uuid),
OCF_PL_ARG_TERMINATOR(),
};
struct ocf_pipeline_arg ocf_metadata_load_sb_check_crc_args[] = {
OCF_PL_ARG_INT(metadata_segment_part_config),
OCF_PL_ARG_INT(metadata_segment_core_config),
OCF_PL_ARG_INT(metadata_segment_core_uuid),
OCF_PL_ARG_TERMINATOR(),
};
struct ocf_pipeline_arg ocf_metadata_load_sb_check_crc_args_clean[] = {
OCF_PL_ARG_INT(metadata_segment_sb_runtime),
OCF_PL_ARG_INT(metadata_segment_part_runtime),
OCF_PL_ARG_TERMINATOR(),
};
struct ocf_pipeline_properties ocf_metadata_load_sb_pipeline_props = {
.priv_size = sizeof(struct ocf_metadata_context),
.finish = ocf_metadata_load_superblock_finish,
.steps = {
OCF_PL_STEP_FOREACH(ocf_metadata_store_segment,
ocf_metadata_load_sb_store_segment_args),
OCF_PL_STEP_FOREACH(ocf_metadata_load_segment,
ocf_metadata_load_sb_load_segment_args),
OCF_PL_STEP(ocf_metadata_check_crc_sb_config),
OCF_PL_STEP_FOREACH(ocf_metadata_check_crc,
ocf_metadata_load_sb_check_crc_args),
OCF_PL_STEP_FOREACH(ocf_metadata_check_crc_if_clean,
ocf_metadata_load_sb_check_crc_args_clean),
OCF_PL_STEP(ocf_metadata_load_superblock_post),
OCF_PL_STEP_TERMINATOR(),
},
};
/*
 * Super Block - Load. This function must take care not to overwrite the
 * in-memory segment pointers, hence the fixed-size segments are copied
 * aside first and restored if the load fails.
 */
void ocf_metadata_load_superblock(ocf_cache_t cache, ocf_metadata_end_t cmpl,
void *priv)
{
struct ocf_metadata_context *context;
ocf_pipeline_t pipeline;
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
struct ocf_superblock_runtime *sb_runtime;
int result;
OCF_DEBUG_TRACE(cache);
ctrl = (struct ocf_metadata_ctrl *) cache->metadata.priv;
ENV_BUG_ON(!ctrl);
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
ENV_BUG_ON(!sb_config);
sb_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_sb_runtime);
ENV_BUG_ON(!sb_runtime);
result = ocf_pipeline_create(&pipeline, cache,
&ocf_metadata_load_sb_pipeline_props);
if (result)
OCF_CMPL_RET(priv, result);
context = ocf_pipeline_get_priv(pipeline);
context->cmpl = cmpl;
context->priv = priv;
context->pipeline = pipeline;
context->cache = cache;
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_flush_superblock_prepare(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_core_t core;
ocf_core_id_t core_id;
/* Synchronize core objects types */
for_each_core_metadata(cache, core, core_id) {
core->conf_meta->type = ocf_ctx_get_volume_type_id(
cache->owner, core->volume.type);
}
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_calculate_crc_sb_config(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
struct ocf_metadata_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
ocf_cache_t cache = context->cache;
ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
sb_config->checksum[metadata_segment_sb_config] = env_crc32(0,
(void *)sb_config,
offsetof(struct ocf_superblock_config, checksum));
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_flush_superblock_finish(ocf_pipeline_t pipeline,
void *priv, int error)
{
struct ocf_metadata_context *context = priv;
ocf_cache_t cache = context->cache;
if (error)
ocf_metadata_error(cache);
context->cmpl(context->priv, error);
ocf_pipeline_destroy(pipeline);
}
static void ocf_metadata_flush_disk_end(void *priv, int error)
{
struct ocf_metadata_context *context = priv;
ocf_pipeline_t pipeline = context->pipeline;
if (error) {
OCF_PL_FINISH_RET(pipeline, error);
return;
}
ocf_pipeline_next(pipeline);
}
static void ocf_metadata_flush_disk(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_metadata_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_submit_volume_flush(ocf_cache_get_volume(cache),
ocf_metadata_flush_disk_end, context);
}
struct ocf_pipeline_arg ocf_metadata_flush_sb_calculate_crc_args[] = {
OCF_PL_ARG_INT(metadata_segment_part_config),
OCF_PL_ARG_INT(metadata_segment_core_config),
OCF_PL_ARG_INT(metadata_segment_core_uuid),
OCF_PL_ARG_TERMINATOR(),
};
struct ocf_pipeline_arg ocf_metadata_flush_sb_flush_segment_args[] = {
OCF_PL_ARG_INT(metadata_segment_sb_config),
OCF_PL_ARG_INT(metadata_segment_part_config),
OCF_PL_ARG_INT(metadata_segment_core_config),
OCF_PL_ARG_INT(metadata_segment_core_uuid),
OCF_PL_ARG_TERMINATOR(),
};
struct ocf_pipeline_properties ocf_metadata_flush_sb_pipeline_props = {
.priv_size = sizeof(struct ocf_metadata_context),
.finish = ocf_metadata_flush_superblock_finish,
.steps = {
OCF_PL_STEP(ocf_metadata_flush_superblock_prepare),
OCF_PL_STEP(ocf_metadata_calculate_crc_sb_config),
OCF_PL_STEP_FOREACH(ocf_metadata_calculate_crc,
ocf_metadata_flush_sb_calculate_crc_args),
OCF_PL_STEP_FOREACH(ocf_metadata_flush_segment,
ocf_metadata_flush_sb_flush_segment_args),
OCF_PL_STEP(ocf_metadata_flush_disk),
OCF_PL_STEP_TERMINATOR(),
},
};
/*
* Super Block - FLUSH
*/
void ocf_metadata_flush_superblock(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv)
{
struct ocf_metadata_context *context;
ocf_pipeline_t pipeline;
int result;
OCF_DEBUG_TRACE(cache);
result = ocf_pipeline_create(&pipeline, cache,
&ocf_metadata_flush_sb_pipeline_props);
if (result)
OCF_CMPL_RET(priv, result);
context = ocf_pipeline_get_priv(pipeline);
context->cmpl = cmpl;
context->priv = priv;
context->pipeline = pipeline;
context->cache = cache;
ocf_pipeline_next(pipeline);
}