Initial commit

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga
2018-11-29 15:14:21 +01:00
commit a8e1ce8cc5
178 changed files with 35378 additions and 0 deletions

388
src/metadata/metadata.c Normal file

@@ -0,0 +1,388 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#define OCF_METADATA_DEBUG 0
#if 1 == OCF_METADATA_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__)
#else
#define OCF_DEBUG_TRACE(cache)
#endif
int ocf_metadata_init(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size)
{
struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *)
&cache->metadata.iface;
int ret;
OCF_DEBUG_TRACE(cache);
ENV_BUG_ON(cache->metadata.iface_priv);
ret = ocf_metadata_io_init(cache);
if (ret)
return ret;
*iface = *metadata_hash_get_iface();
ret = cache->metadata.iface.init(cache, cache_line_size);
if (ret)
ocf_metadata_io_deinit(cache);
return ret;
}
int ocf_metadata_init_variable_size(struct ocf_cache *cache, uint64_t device_size,
ocf_cache_line_size_t cache_line_size,
ocf_metadata_layout_t layout)
{
OCF_DEBUG_TRACE(cache);
return cache->metadata.iface.init_variable_size(cache, device_size,
cache_line_size, layout);
}
void ocf_metadata_init_freelist_partition(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
cache->metadata.iface.layout_iface->init_freelist(cache);
}
void ocf_metadata_init_hash_table(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
cache->metadata.iface.init_hash_table(cache);
}
void ocf_metadata_deinit(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
if (cache->metadata.iface.deinit) {
cache->metadata.iface.deinit(cache);
}
ocf_metadata_io_deinit(cache);
}
void ocf_metadata_deinit_variable_size(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
if (cache->metadata.iface.deinit_variable_size)
cache->metadata.iface.deinit_variable_size(cache);
}
size_t ocf_metadata_size_of(struct ocf_cache *cache)
{
return cache->metadata.iface.size_of(cache);
}
void ocf_metadata_error(struct ocf_cache *cache)
{
if (cache->device->metadata_error == 0)
ocf_cache_log(cache, log_err, "Metadata Error\n");
env_bit_clear(ocf_cache_state_running, &cache->cache_state);
cache->device->metadata_error = -1;
}
ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache)
{
return cache->metadata.iface.pages(cache);
}
ocf_cache_line_t
ocf_metadata_get_cachelines_count(struct ocf_cache *cache)
{
return cache->metadata.iface.cachelines(cache);
}
int ocf_metadata_flush_all(struct ocf_cache *cache)
{
int result;
OCF_METADATA_LOCK_WR();
result = cache->metadata.iface.flush_all(cache);
OCF_METADATA_UNLOCK_WR();
return result;
}
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line)
{
cache->metadata.iface.flush(cache, line);
}
int ocf_metadata_load_all(struct ocf_cache *cache)
{
int result;
OCF_METADATA_LOCK_WR();
result = cache->metadata.iface.load_all(cache);
OCF_METADATA_UNLOCK_WR();
return result;
}
int ocf_metadata_load_recovery(struct ocf_cache *cache)
{
return cache->metadata.iface.load_recovery(cache);
}
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
cache->metadata.iface.flush_mark(cache, rq, map_idx, to_state,
start, stop);
}
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_end_t complete)
{
cache->metadata.iface.flush_do_asynch(cache, rq, complete);
}
static inline int ocf_metadata_check_properties(void)
{
uint32_t field_offset;
/* Because the basic metadata properties are at the beginning of the
* super block, only the first page of the super block is read/written.
*
* For safety, check that the offsets of the metadata properties fall
* within the first page of the super block.
*
* The super block field order may change in the future, and the metadata
* variant may then move outside the first page of the super block
*/
field_offset = offsetof(struct ocf_superblock_config, line_size);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
/* The same checking for magic number */
field_offset = offsetof(struct ocf_superblock_config, magic_number);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
/* The same checking for IO interface type */
field_offset = offsetof(struct ocf_superblock_config, cache_mode);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
/* And the same for version location within superblock structure */
field_offset = offsetof(struct ocf_superblock_config, metadata_version);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
return 0;
}
static int ocf_metadata_read_properties(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
struct ocf_superblock_config *superblock)
{
ctx_data_t *data;
struct ocf_io *io;
int result = 0;
if (ocf_metadata_check_properties())
return -EINVAL;
/* Allocate resources for IO */
io = ocf_dobj_new_io(cache_obj);
data = ctx_data_alloc(ctx, 1);
/* Check allocation result */
if (!io || !data) {
ocf_log(ctx, log_err, "Memory allocation error");
result = -ENOMEM;
goto out;
}
/*
* Read first page of cache device in order to recover metadata
* properties
*/
result = ocf_io_set_data(io, data, 0);
if (result) {
ocf_log(ctx, log_err, "Metadata IO configuration error\n");
result = -EIO;
goto out;
}
ocf_io_configure(io, 0, PAGE_SIZE, OCF_READ, 0, 0);
result = ocf_submit_io_wait(io);
if (result) {
ocf_log(ctx, log_err, "Metadata IO request submit error\n");
result = -EIO;
goto out;
}
/* Copy the data just read into the super block buffer */
ctx_data_rd_check(ctx, superblock, data,
PAGE_SIZE);
out:
if (io)
ocf_io_put(io);
ctx_data_free(ctx, data);
return result;
}
/**
* @brief Load individual properties from the on-disk metadata superblock
* @param cache_obj - object from which to load metadata
* @param line_size - cache line size; not read if NULL
* @param layout - metadata layout; not read if NULL
* @param cache_mode - cache mode; not read if NULL
* @param shutdown_status - dirty or clean shutdown; not read if NULL
* @param dirty_flushed - whether all dirty data was flushed prior to
* closing the cache; not read if NULL
* @return 0 upon successful completion
*/
int ocf_metadata_load_properties(ocf_data_obj_t cache_obj,
ocf_cache_line_size_t *line_size,
ocf_metadata_layout_t *layout,
ocf_cache_mode_t *cache_mode,
enum ocf_metadata_shutdown_status *shutdown_status,
uint8_t *dirty_flushed)
{
struct ocf_superblock_config *superblock;
int err_value = 0;
/* Allocate first page of super block */
superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!superblock) {
ocf_cache_log(cache_obj->cache, log_err,
"Allocation memory error");
return -ENOMEM;
}
OCF_DEBUG_TRACE(cache_obj->cache);
err_value = ocf_metadata_read_properties(cache_obj->cache->owner,
cache_obj, superblock);
if (err_value)
goto ocf_metadata_load_variant_ERROR;
if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
err_value = -ENODATA;
ocf_cache_log(cache_obj->cache, log_info,
"Can not detect pre-existing metadata\n");
goto ocf_metadata_load_variant_ERROR;
}
if (METADATA_VERSION() != superblock->metadata_version) {
err_value = -EBADF;
ocf_cache_log(cache_obj->cache, log_err,
"Metadata version mismatch!\n");
goto ocf_metadata_load_variant_ERROR;
}
if (line_size) {
if (ocf_cache_line_size_is_valid(superblock->line_size)) {
*line_size = superblock->line_size;
} else {
err_value = -EINVAL;
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid cache line size!\n");
}
}
if (layout) {
if (superblock->metadata_layout >= ocf_metadata_layout_max ||
superblock->metadata_layout < 0) {
err_value = -EINVAL;
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid metadata layout!\n");
} else {
*layout = superblock->metadata_layout;
}
}
if (cache_mode) {
if (superblock->cache_mode < ocf_cache_mode_max) {
*cache_mode = superblock->cache_mode;
} else {
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid cache mode!\n");
err_value = -EINVAL;
}
}
if (shutdown_status != NULL) {
if (superblock->clean_shutdown <= ocf_metadata_clean_shutdown) {
*shutdown_status = superblock->clean_shutdown;
} else {
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid shutdown status!\n");
err_value = -EINVAL;
}
}
if (dirty_flushed != NULL) {
if (superblock->dirty_flushed <= DIRTY_FLUSHED) {
*dirty_flushed = superblock->dirty_flushed;
} else {
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid flush status!\n");
err_value = -EINVAL;
}
}
ocf_metadata_load_variant_ERROR:
env_free(superblock);
return err_value;
}
int ocf_metadata_probe(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
bool *clean_shutdown, bool *cache_dirty)
{
struct ocf_superblock_config *superblock;
int result = 0;
OCF_CHECK_NULL(ctx);
OCF_CHECK_NULL(cache_obj);
/* Allocate first page of super block */
superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!superblock) {
ocf_log(ctx, log_err, "Allocation memory error");
return -ENOMEM;
}
OCF_DEBUG_TRACE(cache_obj->cache);
result = ocf_metadata_read_properties(ctx, cache_obj, superblock);
if (result)
goto ocf_metadata_probe_END;
if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
result = -ENODATA;
goto ocf_metadata_probe_END;
}
if (clean_shutdown != NULL) {
*clean_shutdown = (superblock->clean_shutdown !=
ocf_metadata_dirty_shutdown);
}
if (cache_dirty != NULL)
*cache_dirty = (superblock->dirty_flushed == DIRTY_NOT_FLUSHED);
if (METADATA_VERSION() != superblock->metadata_version)
result = -EBADF;
ocf_metadata_probe_END:
env_free(superblock);
return result;
}
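/*
 * Example (hypothetical caller, not part of the original file): probe a
 * device for pre-existing metadata before formatting it as a cache.
 * Assumes `ctx` and `obj` come from the usual context/data-object setup;
 * error handling abbreviated.
 */
static int example_probe_before_format(ocf_ctx_t ctx, ocf_data_obj_t obj)
{
	bool clean_shutdown, cache_dirty;
	int result;

	result = ocf_metadata_probe(ctx, obj, &clean_shutdown, &cache_dirty);
	if (result == -ENODATA)
		return 0; /* no metadata - safe to format */
	if (result)
		return result; /* IO error or metadata version mismatch */

	/* valid metadata found - refuse to overwrite a dirty cache */
	return (clean_shutdown && !cache_dirty) ? 0 : -EBUSY;
}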

336
src/metadata/metadata.h Normal file

@@ -0,0 +1,336 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_H__
#define __METADATA_H__
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache)
{
env_spinlock_lock(&cache->metadata.lock.eviction);
}
static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache)
{
env_spinlock_unlock(&cache->metadata.lock.eviction);
}
#define OCF_METADATA_EVICTION_LOCK() \
ocf_metadata_eviction_lock(cache)
#define OCF_METADATA_EVICTION_UNLOCK() \
ocf_metadata_eviction_unlock(cache)
static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwsem_down_write(&cache->metadata.lock.collision);
else if (rw == OCF_METADATA_RD)
env_rwsem_down_read(&cache->metadata.lock.collision);
else
ENV_BUG();
}
static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwsem_up_write(&cache->metadata.lock.collision);
else if (rw == OCF_METADATA_RD)
env_rwsem_up_read(&cache->metadata.lock.collision);
else
ENV_BUG();
}
static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw)
{
int result = -1;
if (rw == OCF_METADATA_WR) {
result = env_rwsem_down_write_trylock(
&cache->metadata.lock.collision);
} else if (rw == OCF_METADATA_RD) {
result = env_rwsem_down_read_trylock(
&cache->metadata.lock.collision);
} else {
ENV_BUG();
}
if (!result)
return -1;
return 0;
}
static inline void ocf_metadata_status_bits_lock(
struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwlock_write_lock(&cache->metadata.lock.status);
else if (rw == OCF_METADATA_RD)
env_rwlock_read_lock(&cache->metadata.lock.status);
else
ENV_BUG();
}
static inline void ocf_metadata_status_bits_unlock(
struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwlock_write_unlock(&cache->metadata.lock.status);
else if (rw == OCF_METADATA_RD)
env_rwlock_read_unlock(&cache->metadata.lock.status);
else
ENV_BUG();
}
#define OCF_METADATA_LOCK_RD() \
ocf_metadata_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_UNLOCK_RD() \
ocf_metadata_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_RD_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_WR() \
ocf_metadata_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_LOCK_WR_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_UNLOCK_WR() \
ocf_metadata_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_BITS_LOCK_RD() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_BITS_UNLOCK_RD() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_BITS_LOCK_WR() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_BITS_UNLOCK_WR() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_FLUSH_LOCK() \
ocf_metadata_flush_lock(cache)
#define OCF_METADATA_FLUSH_UNLOCK() \
ocf_metadata_flush_unlock(cache)
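/*
 * Note: the OCF_METADATA_*() macros above expand against a local
 * variable literally named `cache`, so a typical read-side section is:
 *
 *	OCF_METADATA_LOCK_RD();
 *	... read collision/hash metadata ...
 *	OCF_METADATA_UNLOCK_RD();
 *
 * ocf_metadata_flush_all() and ocf_metadata_load_all() in metadata.c
 * use the write-side variant the same way.
 */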
#include "metadata_cleaning_policy.h"
#include "metadata_eviction_policy.h"
#include "metadata_partition.h"
#include "metadata_hash.h"
#include "metadata_superblock.h"
#include "metadata_status.h"
#include "metadata_collision.h"
#include "metadata_core.h"
#include "metadata_misc.h"
#define INVALID 0
#define VALID 1
#define CLEAN 2
#define DIRTY 3
/**
* @brief Initialize metadata
*
* @param cache - Cache instance
* @param cache_line_size Cache line size
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_init(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size);
/**
* @brief Initialize per-cacheline metadata
*
* @param cache - Cache instance
* @param device_size - Device size in bytes
* @param cache_line_size - Cache line size
* @param layout - Metadata layout
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_init_variable_size(struct ocf_cache *cache,
uint64_t device_size, ocf_cache_line_size_t cache_line_size,
ocf_metadata_layout_t layout);
/**
* @brief Initialize the freelist partition
*
* @param cache - Cache instance
*/
void ocf_metadata_init_freelist_partition(struct ocf_cache *cache);
/**
* @brief Initialize hash table
*
* @param cache - Cache instance
*/
void ocf_metadata_init_hash_table(struct ocf_cache *cache);
/**
* @brief De-Initialize metadata
*
* @param cache - Cache instance
*/
void ocf_metadata_deinit(struct ocf_cache *cache);
/**
* @brief De-Initialize per-cacheline metadata
*
* @param cache - Cache instance
*/
void ocf_metadata_deinit_variable_size(struct ocf_cache *cache);
/**
* @brief Get memory footprint
*
* @param cache - Cache instance
* @return Metadata memory footprint
*/
size_t ocf_metadata_size_of(struct ocf_cache *cache);
/**
* @brief Handle metadata error
*
* @param cache - Cache instance
*/
void ocf_metadata_error(struct ocf_cache *cache);
/**
* @brief Get amount of cache lines
*
* @param cache - Cache instance
* @return Amount of cache lines (cache device lines - metadata space)
*/
ocf_cache_line_t
ocf_metadata_get_cachelines_count(struct ocf_cache *cache);
/**
* @brief Get amount of pages required for metadata
*
* @param cache - Cache instance
* @return Number of pages required to store metadata on the cache device
*/
ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache);
/**
* @brief Flush metadata
*
* @param cache
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_flush_all(struct ocf_cache *cache);
/**
* @brief Flush metadata for specified cache line
*
* @param[in] cache - Cache instance
* @param[in] line - Cache line to be flushed
*/
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
/**
* @brief Mark specified cache lines to be flushed
*
* @param[in] cache - Cache instance
* @param[in] rq - Request whose cache lines are to be marked
* @param[in] map_idx - Index in the request map
* @param[in] to_state - Target clean/dirty state
* @param[in] start - First sector to be marked
* @param[in] stop - Last sector to be marked
*/
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/**
* @brief Flush marked cache lines asynchronously
*
* @param cache - Cache instance
* @param rq - Request whose marked cache lines are to be flushed
* @param complete - Flush completion callback
*/
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_end_t complete);
/**
* @brief Load metadata
*
* @param cache - Cache instance
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_load_all(struct ocf_cache *cache);
/**
* @brief Load metadata required for recovery procedure
*
* @param cache Cache instance
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_load_recovery(struct ocf_cache *cache);
/*
* NOTE: The hash table is specific to the hash-based metadata service
* implementation and should be used internally by the metadata service.
* There is no high-level metadata interface at the moment, so these
* accessors are temporarily defined in this file.
*/
static inline ocf_cache_line_t
ocf_metadata_get_hash(struct ocf_cache *cache, ocf_cache_line_t index)
{
return cache->metadata.iface.get_hash(cache, index);
}
static inline void ocf_metadata_set_hash(struct ocf_cache *cache,
ocf_cache_line_t index, ocf_cache_line_t line)
{
cache->metadata.iface.set_hash(cache, index, line);
}
static inline void ocf_metadata_flush_hash(struct ocf_cache *cache,
ocf_cache_line_t index)
{
cache->metadata.iface.flush_hash(cache, index);
}
static inline ocf_cache_line_t ocf_metadata_entries_hash(
struct ocf_cache *cache)
{
return cache->metadata.iface.entries_hash(cache);
}
int ocf_metadata_load_properties(ocf_data_obj_t cache_obj,
ocf_cache_line_size_t *line_size,
ocf_metadata_layout_t *layout,
ocf_cache_mode_t *cache_mode,
enum ocf_metadata_shutdown_status *shutdown_status,
uint8_t *dirty_flushed);
/**
* @brief Validate cache line size
*
* @param size Cache line size
* @return true - cache line size is valid, false - cache line size is invalid
*/
static inline bool ocf_metadata_line_size_is_valid(uint32_t size)
{
switch (size) {
case 4 * KiB:
case 8 * KiB:
case 16 * KiB:
case 32 * KiB:
case 64 * KiB:
return true;
default:
return false;
}
}
#endif /* __METADATA_H__ */

240
src/metadata/metadata_bit.h Normal file

@@ -0,0 +1,240 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/*******************************************************************************
* Sector mask getter
******************************************************************************/
static inline uint64_t _get_mask(uint8_t start, uint8_t stop)
{
uint64_t mask = 0;
ENV_BUG_ON(start >= 64);
ENV_BUG_ON(stop >= 64);
ENV_BUG_ON(stop < start);
mask = ~mask;
mask >>= start + (63 - stop);
mask <<= start;
return mask;
}
#define _get_mask_u8(start, stop) _get_mask(start, stop)
#define _get_mask_u16(start, stop) _get_mask(start, stop)
#define _get_mask_u32(start, stop) _get_mask(start, stop)
#define _get_mask_u64(start, stop) _get_mask(start, stop)
typedef __uint128_t u128;
static inline u128 _get_mask_u128(uint8_t start, uint8_t stop)
{
u128 mask = 0;
ENV_BUG_ON(start >= 128);
ENV_BUG_ON(stop >= 128);
ENV_BUG_ON(stop < start);
mask = ~mask;
mask >>= start + (127 - stop);
mask <<= start;
return mask;
}
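/*
 * Sanity check (hypothetical self-test, not part of the interface):
 * bits start..stop inclusive are set and nothing else. For start = 2,
 * stop = 5 the helper keeps stop - start + 1 = 4 ones and shifts them
 * up by start, giving 0b111100 = 0x3c.
 */
static inline void _get_mask_example(void)
{
	ENV_BUG_ON(_get_mask(2, 5) != 0x3cULL); /* 0b111100 */
	ENV_BUG_ON(_get_mask(0, 0) != 0x1ULL); /* single bit */
	ENV_BUG_ON(_get_mask(0, 63) != ~0ULL); /* full 64-bit word */
}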
#define ocf_metadata_bit_struct(type) \
struct ocf_metadata_map_##type { \
struct ocf_metadata_map map; \
type valid; \
type dirty; \
} __attribute__((packed))
#define ocf_metadata_bit_func(what, type) \
static bool _ocf_metadata_test_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
const struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
return true; \
} else { \
return false; \
} \
} else { \
if (map[line].what & mask) { \
return true; \
} else { \
return false; \
} \
} \
} \
\
static bool _ocf_metadata_test_out_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
const struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (map[line].what & ~mask) { \
return true; \
} else { \
return false; \
} \
} \
\
static bool _ocf_metadata_clear_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
map[line].what &= ~mask; \
\
if (map[line].what) { \
return true; \
} else { \
return false; \
} \
} \
\
static bool _ocf_metadata_set_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
bool result; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
result = map[line].what ? true : false; \
\
map[line].what |= mask; \
\
return result; \
} \
\
static bool _ocf_metadata_test_and_set_##what##_##type( \
struct ocf_cache *cache, ocf_cache_line_t line, \
uint8_t start, uint8_t stop, bool all) \
{ \
bool test; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
test = true; \
} else { \
test = false; \
} \
} else { \
if (map[line].what & mask) { \
test = true; \
} else { \
test = false; \
} \
} \
\
map[line].what |= mask; \
return test; \
} \
\
static bool _ocf_metadata_test_and_clear_##what##_##type( \
struct ocf_cache *cache, ocf_cache_line_t line, \
uint8_t start, uint8_t stop, bool all) \
{ \
bool test; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
test = true; \
} else { \
test = false; \
} \
} else { \
if (map[line].what & mask) { \
test = true; \
} else { \
test = false; \
} \
} \
\
map[line].what &= ~mask; \
return test; \
} \
ocf_metadata_bit_struct(u8);
ocf_metadata_bit_struct(u16);
ocf_metadata_bit_struct(u32);
ocf_metadata_bit_struct(u64);
ocf_metadata_bit_struct(u128);
ocf_metadata_bit_func(dirty, u8);
ocf_metadata_bit_func(dirty, u16);
ocf_metadata_bit_func(dirty, u32);
ocf_metadata_bit_func(dirty, u64);
ocf_metadata_bit_func(dirty, u128);
ocf_metadata_bit_func(valid, u8);
ocf_metadata_bit_func(valid, u16);
ocf_metadata_bit_func(valid, u32);
ocf_metadata_bit_func(valid, u64);
ocf_metadata_bit_func(valid, u128);
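/*
 * Example (hypothetical): each instantiation above stamps out a family
 * of accessors for one status-field width, e.g.
 * ocf_metadata_bit_func(dirty, u8) defines _ocf_metadata_test_dirty_u8(),
 * _ocf_metadata_set_dirty_u8(), and so on, operating on
 * struct ocf_metadata_map_u8. The real dispatch on cache line size lives
 * in the metadata service; a caller marking sectors 0..3 of a line dirty
 * might look like:
 */
static inline bool _example_mark_dirty_u8(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	/* set 'dirty' for sectors 0..3; returns whether any dirty bit
	 * was already set on this line before the update */
	bool was_dirty = _ocf_metadata_set_dirty_u8(cache, line, 0, 3);

	/* false means the line just transitioned clean -> dirty */
	return !was_dirty;
}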

39
src/metadata/metadata_cleaning_policy.h Normal file

@@ -0,0 +1,39 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_CLEANING_POLICY_H__
#define __METADATA_CLEANING_POLICY_H__
/*
* GET
*/
static inline void
ocf_metadata_get_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line, struct cleaning_policy_meta *policy)
{
cache->metadata.iface.get_cleaning_policy(cache, line, policy);
}
/*
* SET
*/
static inline void
ocf_metadata_set_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line, struct cleaning_policy_meta *policy)
{
cache->metadata.iface.set_cleaning_policy(cache, line, policy);
}
/*
* FLUSH
*/
static inline void
ocf_metadata_flush_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
cache->metadata.iface.flush_cleaning_policy(cache, line);
}
#endif /* __METADATA_CLEANING_POLICY_H__ */

88
src/metadata/metadata_collision.c Normal file

@@ -0,0 +1,88 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_cache_line.h"
/*
*
*/
void ocf_metadata_add_to_collision(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t hash, ocf_cache_line_t cache_line)
{
ocf_cache_line_t prev_cache_line = ocf_metadata_get_hash(cache, hash);
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ocf_cache_line_t hash_entries = cache->device->hash_table_entries;
ENV_BUG_ON(!(hash < hash_entries));
ENV_BUG_ON(!(cache_line < line_entries));
/* Setup new node */
ocf_metadata_set_core_info(cache, cache_line, core_id,
core_line);
/* Update collision info:
* - next is set to value from hash table;
* - previous is set to collision table entries value
*/
ocf_metadata_set_collision_info(cache, cache_line, prev_cache_line,
line_entries);
/* Update previous head */
if (prev_cache_line != line_entries) {
ocf_metadata_set_collision_prev(cache, prev_cache_line,
cache_line);
}
/* Update hash table: a hash table entry is a pointer into the
* collision table, i.e. it stores a collision table index
*/
ocf_metadata_set_hash(cache, hash, cache_line);
}
/*
*
*/
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id)
{
ocf_core_id_t core_id;
uint64_t core_sector;
ocf_cache_line_t hash_father;
ocf_cache_line_t prev_line, next_line;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ocf_cache_line_t hash_entries = cache->device->hash_table_entries;
ENV_BUG_ON(!(line < line_entries));
ocf_metadata_get_collision_info(cache, line, &next_line, &prev_line);
/* Update previous node if any. */
if (prev_line != line_entries)
ocf_metadata_set_collision_next(cache, prev_line, next_line);
/* Update next node if any. */
if (next_line != line_entries)
ocf_metadata_set_collision_prev(cache, next_line, prev_line);
ocf_metadata_get_core_info(cache, line, &core_id, &core_sector);
/* Update hash table, because if it was pointing to the given node it
* must now point to the given node's next
*/
hash_father = ocf_metadata_hash_func(cache, core_sector, core_id);
ENV_BUG_ON(!(hash_father < hash_entries));
if (ocf_metadata_get_hash(cache, hash_father) == line)
ocf_metadata_set_hash(cache, hash_father, next_line);
ocf_metadata_set_collision_info(cache, line,
line_entries, line_entries);
ocf_metadata_set_core_info(cache, line,
OCF_CORE_MAX, ULLONG_MAX);
}
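/*
 * Example (hypothetical lookup, not part of the original file): the
 * inverse of add/remove above - hash the (core_id, core_line) pair and
 * walk the collision chain until an entry matches. The real lookup lives
 * elsewhere in the metadata service; `line_entries` doubles as the list
 * terminator, as in the helpers above.
 */
static ocf_cache_line_t example_lookup(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t core_line)
{
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
	ocf_cache_line_t hash = ocf_metadata_hash_func(cache, core_line,
			core_id);
	ocf_cache_line_t i = ocf_metadata_get_hash(cache, hash);
	ocf_core_id_t cur_core;
	uint64_t cur_line;

	while (i != line_entries) {
		ocf_metadata_get_core_info(cache, i, &cur_core, &cur_line);
		if (cur_core == core_id && cur_line == core_line)
			return i; /* hit */
		i = ocf_metadata_get_collision_next(cache, i);
	}

	return line_entries; /* miss */
}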

102
src/metadata/metadata_collision.h Normal file

@@ -0,0 +1,102 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_COLLISION_H__
#define __METADATA_COLLISION_H__
/**
* @brief Metadata list info structure (collision and partition links)
*/
struct ocf_metadata_list_info {
ocf_cache_line_t prev_col;
/*!< Previous cache line in collision list */
ocf_cache_line_t next_col;
/*!< Next cache line in collision list*/
ocf_cache_line_t partition_prev;
/*!< Previous cache line in the same partition*/
ocf_cache_line_t partition_next;
/*!< Next cache line in the same partition*/
ocf_part_id_t partition_id : 8;
/*!< ID of the partition this cache line is assigned to */
} __attribute__((packed));
/**
* @brief Metadata map structure
*/
struct ocf_metadata_map {
uint64_t core_line;
/*!< Core line address mapped by this structure */
uint16_t core_id;
/*!< ID of the core this cache line is assigned to */
uint8_t status[];
/*!< Entry status structure e.g. valid, dirty...*/
} __attribute__((packed));
static inline ocf_cache_line_t ocf_metadata_map_lg2phy(
struct ocf_cache *cache, ocf_cache_line_t coll_idx)
{
return cache->metadata.iface.layout_iface->lg2phy(cache,
coll_idx);
}
static inline ocf_cache_line_t ocf_metadata_map_phy2lg(
struct ocf_cache *cache, ocf_cache_line_t cache_line)
{
return cache->metadata.iface.layout_iface->phy2lg(cache,
cache_line);
}
static inline void ocf_metadata_set_collision_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next, ocf_cache_line_t prev)
{
cache->metadata.iface.set_collision_info(cache, line, next, prev);
}
static inline void ocf_metadata_get_collision_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t *next, ocf_cache_line_t *prev)
{
cache->metadata.iface.get_collision_info(cache, line, next, prev);
}
static inline void ocf_metadata_set_collision_next(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next)
{
cache->metadata.iface.set_collision_next(cache, line, next);
}
static inline void ocf_metadata_set_collision_prev(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t prev)
{
cache->metadata.iface.set_collision_prev(cache, line, prev);
}
static inline ocf_cache_line_t ocf_metadata_get_collision_next(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_collision_next(cache, line);
}
static inline ocf_cache_line_t ocf_metadata_get_collision_prev(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_collision_prev(cache, line);
}
void ocf_metadata_add_to_collision(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t hash, ocf_cache_line_t cache_line);
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id);
#endif /* __METADATA_COLLISION_H__ */

51
src/metadata/metadata_core.h Normal file

@@ -0,0 +1,51 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_CORE_H__
#define __METADATA_CORE_H__
static inline void ocf_metadata_set_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t core_id,
uint64_t core_sector)
{
cache->metadata.iface.set_core_info(cache, line, core_id,
core_sector);
}
static inline void ocf_metadata_get_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t *core_id,
uint64_t *core_sector)
{
cache->metadata.iface.get_core_info(cache, line, core_id,
core_sector);
}
static inline void ocf_metadata_get_core_and_part_id(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_core_id_t *core_id, ocf_part_id_t *part_id)
{
cache->metadata.iface.get_core_and_part_id(cache, line, core_id,
part_id);
}
static inline ocf_core_id_t ocf_metadata_get_core_id(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_core_id(cache, line);
}
static inline uint64_t ocf_metadata_get_core_sector(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_core_sector(cache, line);
}
static inline struct ocf_metadata_uuid *ocf_metadata_get_core_uuid(
struct ocf_cache *cache, ocf_core_id_t core_id)
{
return cache->metadata.iface.get_core_uuid(cache, core_id);
}
#endif /* __METADATA_CORE_H__ */

35
src/metadata/metadata_eviction_policy.h Normal file

@@ -0,0 +1,35 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_EVICTION_H__
#define __METADATA_EVICTION_H__
static inline void ocf_metadata_get_evicition_policy(
struct ocf_cache *cache, ocf_cache_line_t line,
union eviction_policy_meta *eviction)
{
cache->metadata.iface.get_eviction_policy(cache, line, eviction);
}
/*
* SET
*/
static inline void ocf_metadata_set_evicition_policy(
struct ocf_cache *cache, ocf_cache_line_t line,
union eviction_policy_meta *eviction)
{
cache->metadata.iface.set_eviction_policy(cache, line, eviction);
}
/*
* FLUSH
*/
static inline void ocf_metadata_flush_evicition_policy(
struct ocf_cache *cache, ocf_cache_line_t line)
{
cache->metadata.iface.flush_eviction_policy(cache, line);
}
#endif /* __METADATA_EVICTION_H__ */

2462
src/metadata/metadata_hash.c Normal file

File diff suppressed because it is too large

49
src/metadata/metadata_hash.h Normal file

@@ -0,0 +1,49 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_HASH_H__
#define __METADATA_HASH_H__
/**
* @file metadata_hash.h
* @brief Metadata Service - Hash Implementation
*/
#include "../ocf_request.h"
/**
* @brief Metadata hash segment types
*/
enum ocf_metadata_segment {
metadata_segment_sb_config = 0, /*!< Super block conf */
metadata_segment_sb_runtime, /*!< Super block runtime */
metadata_segment_reserved, /*!< Reserved space on disk */
metadata_segment_core_config, /*!< Core Config Metadata */
metadata_segment_core_runtime, /*!< Core Runtime Metadata */
metadata_segment_core_uuid, /*!< Core UUID */
/* .... new fixed size sections go here */
metadata_segment_fixed_size_max,
metadata_segment_variable_size_start = metadata_segment_fixed_size_max,
/* sections with size dependent on cache device size go here: */
metadata_segment_cleaning = /*!< Cleaning policy */
metadata_segment_variable_size_start,
metadata_segment_eviction, /*!< Eviction policy */
metadata_segment_collision, /*!< Collision */
metadata_segment_list_info, /*!< List info */
metadata_segment_hash, /*!< Hash */
/* .... new variable size sections go here */
metadata_segment_max, /*!< MAX */
};
/**
* @brief Get metadata interface implementation
*
* @return metadata interface
*/
const struct ocf_metadata_iface *metadata_hash_get_iface(void);
#endif /* __METADATA_HASH_H__ */

629
src/metadata/metadata_io.c Normal file

@@ -0,0 +1,629 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_allocator.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_IO_DEBUG 0
#if 1 == OCF_METADATA_IO_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
static void metadata_io_write_i_end_asynch(void *private_data, int error);
static int ocf_restart_meta_io(struct ocf_request *req);
static struct ocf_io_if meta_restart_if = {
.read = ocf_restart_meta_io,
.write = ocf_restart_meta_io
};
/*
* Get max pages for IO
*/
static uint32_t metadata_io_max_page(struct ocf_cache *cache)
{
return ocf_data_obj_get_max_io_size(&cache->device->obj) /
BYTES_TO_SECTORS(PAGE_SIZE);
}
/*
* Iterative read end callback
*/
static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
{
struct metadata_io_request_atomic *meta_atom_req = io->priv1;
OCF_DEBUG_TRACE(ocf_data_obj_get_cache(io->obj));
meta_atom_req->error |= error;
env_completion_complete(&meta_atom_req->complete);
}
/*
* Iterative read request
*/
int metadata_io_read_i_atomic(struct ocf_cache *cache,
ocf_metadata_atomic_io_event_t hndl)
{
uint64_t i;
uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
uint64_t io_sectors_count = cache->device->collision_table_entries *
ocf_line_sectors(cache);
uint64_t count, curr_count;
int result = 0;
struct ocf_io *io;
ctx_data_t *data;
struct metadata_io_request_atomic meta_atom_req;
unsigned char step = 0;
OCF_DEBUG_TRACE(cache);
/* Allocate one 4k page for metadata*/
data = ctx_data_alloc(cache->owner, 1);
if (!data)
return -ENOMEM;
count = io_sectors_count;
for (i = 0; i < io_sectors_count; i += curr_count) {
/* Get sectors count of this IO iteration */
curr_count = MIN(max_sectors_count, count);
env_completion_init(&meta_atom_req.complete);
meta_atom_req.error = 0;
/* Reset position in data buffer */
ctx_data_seek(cache->owner, data, ctx_data_seek_begin, 0);
/* Allocate new IO */
io = ocf_new_cache_io(cache);
if (!io) {
result = -ENOMEM;
break;
}
/* Setup IO */
ocf_io_configure(io,
cache->device->metadata_offset +
SECTORS_TO_BYTES(i),
SECTORS_TO_BYTES(curr_count),
OCF_READ, 0, 0);
ocf_io_set_cmpl(io, &meta_atom_req, NULL,
metadata_io_read_i_atomic_end);
result = ocf_io_set_data(io, data, 0);
if (result) {
ocf_io_put(io);
break;
}
/* Submit IO */
ocf_dobj_submit_metadata(io);
ocf_io_put(io);
/* Wait for completion of IO */
env_completion_wait(&meta_atom_req.complete);
/* Check for error */
if (meta_atom_req.error) {
result = meta_atom_req.error;
break;
}
result |= hndl(cache, i, curr_count, data);
if (result)
break;
count -= curr_count;
OCF_COND_RESCHED(step, 128);
}
/* Memory free */
ctx_data_free(cache->owner, data);
return result;
}
static int ocf_restart_meta_io(struct ocf_request *req)
{
struct ocf_io *io;
struct metadata_io_request *meta_io_req;
struct ocf_cache *cache;
int i;
int ret;
cache = req->cache;
meta_io_req = req->priv;
/* Fill with the latest metadata. */
OCF_METADATA_LOCK_RD();
for (i = 0; i < meta_io_req->count; i++) {
meta_io_req->on_meta_fill(cache, meta_io_req->data,
meta_io_req->page + i, meta_io_req->context);
}
OCF_METADATA_UNLOCK_RD();
io = ocf_new_cache_io(cache);
if (!io) {
metadata_io_write_i_end_asynch(meta_io_req, -ENOMEM);
return 0;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(meta_io_req->page),
PAGES_TO_BYTES(meta_io_req->count),
OCF_WRITE, 0, 0);
ocf_io_set_default_cmpl(io, meta_io_req,
metadata_io_write_i_end_asynch);
ret = ocf_io_set_data(io, meta_io_req->data, 0);
if (ret) {
ocf_io_put(io);
metadata_io_write_i_end_asynch(meta_io_req, ret);
return ret;
}
ocf_dobj_submit_io(io);
return 0;
}
/*
* Iterative asynchronous write callback
*/
static void metadata_io_write_i_end_asynch(void *private_data, int error)
{
struct metadata_io_request *request = (private_data);
struct metadata_io_request_asynch *a_req;
struct ocf_cache *cache;
OCF_CHECK_NULL(request);
cache = request->cache;
a_req = request->asynch;
OCF_CHECK_NULL(a_req);
OCF_CHECK_NULL(a_req->on_complete);
if (error) {
request->error |= error;
request->asynch->error |= error;
}
if (env_atomic_dec_return(&request->req_remaining))
return;
OCF_DEBUG_PARAM(cache, "Page = %u", request->page);
ctx_data_free(cache->owner, request->data);
request->data = NULL;
if (env_atomic_dec_return(&a_req->req_remaining)) {
env_atomic_set(&request->finished, 1);
ocf_metadata_updater_kick(cache);
return;
}
OCF_DEBUG_MSG(cache, "Asynchronous IO completed");
/* All IOs have been finished, call IO end callback */
a_req->on_complete(request->cache, a_req->context, request->error);
/*
* If it's the last request, we mark it as finished
* after calling the IO end callback
*/
env_atomic_set(&request->finished, 1);
ocf_metadata_updater_kick(cache);
}
static void metadata_io_req_error(struct ocf_cache *cache,
struct metadata_io_request_asynch *a_req,
uint32_t i, int error)
{
a_req->error |= error;
a_req->reqs[i].error |= error;
a_req->reqs[i].count = 0;
if (a_req->reqs[i].data)
ctx_data_free(cache->owner, a_req->reqs[i].data);
a_req->reqs[i].data = NULL;
}
/*
* Iterative write request asynchronously
*/
int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_hndl_on_write_t compl_hndl)
{
uint32_t curr_count, written;
uint32_t max_count = metadata_io_max_page(cache);
uint32_t io_count = DIV_ROUND_UP(count, max_count);
uint32_t i, i_fill;
int error = 0, ret;
struct ocf_io *io;
/* Allocation and initialization of asynchronous metadata IO request */
struct metadata_io_request_asynch *a_req;
if (count == 0)
return 0;
a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO);
if (!a_req)
return -OCF_ERR_NO_MEM;
env_atomic_set(&a_req->req_remaining, io_count);
env_atomic_set(&a_req->req_active, io_count);
a_req->on_complete = compl_hndl;
a_req->context = context;
a_req->page = page;
/* Allocate particular requests and initialize them */
OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]),
io_count, &a_req->reqs_limit);
if (!a_req->reqs) {
env_free(a_req);
ocf_cache_log(cache, log_warn,
"No memory during metadata IO\n");
return -OCF_ERR_NO_MEM;
}
/* IO Requests initialization */
for (i = 0; i < io_count; i++) {
env_atomic_set(&(a_req->reqs[i].req_remaining), 1);
env_atomic_set(&(a_req->reqs[i].finished), 0);
a_req->reqs[i].asynch = a_req;
}
OCF_DEBUG_PARAM(cache, "IO count = %u", io_count);
i = 0;
written = 0;
while (count) {
/* Get pages count of this IO iteration */
if (count > max_count)
curr_count = max_count;
else
curr_count = count;
/* Fill request */
a_req->reqs[i].cache = cache;
a_req->reqs[i].context = context;
a_req->reqs[i].page = page + written;
a_req->reqs[i].count = curr_count;
a_req->reqs[i].on_meta_fill = fill_hndl;
a_req->reqs[i].fl_req.io_if = &meta_restart_if;
a_req->reqs[i].fl_req.io_queue = queue;
a_req->reqs[i].fl_req.cache = cache;
a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
a_req->reqs[i].fl_req.info.internal = true;
/*
* We don't want to allocate a map for this request in
* worker threads.
*/
a_req->reqs[i].fl_req.map = LIST_POISON1;
INIT_LIST_HEAD(&a_req->reqs[i].list);
a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count);
if (!a_req->reqs[i].data) {
error = -OCF_ERR_NO_MEM;
metadata_io_req_error(cache, a_req, i, error);
break;
}
/* Issue IO if it is not overlapping with anything else */
ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]);
if (ret == 0) {
/* Allocate new IO */
io = ocf_new_cache_io(cache);
if (!io) {
error = -OCF_ERR_NO_MEM;
metadata_io_req_error(cache, a_req, i, error);
break;
}
for (i_fill = 0; i_fill < curr_count; i_fill++) {
fill_hndl(cache, a_req->reqs[i].data,
page + written + i_fill,
context);
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(a_req->reqs[i].page),
PAGES_TO_BYTES(a_req->reqs[i].count),
OCF_WRITE, 0, 0);
ocf_io_set_default_cmpl(io, &a_req->reqs[i],
metadata_io_write_i_end_asynch);
error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
if (error) {
ocf_io_put(io);
metadata_io_req_error(cache, a_req, i, error);
break;
}
ocf_dobj_submit_io(io);
}
count -= curr_count;
written += curr_count;
i++;
}
if (error == 0) {
/* No error, return 0 that indicates operation successful */
return 0;
}
OCF_DEBUG_MSG(cache, "ERROR");
if (i == 0) {
/*
* If no requests were submitted, we just call completion
* callback, free memory and return error.
*/
compl_hndl(cache, context, error);
OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
env_free(a_req);
return error;
}
/*
* Decrement total remaining requests by the IOs that were not triggered.
* If we reached zero, we need to call completion callback.
*/
if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0)
compl_hndl(cache, context, error);
/*
* Decrement total active requests by the IOs that were not triggered.
* If we reached zero, we need to free memory.
*/
if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) {
OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
env_free(a_req);
}
return error;
}
int ocf_metadata_io_init(ocf_cache_t cache)
{
return ocf_metadata_updater_init(cache);
}
void ocf_metadata_io_deinit(ocf_cache_t cache)
{
ocf_metadata_updater_stop(cache);
}
static void metadata_io_end(struct ocf_io *io, int error)
{
struct metadata_io *mio = io->priv1;
ctx_data_t *data = ocf_io_get_data(io);
uint32_t page = BYTES_TO_PAGES(io->addr);
uint32_t count = BYTES_TO_PAGES(io->bytes);
struct ocf_cache *cache = mio->cache;
uint32_t i = 0;
if (error) {
mio->error |= error;
goto out;
}
for (i = 0; mio->dir == OCF_READ && i < count; i++) {
mio->error |= mio->hndl_fn(cache, data, page + i,
mio->hndl_cntx);
}
out:
ctx_data_free(cache->owner, data);
ocf_io_put(io);
if (env_atomic_dec_return(&mio->rq_remaining))
return;
env_completion_complete(&mio->completion);
}
static int metadata_submit_io(
struct ocf_cache *cache,
struct metadata_io *mio,
uint32_t count,
uint32_t written)
{
ctx_data_t *data;
struct ocf_io *io;
int err;
int i;
/* Allocate IO */
io = ocf_new_cache_io(cache);
if (!io) {
err = -ENOMEM;
goto error;
}
/* Allocate data buffer for this IO */
data = ctx_data_alloc(cache->owner, count);
if (!data) {
err = -ENOMEM;
goto put_io;
}
/* Fill data */
for (i = 0; mio->dir == OCF_WRITE && i < count; i++) {
err = mio->hndl_fn(cache, data,
mio->page + written + i, mio->hndl_cntx);
if (err)
goto free_data;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(mio->page + written),
PAGES_TO_BYTES(count),
mio->dir, 0, 0);
ocf_io_set_cmpl(io, mio, NULL, metadata_io_end);
err = ocf_io_set_data(io, data, 0);
if (err)
goto free_data;
/* Submit IO */
env_atomic_inc(&mio->rq_remaining);
ocf_dobj_submit_io(io);
return 0;
free_data:
ctx_data_free(cache->owner, data);
put_io:
ocf_io_put(io);
error:
mio->error = err;
return err;
}
/*
*
*/
static int metadata_io(struct metadata_io *mio)
{
uint32_t max_count = metadata_io_max_page(mio->cache);
uint32_t this_count, written = 0;
uint32_t count = mio->count;
unsigned char step = 0;
int err;
struct ocf_cache *cache = mio->cache;
/* Check direction value correctness */
switch (mio->dir) {
case OCF_WRITE:
case OCF_READ:
break;
default:
return -EINVAL;
}
env_atomic_set(&mio->rq_remaining, 1);
env_completion_init(&mio->completion);
while (count) {
this_count = MIN(count, max_count);
err = metadata_submit_io(cache, mio, this_count, written);
if (err)
break;
/* Update counters */
count -= this_count;
written += this_count;
OCF_COND_RESCHED(step, 128);
}
if (env_atomic_dec_return(&mio->rq_remaining) == 0)
env_completion_complete(&mio->completion);
/* Wait for all IO to be finished */
env_completion_wait(&mio->completion);
return mio->error;
}
/*
*
*/
int metadata_io_write_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
struct metadata_io mio = {
.dir = OCF_WRITE,
.cache = cache,
.page = page,
.count = count,
.hndl_fn = hndl_fn,
.hndl_cntx = hndl_cntx,
};
return metadata_io(&mio);
}
/*
*
*/
int metadata_io_read_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
struct metadata_io mio = {
.dir = OCF_READ,
.cache = cache,
.page = page,
.count = count,
.hndl_fn = hndl_fn,
.hndl_cntx = hndl_cntx,
};
return metadata_io(&mio);
}
/*
*
*/
static int metadata_io_write_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE);
return 0;
}
/*
* Write request
*/
int metadata_io_write(struct ocf_cache *cache,
void *data, uint32_t page)
{
struct metadata_io mio = {
.dir = OCF_WRITE,
.cache = cache,
.page = page,
.count = 1,
.hndl_fn = metadata_io_write_fill,
.hndl_cntx = data,
};
return metadata_io(&mio);
}
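/*
 * Example (hypothetical fill handler, not part of the original file):
 * the handler receives absolute page numbers, so `context` is assumed
 * to point at a buffer indexed by absolute page. Writing pages
 * [start, start + count) then reads:
 *
 *	metadata_io_write_i(cache, start, count, example_fill, buffer);
 */
static int example_fill(struct ocf_cache *cache, ctx_data_t *data,
		uint32_t page, void *context)
{
	uint8_t *buffer = context;

	/* copy one page of caller data into the IO data buffer */
	ctx_data_wr_check(cache->owner, data,
			buffer + ((uint64_t)page * PAGE_SIZE), PAGE_SIZE);
	return 0;
}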

188
src/metadata/metadata_io.h Normal file

@@ -0,0 +1,188 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_IO_H__
#define __METADATA_IO_H__
/**
* @file metadata_io.h
* @brief Metadata IO utilities
*/
/**
* @brief Metadata IO event
*
* The client of the metadata IO service is informed through this event:
* - on completion of a read from the cache device
* - on filling data which will be written to the cache device
*
* @param data[in,out] Environment data for the read or write IO
* @param page[in] Page which is issued
* @param context[in] Caller context
*
* @retval 0 Success
* @retval Non-zero Error which will be finally returned to the caller
*/
typedef int (*ocf_metadata_io_event_t)(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context);
/**
* @brief Metadata write end callback
*
* @param cache - Cache instance
* @param context - Caller context
* @param error - Error code (0 on success)
*/
typedef void (*ocf_metadata_io_hndl_on_write_t)(struct ocf_cache *cache,
void *context, int error);
struct metadata_io_request_asynch;
/*
* IO request context
*/
struct metadata_io_request {
struct ocf_cache *cache;
void *context;
uint32_t page;
uint32_t count;
ocf_metadata_io_event_t on_meta_fill;
env_atomic req_remaining;
ctx_data_t *data;
env_completion completion;
int error;
struct metadata_io_request_asynch *asynch;
env_atomic finished;
struct ocf_request fl_req;
struct list_head list;
};
/*
* Atomic IO request context
*/
struct metadata_io_request_atomic {
env_completion complete;
int error;
};
/*
*
*/
struct metadata_io {
int error;
int dir;
struct ocf_cache *cache;
uint32_t page;
uint32_t count;
env_completion completion;
env_atomic rq_remaining;
ocf_metadata_io_event_t hndl_fn;
void *hndl_cntx;
};
/*
* Asynchronous IO request context
*/
struct metadata_io_request_asynch {
struct ocf_cache *cache;
struct metadata_io_request *reqs;
void *context;
int error;
size_t reqs_limit;
env_atomic req_remaining;
env_atomic req_active;
uint32_t page;
ocf_metadata_io_hndl_on_write_t on_complete;
};
/**
* @brief Metadata read end callback
*
* @param cache Cache instance
* @param sector_addr Begin sector of metadata
* @param sector_no Number of sectors
* @param data Data environment buffer with atomic metadata
*
* @retval 0 Success
* @retval Non-zero Error which will be finally returned to the caller
*/
typedef int (*ocf_metadata_atomic_io_event_t)(
struct ocf_cache *cache, uint64_t sector_addr,
uint32_t sector_no, ctx_data_t *data);
/**
* @brief Write page request
*
* @param cache - Cache instance
* @param data - Data to be written for specified page
* @param page - Page of SSD (cache device) where data has to be placed
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write(struct ocf_cache *cache,
void *data, uint32_t page);
int metadata_io_read_i_atomic(struct ocf_cache *cache,
ocf_metadata_atomic_io_event_t hndl);
/**
* @brief Iterative pages write
*
* @param cache - Cache instance
* @param page - Start page of SSD (cache device) where data will be written
* @param count - Number of pages to be processed
* @param hndl_fn - Fill callback called to fill each page with data
* @param hndl_cntx - Caller context which is passed on fill callback request
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
/**
* @brief Iterative pages read
*
* @param cache - Cache instance
* @param page - Start page of SSD (cache device) from which data will be read
* @param count - Number of pages to be processed
* @param hndl_fn - Callback called on each page read completion
* @param hndl_cntx - Caller context passed during handle function call
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_read_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
/**
* @brief Iterative asynchronous pages write
*
* @param cache - Cache instance
* @param queue - IO queue to which the metadata write is submitted
* @param context - Caller context
* @param page - Start page of SSD (cache device) where data will be written
* @param count - Number of pages to be processed
* @param fill - Fill callback
* @param complete - All IOs completed callback
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_hndl_on_write_t compl_hndl);
/**
* Function for initializing metadata io.
*/
int ocf_metadata_io_init(ocf_cache_t cache);
/**
* Function for deinitializing metadata io.
*/
void ocf_metadata_io_deinit(ocf_cache_t cache);
#endif /* __METADATA_IO_H__ */

126
src/metadata/metadata_misc.c Normal file

@@ -0,0 +1,126 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_cache_line.h"
static bool _is_cache_line_acting(struct ocf_cache *cache,
uint32_t cache_line, ocf_core_id_t core_id,
uint64_t start_line, uint64_t end_line)
{
ocf_core_id_t tmp_core_id;
uint64_t core_line;
ocf_metadata_get_core_info(cache, cache_line,
&tmp_core_id, &core_line);
if (core_id != OCF_CORE_ID_INVALID) {
if (core_id != tmp_core_id)
return false;
if (core_line < start_line || core_line > end_line)
return false;
} else if (tmp_core_id == OCF_CORE_ID_INVALID) {
return false;
}
return true;
}
/*
* Iterates over cache lines that belong to the core device with
* core ID = core_id whose core byte addresses are in the range
* [start_byte, end_byte] and applies actor(cache, cache_line) to all
* matching cache lines
*
* set partition_id to PARTITION_INVALID to ignore the partition
*
* METADATA lock must be held before calling this function
*/
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor)
{
uint32_t step = 0;
ocf_cache_line_t i, next_i;
uint64_t start_line, end_line;
int ret = 0;
start_line = ocf_bytes_2_lines(cache, start_byte);
end_line = ocf_bytes_2_lines(cache, end_byte);
if (part_id != PARTITION_INVALID) {
for (i = cache->user_parts[part_id].runtime->head;
i != cache->device->collision_table_entries;
i = next_i) {
next_i = ocf_metadata_get_partition_next(cache, i);
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
ret = -EAGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
} else {
for (i = 0; i < cache->device->collision_table_entries; ++i) {
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
ret = -EAGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
}
return ret;
}
/* the caller must hold the relevant cache block concurrency reader lock
* and the metadata lock
*/
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
uint32_t cache_line)
{
ocf_part_id_t partition_id =
ocf_metadata_get_partition_id(cache, cache_line);
ocf_metadata_remove_from_collision(cache, cache_line, partition_id);
ocf_metadata_remove_from_partition(cache, partition_id, cache_line);
ocf_metadata_add_to_free_list(cache, cache_line);
}
static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
uint32_t cache_line)
{
set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache),
cache_line);
/*
* This is especially for removing inactive core
*/
metadata_clear_dirty(cache, cache_line);
}
/* caller must hold metadata lock
* set core_id to -1 to clean the whole cache device
*/
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
start_byte, end_byte, _ocf_metadata_sparse_cache_line);
}
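/*
 * Example (hypothetical actor, not part of the original file): any
 * void(cache, line) function can be applied to the matching range. The
 * callback carries no user context, hence the file-scope counter here.
 * Counting unused cache lines of core 0 over core bytes [0, 1MiB], any
 * partition:
 *
 *	example_matches = 0;
 *	ocf_metadata_actor(cache, PARTITION_INVALID, 0, 0,
 *			(1 << 20) - 1, example_count_actor);
 */
static uint32_t example_matches;

static void example_count_actor(struct ocf_cache *cache,
		ocf_cache_line_t cache_line)
{
	(void)cache;
	(void)cache_line;
	example_matches++;
}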

30
src/metadata/metadata_misc.h Normal file

@@ -0,0 +1,30 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_MISC_H__
#define __METADATA_MISC_H__
static inline ocf_cache_line_t ocf_metadata_hash_func(ocf_cache_t cache,
uint64_t cache_line_num, ocf_core_id_t core_id)
{
return (ocf_cache_line_t) ((cache_line_num * (core_id + 1)) %
cache->device->hash_table_entries);
}
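/*
 * Worked example (hypothetical hash_table_entries = 1021):
 *   core 0, core line 4096 -> (4096 * 1) % 1021 = 12
 *   core 1, core line 4096 -> (4096 * 2) % 1021 = 24
 * The (core_id + 1) factor keeps the same core line of different
 * cores from always landing in the same bucket.
 */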
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor);
#endif /* __METADATA_MISC_H__ */

227
src/metadata/metadata_partition.c Normal file

@@ -0,0 +1,227 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_part.h"
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void update_partition_head(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
struct ocf_user_part *part = &cache->user_parts[part_id];
part->runtime->head = line;
}
void ocf_metadata_remove_from_free_list(struct ocf_cache *cache,
ocf_cache_line_t cline)
{
struct ocf_part *free_list = cache->device->freelist_part;
int is_head, is_tail;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ocf_cache_line_t prev, next;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ENV_BUG_ON(cline >= line_entries);
/* Get Partition info */
ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev);
/* Find out if this node is Partition _head_ */
is_head = (prev == line_entries);
is_tail = (next == line_entries);
/* Case 1: this node is the head and the only node, so unlink it
* and mark the list as empty.
*/
if (is_head && (free_list->curr_size == 1)) {
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
free_list->head = line_entries;
free_list->tail = line_entries;
} else if (is_head) {
/* Case 2: this line is the free list head and there are
* more nodes; update the head.
*/
ENV_BUG_ON(next >= line_entries);
free_list->head = next;
ocf_metadata_set_partition_prev(cache, next, line_entries);
ocf_metadata_set_partition_next(cache, cline, line_entries);
} else if (is_tail) {
/* Case 3: this cline is the free list tail */
ENV_BUG_ON(prev >= line_entries);
free_list->tail = prev;
ocf_metadata_set_partition_prev(cache, cline, line_entries);
ocf_metadata_set_partition_next(cache, prev, line_entries);
} else {
/* Case 4: this line is a middle node; the head and the tail
* pointers do not change.
*/
ENV_BUG_ON(next >= line_entries || prev >= line_entries);
/* Update prev and next nodes */
ocf_metadata_set_partition_prev(cache, next, prev);
ocf_metadata_set_partition_next(cache, prev, next);
/* Update the given node */
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
}
free_list->curr_size--;
}
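/* Appends the given cache line at the tail of the free list */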
void ocf_metadata_add_to_free_list(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_part *free_list = cache->device->freelist_part;
ocf_cache_line_t tail;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ENV_BUG_ON(line >= line_entries);
if (free_list->curr_size == 0) {
free_list->head = line;
free_list->tail = line;
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, line_entries);
} else {
tail = free_list->tail;
ENV_BUG_ON(tail >= line_entries);
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, tail);
ocf_metadata_set_partition_next(cache, tail, line);
free_list->tail = line;
}
free_list->curr_size++;
}
/* Adds the given collision_index to the _head_ of the Partition list */
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
ocf_cache_line_t line_head;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[part_id];
ENV_BUG_ON(!(line < line_entries));
/* First node to be added */
if (!part->runtime->curr_size) {
update_partition_head(cache, part_id, line);
ocf_metadata_set_partition_info(cache, line, part_id,
line_entries, line_entries);
if (!ocf_part_is_valid(part)) {
/* Partition becomes non-empty and is not valid;
* update the list of partitions
*/
ocf_part_sort(cache);
}
} else {
/* Not the first node to be added. */
line_head = part->runtime->head;
ENV_BUG_ON(!(line_head < line_entries));
ocf_metadata_set_partition_info(cache, line, part_id,
line_head, line_entries);
ocf_metadata_set_partition_prev(cache, line_head, line);
update_partition_head(cache, part_id, line);
}
part->runtime->curr_size++;
}
/* Deletes the node with the given collision_index from the Partition list */
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
int is_head, is_tail;
ocf_cache_line_t prev_line, next_line;
uint32_t line_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[part_id];
ENV_BUG_ON(!(line < line_entries));
/* Get Partition info */
ocf_metadata_get_partition_info(cache, line, NULL,
&next_line, &prev_line);
/* Find out if this node is Partition _head_ */
is_head = (prev_line == line_entries);
is_tail = (next_line == line_entries);
/* Case 1: this line is the head and the only node; unlink it
* and mark the partition list as empty.
*/
if (is_head && (part->runtime->curr_size == 1)) {
ocf_metadata_set_partition_info(cache, line,
part_id, line_entries, line_entries);
update_partition_head(cache, part_id, line_entries);
if (!ocf_part_is_valid(part)) {
/* Partition becomes empty and is not valid;
* update the list of partitions
*/
ocf_part_sort(cache);
}
} else if (is_head) {
/* Case 2: this collision_index is the partition list head
* and there are more nodes; update the head.
*/
ENV_BUG_ON(!(next_line < line_entries));
update_partition_head(cache, part_id, next_line);
ocf_metadata_set_partition_next(cache, line, line_entries);
ocf_metadata_set_partition_prev(cache, next_line,
line_entries);
} else if (is_tail) {
/* Case 3: else if this collision_index is partition list tail
*/
ENV_BUG_ON(!(prev_line < line_entries));
ocf_metadata_set_partition_prev(cache, line, line_entries);
ocf_metadata_set_partition_next(cache, prev_line,
line_entries);
} else {
/* Case 4: this collision_index is a middle node; the head and
* the tail pointers do not change.
*/
ENV_BUG_ON(!(next_line < line_entries));
ENV_BUG_ON(!(prev_line < line_entries));
/* Update prev and next nodes */
ocf_metadata_set_partition_next(cache, prev_line, next_line);
ocf_metadata_set_partition_prev(cache, next_line, prev_line);
/* Update the given node */
ocf_metadata_set_partition_info(cache, line, part_id,
line_entries, line_entries);
}
part->runtime->curr_size--;
}

View File

@@ -0,0 +1,78 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_PARTITION_H__
#define __METADATA_PARTITION_H__
#include "metadata_partition_structs.h"
#include "../ocf_cache_priv.h"
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1)
static inline ocf_part_id_t ocf_metadata_get_partition_id(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_partition_id(cache, line);
}
static inline ocf_cache_line_t ocf_metadata_get_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_partition_next(cache, line);
}
static inline ocf_cache_line_t ocf_metadata_get_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_partition_prev(cache, line);
}
static inline void ocf_metadata_get_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
ocf_cache_line_t *prev_line)
{
cache->metadata.iface.get_partition_info(cache, line, part_id,
next_line, prev_line);
}
static inline void ocf_metadata_set_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next_line)
{
cache->metadata.iface.set_partition_next(cache, line, next_line);
}
static inline void ocf_metadata_set_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t prev_line)
{
cache->metadata.iface.set_partition_prev(cache, line, prev_line);
}
static inline void ocf_metadata_set_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t part_id, ocf_cache_line_t next_line,
ocf_cache_line_t prev_line)
{
cache->metadata.iface.set_partition_info(cache, line, part_id,
next_line, prev_line);
}
void ocf_metadata_add_to_free_list(struct ocf_cache *cache,
ocf_cache_line_t cline);
void ocf_metadata_remove_from_free_list(struct ocf_cache *cache,
ocf_cache_line_t cline);
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
#endif /* __METADATA_PARTITION_H__ */

View File

@@ -0,0 +1,50 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_PARTITION_STRUCTS_H__
#define __METADATA_PARTITION_STRUCTS_H__
#include "../utils/utils_list.h"
#include "../cleaning/cleaning.h"
#include "../eviction/eviction.h"
struct ocf_part {
ocf_cache_line_t head;
ocf_cache_line_t tail;
uint32_t curr_size;
};
struct ocf_user_part_config {
char name[OCF_IO_CLASS_NAME_MAX];
uint32_t min_size;
uint32_t max_size;
int16_t priority;
ocf_cache_mode_t cache_mode;
struct {
uint8_t valid : 1;
uint8_t added : 1;
uint8_t eviction : 1;
/*!< This bit is set during partition sorting and
* means that eviction from this partition is allowed
*/
} flags;
};
struct ocf_user_part_runtime {
uint32_t curr_size;
uint32_t head;
struct eviction_policy eviction;
struct cleaning_policy cleaning;
};
struct ocf_user_part {
struct ocf_user_part_config *config;
struct ocf_user_part_runtime *runtime;
struct ocf_lst_entry lst_valid;
};
#endif /* __METADATA_PARTITION_STRUCTS_H__ */

609
src/metadata/metadata_raw.c Normal file
View File

@@ -0,0 +1,609 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_io.h"
#include "metadata_raw_atomic.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0
#if 1 == OCF_METADATA_RAW_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/*******************************************************************************
* Common RAW Implementation
******************************************************************************/
/*
* Check if page is valid for specified RAW descriptor
*/
static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
{
ENV_BUG_ON(page < raw->ssd_pages_offset);
ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));
return true;
}
/*******************************************************************************
* RAW RAM Implementation
******************************************************************************/
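/*
* Entries are stored back to back in the memory pool, entry_size bytes
* apart, with entries_in_page entries packed into each 4 kiB page. The
* macros below translate a cache line number into its address in the
* pool, its page within the pool and the corresponding page on the
* cache device.
*/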
#define _RAW_RAM_ADDR(raw, line) \
(raw->mem_pool + (((uint64_t)raw->entry_size * (line))))
#define _RAW_RAM_PAGE(raw, line) \
((line) / raw->entries_in_page)
#define _RAW_RAM_PAGE_SSD(raw, line) \
(raw->ssd_pages_offset + _RAW_RAM_PAGE(raw, line))
#define _RAW_RAM_ADDR_PAGE(raw, line) \
(_RAW_RAM_ADDR(raw, \
_RAW_RAM_PAGE(raw, line) * raw->entries_in_page))
#define _RAW_RAM_GET(raw, line, data) \
env_memcpy(data, raw->entry_size, _RAW_RAM_ADDR(raw, (line)), \
raw->entry_size)
#define _RAW_RAM_SET(raw, line, data) \
env_memcpy(_RAW_RAM_ADDR(raw, line), raw->entry_size, \
data, raw->entry_size)
/*
* RAM Implementation - De-Initialize
*/
static int _raw_ram_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
if (raw->mem_pool) {
env_vfree(raw->mem_pool);
raw->mem_pool = NULL;
}
return 0;
}
/*
* RAM Implementation - Initialize
*/
static int _raw_ram_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
size_t mem_pool_size;
OCF_DEBUG_TRACE(cache);
/* Allocate memory pool for entries */
mem_pool_size = raw->ssd_pages;
mem_pool_size *= PAGE_SIZE;
raw->mem_pool_limit = mem_pool_size;
raw->mem_pool = env_vzalloc(mem_pool_size);
if (!raw->mem_pool)
return -ENOMEM;
return 0;
}
/*
* RAM Implementation - Size of
*/
static size_t _raw_ram_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
size_t size;
size = raw->ssd_pages;
size *= PAGE_SIZE;
return size;
}
/*
* RAM Implementation - Size on SSD
*/
static uint32_t _raw_ram_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
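/* Round the number of pages up to a multiple of 128 KiB */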
const size_t alignment = 128 * KiB / PAGE_SIZE;
return DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
}
/*
* RAM Implementation - Checksum
*/
static uint32_t _raw_ram_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
uint64_t i;
uint32_t step = 0;
uint32_t crc = 0;
for (i = 0; i < raw->ssd_pages; i++) {
crc = env_crc32(crc, raw->mem_pool + PAGE_SIZE * i, PAGE_SIZE);
OCF_COND_RESCHED(step, 10000);
}
return crc;
}
/*
* RAM Implementation - Get entry
*/
static int _raw_ram_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_GET(raw, line, data);
}
/*
* RAM Implementation - Read only entry access
*/
static const void *_raw_ram_rd_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_ADDR(raw, line);
}
/*
* RAM Implementation - Write entry access
*/
static void *_raw_ram_wr_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_ADDR(raw, line);
}
/*
* RAM Implementation - Set Entry
*/
static int _raw_ram_set(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_SET(raw, line, data);
}
/*
* RAM Implementation - Flush specified element from SSD
*/
static int _raw_ram_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
OCF_DEBUG_PARAM(cache, "Line = %u", line);
OCF_DEBUG_PARAM(cache, "Page = %llu", _RAW_RAM_PAGE(raw, line));
ENV_BUG_ON(!_raw_is_valid(raw, line, raw->entry_size));
return metadata_io_write(cache, _RAW_RAM_ADDR_PAGE(raw, line),
_RAW_RAM_PAGE_SSD(raw, line));
}
/*
* RAM Implementation - Load all IO callback
*/
static int _raw_ram_load_all_io(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ocf_cache_line_t line;
uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *) context;
uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE);
raw_page = page - raw->ssd_pages_offset;
line = raw_page * raw->entries_in_page;
OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
ctx_data_rd_check(cache->owner, _RAW_RAM_ADDR(raw, line), data, size);
ctx_data_seek(cache->owner, data, ctx_data_seek_current,
PAGE_SIZE - size);
return 0;
}
/*
* RAM Implementation - Load all metadata elements from SSD
*/
static int _raw_ram_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
return metadata_io_read_i(cache, raw->ssd_pages_offset,
raw->ssd_pages, _raw_ram_load_all_io, raw);
}
/*
* RAM Implementation - Flush IO callback - Fill page
*/
static int _raw_ram_flush_all_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ocf_cache_line_t line;
uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context;
uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE);
raw_page = page - raw->ssd_pages_offset;
line = raw_page * raw->entries_in_page;
OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size);
ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size);
return 0;
}
/*
* RAM Implementation - Flush all elements
*/
static int _raw_ram_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
return metadata_io_write_i(cache, raw->ssd_pages_offset,
raw->ssd_pages, _raw_ram_flush_all_fill, raw);
}
/*
* RAM Implementation - Mark to Flush
*/
static void _raw_ram_flush_mark(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t map_idx, int to_state,
uint8_t start, uint8_t stop)
{
if (to_state == DIRTY || to_state == CLEAN) {
rq->map[map_idx].flush = true;
rq->info.flush_metadata = true;
}
}
/*******************************************************************************
* RAM Implementation - Do Flush Asynchronously
******************************************************************************/
struct _raw_ram_flush_ctx {
struct ocf_metadata_raw *raw;
struct ocf_request *rq;
ocf_metadata_asynch_flush_hndl complete;
env_atomic flush_req_cnt;
int error;
};
static void _raw_ram_flush_do_asynch_io_complete(struct ocf_cache *cache,
void *context, int error)
{
struct _raw_ram_flush_ctx *ctx = context;
if (error) {
ctx->error = error;
ocf_metadata_error(cache);
}
if (env_atomic_dec_return(&ctx->flush_req_cnt))
return;
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
/* Call metadata flush completed call back */
ctx->rq->error |= ctx->error;
ctx->complete(ctx->rq, ctx->error);
env_free(ctx);
}
/*
* RAM Implementation - Flush IO callback - Fill page
*/
static int _raw_ram_flush_do_asynch_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ocf_cache_line_t line;
uint32_t raw_page;
struct _raw_ram_flush_ctx *ctx = context;
struct ocf_metadata_raw *raw = NULL;
uint64_t size;
ENV_BUG_ON(!ctx);
raw = ctx->raw;
ENV_BUG_ON(!raw);
size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(size > PAGE_SIZE);
raw_page = page - raw->ssd_pages_offset;
line = raw_page * raw->entries_in_page;
OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size);
ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size);
return 0;
}
/*
* RAM Implementation - Page comparator for sorting pages to flush
*/
int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
{
uint32_t *page1 = (uint32_t *)item1;
uint32_t *page2 = (uint32_t *)item2;
if (*page1 > *page2)
return 1;
if (*page1 < *page2)
return -1;
return 0;
}
static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
uint32_t *pages_tab, struct ocf_metadata_raw *raw,
int *pages_to_flush)
{
int i, j = 0;
int line_no = rq->core_line_count;
struct ocf_map_info *map;
for (i = 0; i < line_no; i++) {
map = &rq->map[i];
if (map->flush) {
pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
j++;
}
}
*pages_to_flush = j;
}
static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete)
{
int result = 0, i;
uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
uint32_t *pages_tab;
int line_no = rq->core_line_count;
int pages_to_flush;
uint32_t start_page = 0;
uint32_t count = 0;
struct _raw_ram_flush_ctx *ctx;
ENV_BUG_ON(!complete);
OCF_DEBUG_TRACE(cache);
if (!rq->info.flush_metadata) {
/* Nothing to flush, call the flush callback */
complete(rq, 0);
return 0;
}
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
complete(rq, -ENOMEM);
return -ENOMEM;
}
ctx->rq = rq;
ctx->complete = complete;
ctx->raw = raw;
env_atomic_set(&ctx->flush_req_cnt, 1);
if (line_no <= MAX_STACK_TAB_SIZE) {
pages_tab = __pages_tab;
} else {
pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
if (!pages_tab) {
env_free(ctx);
complete(rq, -ENOMEM);
return -ENOMEM;
}
}
/* Keep the pending request count at 1 while flushes are being
* submitted, to prevent premature freeing of the asynchronous
* context
*/
__raw_ram_flush_do_asynch_add_pages(rq, pages_tab, raw,
&pages_to_flush);
env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
_raw_ram_flush_do_page_cmp, NULL);
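/* Walk the sorted page table: skip duplicated pages and merge runs
* of consecutive pages into a single contiguous write
*/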
i = 0;
while (i < pages_to_flush) {
start_page = pages_tab[i];
count = 1;
while (true) {
if ((i + 1) >= pages_to_flush)
break;
if (pages_tab[i] == pages_tab[i + 1]) {
i++;
continue;
}
if ((pages_tab[i] + 1) != pages_tab[i + 1])
break;
i++;
count++;
}
env_atomic_inc(&ctx->flush_req_cnt);
result |= metadata_io_write_i_asynch(cache, rq->io_queue, ctx,
raw->ssd_pages_offset + start_page, count,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete);
if (result)
break;
i++;
}
_raw_ram_flush_do_asynch_io_complete(cache, ctx, result);
if (line_no > MAX_STACK_TAB_SIZE)
env_free(pages_tab);
return result;
}
/*******************************************************************************
* RAW Interfaces definitions
******************************************************************************/
#include "metadata_raw_dynamic.h"
#include "metadata_raw_volatile.h"
static const struct raw_iface IRAW[metadata_raw_type_max] = {
[metadata_raw_type_ram] = {
.init = _raw_ram_init,
.deinit = _raw_ram_deinit,
.size_of = _raw_ram_size_of,
.size_on_ssd = _raw_ram_size_on_ssd,
.checksum = _raw_ram_checksum,
.get = _raw_ram_get,
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
.flush = _raw_ram_flush,
.load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark,
.flush_do_asynch = _raw_ram_flush_do_asynch,
},
[metadata_raw_type_dynamic] = {
.init = raw_dynamic_init,
.deinit = raw_dynamic_deinit,
.size_of = raw_dynamic_size_of,
.size_on_ssd = raw_dynamic_size_on_ssd,
.checksum = raw_dynamic_checksum,
.get = raw_dynamic_get,
.set = raw_dynamic_set,
.rd_access = raw_dynamic_rd_access,
.wr_access = raw_dynamic_wr_access,
.flush = raw_dynamic_flush,
.load_all = raw_dynamic_load_all,
.flush_all = raw_dynamic_flush_all,
.flush_mark = raw_dynamic_flush_mark,
.flush_do_asynch = raw_dynamic_flush_do_asynch,
},
[metadata_raw_type_volatile] = {
.init = _raw_ram_init,
.deinit = _raw_ram_deinit,
.size_of = _raw_ram_size_of,
.size_on_ssd = raw_volatile_size_on_ssd,
.checksum = raw_volatile_checksum,
.get = _raw_ram_get,
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
.flush = raw_volatile_flush,
.load_all = raw_volatile_load_all,
.flush_all = raw_volatile_flush_all,
.flush_mark = raw_volatile_flush_mark,
.flush_do_asynch = raw_volatile_flush_do_asynch,
},
[metadata_raw_type_atomic] = {
.init = _raw_ram_init,
.deinit = _raw_ram_deinit,
.size_of = _raw_ram_size_of,
.size_on_ssd = _raw_ram_size_on_ssd,
.checksum = _raw_ram_checksum,
.get = _raw_ram_get,
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
.flush = _raw_ram_flush,
.load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all,
.flush_mark = raw_atomic_flush_mark,
.flush_do_asynch = raw_atomic_flush_do_asynch,
},
};
/*******************************************************************************
* RAW Top interface implementation
******************************************************************************/
int ocf_metadata_raw_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
raw->iface = &(IRAW[raw->raw_type]);
return raw->iface->init(cache, raw);
}
int ocf_metadata_raw_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
int result;
if (!raw->iface)
return 0;
result = raw->iface->deinit(cache, raw);
raw->iface = NULL;
return result;
}
size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache* cache,
struct ocf_metadata_raw* raw)
{
ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
return IRAW[raw->raw_type].size_on_ssd(cache, raw);
}

352
src/metadata/metadata_raw.h Normal file
View File

@@ -0,0 +1,352 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_H__
#define __METADATA_RAW_H__
/**
* @file metadata_raw.h
* @brief Metadata RAW container implementation
*/
/**
* @brief Metadata raw type
*/
enum ocf_metadata_raw_type {
/**
* @brief Default implementation with support of
* flushing to / loading from SSD
*/
metadata_raw_type_ram = 0,
/**
* @brief Dynamic implementation, elements are allocated on
* first access
*/
metadata_raw_type_dynamic,
/**
* @brief This container does not flush metadata to SSD and does not
* support loading from SSD
*/
metadata_raw_type_volatile,
/**
* @brief Implementation for atomic device used as cache
*/
metadata_raw_type_atomic,
metadata_raw_type_max, /*!< MAX */
metadata_raw_type_min = metadata_raw_type_ram /*!< MIN */
};
/**
* @brief RAW instance descriptor
*/
struct ocf_metadata_raw {
/**
* @name Metadata and RAW types
*/
enum ocf_metadata_segment metadata_segment; /*!< Metadata segment */
enum ocf_metadata_raw_type raw_type; /*!< RAW implementation type */
/**
* @name Metadata elements description
*/
uint32_t entry_size; /*!< Size of particular entry */
uint32_t entries_in_page; /*!< Numbers of entries in one page*/
uint64_t entries; /*!< Numbers of entries */
/**
* @name Location on cache device description
*/
uint64_t ssd_pages_offset; /*!< SSD (Cache device) Page offset */
uint64_t ssd_pages; /*!< Numbers of pages that are required */
const struct raw_iface *iface; /*!< RAW container interface*/
/**
* @name Private RAW elements
*/
void *mem_pool; /*!< Private memory pool*/
size_t mem_pool_limit; /*!< Current memory pool size (limit) */
void *priv; /*!< Private data - context */
};
/**
* RAW container interface
*/
struct raw_iface {
int (*init)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
int (*deinit)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
size_t (*size_of)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/**
* @brief Return the size that the metadata occupies on the cache device
*
* @param cache Cache instance
* @param raw RAW container of metadata
*
* @return Number of pages (4 kiB) on cache device
*/
uint32_t (*size_on_ssd)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
uint32_t (*checksum)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
int (*get)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
int (*set)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
const void* (*rd_access)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
void* (*wr_access)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size);
int (*flush)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line);
int (*load_all)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
int (*flush_all)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start,
uint8_t stop);
int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *rq,
struct ocf_metadata_raw *raw,
ocf_metadata_asynch_flush_hndl complete);
};
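/*
* Illustrative usage sketch (the descriptor values and the surrounding
* variables are assumptions, not taken from a real configuration):
*
*	struct ocf_metadata_raw raw = {
*		.metadata_segment = segment,
*		.raw_type = metadata_raw_type_ram,
*		.entry_size = entry_size,
*		.entries_in_page = PAGE_SIZE / entry_size,
*		.entries = entries,
*		.ssd_pages_offset = ssd_pages_offset,
*		.ssd_pages = DIV_ROUND_UP(entries,
*				PAGE_SIZE / entry_size),
*	};
*
*	if (ocf_metadata_raw_init(cache, &raw) == 0)
*		ocf_metadata_raw_set(cache, &raw, line, &entry,
*				entry_size);
*/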
/**
* @brief Initialize RAW instance
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
int ocf_metadata_raw_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/**
* @brief De-Initialize RAW instance
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
int ocf_metadata_raw_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/**
* @brief Get memory footprint
*
* @param cache Cache instance
* @param raw RAW descriptor
* @return Memory footprint
*/
static inline size_t ocf_metadata_raw_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
if (!raw->iface)
return 0;
return raw->iface->size_of(cache, raw);
}
/**
* @brief Get SSD footprint
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return Size on SSD
*/
size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache* cache,
struct ocf_metadata_raw* raw);
/**
* @brief Calculate metadata checksum
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return Checksum
*/
static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache,
struct ocf_metadata_raw* raw)
{
return raw->iface->checksum(cache, raw);
}
/**
* @brief Get specified element of metadata
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to get
* @param data - Data where metadata entry will be copied into
* @param size - Size of data
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size)
{
return raw->iface->get(cache, raw, line, data, size);
}
/**
* @brief Access specified element of metadata directly for writing
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be accessed
* @param size - Size of data
* @return Pointer to the accessed data, or NULL on error
*/
static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
return raw->iface->wr_access(cache, raw, line, size);
}
/**
* @brief Access specified element of metadata directly for reading
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be accessed
* @param size - Size of data
* @return Pointer to the accessed data, or NULL on error
*/
static inline const void *ocf_metadata_raw_rd_access(
struct ocf_cache *cache, struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size)
{
return raw->iface->rd_access(cache, raw, line, size);
}
/**
* @brief Set specified element of metadata
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be set
* @param data - Data that will be copied into the metadata entry
* @param size - Size of data
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_set(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size)
{
return raw->iface->set(cache, raw, line, data, size);
}
/**
* @brief Flush specified element of metadata into SSD
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be flushed
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
return raw->iface->flush(cache, raw, line);
}
/**
* @brief Load all entries from the cache device
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
return raw->iface->load_all(cache, raw);
}
/**
* @brief Flush all entries to the cache device
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
return raw->iface->flush_all(cache, raw);
}
static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
raw->iface->flush_mark(cache, rq, map_idx, to_state, start, stop);
}
static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_metadata_asynch_flush_hndl complete)
{
return raw->iface->flush_do_asynch(cache, rq, raw, complete);
}
/*
* Check if line is valid for specified RAW descriptor
*/
static inline bool _raw_is_valid(struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size)
{
if (!raw)
return false;
if (size != raw->entry_size)
return false;
if (line >= raw->entries)
return false;
return true;
}
static inline void _raw_bug_on(struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
}
#define MAX_STACK_TAB_SIZE 32
int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2);
#endif /* __METADATA_RAW_H__ */

View File

@@ -0,0 +1,260 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_raw_atomic.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_ATOMIC_DEBUG 0
#if 1 == OCF_METADATA_RAW_ATOMIC_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
struct _raw_atomic_flush_ctx {
struct ocf_request *rq;
ocf_metadata_asynch_flush_hndl complete;
env_atomic flush_req_cnt;
};
static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx,
int error)
{
if (error)
ctx->rq->error = error;
if (env_atomic_dec_return(&ctx->flush_req_cnt))
return;
if (ctx->rq->error)
ocf_metadata_error(ctx->rq->cache);
/* Call metadata flush completed call back */
OCF_DEBUG_MSG(ctx->rq->cache, "Asynchronous flushing complete");
ctx->complete(ctx->rq, ctx->rq->error);
env_free(ctx);
}
static void _raw_atomic_io_discard_end(struct ocf_io *io, int error)
{
struct _raw_atomic_flush_ctx *ctx = io->priv1;
ocf_io_put(io); /* Release IO */
_raw_atomic_io_discard_cmpl(ctx, error);
}
static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
{
struct ocf_request *rq = context;
struct ocf_io *io = ocf_new_cache_io(cache);
if (!io) {
rq->error = -ENOMEM;
return rq->error;
}
OCF_DEBUG_PARAM(cache, "Address to flush = %llu, length = %u",
start_addr, len);
env_atomic_inc(&ctx->flush_req_cnt);
ocf_io_configure(io, start_addr, len, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, ctx, NULL, _raw_atomic_io_discard_end);
if (cache->device->obj.features.discard_zeroes)
ocf_dobj_submit_discard(io);
else
ocf_dobj_submit_write_zeroes(io);
return rq->error;
}
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
if (to_state == INVALID) {
rq->map[map_idx].flush = true;
rq->map[map_idx].start_flush = start;
rq->map[map_idx].stop_flush = stop;
rq->info.flush_metadata = true;
}
}
#define MAX_STACK_TAB_SIZE 32
static inline void _raw_atomic_add_page(struct ocf_cache *cache,
uint32_t *clines_tab, uint64_t line, int *idx)
{
clines_tab[*idx] = ocf_metadata_map_lg2phy(cache, line);
(*idx)++;
}
static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
struct ocf_request *rq, int map_idx,
struct _raw_atomic_flush_ctx *ctx)
{
struct ocf_map_info *map = &rq->map[map_idx];
uint32_t len = 0;
uint64_t start_addr;
int result = 0;
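/* The byte range to invalidate is the cache line's base address on
* the device plus the [start_flush, stop_flush] sector sub-range
*/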
start_addr = ocf_metadata_map_lg2phy(cache, map->coll_idx);
start_addr *= ocf_line_size(cache);
start_addr += cache->device->metadata_offset;
start_addr += SECTORS_TO_BYTES(map->start_flush);
len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush);
len += SECTORS_TO_BYTES(1);
result = _raw_atomic_io_discard_do(cache, rq, start_addr, len, ctx);
return result;
}
int raw_atomic_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete)
{
int result = 0, i;
uint32_t __clines_tab[MAX_STACK_TAB_SIZE];
uint32_t *clines_tab;
int clines_to_flush = 0;
uint32_t len = 0;
int line_no = rq->core_line_count;
struct ocf_map_info *map;
uint64_t start_addr;
struct _raw_atomic_flush_ctx *ctx;
ENV_BUG_ON(!complete);
if (!rq->info.flush_metadata) {
/* Nothing to flush, call the flush callback */
complete(rq, 0);
return 0;
}
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
complete(rq, -ENOMEM);
return -ENOMEM;
}
ctx->rq = rq;
ctx->complete = complete;
env_atomic_set(&ctx->flush_req_cnt, 1);
if (line_no == 1) {
map = &rq->map[0];
if (map->flush && map->status != LOOKUP_MISS) {
result = _raw_atomic_flush_do_asynch_sec(cache, rq,
0, ctx);
}
_raw_atomic_io_discard_cmpl(ctx, result);
return result;
}
if (line_no <= MAX_STACK_TAB_SIZE) {
clines_tab = __clines_tab;
} else {
clines_tab = env_zalloc(sizeof(*clines_tab) * line_no,
ENV_MEM_NOIO);
if (!clines_tab) {
complete(rq, -ENOMEM);
env_free(ctx);
return -ENOMEM;
}
}
for (i = 0; i < line_no; i++) {
map = &rq->map[i];
if (!map->flush || map->status == LOOKUP_MISS)
continue;
if (i == 0) {
/* First */
if (map->start_flush) {
_raw_atomic_flush_do_asynch_sec(cache, rq, i,
ctx);
} else {
_raw_atomic_add_page(cache, clines_tab,
map->coll_idx, &clines_to_flush);
}
} else if (i == (line_no - 1)) {
/* Last */
if (map->stop_flush != ocf_line_end_sector(cache)) {
_raw_atomic_flush_do_asynch_sec(cache, rq,
i, ctx);
} else {
_raw_atomic_add_page(cache, clines_tab,
map->coll_idx, &clines_to_flush);
}
} else {
/* Middle */
_raw_atomic_add_page(cache, clines_tab, map->coll_idx,
&clines_to_flush);
}
}
env_sort(clines_tab, clines_to_flush, sizeof(*clines_tab),
_raw_ram_flush_do_page_cmp, NULL);
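/* Walk the sorted cache line table and merge runs of physically
* consecutive lines into a single discard request
*/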
i = 0;
while (i < clines_to_flush) {
start_addr = clines_tab[i];
start_addr *= ocf_line_size(cache);
start_addr += cache->device->metadata_offset;
len = ocf_line_size(cache);
while (true) {
if ((i + 1) >= clines_to_flush)
break;
if ((clines_tab[i] + 1) != clines_tab[i + 1])
break;
i++;
len += ocf_line_size(cache);
}
result |= _raw_atomic_io_discard_do(cache, rq, start_addr,
len, ctx);
if (result)
break;
i++;
}
_raw_atomic_io_discard_cmpl(ctx, result);
if (line_no > MAX_STACK_TAB_SIZE)
env_free(clines_tab);
return result;
}

View File

@@ -0,0 +1,16 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_ATOMIC_H__
#define __METADATA_RAW_ATOMIC_H__
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
int raw_atomic_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete);
#endif /* __METADATA_RAW_ATOMIC_H__ */

View File

@@ -0,0 +1,446 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_raw_dynamic.h"
#include "metadata_io.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0
#if 1 == OCF_METADATA_RAW_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Dynamic] %s\n", __func__)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Dynamic] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/*******************************************************************************
* Common RAW Implementation
******************************************************************************/
/*
* Check if page is valid for specified RAW descriptor
*/
static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
{
ENV_BUG_ON(page < raw->ssd_pages_offset);
ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));
return true;
}
/*******************************************************************************
* RAW dynamic Implementation
******************************************************************************/
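/*
* An entry lives at byte offset (line % entries_in_page) * entry_size
* within its page; the pages themselves are allocated lazily on first
* access.
*/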
#define _RAW_DYNAMIC_PAGE(raw, line) \
((line) / raw->entries_in_page)
#define _RAW_DYNAMIC_PAGE_OFFSET(raw, line) \
((line % raw->entries_in_page) * raw->entry_size)
/*
* RAW DYNAMIC control structure
*/
struct _raw_ctrl {
env_mutex lock;
env_atomic count;
void *pages[];
};
static void *_raw_dynamic_get_item(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size)
{
void *new = NULL;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
OCF_DEBUG_PARAM(cache, "Accessing item %u on page %u", line, page);
if (!ctrl->pages[page]) {
/* No page, allocate one and set it */
/* This RAW container has some restrictions that need to be
* honored:
* 1. No allocation in atomic context
* 2. Only one allocator at a time
*/
ENV_BUG_ON(env_in_interrupt());
env_mutex_lock(&ctrl->lock);
if (ctrl->pages[page]) {
/* Page has been already allocated, skip allocation */
goto _raw_dynamic_get_item_SKIP;
}
OCF_DEBUG_PARAM(cache, "New page allocation - %u", page);
new = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (new) {
ctrl->pages[page] = new;
env_atomic_inc(&ctrl->count);
}
_raw_dynamic_get_item_SKIP:
env_mutex_unlock(&ctrl->lock);
}
if (ctrl->pages[page])
return ctrl->pages[page] + _RAW_DYNAMIC_PAGE_OFFSET(raw, line);
return NULL;
}
/*
* RAM DYNAMIC Implementation - De-Initialize
*/
int raw_dynamic_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
uint32_t i;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
if (!ctrl)
return 0;
OCF_DEBUG_TRACE(cache);
for (i = 0; i < raw->ssd_pages; i++)
env_free(ctrl->pages[i]);
env_vfree(ctrl);
raw->priv = NULL;
return 0;
}
/*
* RAM DYNAMIC Implementation - Initialize
*/
int raw_dynamic_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl;
size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
OCF_DEBUG_TRACE(cache);
if (raw->entry_size > PAGE_SIZE)
return -1;
ctrl = env_vmalloc(size);
if (!ctrl)
return -1;
ENV_BUG_ON(env_memset(ctrl, size, 0));
if (env_mutex_init(&ctrl->lock)) {
env_vfree(ctrl);
return -1;
}
raw->priv = ctrl;
return 0;
}
/*
* RAW DYNAMIC Implementation - Size of
*/
size_t raw_dynamic_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
size_t size;
/* Size of allocated items */
size = env_atomic_read(&ctrl->count);
size *= PAGE_SIZE;
/* Size of control structure */
size += sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
OCF_DEBUG_PARAM(cache, "Count = %d, Size = %lu",
env_atomic_read(&ctrl->count), size);
return size;
}
/*
* RAW DYNAMIC Implementation - Size on SSD
*/
uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
const size_t alignment = 128 * KiB / PAGE_SIZE;
return DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
}
/*
* RAM DYNAMIC Implementation - Checksum
*/
uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
uint64_t i;
uint32_t step = 0;
uint32_t crc = 0;
for (i = 0; i < raw->ssd_pages; i++) {
if (ctrl->pages[i])
crc = env_crc32(crc, ctrl->pages[i], PAGE_SIZE);
OCF_COND_RESCHED(step, 10000);
}
return crc;
}
/*
* RAM DYNAMIC Implementation - Get
*/
int raw_dynamic_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
void *item = _raw_dynamic_get_item(cache, raw, line, size);
if (!item) {
ENV_BUG_ON(env_memset(data, size, 0));
ocf_metadata_error(cache);
return -1;
}
return env_memcpy(data, size, item, size);
}
/*
* RAM DYNAMIC Implementation - Set
*/
int raw_dynamic_set(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
void *item = _raw_dynamic_get_item(cache, raw, line, size);
if (!item) {
ocf_metadata_error(cache);
return -1;
}
return env_memcpy(item, size, data, size);
}
/*
* RAM DYNAMIC Implementation - access
*/
const void *raw_dynamic_rd_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
return _raw_dynamic_get_item(cache, raw, line, size);
}
/*
* RAM DYNAMIC Implementation - access
*/
void *raw_dynamic_wr_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
return _raw_dynamic_get_item(cache, raw, line, size);
}
int raw_dynamic_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
OCF_DEBUG_PARAM(cache, "Line %u, page = %u", line, page);
ENV_BUG_ON(!ctrl->pages[page]);
return metadata_io_write(cache, ctrl->pages[page],
raw->ssd_pages_offset + page);
}
/*
* RAM DYNAMIC Implementation - Load all
*/
#define RAW_DYNAMIC_LOAD_PAGES 128
int raw_dynamic_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
uint64_t i = 0, i_page = 0;
uint64_t count = RAW_DYNAMIC_LOAD_PAGES;
int error = 0, cmp;
struct ocf_io *io;
ctx_data_t *data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
char *page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
char *zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!data || !page || !zpage) {
ctx_data_free(cache->owner, data);
env_free(page);
env_free(zpage);
return -ENOMEM;
}
OCF_DEBUG_TRACE(cache);
/* Load all metadata. When a page contains only zeros there is no
* need to allocate memory for it.
*/
while (i < raw->ssd_pages) {
if (i + count > raw->ssd_pages)
count = raw->ssd_pages - i;
/* Allocate IO */
io = ocf_new_cache_io(cache);
if (!io) {
error = -ENOMEM;
break;
}
/* Setup IO */
error = ocf_io_set_data(io, data, 0);
if (error) {
ocf_io_put(io);
break;
}
ocf_io_configure(io,
PAGES_TO_BYTES(raw->ssd_pages_offset + i),
PAGES_TO_BYTES(count), OCF_READ, 0, 0);
/* Submit IO */
error = ocf_submit_io_wait(io);
ocf_io_put(io);
io = NULL;
if (error)
break;
/* Reset head of data buffer */
ctx_data_seek_check(cache->owner, data,
ctx_data_seek_begin, 0);
for (i_page = 0; i_page < count; i_page++, i++) {
if (!page) {
page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!page) {
/* Allocation error */
error = -ENOMEM;
break;
}
}
ctx_data_rd_check(cache->owner, page, data, PAGE_SIZE);
error = env_memcmp(zpage, PAGE_SIZE, page,
PAGE_SIZE, &cmp);
if (error)
break;
if (cmp == 0) {
OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
continue;
}
OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
ctrl->pages[i] = page;
page = NULL;
env_atomic_inc(&ctrl->count);
}
if (error)
break;
}
env_free(zpage);
env_free(page);
ctx_data_free(cache->owner, data);
return error;
}
/*
* RAM DYNAMIC Implementation - Flush all - Fill page callback
*/
static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
raw_page = page - raw->ssd_pages_offset;
if (ctrl->pages[raw_page]) {
OCF_DEBUG_PARAM(cache, "Page = %u", raw_page);
ctx_data_wr_check(cache->owner, data, ctrl->pages[raw_page],
PAGE_SIZE);
} else {
OCF_DEBUG_PARAM(cache, "Zero fill, Page = %u", raw_page);
/* Page was not allocated before, so write only zeros */
ctx_data_zero_check(cache->owner, data, PAGE_SIZE);
}
return 0;
}
int raw_dynamic_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
return metadata_io_write_i(cache, raw->ssd_pages_offset,
raw->ssd_pages, _raw_dynamic_flush_all_fill, raw);
}
/*
* RAM DYNAMIC Implementation - Mark to Flush
*/
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
ENV_BUG();
}
/*
* RAM DYNAMIC Implementation - Do flushing asynchronously
*/
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete)
{
ENV_BUG();
return -ENOSYS;
}

View File

@@ -0,0 +1,106 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_DYNAMIC_H__
#define __METADATA_RAW_DYNAMIC_H__
/**
* @file metadata_raw_dynamic.h
* @brief Metadata RAW container implementation for dynamic numbers of elements
*/
/*
* RAW DYNAMIC - Initialize
*/
int raw_dynamic_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - De-Initialize
*/
int raw_dynamic_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Get size of memory footprint of this RAW metadata container
*/
size_t raw_dynamic_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC Implementation - Size on SSD
*/
uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC Implementation - Checksum
*/
uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Get specified entry
*/
int raw_dynamic_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
/*
* RAW DYNAMIC - Set specified entry
*/
int raw_dynamic_set(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
/*
* RAW DYNAMIC - Read only access for specified entry
*/
const void *raw_dynamic_rd_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
/*
* RAW DYNAMIC - Write access for specified entry
*/
void *raw_dynamic_wr_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
/*
* RAW DYNAMIC - Flush specified entry
*/
int raw_dynamic_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line);
/*
* RAW DYNAMIC - Load all metadata of this RAW metadata container
* from cache device
*/
int raw_dynamic_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Flush all metadata of this RAW metadata container
* to cache device
*/
int raw_dynamic_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Mark specified entry to be flushed
*/
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/*
* RAW DYNAMIC - Do Flush Asynchronously
*/
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete);
#endif /* __METADATA_RAW_DYNAMIC_H__ */

View File

@@ -0,0 +1,74 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_io.h"
#include "metadata_raw_volatile.h"
/*
* RAW volatile Implementation - Size on SSD
*/
uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
return 0;
}
/*
* RAW volatile Implementation - Checksum
*/
uint32_t raw_volatile_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
return 0;
}
/*
* RAW volatile Implementation - Flush specified element to SSD
*/
int raw_volatile_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
return 0;
}
/*
* RAW volatile Implementation - Load all metadata elements from SSD
*/
int raw_volatile_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
return -ENOTSUP;
}
/*
* RAW volatile Implementation - Flush all elements
*/
int raw_volatile_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
return 0;
}
/*
* RAW volatile Implementation - Mark to Flush
*/
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
}
/*
* RAW volatile Implementation - Do Flush asynchronously
*/
int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete)
{
complete(rq, 0);
return 0;
}

View File

@@ -0,0 +1,52 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_VOLATILE_H__
#define __METADATA_RAW_VOLATILE_H__
/*
* RAW volatile Implementation - Size on SSD
*/
uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Checksum
*/
uint32_t raw_volatile_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Flush specified element to SSD
*/
int raw_volatile_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line);
/*
* RAW volatile Implementation - Load all metadata elements from SSD
*/
int raw_volatile_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Flush all elements
*/
int raw_volatile_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Mark to Flush
*/
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/*
* RAW volatile Implementation - Do Flush asynchronously
*/
int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete);
#endif /* __METADATA_RAW_VOLATILE_H__ */

View File

@@ -0,0 +1,435 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_STATUS_H__
#define __METADATA_STATUS_H__
#include "../ocf_request.h"
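/*
* The helpers below operate on the per-sector dirty/valid bits of a
* cache line under the metadata bits lock. Passing
* settings.sector_start/sector_end selects the full cache line range.
*/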
/*******************************************************************************
* Dirty
******************************************************************************/
static inline void metadata_init_status_bits(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
cache->metadata.iface.clear_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline bool metadata_test_dirty_all(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, true);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, false);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline void metadata_set_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline bool metadata_test_and_clear_dirty(
struct ocf_cache *cache, ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_clear_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, false);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
static inline bool metadata_test_and_set_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_set_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, false);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
/*******************************************************************************
* Dirty - Sector Implementation
******************************************************************************/
static inline bool metadata_test_dirty_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_dirty(cache, line,
start, stop, false);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_dirty_all_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_dirty(cache, line,
start, stop, true);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_dirty_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
return metadata_test_dirty_sec(cache, line, pos, pos);
}
static inline bool metadata_test_dirty_out_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_out_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline void metadata_set_dirty_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_dirty_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_set_dirty_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_dirty(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_dirty_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline bool metadata_test_and_clear_dirty_sec(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool test = false;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_clear_dirty(cache, line,
start, stop, false);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
/*
* Marks given cache line's bits as clean
*
* @return true if the cache line was dirty and became clean
* @return false for other cases
*/
static inline bool metadata_clear_dirty_sec_changed(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool was_dirty, is_dirty = false;
OCF_METADATA_BITS_LOCK_WR();
was_dirty = cache->metadata.iface.test_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end,
false);
if (was_dirty) {
is_dirty = cache->metadata.iface.clear_dirty(cache, line,
start, stop);
}
OCF_METADATA_BITS_UNLOCK_WR();
return was_dirty && !is_dirty;
}
/*
* Marks given cache line's bits as dirty
*
* @return true if the cache line was clean and became dirty
* @return false if the cache line was dirty before marking bits
*/
static inline bool metadata_set_dirty_sec_changed(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool was_dirty;
OCF_METADATA_BITS_LOCK_WR();
was_dirty = cache->metadata.iface.set_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
return !was_dirty;
}
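/*
 * A usage sketch for the *_changed helpers above (the helper and the
 * counter below are hypothetical, not part of this header): statistics
 * only need updating when the line's aggregate state flips, which is
 * exactly what the return value reports.
 */
static inline void example_mark_line_dirty(struct ocf_cache *cache,
		ocf_cache_line_t line, uint8_t start, uint8_t stop,
		env_atomic *dirty_lines)
{
	/* Counter changes only when the whole line just became dirty */
	if (metadata_set_dirty_sec_changed(cache, line, start, stop))
		env_atomic_inc(dirty_lines);
}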
/*******************************************************************************
* Valid
******************************************************************************/
static inline bool metadata_test_valid_any(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, false);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, true);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline void metadata_set_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline bool metadata_test_and_clear_valid(
struct ocf_cache *cache, ocf_cache_line_t line)
{
bool test = false;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_clear_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, true);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
static inline bool metadata_test_and_set_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test = false;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_set_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, true);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
/*******************************************************************************
* Valid - Sector Implementation
******************************************************************************/
static inline bool metadata_test_valid_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_valid(cache, line,
start, stop, true);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_valid_any_out_sec(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool test = false;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_out_valid(cache, line,
start, stop);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_valid_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
return metadata_test_valid_sec(cache, line, pos, pos);
}
/*
 * Marks given cache line's bits as valid
 *
 * @return true if the cache line was invalid (all bits invalid) before this
 * operation, i.e. the line as a whole just became valid
 * @return false if any of the cache line's bits was valid before this
 * operation
 */
static inline bool metadata_set_valid_sec_changed(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool was_any_valid;
OCF_METADATA_BITS_LOCK_WR();
was_any_valid = cache->metadata.iface.set_valid(cache, line,
start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
return !was_any_valid;
}
static inline void metadata_clear_valid_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_valid(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_valid_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_valid(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_set_valid_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_valid(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
/*
* Marks given cache line's bits as invalid
*
* @return true if any of the cache line's bits was valid and the cache line
* became invalid (all bits invalid) after the operation
* @return false in other cases
*/
static inline bool metadata_clear_valid_sec_changed(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop, bool *is_valid)
{
bool was_any_valid;
OCF_METADATA_BITS_LOCK_WR();
was_any_valid = cache->metadata.iface.test_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, false);
*is_valid = cache->metadata.iface.clear_valid(cache, line,
start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
return was_any_valid && !*is_valid;
}
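/*
 * A similar sketch for the valid-bit transition helper (hypothetical
 * caller, not part of this header): an occupancy counter only changes
 * when the whole line flips from invalid to valid.
 */
static inline void example_mark_line_valid(struct ocf_cache *cache,
		ocf_cache_line_t line, uint8_t start, uint8_t stop,
		env_atomic *occupied_lines)
{
	/* True only when the line was fully invalid before this call */
	if (metadata_set_valid_sec_changed(cache, line, start, stop))
		env_atomic_inc(occupied_lines);
}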
#endif /* METADATA_STATUS_H_ */


@@ -0,0 +1,491 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_STRUCTS_H__
#define __METADATA_STRUCTS_H__
#include "../eviction/eviction.h"
#include "../cleaning/cleaning.h"
#include "../ocf_request.h"
/**
 * @file metadata_structs.h
* @brief Metadata private structures
*/
/**
* @brief Metadata shutdown status
*/
enum ocf_metadata_shutdown_status {
	ocf_metadata_clean_shutdown = 1, /*!< Graceful OCF shutdown */
	ocf_metadata_dirty_shutdown = 0, /*!< Dirty OCF shutdown */
	ocf_metadata_detached = 2, /*!< Cache device detached */
};
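/*
 * Note: these numeric values are persisted on the cache device (see the
 * clean_shutdown field of struct ocf_superblock_config), so they should
 * not be renumbered.
 */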
/**
* @brief Asynchronous metadata request completed
*
* @param cache - Cache instance
* @param error - Indicates operation result, 0 - Finished successfully
* @param line - cache line for which completion is signaled
* @param context - Context of metadata request
*/
typedef void (*ocf_metadata_asynch_hndl)(struct ocf_cache *cache,
int error, ocf_cache_line_t line, void *context);
typedef void (*ocf_metadata_asynch_flush_hndl)(void *context, int error);
/*
* Metadata cache line location on pages interface
*/
struct ocf_metadata_layout_iface {
/**
* @brief Initialize freelist partition
*
* @param cache - Cache instance
*/
void (*init_freelist)(struct ocf_cache *cache);
/**
	 * This function maps a collision index to the corresponding cache
	 * line (logical to physical cache line mapping).
	 *
	 * It is needed because we want to generate a sequential workload
	 * towards the cache device. Our collision list, for example, looks
	 * like this:
	 *	0 3 6 9
	 *	1 4 7 10
	 *	2 5 8
	 * All collision indexes in a given column reside on the same page
	 * of the cache device, and we do not want to send a request x times
	 * to the same page. To avoid that we walk the collision indexes by
	 * row, but then a collision index cannot be used directly as a
	 * cache line, because that would generate a non-sequential workload
	 * (writing pages 0 -> 3 -> 6 ...). This function provides the
	 * mapping that restores the sequential ordering.
	 *
	 * After applying this function, the collision indexes in the array
	 * above correspond to the following cache lines:
* 0 1 2 3
* 4 5 6 7
* 8 9 10
*
* @param cache - cache instance
* @param idx - index in collision list
* @return mapped cache line
*/
ocf_cache_line_t (*lg2phy)(struct ocf_cache *cache,
ocf_cache_line_t coll_idx);
/**
* @brief Map physical cache line on cache device to logical one
	 * @note This function is the inverse of lg2phy
*
* @param cache Cache instance
* @param phy Physical cache line of cache device
* @return Logical cache line
*/
ocf_cache_line_t (*phy2lg)(struct ocf_cache *cache,
ocf_cache_line_t phy);
};
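/*
 * A minimal sketch of the striping arithmetic described above (illustration
 * only - these example_* helpers are hypothetical and not part of the OCF
 * interface; the real implementation sits behind lg2phy/phy2lg and also has
 * to handle a partially filled last page). With lines_per_page entries per
 * metadata page and `pages` pages, the layout example above corresponds to
 * lines_per_page = 3, pages = 4.
 */
static inline ocf_cache_line_t example_lg2phy(ocf_cache_line_t lg,
		uint32_t lines_per_page, uint32_t pages)
{
	/* column-major (per-page) index -> row-major (sequential) line */
	return (lg % lines_per_page) * pages + (lg / lines_per_page);
}

static inline ocf_cache_line_t example_phy2lg(ocf_cache_line_t phy,
		uint32_t lines_per_page, uint32_t pages)
{
	/* inverse mapping: sequential line -> per-page collision index */
	return (phy % pages) * lines_per_page + (phy / pages);
}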
/**
* OCF Metadata interface
*/
struct ocf_metadata_iface {
/**
* @brief Initialize metadata
*
* @param cache - Cache instance
* @param cache_line_size - Cache line size
* @return 0 - Operation success otherwise failure
*/
int (*init)(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size);
/**
* @brief Initialize variable size metadata sections
*
* @param cache - Cache instance
* @param device_size - Cache size in bytes
* @param cache_line_size - Cache line size
	 * @param layout - Metadata layout
* @return 0 - Operation success otherwise failure
*/
int (*init_variable_size)(struct ocf_cache *cache, uint64_t device_size,
ocf_cache_line_size_t cache_line_size,
ocf_metadata_layout_t layout);
/**
* @brief Metadata cache line location on pages interface
*/
const struct ocf_metadata_layout_iface *layout_iface;
/**
* @brief Initialize hash table
*
* @param cache - Cache instance
*/
void (*init_hash_table)(struct ocf_cache *cache);
/**
* @brief De-Initialize metadata
*
* @param cache - Cache instance
*/
void (*deinit)(struct ocf_cache *cache);
/**
* @brief De-Initialize variable size metadata segments
*
* @param cache - Cache instance
*/
void (*deinit_variable_size)(struct ocf_cache *cache);
/**
* @brief Get memory footprint
*
* @param cache - Cache instance
	 * @return Memory footprint
*/
size_t (*size_of)(struct ocf_cache *cache);
/**
* @brief Get amount of pages required for metadata
*
* @param cache - Cache instance
	 * @return Pages required to store metadata on cache device
*/
ocf_cache_line_t (*pages)(struct ocf_cache *cache);
/**
* @brief Get amount of cache lines
*
* @param cache - Cache instance
* @return Amount of cache lines (cache device lines - metadata space)
*/
ocf_cache_line_t (*cachelines)(struct ocf_cache *cache);
/**
* @brief Load metadata from cache device
*
* @param[in] cache - Cache instance
* @return 0 - Operation success otherwise failure
*/
int (*load_all)(struct ocf_cache *cache);
/**
	 * @brief Load metadata using the recovery procedure
	 *
* @param[in] cache - Cache instance
* @return 0 - Operation success otherwise failure
*/
int (*load_recovery)(struct ocf_cache *cache);
/**
	 * @brief Flush metadata onto cache device
*
* @param[in] cache - Cache instance
* @return 0 - Operation success otherwise failure
*/
int (*flush_all)(struct ocf_cache *cache);
/**
* @brief Flush metadata for specified cache line
*
* @param[in] cache - Cache instance
	 * @param[in] line - cache line to be flushed
*/
void (*flush)(struct ocf_cache *cache, ocf_cache_line_t line);
/**
* @brief Mark specified cache line to be flushed
*
* @param[in] cache - Cache instance
	 * @param[in] rq - Request in the context of which marking is done
	 * @param[in] map_idx - Index in the request's map
	 * @param[in] to_state - Target state to be marked
	 * @param[in] start - First sector of the range
	 * @param[in] stop - Last sector of the range
*/
void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start,
uint8_t stop);
/**
* @brief Flush marked cache lines asynchronously
*
* @param cache - Cache instance
	 * @param rq - Request in the context of which flushing is done
	 * @param complete - Flushing completion callback
*/
void (*flush_do_asynch)(struct ocf_cache *cache,
struct ocf_request *rq, ocf_end_t complete);
/* TODO Provide documentation below */
enum ocf_metadata_shutdown_status (*get_shutdown_status)(
struct ocf_cache *cache);
int (*set_shutdown_status)(struct ocf_cache *cache,
enum ocf_metadata_shutdown_status shutdown_status);
int (*load_superblock)(struct ocf_cache *cache);
int (*flush_superblock)(struct ocf_cache *cache);
uint64_t (*get_reserved_lba)(struct ocf_cache *cache);
/**
* @brief Get eviction policy
*
* @param[in] cache - Cache instance
* @param[in] line - cache line for which eviction policy is requested
* @param[out] eviction_policy - Eviction policy
*/
void (*get_eviction_policy)(struct ocf_cache *cache,
ocf_cache_line_t line,
union eviction_policy_meta *eviction_policy);
/**
* @brief Set eviction policy
*
* @param[in] cache - Cache instance
	 * @param[in] line - Cache line
	 * @param[in] eviction_policy - Eviction policy values which will be
	 * stored in metadata service
*/
void (*set_eviction_policy)(struct ocf_cache *cache,
ocf_cache_line_t line,
union eviction_policy_meta *eviction_policy);
/**
* @brief Flush eviction policy for given cache line
*
* @param[in] cache - Cache instance
* @param[in] line - Cache line for which flushing has to be performed
*/
void (*flush_eviction_policy)(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief Get cleaning policy
*
* @param[in] cache - Cache instance
* @param[in] line - cache line for which cleaning policy is requested
* @param[out] cleaning_policy - Cleaning policy
*/
void (*get_cleaning_policy)(struct ocf_cache *cache,
ocf_cache_line_t line,
struct cleaning_policy_meta *cleaning_policy);
/**
* @brief Set cleaning policy
*
* @param[in] cache - Cache instance
	 * @param[in] line - Cache line
* @param[in] cleaning_policy - Cleaning policy values which will be
* stored in metadata service
*/
void (*set_cleaning_policy)(struct ocf_cache *cache,
ocf_cache_line_t line,
struct cleaning_policy_meta *cleaning_policy);
/**
* @brief Flush cleaning policy for given cache line
*
* @param[in] cache - Cache instance
* @param[in] line - Cache line for which flushing has to be performed
*/
void (*flush_cleaning_policy)(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief Get hash table for specified index
*
* @param[in] cache - Cache instance
* @param[in] index - Hash table index
* @return Cache line value under specified hash table index
*/
ocf_cache_line_t (*get_hash)(struct ocf_cache *cache,
ocf_cache_line_t index);
/**
* @brief Set hash table value for specified index
*
* @param[in] cache - Cache instance
* @param[in] index - Hash table index
* @param[in] line - Cache line value to be set under specified hash
* table index
*/
void (*set_hash)(struct ocf_cache *cache,
ocf_cache_line_t index, ocf_cache_line_t line);
/**
	 * @brief Flush hash table for specified index
*
* @param[in] cache - Cache instance
* @param[in] index - Hash table index
*/
void (*flush_hash)(struct ocf_cache *cache,
ocf_cache_line_t index);
/**
* @brief Get hash table entries
*
* @param[in] cache - Cache instance
	 * @return Number of hash table entries
*/
ocf_cache_line_t (*entries_hash)(struct ocf_cache *cache);
/* TODO Provide documentation below */
void (*set_core_info)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t core_id,
uint64_t core_sector);
void (*get_core_info)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t *core_id,
uint64_t *core_sector);
ocf_core_id_t (*get_core_id)(struct ocf_cache *cache,
ocf_cache_line_t line);
uint64_t (*get_core_sector)(struct ocf_cache *cache,
ocf_cache_line_t line);
void (*get_core_and_part_id)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t *core_id,
ocf_part_id_t *part_id);
struct ocf_metadata_uuid *(*get_core_uuid)(
struct ocf_cache *cache, ocf_core_id_t core_id);
void (*set_collision_info)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next,
ocf_cache_line_t prev);
void (*get_collision_info)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t *next,
ocf_cache_line_t *prev);
void (*set_collision_next)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next);
void (*set_collision_prev)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev);
ocf_cache_line_t (*get_collision_next)(struct ocf_cache *cache,
ocf_cache_line_t line);
ocf_cache_line_t (*get_collision_prev)(struct ocf_cache *cache,
ocf_cache_line_t line);
ocf_part_id_t (*get_partition_id)(struct ocf_cache *cache,
ocf_cache_line_t line);
ocf_cache_line_t (*get_partition_next)(struct ocf_cache *cache,
ocf_cache_line_t line);
ocf_cache_line_t (*get_partition_prev)(struct ocf_cache *cache,
ocf_cache_line_t line);
void (*get_partition_info)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line,
ocf_cache_line_t *prev_line);
void (*set_partition_next)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line);
void (*set_partition_prev)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line);
void (*set_partition_info)(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line);
const struct ocf_metadata_status*
(*rd_status_access)(struct ocf_cache *cache,
ocf_cache_line_t line);
struct ocf_metadata_status*
(*wr_status_access)(struct ocf_cache *cache,
ocf_cache_line_t line);
bool (*test_dirty)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
bool (*test_out_dirty)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
bool (*clear_dirty)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
bool (*set_dirty)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
bool (*test_and_set_dirty)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
bool (*test_and_clear_dirty)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
bool (*test_valid)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
bool (*test_out_valid)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
bool (*clear_valid)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
bool (*set_valid)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
bool (*test_and_set_valid)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
bool (*test_and_clear_valid)(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
};
struct ocf_cache_line_settings {
ocf_cache_line_size_t size;
uint64_t sector_count;
uint64_t sector_start;
uint64_t sector_end;
};
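/*
 * A sketch of how the per-line sector window is typically derived from the
 * cache line size (illustration only - this helper is hypothetical and the
 * 512 B sector size is an assumption of the example, not something defined
 * in this header):
 */
static inline void example_init_line_settings(
		struct ocf_cache_line_settings *settings,
		ocf_cache_line_size_t line_size)
{
	settings->size = line_size;
	settings->sector_count = line_size / 512; /* assumed 512 B sectors */
	settings->sector_start = 0;
	settings->sector_end = settings->sector_count - 1;
}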
/**
* @brief Metadata control structure
*/
struct ocf_metadata {
const struct ocf_metadata_iface iface;
/*!< Metadata service interface */
void *iface_priv;
/*!< Private data of metadata service interface */
const struct ocf_cache_line_settings settings;
/*!< Cache line configuration */
bool is_volatile;
/*!< true if metadata used in volatile mode (RAM only) */
struct {
env_rwsem collision; /*!< lock for collision table */
env_rwlock status; /*!< Fast lock for status bits */
env_spinlock eviction; /*!< Fast lock for eviction policy */
} lock;
};
#define OCF_METADATA_RD 0
#define OCF_METADATA_WR 1
#endif /* __METADATA_STRUCTS_H__ */


@@ -0,0 +1,93 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_SUPERBLOCK_H__
#define __METADATA_SUPERBLOCK_H__
#define CACHE_MAGIC_NUMBER 0x187E1CA6
/**
* @brief OCF cache metadata configuration superblock
*/
struct ocf_superblock_config {
	/** WARNING: Metadata probe disregards the metadata version when
	 * checking whether the cache is dirty - the position of the next
	 * two fields must not change! */
uint8_t clean_shutdown;
uint8_t dirty_flushed;
uint32_t magic_number;
uint32_t metadata_version;
/* Currently set cache mode */
ocf_cache_mode_t cache_mode;
ocf_cache_line_t cachelines;
uint32_t valid_parts_no;
ocf_cache_line_size_t line_size;
ocf_metadata_layout_t metadata_layout;
uint32_t core_obj_count;
unsigned long valid_object_bitmap[(OCF_CORE_MAX /
(sizeof(unsigned long) * 8)) + 1];
ocf_cleaning_t cleaning_policy_type;
struct cleaning_policy_config cleaning[CLEANING_POLICY_TYPE_MAX];
ocf_eviction_t eviction_policy_type;
/* Current core sequence number */
ocf_core_id_t curr_core_seq_no;
struct ocf_user_part_config user_parts[OCF_IO_CLASS_MAX + 1];
/*
* Checksum for each metadata region.
* This field has to be the last one!
*/
uint32_t checksum[metadata_segment_max];
};
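/*
 * Why the field order above matters - a sketch of a probe that reads only
 * the fixed prefix of the config superblock to learn the shutdown state
 * without knowing the (version-dependent) remainder. The struct below is
 * hypothetical, not part of this header:
 *
 *	struct probe_prefix {
 *		uint8_t clean_shutdown;
 *		uint8_t dirty_flushed;
 *		uint32_t magic_number;
 *	};
 *
 * Read sizeof(struct probe_prefix) bytes from the superblock location,
 * verify magic_number == CACHE_MAGIC_NUMBER, then inspect clean_shutdown
 * and dirty_flushed.
 */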
/**
* @brief OCF cache metadata runtime superblock
*/
struct ocf_superblock_runtime {
struct ocf_part freelist_part;
struct ocf_user_part_runtime user_parts[OCF_IO_CLASS_MAX + 1];
uint32_t cleaning_thread_access;
};
static inline int ocf_metadata_set_shutdown_status(
struct ocf_cache *cache,
enum ocf_metadata_shutdown_status shutdown_status)
{
return cache->metadata.iface.set_shutdown_status(cache,
shutdown_status);
}
static inline int ocf_metadata_load_superblock(struct ocf_cache *cache)
{
return cache->metadata.iface.load_superblock(cache);
}
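/*
 * Note: flushing the superblock is a no-op when no cache device is
 * attached (see the cache->device check below), e.g. when metadata is
 * used in volatile (RAM-only) mode.
 */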
static inline
int ocf_metadata_flush_superblock(struct ocf_cache *cache)
{
if (cache->device)
return cache->metadata.iface.flush_superblock(cache);
return 0;
}
static inline uint64_t ocf_metadata_get_reserved_lba(
struct ocf_cache *cache)
{
return cache->metadata.iface.get_reserved_lba(cache);
}
#endif /* __METADATA_SUPERBLOCK_H__ */


@@ -0,0 +1,152 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "metadata_updater_priv.h"
#include "../ocf_priv.h"
#include "../engine/engine_common.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "../utils/utils_io.h"
#include "../utils/utils_allocator.h"
int ocf_metadata_updater_init(ocf_cache_t cache)
{
ocf_metadata_updater_t mu = &cache->metadata_updater;
struct ocf_metadata_io_syncher *syncher = &mu->syncher;
INIT_LIST_HEAD(&syncher->in_progress_head);
INIT_LIST_HEAD(&syncher->pending_head);
env_mutex_init(&syncher->lock);
return ctx_metadata_updater_init(cache->owner, mu);
}
void ocf_metadata_updater_kick(ocf_cache_t cache)
{
ctx_metadata_updater_kick(cache->owner, &cache->metadata_updater);
}
void ocf_metadata_updater_stop(ocf_cache_t cache)
{
ctx_metadata_updater_stop(cache->owner, &cache->metadata_updater);
}
void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv)
{
OCF_CHECK_NULL(mu);
mu->priv = priv;
}
void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu)
{
OCF_CHECK_NULL(mu);
return mu->priv;
}
ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu)
{
OCF_CHECK_NULL(mu);
return container_of(mu, struct ocf_cache, metadata_updater);
}
static int _metadata_updater_iterate_in_progress(ocf_cache_t cache,
struct metadata_io_request *new_req)
{
struct metadata_io_request_asynch *a_req;
struct ocf_metadata_io_syncher *syncher =
&cache->metadata_updater.syncher;
struct metadata_io_request *curr, *temp;
list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) {
if (env_atomic_read(&curr->finished)) {
a_req = curr->asynch;
ENV_BUG_ON(!a_req);
list_del(&curr->list);
if (env_atomic_dec_return(&a_req->req_active) == 0) {
OCF_REALLOC_DEINIT(&a_req->reqs,
&a_req->reqs_limit);
env_free(a_req);
}
continue;
}
if (new_req) {
/* If request specified, check if overlap occurs. */
if (ocf_io_overlaps(new_req->page, new_req->count,
curr->page, curr->count)) {
return 1;
}
}
}
return 0;
}
int metadata_updater_check_overlaps(ocf_cache_t cache,
struct metadata_io_request *req)
{
struct ocf_metadata_io_syncher *syncher =
&cache->metadata_updater.syncher;
int ret;
env_mutex_lock(&syncher->lock);
ret = _metadata_updater_iterate_in_progress(cache, req);
/* Either add it to in-progress list or pending list for deferred
* execution.
*/
if (ret == 0)
list_add_tail(&req->list, &syncher->in_progress_head);
else
list_add_tail(&req->list, &syncher->pending_head);
env_mutex_unlock(&syncher->lock);
return ret;
}
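/*
 * For reference, the overlap check used above reduces to comparing two
 * half-open page ranges. A sketch of the assumed ocf_io_overlaps()
 * semantics (not its actual definition):
 *
 *	s1 < s2 + c2 && s2 < s1 + c1
 *
 * where (s1, c1) and (s2, c2) are the (page, count) pairs of the two
 * requests.
 */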
uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
{
struct metadata_io_request *curr, *temp;
struct ocf_metadata_io_syncher *syncher;
ocf_cache_t cache;
int ret;
OCF_CHECK_NULL(mu);
cache = ocf_metadata_updater_get_cache(mu);
syncher = &cache->metadata_updater.syncher;
env_mutex_lock(&syncher->lock);
if (list_empty(&syncher->pending_head)) {
/*
* If pending list is empty, we iterate over in progress
* list to free memory used by finished requests.
*/
_metadata_updater_iterate_in_progress(cache, NULL);
env_mutex_unlock(&syncher->lock);
env_cond_resched();
return 0;
}
list_for_each_entry_safe(curr, temp, &syncher->pending_head, list) {
ret = _metadata_updater_iterate_in_progress(cache, curr);
if (ret == 0) {
/* Move to in-progress list and kick the workers */
list_move_tail(&curr->list, &syncher->in_progress_head);
}
env_mutex_unlock(&syncher->lock);
if (ret == 0)
ocf_engine_push_rq_front(&curr->fl_req, true);
env_cond_resched();
env_mutex_lock(&syncher->lock);
}
env_mutex_unlock(&syncher->lock);
return 0;
}
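/*
 * A sketch of the expected context contract (simplified and hypothetical):
 * the environment's updater implementation typically runs a worker thread
 * that waits for ocf_metadata_updater_kick() and then calls
 * ocf_metadata_updater_run() to drain the pending list, e.g.:
 *
 *	while (!stopping) {
 *		wait_for_kick();
 *		ocf_metadata_updater_run(mu);
 *	}
 */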


@@ -0,0 +1,33 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_UPDATER_PRIV_H__
#define __METADATA_UPDATER_PRIV_H__
#include "../ocf_def_priv.h"
#include "metadata_io.h"
struct ocf_metadata_updater {
/* Metadata flush synchronizer context */
struct ocf_metadata_io_syncher {
struct list_head in_progress_head;
struct list_head pending_head;
env_mutex lock;
} syncher;
void *priv;
};
int metadata_updater_check_overlaps(ocf_cache_t cache,
struct metadata_io_request *req);
int ocf_metadata_updater_init(struct ocf_cache *cache);
void ocf_metadata_updater_kick(struct ocf_cache *cache);
void ocf_metadata_updater_stop(struct ocf_cache *cache);
#endif /* __METADATA_UPDATER_PRIV_H__ */