Initial commit

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
Robert Baldyga
2018-11-29 15:14:21 +01:00
commit a8e1ce8cc5
178 changed files with 35378 additions and 0 deletions

735
src/cleaning/acp.c Normal file
View File

@@ -0,0 +1,735 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cleaning.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_rq.h"
#include "../cleaning/acp.h"
#include "../engine/engine_common.h"
#include "../concurrency/ocf_cache_concurrency.h"
#include "cleaning_priv.h"
/* Compile-time switch for verbose ACP debug logging and for checksum
 * verification of flush begin/end pairing. */
#define OCF_ACP_DEBUG 0

#if 1 == OCF_ACP_DEBUG

#define OCF_DEBUG_PREFIX "[Clean] %s():%d "

#define OCF_DEBUG_LOG(cache, format, ...) \
	ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
			format"\n", __func__, __LINE__, ##__VA_ARGS__)

#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")

#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)

#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
		##__VA_ARGS__)

/* XOR checksum: every cache line XOR-ed in at flush begin must be XOR-ed
 * out at flush end, so a non-zero checksum means unbalanced begin/end. */
#define ACP_DEBUG_INIT(acp) acp->checksum = 0
#define ACP_DEBUG_BEGIN(acp, cache_line) acp->checksum ^= cache_line
#define ACP_DEBUG_END(acp, cache_line) acp->checksum ^= cache_line
#define ACP_DEBUG_CHECK(acp) ENV_BUG_ON(acp->checksum)

#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define ACP_DEBUG_INIT(acp)
#define ACP_DEBUG_BEGIN(acp, cache_line)
#define ACP_DEBUG_END(acp, cache_line)
#define ACP_DEBUG_CHECK(acp)
#endif

/* Core address space is split into fixed-size chunks for cleaning. */
#define ACP_CHUNK_SIZE (100 * MiB)

/* minimal time to chunk cleaning after error, in seconds
 * (passed to env_secs_to_ticks() in _acp_handle_flush_error()) */
#define ACP_CHUNK_CLEANING_BACKOFF_TIME 5

/* time to sleep when nothing to clean in ms */
#define ACP_BACKOFF_TIME_MS 1000

#define ACP_MAX_BUCKETS 11

/* Upper thresholds for buckets in percent dirty pages. First bucket should have
 * threshold=0 - it isn't cleaned and we don't want dirty chunks staying dirty
 * forever. Last bucket also should stay at 100 for obvious reasons */
static const uint16_t ACP_BUCKET_DEFAULTS[ACP_MAX_BUCKETS] = { 0, 10, 20, 30, 40,
		50, 60, 70, 80, 90, 100 };
/* Context of a single batch flush issued by ACP. */
struct acp_flush_context {
	/* number of cache lines in flush */
	uint64_t size;
	/* chunk being cleaned - kept for error handling (backoff) */
	struct acp_chunk_info *chunk;
	/* cache lines to flush */
	struct flush_data data[OCF_ACP_MAX_FLUSH_MAX_BUFFERS];
	/* flush error code */
	int error;
};
/* Iteration state persisted between perform_cleaning calls. */
struct acp_state {
	/* currently cleaned chunk */
	struct acp_chunk_info *chunk;
	/* cache line iterator within current chunk */
	unsigned iter;
	/* true if there are cache lines to process
	 * in the current chunk */
	bool in_progress;
};
/* Per-chunk cleaning metadata; chunks live on bucket lists keyed by
 * their number of dirty lines. */
struct acp_chunk_info {
	/* anchor in the owning bucket's chunk_list */
	struct list_head list;
	uint64_t chunk_id;
	/* tick count before which this chunk should not be cleaned again
	 * (armed after a flush error); 0 means no backoff is active */
	uint64_t next_cleaning_timestamp;
	ocf_core_id_t core_id;
	uint16_t num_dirty;
	/* index into acp_context.bucket_info */
	uint8_t bucket_id;
};
/* Bucket grouping chunks with a similar number of dirty lines. */
struct acp_bucket {
	struct list_head chunk_list;
	uint16_t threshold; /* threshold in clines */
};
/* Top-level ACP cleaning policy state, one instance per cache
 * (stored in cache->cleaning_policy_context). */
struct acp_context {
	/* protects bucket chunk lists and per-core chunk tables */
	env_rwsem chunks_lock;

	/* number of chunks per core */
	uint64_t num_chunks[OCF_CORE_MAX];

	/* per core array of all chunks */
	struct acp_chunk_info *chunk_info[OCF_CORE_MAX];

	struct acp_bucket bucket_info[ACP_MAX_BUCKETS];

	/* total number of chunks in cache */
	uint64_t chunks_total;

	/* structure to keep track of I/O in progress */
	struct acp_flush_context flush;

	/* cleaning state persistent over subsequent calls to
	   perform_cleaning */
	struct acp_state state;

#if 1 == OCF_ACP_DEBUG
	/* debug only */
	uint64_t checksum;
#endif
};
/* Decoded (core id, core line) pair for a given cache line. */
struct acp_core_line_info
{
	ocf_cache_line_t cache_line;
	ocf_core_id_t core_id;
	uint64_t core_line;
};
/* Convenience wrappers around the chunks_lock read-write semaphore;
 * they expect a local variable named 'acp' in scope. */
#define ACP_LOCK_CHUNKS_RD() env_rwsem_down_read(&acp->chunks_lock)
#define ACP_UNLOCK_CHUNKS_RD() env_rwsem_up_read(&acp->chunks_lock)
#define ACP_LOCK_CHUNKS_WR() env_rwsem_down_write(&acp->chunks_lock)
#define ACP_UNLOCK_CHUNKS_WR() env_rwsem_up_write(&acp->chunks_lock)
/* Fetch the ACP context stored in the cache's cleaning policy slot. */
static struct acp_context *_acp_get_ctx_from_cache(struct ocf_cache *cache)
{
	struct acp_context *acp = cache->cleaning_policy_context;

	return acp;
}
/* Read cleaning policy metadata for @cache_line into @policy_meta and
 * return a pointer to its ACP-specific part. */
static struct acp_cleaning_policy_meta* _acp_meta_get(
		struct ocf_cache *cache, uint32_t cache_line,
		struct cleaning_policy_meta *policy_meta)
{
	ocf_metadata_get_cleaning_policy(cache, cache_line, policy_meta);

	return &policy_meta->meta.acp;
}
/* Write back cleaning policy metadata for @cache_line. */
static void _acp_meta_set(struct ocf_cache *cache, uint32_t cache_line,
		struct cleaning_policy_meta *policy_meta)
{
	ocf_metadata_set_cleaning_policy(cache, cache_line, policy_meta);
}
/* Resolve the (core id, core line) mapping of @cache_line. */
static struct acp_core_line_info _acp_core_line_info(struct ocf_cache *cache,
		ocf_cache_line_t cache_line)
{
	struct acp_core_line_info info = {
		.cache_line = cache_line,
	};

	ocf_metadata_get_core_info(cache, cache_line, &info.core_id,
			&info.core_line);

	return info;
}
/* Map a cache line to the chunk descriptor covering its core line. */
static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct acp_core_line_info line_info =
			_acp_core_line_info(cache, cache_line);
	uint64_t chunk_id = line_info.core_line * ocf_line_size(cache) /
			ACP_CHUNK_SIZE;

	return &acp->chunk_info[line_info.core_id][chunk_id];
}
/* Iterate over the ids of all cores added to @cache. */
#define for_each_core(cache, iter) \
	for (iter = 0; iter < OCF_CORE_MAX; iter++) \
		if (cache->core_conf_meta[iter].added)
static void _acp_remove_cores(struct ocf_cache *cache)
{
int i;
for_each_core(cache, i)
cleaning_policy_acp_remove_core(cache, i);
}
/* Set up ACP bookkeeping for every core; on failure roll back the cores
 * added so far and propagate the error. */
static int _acp_load_cores(struct ocf_cache *cache)
{
	int core_id;
	int result = 0;

	for_each_core(cache, core_id) {
		OCF_DEBUG_PARAM(cache, "loading core %i\n", core_id);
		result = cleaning_policy_acp_add_core(cache, core_id);
		if (result)
			break;
	}

	if (result)
		_acp_remove_cores(cache);

	return result;
}
/* Reset ACP per-line state: mark @cache_line clean in policy metadata. */
void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct cleaning_policy_meta policy_meta;

	/* TODO: acp meta is going to be removed soon */
	_acp_meta_get(cache, cache_line, &policy_meta)->dirty = 0;
	_acp_meta_set(cache, cache_line, &policy_meta);
}
/* Tear down ACP: release per-core tables, then free the policy context. */
void cleaning_policy_acp_deinitialize(struct ocf_cache *cache)
{
	_acp_remove_cores(cache);

	env_vfree(cache->cleaning_policy_context);
	cache->cleaning_policy_context = NULL;
}
/* Rebuild ACP state from collision metadata: reset per-line state, then
 * re-register every dirty mapped line as hot. */
static void _acp_rebuild(struct ocf_cache *cache)
{
	ocf_cache_line_t cline;
	ocf_core_id_t core_id;
	uint32_t step = 0;

	for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
		ocf_metadata_get_core_and_part_id(cache, cline, &core_id,
				NULL);

		OCF_COND_RESCHED_DEFAULT(step);

		/* OCF_CORE_MAX marks an unmapped entry */
		if (core_id == OCF_CORE_MAX)
			continue;

		cleaning_policy_acp_init_cache_block(cache, cline);

		if (metadata_test_dirty(cache, cline))
			cleaning_policy_acp_set_hot_cache_line(cache, cline);
	}

	ocf_cache_log(cache, log_info, "Finished rebuilding ACP metadata\n");
}
/* Populate default ACP tunables in the cache config metadata. */
void cleaning_policy_acp_setup(struct ocf_cache *cache)
{
	struct acp_cleaning_policy_config *config =
			(void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	config->thread_wakeup_time = OCF_ACP_DEFAULT_WAKE_UP;
	config->flush_max_buffers = OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS;
}
/*
 * Allocate and initialize the ACP context for @cache.
 *
 * @param init_metadata unused here (part of the cleaning ops interface)
 * @return 0 on success, -OCF_ERR_NO_MEM on allocation failure, or an
 *         error propagated from loading per-core chunk tables
 */
int cleaning_policy_acp_initialize(struct ocf_cache *cache,
		int init_metadata)
{
	struct acp_context *acp;
	int err, i;

	/* bug if max chunk number would overflow dirty_no array type */
#if defined (BUILD_BUG_ON)
	BUILD_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
			1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
#else
	ENV_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
			1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
#endif

	/* double initialization is a bug */
	ENV_BUG_ON(cache->cleaning_policy_context);

	cache->cleaning_policy_context = env_vzalloc(sizeof(struct acp_context));
	if (!cache->cleaning_policy_context) {
		ocf_cache_log(cache, log_err, "acp context allocation error\n");
		return -OCF_ERR_NO_MEM;
	}
	acp = cache->cleaning_policy_context;

	env_rwsem_init(&acp->chunks_lock);

	/* scale percentage defaults to absolute line counts per chunk */
	for (i = 0; i < ACP_MAX_BUCKETS; i++) {
		INIT_LIST_HEAD(&acp->bucket_info[i].chunk_list);
		acp->bucket_info[i].threshold =
			((ACP_CHUNK_SIZE/ocf_line_size(cache)) *
			ACP_BUCKET_DEFAULTS[i]) / 100;
	}

	if (cache->conf_meta->core_obj_count > 0) {
		err = _acp_load_cores(cache);
		if (err) {
			cleaning_policy_acp_deinitialize(cache);
			return err;
		}
	}

	_acp_rebuild(cache);

	return 0;
}
/*
 * Set an ACP tunable (thread wake-up time or flush batch size).
 * Returns 0 on success or -OCF_ERR_INVAL for an unknown param id;
 * range validation is performed by OCF_CLEANING_CHECK_PARAM.
 */
int cleaning_policy_acp_set_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t param_value)
{
	struct acp_cleaning_policy_config *config =
			(void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	switch (param_id) {
	case ocf_acp_flush_max_buffers:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ACP_MIN_FLUSH_MAX_BUFFERS,
				OCF_ACP_MAX_FLUSH_MAX_BUFFERS,
				"flush_max_buffers");
		config->flush_max_buffers = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread max "
				"buffers flushed per iteration: %d\n",
				config->flush_max_buffers);
		break;
	case ocf_acp_wake_up_time:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ACP_MIN_WAKE_UP,
				OCF_ACP_MAX_WAKE_UP,
				"thread_wakeup_time");
		config->thread_wakeup_time = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
				"wake-up time: %d\n", config->thread_wakeup_time);
		break;
	default:
		return -OCF_ERR_INVAL;
	}

	return 0;
}
/* Read an ACP tunable into @param_value; -OCF_ERR_INVAL if unknown id. */
int cleaning_policy_acp_get_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t *param_value)
{
	struct acp_cleaning_policy_config *config =
			(void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	switch (param_id) {
	case ocf_acp_wake_up_time:
		*param_value = config->thread_wakeup_time;
		break;
	case ocf_acp_flush_max_buffers:
		*param_value = config->flush_max_buffers;
		break;
	default:
		return -OCF_ERR_INVAL;
	}

	return 0;
}
/* Attempt to read-lock the cache line mapped at (@core_id, @core_line),
 * but only if the lookup is a hit and the line is dirty.
 * Returns the locked line index, or collision_table_entries (an
 * out-of-range sentinel) when nothing was locked. */
static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
		uint32_t core_id, uint64_t core_line)
{
	struct ocf_map_info info;
	bool locked = false;

	/* lookup and try-lock must both happen under the metadata lock */
	OCF_METADATA_LOCK_RD();

	ocf_engine_lookup_map_entry(cache, &info, core_id,
			core_line);

	if (info.status == LOOKUP_HIT &&
			metadata_test_dirty(cache, info.coll_idx) &&
			ocf_cache_line_try_lock_rd(cache, info.coll_idx)) {
		locked = true;
	}

	OCF_METADATA_UNLOCK_RD();

	return locked ? info.coll_idx : cache->device->collision_table_entries;
}
/* Record a cleaning error on the flushed chunk: arm the backoff
 * timestamp so the chunk is skipped for a while, and log (rate-limited
 * via ocf_cache_log_rl). */
static void _acp_handle_flush_error(struct ocf_cache *cache,
		struct acp_context *acp)
{
	struct acp_flush_context *flush = &acp->flush;

	flush->chunk->next_cleaning_timestamp = env_get_tick_count() +
			env_secs_to_ticks(ACP_CHUNK_CLEANING_BACKOFF_TIME);

	if (ocf_cache_log_rl(cache)) {
		ocf_core_log(&cache->core_obj[flush->chunk->core_id],
				log_err, "Cleaning error (%d) in range"
				" <%llu; %llu) backing off for %u seconds\n",
				flush->error,
				flush->chunk->chunk_id * ACP_CHUNK_SIZE,
				(flush->chunk->chunk_id * ACP_CHUNK_SIZE) +
						ACP_CHUNK_SIZE,
				ACP_CHUNK_CLEANING_BACKOFF_TIME);
	}
}
/* called after flush request completed - releases the read locks taken
 * by _acp_trylock_dirty() and arms error backoff if the flush failed */
static void _acp_flush_end(
		struct ocf_cache *cache,
		struct acp_context *acp)
{
	struct acp_flush_context *flush = &acp->flush;
	int i;

	for (i = 0; i < flush->size; i++) {
		ocf_cache_line_unlock_rd(cache, flush->data[i].cache_line);
		ACP_DEBUG_END(acp, flush->data[i].cache_line);
	}

	if (flush->error)
		_acp_handle_flush_error(cache, acp);
}
/* Flush the gathered cache lines synchronously and run the completion
 * path (lock release / error backoff). */
static void _acp_flush(struct ocf_cache *cache, struct acp_context *acp,
		uint32_t io_queue, struct acp_flush_context *flush)
{
	/* designated initializer zeroes all remaining attribute fields */
	struct ocf_cleaner_attribs attribs = {
		.io_queue = io_queue,
		.do_sort = false,
		.metadata_locked = false,
		.cache_line_lock = false,
	};

	flush->error = ocf_cleaner_do_flush_data(cache, flush->data,
			flush->size, &attribs);

	_acp_flush_end(cache, acp);
}
/*
 * Decide whether @chunk may be cleaned now: its core device must be
 * opened and the error backoff timeout must either never have been set
 * (next_cleaning_timestamp == 0) or have already expired.
 *
 * BUGFIX: the comparison was inverted (`next_cleaning_timestamp >
 * env_get_tick_count()`), which allowed cleaning *during* the backoff
 * window armed by _acp_handle_flush_error() and blocked it only after
 * the window expired - the opposite of the documented intent.
 */
static inline bool _acp_can_clean_chunk(struct ocf_cache *cache,
		struct acp_chunk_info *chunk)
{
	/* Check if core device is opened and if timeout after cleaning error
	 * expired or wasn't set in the first place */
	return (cache->core_obj[chunk->core_id].opened &&
			(chunk->next_cleaning_timestamp <= env_get_tick_count() ||
			!chunk->next_cleaning_timestamp));
}
/* Pick the dirtiest eligible chunk to clean next, or NULL when none.
 * Buckets are scanned from the dirtiest down; bucket 0 holds chunks
 * below the first threshold and is never cleaned. */
static struct acp_chunk_info *_acp_get_cleaning_candidate(
		struct ocf_cache *cache)
{
	struct acp_context *acp = cache->cleaning_policy_context;
	struct acp_chunk_info *chunk;
	struct acp_chunk_info *candidate = NULL;
	int bucket;

	ACP_LOCK_CHUNKS_RD();

	for (bucket = ACP_MAX_BUCKETS - 1; bucket > 0 && !candidate; bucket--) {
		list_for_each_entry(chunk,
				&acp->bucket_info[bucket].chunk_list, list) {
			if (_acp_can_clean_chunk(cache, chunk)) {
				candidate = chunk;
				break;
			}
		}
	}

	ACP_UNLOCK_CHUNKS_RD();

	return candidate;
}
#define CHUNK_FINISHED -1

/* clean at most 'flush_max_buffers' cache lines from given chunk, starting
 * at given cache line; returns CHUNK_FINISHED when the whole chunk has
 * been walked, otherwise the index at which to resume next time */
static int _acp_clean(struct ocf_cache *cache, uint32_t io_queue,
		struct acp_chunk_info *chunk, unsigned start,
		uint32_t flush_max_buffers)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	size_t lines_per_chunk = ACP_CHUNK_SIZE /
			ocf_line_size(cache);
	uint64_t first_core_line = chunk->chunk_id * lines_per_chunk;
	unsigned i;

	OCF_DEBUG_PARAM(cache, "lines per chunk %llu chunk %llu "
			"first_core_line %llu\n",
			(uint64_t)lines_per_chunk,
			chunk->chunk_id,
			first_core_line);

	ACP_DEBUG_INIT(acp);

	acp->flush.size = 0;
	acp->flush.chunk = chunk;

	/* gather up to flush_max_buffers dirty, lockable lines */
	for (i = start; i < lines_per_chunk && acp->flush.size < flush_max_buffers ; i++) {
		uint64_t core_line = first_core_line + i;
		ocf_cache_line_t cache_line;

		cache_line = _acp_trylock_dirty(cache, chunk->core_id, core_line);
		/* sentinel: line is clean, unmapped, or already locked */
		if (cache_line == cache->device->collision_table_entries)
			continue;

		acp->flush.data[acp->flush.size].core_id = chunk->core_id;
		acp->flush.data[acp->flush.size].core_line = core_line;
		acp->flush.data[acp->flush.size].cache_line = cache_line;
		acp->flush.size++;
		ACP_DEBUG_BEGIN(acp, cache_line);
	}

	if (acp->flush.size > 0) {
		_acp_flush(cache, acp, io_queue, &acp->flush);
	}

	/* every line locked above must have been unlocked by the flush */
	ACP_DEBUG_CHECK(acp);

	return (i == lines_per_chunk) ? CHUNK_FINISHED : i;
}
#define NOTHING_TO_CLEAN 0
#define MORE_TO_CLEAN 1

/* Clean at most 'flush_max_buffers' cache lines from current or newly
 * selected chunk */
static int _acp_clean_iteration(struct ocf_cache *cache, uint32_t io_queue,
		uint32_t flush_max_buffers)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct acp_state *state = &acp->state;

	if (!state->in_progress) {
		/* get next chunk to clean */
		state->chunk = _acp_get_cleaning_candidate(cache);

		if (!state->chunk) {
			/* nothing to clean */
			return NOTHING_TO_CLEAN;
		}

		/* new cleaning cycle - reset state */
		state->iter = 0;
		state->in_progress = true;
	}

	state->iter = _acp_clean(cache, io_queue, state->chunk, state->iter,
			flush_max_buffers);

	if (state->iter == CHUNK_FINISHED) {
		/* reached end of chunk - reset state */
		state->in_progress = false;
	}

	return MORE_TO_CLEAN;
}
/* Cleaning thread entry point: run one cleaning iteration and return the
 * requested sleep time (ms) before the next invocation. */
int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
		uint32_t io_queue)
{
	struct acp_cleaning_policy_config *config =
			(void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	if (_acp_clean_iteration(cache, io_queue,
			config->flush_max_buffers) == NOTHING_TO_CLEAN) {
		/* idle - back off longer */
		return ACP_BACKOFF_TIME_MS;
	}

	return config->thread_wakeup_time;
}
/* Move a chunk up or down one bucket after its num_dirty changed, so it
 * stays in the bucket whose threshold bounds its dirty count. */
static void _acp_update_bucket(struct acp_context *acp,
		struct acp_chunk_info *chunk)
{
	struct acp_bucket *bucket = &acp->bucket_info[chunk->bucket_id];

	if (chunk->num_dirty > bucket->threshold) {
		ENV_BUG_ON(chunk->bucket_id == ACP_MAX_BUCKETS - 1);

		chunk->bucket_id++;
		/* buckets are stored in array, move up one bucket.
		 * No overflow here. ENV_BUG_ON made sure of no incrementation on
		 * last bucket */
		bucket++;

		list_move_tail(&chunk->list, &bucket->chunk_list);
	} else if (chunk->bucket_id &&
			chunk->num_dirty <= (bucket - 1)->threshold) {
		chunk->bucket_id--;
		/* move down one bucket, we made sure we won't underflow */
		bucket--;

		list_move(&chunk->list, &bucket->chunk_list);
	}
}
/* A line went dirty: set its ACP dirty bit, bump the owning chunk's
 * dirty counter and promote the chunk to a higher bucket if needed. */
void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct cleaning_policy_meta policy_meta;
	struct acp_cleaning_policy_meta *meta;
	struct acp_chunk_info *chunk;

	ACP_LOCK_CHUNKS_WR();

	chunk = _acp_get_chunk(cache, cache_line);
	meta = _acp_meta_get(cache, cache_line, &policy_meta);

	if (!meta->dirty) {
		meta->dirty = 1;
		_acp_meta_set(cache, cache_line, &policy_meta);
		chunk->num_dirty++;
	}

	_acp_update_bucket(acp, chunk);

	ACP_UNLOCK_CHUNKS_WR();
}
/* A line was purged: clear its ACP dirty bit, decrement the owning
 * chunk's dirty counter and demote the chunk's bucket if needed. */
void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct acp_chunk_info *chunk = _acp_get_chunk(cache, cache_line);
	struct cleaning_policy_meta policy_meta;
	struct acp_cleaning_policy_meta *meta;

	meta = _acp_meta_get(cache, cache_line, &policy_meta);
	if (meta->dirty) {
		meta->dirty = 0;
		_acp_meta_set(cache, cache_line, &policy_meta);
		chunk->num_dirty--;
	}

	_acp_update_bucket(acp, chunk);
}
/* Purge ACP state for every cache line of @core_id mapping the given
 * byte range, across all partitions. */
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
		int core_id, uint64_t start_byte, uint64_t end_byte)
{
	int result = ocf_metadata_actor(cache, PARTITION_INVALID,
			core_id, start_byte, end_byte,
			cleaning_policy_acp_purge_block);

	return result;
}
/* Unregister a core from ACP: drop in-flight cleaning state that points
 * at this core and free its chunk table. */
void cleaning_policy_acp_remove_core(ocf_cache_t cache,
		ocf_core_id_t core_id)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	uint64_t i;

	ENV_BUG_ON(acp->chunks_total < acp->num_chunks[core_id]);

	/* cancel iteration state if it refers to a chunk of this core */
	if (acp->state.in_progress && acp->state.chunk->core_id == core_id) {
		acp->state.in_progress = false;
		acp->state.iter = 0;
		acp->state.chunk = NULL;
	}

	ACP_LOCK_CHUNKS_WR();

	/* unlink every chunk from its bucket before freeing the table */
	for (i = 0; i < acp->num_chunks[core_id]; i++)
		list_del(&acp->chunk_info[core_id][i].list);

	acp->chunks_total -= acp->num_chunks[core_id];
	acp->num_chunks[core_id] = 0;

	env_vfree(acp->chunk_info[core_id]);
	acp->chunk_info[core_id] = NULL;

	ACP_UNLOCK_CHUNKS_WR();
}
/*
 * Allocate and register per-chunk ACP bookkeeping for a newly added core.
 * All chunks start in bucket 0 (the "clean" bucket).
 *
 * @return 0 on success, -OCF_ERR_NO_MEM on allocation failure.
 *
 * Fixes: the failure path returned raw -ENOMEM while the rest of this
 * file (cleaning_policy_acp_initialize) uses OCF error codes; also the
 * loop index was a signed int compared against the uint64_t chunk count.
 */
int cleaning_policy_acp_add_core(ocf_cache_t cache,
		ocf_core_id_t core_id)
{
	uint64_t core_size = cache->core_conf_meta[core_id].length;
	uint64_t num_chunks = DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	uint64_t i;

	OCF_DEBUG_PARAM(cache, "%s core_id %llu num_chunks %llu\n",
			__func__, (uint64_t)core_id, (uint64_t) num_chunks);

	ACP_LOCK_CHUNKS_WR();

	/* adding the same core twice is a bug */
	ENV_BUG_ON(acp->chunk_info[core_id]);

	acp->chunk_info[core_id] =
		env_vzalloc(num_chunks * sizeof(acp->chunk_info[0][0]));

	if (!acp->chunk_info[core_id]) {
		ACP_UNLOCK_CHUNKS_WR();
		OCF_DEBUG_PARAM(cache, "failed to allocate acp tables\n");
		/* OCF error code for consistency with
		 * cleaning_policy_acp_initialize() (was -ENOMEM) */
		return -OCF_ERR_NO_MEM;
	}

	OCF_DEBUG_PARAM(cache, "successfully allocated acp tables\n");

	/* increment counters */
	acp->num_chunks[core_id] = num_chunks;
	acp->chunks_total += num_chunks;

	for (i = 0; i < acp->num_chunks[core_id]; i++) {
		/* fill in chunk metadata and add to the clean bucket */
		acp->chunk_info[core_id][i].core_id = core_id;
		acp->chunk_info[core_id][i].chunk_id = i;
		list_add(&acp->chunk_info[core_id][i].list,
				&acp->bucket_info[0].chunk_list);
	}

	ACP_UNLOCK_CHUNKS_WR();

	return 0;
}

45
src/cleaning/acp.h Normal file
View File

@@ -0,0 +1,45 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#ifndef __LAYER_CLEANING_POLICY_AGGRESSIVE_H__
#define __LAYER_CLEANING_POLICY_AGGRESSIVE_H__

#include "cleaning.h"

/* ACP (Aggressive Cleaning Policy) operations plugged into the generic
 * cleaning policy interface (see cleaning.h). */

/* Fill default ACP tunables in cache config metadata. */
void cleaning_policy_acp_setup(struct ocf_cache *cache);

/* Allocate the ACP context; 0 on success, negative OCF error code. */
int cleaning_policy_acp_initialize(struct ocf_cache *cache,
		int init_metadata);

/* Free the ACP context and per-core chunk tables. */
void cleaning_policy_acp_deinitialize(struct ocf_cache *cache);

/* Run one cleaning iteration; returns ms until the next call. */
int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
		uint32_t io_queue);

/* Reset ACP per-line metadata (mark line clean). */
void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
		uint32_t cache_line);

/* Notify ACP that a cache line became dirty. */
void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
		uint32_t cache_line);

/* Notify ACP that a cache line was purged. */
void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
		uint32_t cache_line);

/* Purge ACP state for a byte range of a core. */
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
		int core_id, uint64_t start_byte, uint64_t end_byte);

/* Set/get runtime tunables (wake-up time, flush_max_buffers). */
int cleaning_policy_acp_set_cleaning_param(struct ocf_cache *cache,
		uint32_t param_id, uint32_t param_value);
int cleaning_policy_acp_get_cleaning_param(struct ocf_cache *cache,
		uint32_t param_id, uint32_t *param_value);

/* Per-core chunk table management. */
int cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id);
void cleaning_policy_acp_remove_core(ocf_cache_t cache,
		ocf_core_id_t core_id);

#endif

View File

@@ -0,0 +1,23 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#ifndef __CLEANING_AGGRESSIVE_STRUCTS_H__
#define __CLEANING_AGGRESSIVE_STRUCTS_H__

#include "../utils/utils_cleaner.h"

/* TODO: remove acp metadata */
/* Per-cache-line ACP state kept inside cleaning policy metadata. */
struct acp_cleaning_policy_meta {
	uint8_t dirty : 1;
};

/* cleaning policy per partition metadata */
struct acp_cleaning_policy_config {
	uint32_t thread_wakeup_time; /* in milliseconds*/
	uint32_t flush_max_buffers; /* in lines */
};

#endif

802
src/cleaning/alru.c Normal file
View File

@@ -0,0 +1,802 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cleaning.h"
#include "alru.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_allocator.h"
#include "../concurrency/ocf_cache_concurrency.h"
#include "../ocf_def_priv.h"
#include "cleaning_priv.h"
/* The ALRU list uses collision_table_entries as its NULL sentinel, so
 * the same comparison identifies both list ends. */
#define is_alru_head(x) (x == collision_table_entries)
#define is_alru_tail(x) (x == collision_table_entries)

/* Compile-time switch for verbose ALRU debug logging. */
#define OCF_CLEANING_DEBUG 0

#if 1 == OCF_CLEANING_DEBUG

#define OCF_DEBUG_PREFIX "[Clean] %s():%d "

#define OCF_DEBUG_LOG(cache, format, ...) \
	ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
			format"\n", __func__, __LINE__, ##__VA_ARGS__)

#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")

#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)

#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
		##__VA_ARGS__)

#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

/* (cache line, core, sector) triple - presumably used when merging
 * lines into one flush I/O; its usage is not visible in this chunk */
struct flush_merge_struct {
	ocf_cache_line_t cache_line;
	ocf_core_id_t core_id;
	uint64_t core_sector;
};
/* -- Start of ALRU functions -- */
/* Sets the given collision_index as the new _head_ of the ALRU list. */
static inline void update_alru_head(struct ocf_cache *cache,
		int partition_id, unsigned int collision_index)
{
	struct ocf_user_part *part = &cache->user_parts[partition_id];

	part->runtime->cleaning.policy.alru.lru_head = collision_index;
}
/* Sets the given collision_index as the new _tail_ of the ALRU list. */
static inline void update_alru_tail(struct ocf_cache *cache,
		int partition_id, unsigned int collision_index)
{
	struct ocf_user_part *part = &cache->user_parts[partition_id];

	part->runtime->cleaning.policy.alru.lru_tail = collision_index;
}
/* Sets the given collision_index as the new _head_ and _tail_
 * of the ALRU list (used for a single-element or empty list).
 */
static inline void update_alru_head_tail(struct ocf_cache *cache,
		int partition_id, unsigned int collision_index)
{
	update_alru_head(cache, partition_id, collision_index);
	update_alru_tail(cache, partition_id, collision_index);
}
/* Adds the given collision_index to the _head_ of the ALRU list */
static void add_alru_head(struct ocf_cache *cache, int partition_id,
		unsigned int collision_index)
{
	unsigned int curr_head_index;
	unsigned int collision_table_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[partition_id];
	struct cleaning_policy_meta policy;

	ENV_BUG_ON(!(collision_index < collision_table_entries));

	ENV_BUG_ON(env_atomic_read(
			&part->runtime->cleaning.policy.alru.size) < 0);

	/* only dirty, valid lines are expected on the ALRU list */
	ENV_WARN_ON(!metadata_test_dirty(cache, collision_index));
	ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index));

	/* First node to be added. */
	if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
		update_alru_head_tail(cache, partition_id, collision_index);

		ocf_metadata_get_cleaning_policy(cache, collision_index,
				&policy);
		/* collision_table_entries acts as the list's NULL sentinel */
		policy.meta.alru.lru_next = collision_table_entries;
		policy.meta.alru.lru_prev = collision_table_entries;
		policy.meta.alru.timestamp = env_ticks_to_secs(
				env_get_tick_count());
		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
	} else {
		/* Not the first node to be added. */
		curr_head_index = part->runtime->cleaning.policy.alru.lru_head;

		ENV_BUG_ON(!(curr_head_index < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, collision_index,
				&policy);
		policy.meta.alru.lru_next = curr_head_index;
		policy.meta.alru.lru_prev = collision_table_entries;
		policy.meta.alru.timestamp = env_ticks_to_secs(
				env_get_tick_count());
		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);

		/* link the old head back to the new one */
		ocf_metadata_get_cleaning_policy(cache, curr_head_index,
				&policy);
		policy.meta.alru.lru_prev = collision_index;
		ocf_metadata_set_cleaning_policy(cache, curr_head_index,
				&policy);

		update_alru_head(cache, partition_id, collision_index);
	}

	env_atomic_inc(&part->runtime->cleaning.policy.alru.size);
}
/* Deletes the node with the given collision_index from the ALRU list.
 * collision_table_entries is the NULL sentinel for prev/next links;
 * entries not actually on the list are silently ignored. */
static void remove_alru_list(struct ocf_cache *cache, int partition_id,
		unsigned int collision_index)
{
	uint32_t prev_lru_node, next_lru_node;
	uint32_t collision_table_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[partition_id];
	struct alru_cleaning_policy *cleaning_policy =
		&part->runtime->cleaning.policy.alru;
	struct cleaning_policy_meta policy;

	ENV_BUG_ON(!(collision_index < collision_table_entries));

	if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
		ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
				"from empty ALRU Cleaning Policy queue!\n");
		ENV_BUG();
	}

	ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);

	/* Set prev and next (even if non existent) */
	next_lru_node = policy.meta.alru.lru_next;
	prev_lru_node = policy.meta.alru.lru_prev;

	/* Check if entry is not part of the ALRU list */
	if ((next_lru_node == collision_table_entries) &&
			(prev_lru_node == collision_table_entries) &&
			(cleaning_policy->lru_head != collision_index) &&
			(cleaning_policy->lru_tail != collision_index)) {
		return;
	}

	/* Case 0: If we are head AND tail, there is only one node. So unlink
	 * node and set that there is no node left in the list.
	 */
	if (cleaning_policy->lru_head == collision_index &&
			cleaning_policy->lru_tail == collision_index) {
		policy.meta.alru.lru_next = collision_table_entries;
		policy.meta.alru.lru_prev = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);

		update_alru_head_tail(cache, partition_id,
				collision_table_entries);
	}

	/* Case 1: else if this collision_index is ALRU head, but not tail,
	 * update head and return
	 */
	else if ((cleaning_policy->lru_tail != collision_index) &&
			(cleaning_policy->lru_head == collision_index)) {
		struct cleaning_policy_meta next_policy;

		ENV_BUG_ON(!(next_lru_node < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, next_lru_node,
				&next_policy);

		update_alru_head(cache, partition_id, next_lru_node);

		policy.meta.alru.lru_next = collision_table_entries;
		next_policy.meta.alru.lru_prev = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
		ocf_metadata_set_cleaning_policy(cache, next_lru_node,
				&next_policy);
	}

	/* Case 2: else if this collision_index is ALRU tail, but not head,
	 * update tail and return
	 */
	else if ((cleaning_policy->lru_head != collision_index) &&
			(cleaning_policy->lru_tail == collision_index)) {
		struct cleaning_policy_meta prev_policy;

		ENV_BUG_ON(!(prev_lru_node < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
				&prev_policy);

		update_alru_tail(cache, partition_id, prev_lru_node);

		policy.meta.alru.lru_prev = collision_table_entries;
		prev_policy.meta.alru.lru_next = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
		ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
				&prev_policy);
	}

	/* Case 3: else this collision_index is a middle node. There is no
	 * change to the head and the tail pointers.
	 */
	else {
		struct cleaning_policy_meta next_policy;
		struct cleaning_policy_meta prev_policy;

		ENV_BUG_ON(!(next_lru_node < collision_table_entries));
		ENV_BUG_ON(!(prev_lru_node < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
				&prev_policy);
		ocf_metadata_get_cleaning_policy(cache, next_lru_node,
				&next_policy);

		/* Update prev and next nodes */
		prev_policy.meta.alru.lru_next = policy.meta.alru.lru_next;
		next_policy.meta.alru.lru_prev = policy.meta.alru.lru_prev;

		/* Update the given node */
		policy.meta.alru.lru_next = collision_table_entries;
		policy.meta.alru.lru_prev = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
		ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
				&prev_policy);
		ocf_metadata_set_cleaning_policy(cache, next_lru_node,
				&next_policy);
	}

	env_atomic_dec(&part->runtime->cleaning.policy.alru.size);
}
static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
unsigned int collision_index)
{
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct alru_cleaning_policy *cleaning_policy =
&part->runtime->cleaning.policy.alru;
struct cleaning_policy_meta policy;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);
next_lru_node = policy.meta.alru.lru_next;
prev_lru_node = policy.meta.alru.lru_prev;
return cleaning_policy->lru_tail == collision_index ||
cleaning_policy->lru_head == collision_index ||
next_lru_node != collision_table_entries ||
prev_lru_node != collision_table_entries;
}
/* -- End of ALRU functions -- */
/* Reset a line's ALRU metadata: zero timestamp and unlink prev/next
 * (collision_table_entries is the list's NULL sentinel). */
void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
		uint32_t cache_line)
{
	uint32_t sentinel = cache->device->collision_table_entries;
	struct cleaning_policy_meta policy;

	ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
	policy.meta.alru.timestamp = 0;
	policy.meta.alru.lru_prev = sentinel;
	policy.meta.alru.lru_next = sentinel;
	ocf_metadata_set_cleaning_policy(cache, cache_line, &policy);
}
/* Remove a cache line from its partition's ALRU list. */
void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
		uint32_t cache_line)
{
	remove_alru_list(cache,
			ocf_metadata_get_partition_id(cache, cache_line),
			cache_line);
}
/* Like cleaning_policy_alru_purge_cache_block(), but first checks list
 * membership so lines not on the list are tolerated. */
static void __cleaning_policy_alru_purge_cache_block_any(
		struct ocf_cache *cache, uint32_t cache_line)
{
	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
			cache_line);

	if (!is_on_alru_list(cache, part_id, cache_line))
		return;

	remove_alru_list(cache, part_id, cache_line);
}
/* Purge ALRU state for all lines of @core_id within the byte range,
 * walking every partition with a non-empty list. Returns OR-ed actor
 * results. */
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
	uint64_t start_byte, uint64_t end_byte) {
	struct ocf_user_part *part;
	ocf_part_id_t part_id;
	int result = 0;

	for_each_part(cache, part, part_id) {
		/* nothing to purge in an empty partition */
		if (env_atomic_read(&part->runtime->cleaning.
				policy.alru.size) == 0)
			continue;

		result |= ocf_metadata_actor(cache, part_id,
				core_id, start_byte, end_byte,
				__cleaning_policy_alru_purge_cache_block_any);
	}

	return result;
}
/* Move a dirty cache line to the head of its partition's ALRU list,
 * unlinking it first when it is already on the list. */
void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
		uint32_t cache_line)
{
	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
			cache_line);
	struct ocf_user_part *part = &cache->user_parts[part_id];

	uint32_t prev_lru_node, next_lru_node;
	uint32_t collision_table_entries = cache->device->collision_table_entries;

	struct cleaning_policy_meta policy;

	ENV_WARN_ON(!metadata_test_dirty(cache, cache_line));
	ENV_WARN_ON(!metadata_test_valid_any(cache, cache_line));

	ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
	next_lru_node = policy.meta.alru.lru_next;
	prev_lru_node = policy.meta.alru.lru_prev;

	/* on-list when it has any neighbour, or when it is the sole
	 * element (head == tail == cache_line) */
	if ((next_lru_node != collision_table_entries) ||
			(prev_lru_node != collision_table_entries) ||
			((part->runtime->cleaning.policy.
				alru.lru_head == cache_line) &&
			(part->runtime->cleaning.policy.
				alru.lru_tail == cache_line)))
		remove_alru_list(cache, part_id, cache_line);

	add_alru_head(cache, part_id, cache_line);
}
static void _alru_rebuild(struct ocf_cache *cache)
{
struct ocf_user_part *part;
ocf_part_id_t part_id;
ocf_core_id_t core_id;
ocf_cache_line_t cline;
uint32_t step = 0;
for_each_part(cache, part, part_id) {
/* ALRU initialization */
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
part->runtime->cleaning.policy.alru.lru_head =
cache->device->collision_table_entries;
part->runtime->cleaning.policy.alru.lru_tail =
cache->device->collision_table_entries;
cache->device->runtime_meta->cleaning_thread_access = 0;
}
for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
ocf_metadata_get_core_and_part_id(cache, cline, &core_id,
NULL);
OCF_COND_RESCHED_DEFAULT(step);
if (core_id == OCF_CORE_MAX)
continue;
cleaning_policy_alru_init_cache_block(cache, cline);
if (!metadata_test_dirty(cache, cline))
continue;
cleaning_policy_alru_set_hot_cache_line(cache, cline);
}
}
static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache,
struct ocf_user_part *part, int init_metadata)
{
if (init_metadata) {
/* ALRU initialization */
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
part->runtime->cleaning.policy.alru.lru_head =
cache->device->collision_table_entries;
part->runtime->cleaning.policy.alru.lru_tail =
cache->device->collision_table_entries;
}
cache->device->runtime_meta->cleaning_thread_access = 0;
return 0;
}
/*
 * Populate the ALRU configuration stored in cache config metadata with
 * the compile-time defaults.
 */
void cleaning_policy_alru_setup(struct ocf_cache *cache)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	cfg->thread_wakeup_time = OCF_ALRU_DEFAULT_WAKE_UP;
	cfg->stale_buffer_time = OCF_ALRU_DEFAULT_STALENESS_TIME;
	cfg->flush_max_buffers = OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS;
	cfg->activity_threshold = OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD;
}
int cleaning_policy_alru_initialize(struct ocf_cache *cache, int init_metadata)
{
struct ocf_user_part *part;
ocf_part_id_t part_id;
for_each_part(cache, part, part_id) {
cleaning_policy_alru_initialize_part(cache,
part, init_metadata);
}
if (init_metadata)
_alru_rebuild(cache);
return 0;
}
/*
 * Set one ALRU tunable in the cache config metadata.
 *
 * Note: OCF_CLEANING_CHECK_PARAM returns -OCF_ERR_INVAL from this
 * function when the value is out of range, so each assignment below is
 * only reached for validated values.
 *
 * Returns 0 on success, -OCF_ERR_INVAL for an unknown param_id or an
 * out-of-range value.
 */
int cleaning_policy_alru_set_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t param_value)
{
	struct alru_cleaning_policy_config *config;

	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	switch (param_id) {
	case ocf_alru_wake_up_time:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_WAKE_UP,
				OCF_ALRU_MAX_WAKE_UP,
				"thread_wakeup_time");
		config->thread_wakeup_time = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
			"wake-up time: %d\n", config->thread_wakeup_time);
		break;
	case ocf_alru_stale_buffer_time:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_STALENESS_TIME,
				OCF_ALRU_MAX_STALENESS_TIME,
				"stale_buffer_time");
		config->stale_buffer_time = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
			"staleness time: %d\n", config->stale_buffer_time);
		break;
	case ocf_alru_flush_max_buffers:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_FLUSH_MAX_BUFFERS,
				OCF_ALRU_MAX_FLUSH_MAX_BUFFERS,
				"flush_max_buffers");
		config->flush_max_buffers = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread max "
				"buffers flushed per iteration: %d\n",
				config->flush_max_buffers);
		break;
	case ocf_alru_activity_threshold:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_ACTIVITY_THRESHOLD,
				OCF_ALRU_MAX_ACTIVITY_THRESHOLD,
				"activity_threshold");
		config->activity_threshold = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
				"activity time threshold: %d\n",
				config->activity_threshold);
		break;
	default:
		return -OCF_ERR_INVAL;
	}

	return 0;
}
/*
 * Read one ALRU tunable from the cache config metadata into
 * *param_value.
 *
 * Returns 0 on success, -OCF_ERR_INVAL for an unknown param_id
 * (in which case *param_value is left untouched).
 */
int cleaning_policy_alru_get_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t *param_value)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	if (param_id == ocf_alru_wake_up_time)
		*param_value = cfg->thread_wakeup_time;
	else if (param_id == ocf_alru_stale_buffer_time)
		*param_value = cfg->stale_buffer_time;
	else if (param_id == ocf_alru_flush_max_buffers)
		*param_value = cfg->flush_max_buffers;
	else if (param_id == ocf_alru_activity_threshold)
		*param_value = cfg->activity_threshold;
	else
		return -OCF_ERR_INVAL;

	return 0;
}
static inline uint32_t compute_timestamp(
const struct alru_cleaning_policy_config *config)
{
unsigned long time;
time = env_get_tick_count();
time -= env_secs_to_ticks(config->stale_buffer_time);
time = env_ticks_to_secs(time);
return (uint32_t) time;
}
/*
 * Return 1 when user I/O was observed within the configured activity
 * threshold (cleaning should back off), 0 otherwise. The unsigned
 * subtraction handles tick counter wraparound.
 */
static int check_for_io_activity(struct ocf_cache *cache,
		struct alru_cleaning_policy_config *config)
{
	unsigned int now_ms = env_ticks_to_msecs(env_get_tick_count());
	unsigned int last_ms = env_atomic_read(&cache->last_access_ms);

	return (now_ms - last_ms) < config->activity_threshold ? 1 : 0;
}
/*
 * env_sort comparator: order partition pointers by ascending I/O class
 * priority.
 */
static int cmp_ocf_user_parts(const void *p1, const void *p2)
{
	const struct ocf_user_part *a = *(const struct ocf_user_part **)p1;
	const struct ocf_user_part *b = *(const struct ocf_user_part **)p2;

	if (a->config->priority == b->config->priority)
		return 0;

	return a->config->priority > b->config->priority ? 1 : -1;
}
/*
 * env_sort swap callback: exchange two pointer-sized array elements.
 * The size argument is part of the callback signature but is always
 * sizeof(void *) here.
 */
static void swp_ocf_user_part(void *part1, void *part2, int size)
{
	void **lhs = (void **)part1;
	void **rhs = (void **)part2;
	void *held = *lhs;

	(void)size;

	*lhs = *rhs;
	*rhs = held;
}
/*
 * Fill parts[] with pointers to all OCF_IO_CLASS_MAX user partitions
 * and sort them by ascending I/O class priority (cmp_ocf_user_parts),
 * so that callers can iterate from lowest to highest priority.
 */
static void get_parts_sorted(struct ocf_user_part **parts,
		struct ocf_cache *cache) {
	int i;
	for (i = 0; i < OCF_IO_CLASS_MAX; i++)
		parts[i] = &cache->user_parts[i];
	env_sort(parts, OCF_IO_CLASS_MAX, sizeof(struct ocf_user_part*),
			cmp_ocf_user_parts, swp_ocf_user_part);
}
/*
 * Decide whether cleaning should be postponed: returns true when less
 * than thread_wakeup_time seconds have elapsed since the last cleaning
 * thread activity. *delta receives the elapsed time in seconds.
 */
static int clean_later(ocf_cache_t cache, uint32_t *delta)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
	uint32_t elapsed_s = env_ticks_to_secs(env_get_tick_count()) -
			cache->device->runtime_meta->cleaning_thread_access;

	*delta = elapsed_s;

	return elapsed_s <= cfg->thread_wakeup_time;
}
/*
 * Fill one flush_data entry with the identity of a cache line:
 * the line itself plus the owning core id and core-side line number
 * read from metadata.
 */
static void get_block_to_flush(struct flush_data* dst,
	ocf_cache_line_t cache_line, struct ocf_cache* cache)
{
	ocf_core_id_t owner_core;
	uint64_t owner_line;

	ocf_metadata_get_core_info(cache, cache_line,
			&owner_core, &owner_line);

	dst->cache_line = cache_line;
	dst->core_id = owner_core;
	dst->core_line = owner_line;
}
/*
 * Decide whether the ALRU walk should continue at cache_line:
 * false when the line index is the end-of-list sentinel (>= collision
 * table size) or when the line's last-write timestamp is newer than or
 * equal to the staleness cutoff.
 */
static int more_blocks_to_flush(struct ocf_cache *cache,
		ocf_cache_line_t cache_line, uint32_t last_access)
{
	struct cleaning_policy_meta policy;

	if (cache_line >= cache->device->collision_table_entries)
		return false;

	ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);

	return policy.meta.alru.timestamp < last_access;
}
/*
 * A cache line cannot be flushed when its core device is not opened or
 * when the line is currently locked/in use by another request.
 */
static int block_is_busy(struct ocf_cache *cache,
		ocf_cache_line_t cache_line)
{
	ocf_core_id_t owner_core;
	uint64_t owner_line;

	ocf_metadata_get_core_info(cache, cache_line,
			&owner_core, &owner_line);

	if (!cache->core_obj[owner_core].opened)
		return true;

	return ocf_cache_line_is_used(cache, cache_line) ? true : false;
}
/*
 * Walk the partition's ALRU list from the tail (least recently written)
 * and collect up to clines_no flushable cache lines into dst[]. Lines
 * newer than the staleness cutoff terminate the walk; busy lines are
 * skipped but the walk continues.
 *
 * Fix: the original debug trace printed policy.meta.alru.timestamp
 * before 'policy' was ever initialized (undefined behavior with
 * OCF_ACP_DEBUG enabled). The pre-loop trace now logs only the cutoff.
 *
 * Returns the number of entries written to dst[].
 */
static int get_data_to_flush(struct flush_data *dst, uint32_t clines_no,
		struct ocf_cache *cache, struct ocf_user_part *part)
{
	struct alru_cleaning_policy_config *config;
	struct cleaning_policy_meta policy;
	ocf_cache_line_t cache_line;
	int to_flush = 0;
	uint32_t last_access;

	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	cache_line = part->runtime->cleaning.policy.alru.lru_tail;

	last_access = compute_timestamp(config);

	OCF_DEBUG_PARAM(cache, "Staleness cutoff last_access=%u",
			last_access);

	while (to_flush < clines_no &&
			more_blocks_to_flush(cache, cache_line, last_access)) {
		if (!block_is_busy(cache, cache_line)) {
			get_block_to_flush(&dst[to_flush], cache_line, cache);
			to_flush++;
		}
		/* Advance towards the head of the list */
		ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
		cache_line = policy.meta.alru.lru_prev;
	}

	OCF_DEBUG_PARAM(cache, "Collected items_to_clean=%u", to_flush);

	return to_flush;
}
/*
 * Collect flushable lines from the partition and hand them to the
 * generic cleaner. When nothing qualifies, record the current time as
 * the last cleaning-thread activity so clean_later() backs off.
 *
 * Caller must hold the metadata write lock.
 *
 * Returns the number of lines handed to the cleaner (0 when idle).
 */
static int perform_flushing(int clines_no, struct ocf_cache *cache, uint32_t io_queue,
		struct flush_data *flush_data, struct ocf_user_part *part)
{
	int to_clean = get_data_to_flush(flush_data, clines_no, cache, part);

	if (to_clean > 0) {
		struct ocf_cleaner_attribs attribs = {
			.cache_line_lock = true,
			.metadata_locked = true,
			.do_sort = true,
			.io_queue = io_queue
		};

		ocf_cleaner_do_flush_data(cache, flush_data,
				to_clean, &attribs);
	} else {
		/* Update timestamp only if there are no items to be cleaned */
		cache->device->runtime_meta->cleaning_thread_access =
			env_ticks_to_secs(env_get_tick_count());
	}

	return to_clean;
}
/*
 * Gate for a cleaning pass. Cleanup is skipped when:
 *  - recent user I/O activity was detected,
 *  - the configured wake-up interval has not elapsed yet,
 *  - the policy is configured to flush zero buffers per pass.
 */
static int is_cleanup_possible(ocf_cache_t cache)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
	uint32_t elapsed;

	if (check_for_io_activity(cache, cfg)) {
		OCF_DEBUG_PARAM(cache, "IO activity detected");
		return false;
	}

	if (clean_later(cache, &elapsed)) {
		OCF_DEBUG_PARAM(cache,
			"Cleaning policy configured to clean later "
			"delta=%u wake_up=%u", elapsed,
			cfg->thread_wakeup_time);
		return false;
	}

	/* Cleaning policy configured to not clean anything */
	if (cfg->flush_max_buffers == 0)
		return false;

	return true;
}
/*
 * Run one cleaning pass over a single partition: take the metadata
 * write lock (non-blocking - give up if contended), allocate a flush
 * descriptor array and flush up to clines_no lines.
 *
 * Returns the number of lines flushed (0 when cleaning was skipped,
 * the lock was busy, or allocation failed).
 */
static int cleanup(struct ocf_cache *cache, uint32_t clines_no,
		struct ocf_user_part *part, uint32_t io_queue)
{
	struct flush_data *flush_data;
	size_t flush_data_limit;
	int flushed_blocks = 0;

	if (!is_cleanup_possible(cache))
		return flushed_blocks;

	/* Non-blocking lock attempt - do not stall user I/O */
	if (OCF_METADATA_LOCK_WR_TRY())
		return flushed_blocks;

	OCF_REALLOC_INIT(&flush_data, &flush_data_limit);
	OCF_REALLOC(&flush_data, sizeof(flush_data[0]), clines_no,
			&flush_data_limit);
	if (!flush_data) {
		OCF_METADATA_UNLOCK_WR();
		ocf_cache_log(cache, log_warn, "No memory to allocate flush "
				"data for ALRU cleaning policy");
		return flushed_blocks;
	}

	flushed_blocks = perform_flushing(clines_no, cache, io_queue,
			flush_data, part);

	OCF_METADATA_UNLOCK_WR();

	OCF_REALLOC_DEINIT(&flush_data, &flush_data_limit);

	return flushed_blocks;
}
/*
 * ALRU perform_cleaning entry point. Walks partitions from highest to
 * lowest index in priority-sorted order, spending a shared budget of
 * flush_max_buffers lines across them.
 *
 * Returns the requested delay (in ms) before the next invocation:
 * thread_wakeup_time when budget remained (nothing more to do now),
 * 0 when the whole budget was consumed (call again immediately).
 */
int cleaning_alru_perform_cleaning(ocf_cache_t cache, uint32_t io_queue)
{
	struct ocf_user_part *sorted[OCF_IO_CLASS_MAX];
	struct alru_cleaning_policy_config *cfg;
	uint32_t budget;
	int idx;

	cfg = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	get_parts_sorted(sorted, cache);

	budget = cfg->flush_max_buffers;

	for (idx = OCF_IO_CLASS_MAX - 1; idx >= 0; idx--) {
		budget -= cleanup(cache, budget, sorted[idx], io_queue);
		if (budget == 0)
			break;
	}

	if (budget > 0)
		return cfg->thread_wakeup_time * 1000;

	return 0;
}

30
src/cleaning/alru.h Normal file
View File

@@ -0,0 +1,30 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_ALRU_H__
#define __LAYER_CLEANING_POLICY_ALRU_H__
#include "cleaning.h"
#include "alru_structs.h"
void cleaning_policy_alru_setup(struct ocf_cache *cache);
int cleaning_policy_alru_initialize(struct ocf_cache *cache,
int init_metadata);
void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
uint32_t cache_line);
void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
uint32_t cache_line);
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
uint32_t cache_line);
int cleaning_policy_alru_set_cleaning_param(struct ocf_cache *cache,
uint32_t param_id, uint32_t param_value);
int cleaning_policy_alru_get_cleaning_param(struct ocf_cache *cache,
uint32_t param_id, uint32_t *param_value);
int cleaning_alru_perform_cleaning(struct ocf_cache *cache, uint32_t io_queue);
#endif

View File

@@ -0,0 +1,32 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CLEANING_ALRU_STRUCTS_H__
#define __CLEANING_ALRU_STRUCTS_H__
#include "ocf/ocf.h"
#include "ocf_env.h"
struct alru_cleaning_policy_meta {
/* Lru pointers 2*4=8 bytes */
uint32_t timestamp;
uint32_t lru_prev;
uint32_t lru_next;
} __attribute__((packed));
struct alru_cleaning_policy_config {
uint32_t thread_wakeup_time; /* in seconds */
uint32_t stale_buffer_time; /* in seconds */
uint32_t flush_max_buffers; /* in lines */
uint32_t activity_threshold; /* in milliseconds */
};
struct alru_cleaning_policy {
env_atomic size;
uint32_t lru_head;
uint32_t lru_tail;
};
#endif

137
src/cleaning/cleaning.c Normal file
View File

@@ -0,0 +1,137 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "cleaning.h"
#include "alru.h"
#include "acp.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "../mngt/ocf_mngt_common.h"
#include "../metadata/metadata.h"
#define SLEEP_TIME_MS (1000)
/*
 * Dispatch table for all cleaning policies, indexed by ocf_cleaning_t.
 * "nop" implements no callbacks (every op is optional and NULL-checked
 * by callers); "alru" and "acp" wire in their respective
 * implementations.
 */
struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
	[ocf_cleaning_nop] = {
		.name = "nop",
	},
	[ocf_cleaning_alru] = {
		.setup = cleaning_policy_alru_setup,
		.init_cache_block = cleaning_policy_alru_init_cache_block,
		.purge_cache_block = cleaning_policy_alru_purge_cache_block,
		.purge_range = cleaning_policy_alru_purge_range,
		.set_hot_cache_line = cleaning_policy_alru_set_hot_cache_line,
		.initialize = cleaning_policy_alru_initialize,
		.set_cleaning_param = cleaning_policy_alru_set_cleaning_param,
		.get_cleaning_param = cleaning_policy_alru_get_cleaning_param,
		.perform_cleaning = cleaning_alru_perform_cleaning,
		.name = "alru",
	},
	[ocf_cleaning_acp] = {
		.setup = cleaning_policy_acp_setup,
		.init_cache_block = cleaning_policy_acp_init_cache_block,
		.purge_cache_block = cleaning_policy_acp_purge_block,
		.purge_range = cleaning_policy_acp_purge_range,
		.set_hot_cache_line = cleaning_policy_acp_set_hot_cache_line,
		.initialize = cleaning_policy_acp_initialize,
		.deinitialize = cleaning_policy_acp_deinitialize,
		.set_cleaning_param = cleaning_policy_acp_set_cleaning_param,
		.get_cleaning_param = cleaning_policy_acp_get_cleaning_param,
		.add_core = cleaning_policy_acp_add_core,
		.remove_core = cleaning_policy_acp_remove_core,
		.perform_cleaning = cleaning_policy_acp_perform_cleaning,
		.name = "acp",
	},
};
/*
 * Start the cleaner for a cache by delegating to the context-provided
 * cleaner initializer. Returns its result (0 on success).
 */
int ocf_start_cleaner(struct ocf_cache *cache)
{
	return ctx_cleaner_init(cache->owner, &cache->cleaner);
}
/* Stop the cache's cleaner via the context-provided stop callback. */
void ocf_stop_cleaner(struct ocf_cache *cache)
{
	ctx_cleaner_stop(cache->owner, &cache->cleaner);
}
/* Attach opaque user data to a cleaner handle. */
void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv)
{
	OCF_CHECK_NULL(c);
	c->priv = priv;
}
/* Retrieve the opaque user data previously set on a cleaner handle. */
void *ocf_cleaner_get_priv(ocf_cleaner_t c)
{
	OCF_CHECK_NULL(c);
	return c->priv;
}
/*
 * Recover the owning cache from a cleaner handle (the cleaner is
 * embedded in struct ocf_cache).
 */
ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c)
{
	OCF_CHECK_NULL(c);
	return container_of(c, struct ocf_cache, cleaner);
}
/*
 * For an incomplete cache, report whether the only dirty data belongs
 * to inactive (unopened) cores. Returns 1 when cleaning should be
 * skipped (no opened core holds dirty lines), 0 otherwise or when the
 * cache is not in the incomplete state.
 */
static int _ocf_cleaner_run_check_dirty_inactive(struct ocf_cache *cache)
{
	int core;

	if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state))
		return 0;

	for (core = 0; core < OCF_CORE_MAX; ++core) {
		if (!env_bit_test(core, cache->conf_meta->valid_object_bitmap))
			continue;

		if (!cache->core_obj[core].opened)
			continue;

		/* An opened core with dirty lines - cleaning must run */
		if (env_atomic_read(&(cache->
				core_runtime_meta[core].dirty_clines)))
			return 0;
	}

	return 1;
}
/*
 * Single iteration of the cleaner: acquire the cache write lock
 * non-blocking, dispatch to the configured policy's perform_cleaning
 * callback and return the policy-requested delay (ms) before the next
 * iteration. Any condition that prevents cleaning (cache not running,
 * management lock held, lock contention, only-inactive dirty data)
 * yields the default SLEEP_TIME_MS back-off.
 */
uint32_t ocf_cleaner_run(ocf_cleaner_t c, uint32_t io_queue)
{
	struct ocf_cache *cache;
	ocf_cleaning_t clean_type;
	int sleep = SLEEP_TIME_MS;

	cache = ocf_cleaner_get_cache(c);

	/* Do not involve cleaning when cache is not running
	 * (error, etc.).
	 */
	if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
			ocf_mngt_is_cache_locked(cache)) {
		return SLEEP_TIME_MS;
	}

	/* Sleep in case there is management operation in progress. */
	if (env_rwsem_down_write_trylock(&cache->lock) == 0)
		return SLEEP_TIME_MS;

	if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
		env_rwsem_up_write(&cache->lock);
		return SLEEP_TIME_MS;
	}

	clean_type = cache->conf_meta->cleaning_policy_type;

	ENV_BUG_ON(clean_type >= ocf_cleaning_max);

	/* Call cleaning. */
	if (cleaning_policy_ops[clean_type].perform_cleaning) {
		sleep = cleaning_policy_ops[clean_type].
			perform_cleaning(cache, io_queue);
	}

	env_rwsem_up_write(&cache->lock);

	return sleep;
}

75
src/cleaning/cleaning.h Normal file
View File

@@ -0,0 +1,75 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_H__
#define __LAYER_CLEANING_POLICY_H__
#include "alru_structs.h"
#include "nop_structs.h"
#include "acp_structs.h"
#define CLEANING_POLICY_CONFIG_BYTES 256
#define CLEANING_POLICY_TYPE_MAX 4
struct ocf_request;
struct cleaning_policy_config {
uint8_t data[CLEANING_POLICY_CONFIG_BYTES];
struct acp_cleaning_policy_config acp;
};
struct cleaning_policy {
union {
struct nop_cleaning_policy nop;
struct alru_cleaning_policy alru;
} policy;
};
/* Cleaning policy metadata per cache line */
struct cleaning_policy_meta {
union {
struct nop_cleaning_policy_meta nop;
struct alru_cleaning_policy_meta alru;
struct acp_cleaning_policy_meta acp;
} meta;
};
struct cleaning_policy_ops {
void (*setup)(struct ocf_cache *cache);
int (*initialize)(struct ocf_cache *cache, int init_metadata);
void (*deinitialize)(struct ocf_cache *cache);
int (*add_core)(struct ocf_cache *cache, ocf_core_id_t core_id);
void (*remove_core)(struct ocf_cache *cache, ocf_core_id_t core_id);
void (*init_cache_block)(struct ocf_cache *cache, uint32_t cache_line);
void (*purge_cache_block)(struct ocf_cache *cache,
uint32_t cache_line);
int (*purge_range)(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
void (*set_hot_cache_line)(struct ocf_cache *cache,
uint32_t cache_line);
int (*set_cleaning_param)(struct ocf_cache *cache,
uint32_t param_id, uint32_t param_value);
int (*get_cleaning_param)(struct ocf_cache *cache,
uint32_t param_id, uint32_t *param_value);
/**
* @brief Performs cleaning.
* @return requested time (in ms) of next call
*/
int (*perform_cleaning)(struct ocf_cache *cache,
uint32_t io_queue);
const char *name;
};
extern struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max];
struct ocf_cleaner {
void *priv;
};
int ocf_start_cleaner(struct ocf_cache *cache);
void ocf_stop_cleaner(struct ocf_cache *cache);
#endif

View File

@@ -0,0 +1,19 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
static inline void cleaning_policy_param_error(ocf_cache_t cache,
const char *param_name, uint32_t min, uint32_t max)
{
ocf_cache_log(cache, log_err, "Refusing setting flush "
"parameters because parameter %s is not within range "
"of <%d-%d>\n", param_name, min, max);
}
#define OCF_CLEANING_CHECK_PARAM(CACHE, VAL, MIN, MAX, NAME) ({ \
if (VAL < MIN || VAL > MAX) { \
cleaning_policy_param_error(CACHE, NAME, MIN, MAX); \
return -OCF_ERR_INVAL; \
} \
})

View File

@@ -0,0 +1,15 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
#define __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
struct nop_cleaning_policy_meta {
} __attribute__((packed));
struct nop_cleaning_policy {
};
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,176 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_CACHE_CONCURRENCY_H_
#define OCF_CACHE_CONCURRENCY_H_
/**
* @file utils_rq.h
* @brief OCF cache concurrency module
*/
/**
* @brief OCF cache concurrency module handle
*/
struct ocf_cache_concurrency;
/**
* @brief Initialize OCF cache concurrency module
*
* @param cache - OCF cache instance
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_cache_concurrency_init(struct ocf_cache *cache);
/**
* @biref De-Initialize OCF cache concurrency module
*
* @param cache - OCF cache instance
*/
void ocf_cache_concurrency_deinit(struct ocf_cache *cache);
/**
* @brief Get number of waiting (suspended) OCF requests in due to cache
* overlapping
*
* @param cache - OCF cache instance
*
* @return Number of suspended OCF requests
*/
uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache);
/**
* @brief Return memory footprint conusmed by cache concurrency module
*
* @param cache - OCF cache instance
*
* @return Memory footprint of cache concurrency module
*/
size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache);
/**
* @brief Lock OCF request for WRITE access (Lock all cache lines in map info)
*
* @note rq->resume callback has to be set
*
* @param rq - OCF request
*
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
*
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When lock will be acquired rq->resume be called
*/
int ocf_rq_trylock_wr(struct ocf_request *rq);
/**
* @brief Try complete lock of OCF request for WRITE access (Lock cache lines
* that marked as invalid)
*
* @param rq - OCF request
*
* @note If request not locked it will be added into waiting list
*
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
*
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When lock will be acquired rq->resume be called
*/
int ocf_rq_retrylock_wr(struct ocf_request *rq);
/**
* @brief Lock OCF request for READ access (Lock all cache lines in map info)
*
* @note rq->resume callback has to be set
*
* @param rq - OCF request
*
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
*
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When lock will be acquired rq->resume be called
*/
int ocf_rq_trylock_rd(struct ocf_request *rq);
/**
* @brief Unlock OCF request from WRITE access
*
* @param rq - OCF request
*/
void ocf_rq_unlock_wr(struct ocf_request *rq);
/**
* @brief Unlock OCF request from READ access
*
* @param rq - OCF request
*/
void ocf_rq_unlock_rd(struct ocf_request *rq);
/**
* @brief Unlock OCF request from READ or WRITE access
*
* @param rq - OCF request
*/
void ocf_rq_unlock(struct ocf_request *rq);
/**
* @Check if cache line is used.
*
* Cache line is used when:
* 1. It is locked for write or read access
* or
* 2. There is set locked bit in metadata
*
* @param cache - OCF cache instance
* @param line - Cache line to be unlocked
*
* @retval true - cache line is used
* @retval false - cache line is not used
*/
bool ocf_cache_line_is_used(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief Check if for specified cache line there are waiters
* on the waiting list
*
* @param cache - OCF cache instance
* @param line - Cache line to be checked for waiters
*
* @retval true - there are waiters
* @retval false - No waiters
*/
bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief un_lock request map info entry from from WRITE or READ access.
*
* @param cache - OCF cache instance
* @param rq - OCF request
* @param entry - request map entry number
*/
void ocf_rq_unlock_entry(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry);
/**
* @brief Release cache line read lock
*
* @param cache - OCF cache instance
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
/**
* @brief Attempt to lock cache line for read
*
* @param cache - OCF cache instance
* @param line - Cache line to be checked for waiters
*
* @retval true - read lock successfully acquired
* @retval false - failed to acquire read lock
*/
bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
#endif /* OCF_CONCURRENCY_H_ */

View File

@@ -0,0 +1,24 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_concurrency.h"
/*
 * Initialize all concurrency sub-modules for a cache. On failure the
 * partially-initialized state is torn down before returning the error.
 *
 * Returns 0 on success, non-zero error code otherwise.
 */
int ocf_concurrency_init(struct ocf_cache *cache)
{
	int result = 0;

	result = ocf_cache_concurrency_init(cache);

	if (result)
		ocf_concurrency_deinit(cache);

	return result;
}
/* Tear down all concurrency sub-modules for a cache. */
void ocf_concurrency_deinit(struct ocf_cache *cache)
{
	ocf_cache_concurrency_deinit(cache);
}

View File

@@ -0,0 +1,43 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_CONCURRENCY_H_
#define OCF_CONCURRENCY_H_
#include "../ocf_cache_priv.h"
/**
* @file utils_rq.h
* @brief OCF concurrency
*/
/**
* @brief Lock result - Lock acquired successfully
*/
#define OCF_LOCK_ACQUIRED 0
/**
* @brief Lock result - Lock not acquired, lock request added into waiting list
*/
#define OCF_LOCK_NOT_ACQUIRED 1
/**
* @brief Initialize OCF concurrency module
*
* @param cache - OCF cache instance
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_concurrency_init(struct ocf_cache *cache);
/**
* @biref De-Initialize OCF concurrency module
*
* @param cache - OCF cache instance
*/
void ocf_concurrency_deinit(struct ocf_cache *cache);
#include "ocf_cache_concurrency.h"
#endif /* OCF_CONCURRENCY_H_ */

314
src/engine/cache_engine.c Normal file
View File

@@ -0,0 +1,314 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_rd.h"
#include "engine_wt.h"
#include "engine_pt.h"
#include "engine_wi.h"
#include "engine_wa.h"
#include "engine_wb.h"
#include "engine_fast.h"
#include "engine_discard.h"
#include "engine_d2c.h"
#include "engine_ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_rq.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"
enum ocf_io_if_type {
/* Public OCF IO interfaces to be set by user */
OCF_IO_WT_IF,
OCF_IO_WB_IF,
OCF_IO_WA_IF,
OCF_IO_WI_IF,
OCF_IO_PT_IF,
OCF_IO_MAX_IF,
/* Private OCF interfaces */
OCF_IO_FAST_IF,
OCF_IO_DISCARD_IF,
OCF_IO_D2C_IF,
OCF_IO_OPS_IF,
OCF_IO_PRIV_MAX_IF,
};
static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = {
[OCF_IO_WT_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wt,
.name = "Write Through"
},
[OCF_IO_WB_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wb,
.name = "Write Back"
},
[OCF_IO_WA_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wa,
.name = "Write Around"
},
[OCF_IO_WI_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wi,
.name = "Write Invalidate"
},
[OCF_IO_PT_IF] = {
.read = ocf_read_pt,
.write = ocf_write_wi,
.name = "Pass Through",
},
[OCF_IO_FAST_IF] = {
.read = ocf_read_fast,
.write = ocf_write_fast,
.name = "Fast",
},
[OCF_IO_DISCARD_IF] = {
.read = ocf_discard,
.write = ocf_discard,
.name = "Discard",
},
[OCF_IO_D2C_IF] = {
.read = ocf_io_d2c,
.write = ocf_io_d2c,
.name = "Direct to core",
},
[OCF_IO_OPS_IF] = {
.read = ocf_engine_ops,
.write = ocf_engine_ops,
.name = "Ops engine",
},
};
static const struct ocf_io_if *cache_mode_io_if_map[ocf_req_cache_mode_max] = {
[ocf_req_cache_mode_wt] = &IO_IFS[OCF_IO_WT_IF],
[ocf_req_cache_mode_wb] = &IO_IFS[OCF_IO_WB_IF],
[ocf_req_cache_mode_wa] = &IO_IFS[OCF_IO_WA_IF],
[ocf_req_cache_mode_wi] = &IO_IFS[OCF_IO_WI_IF],
[ocf_req_cache_mode_pt] = &IO_IFS[OCF_IO_PT_IF],
[ocf_req_cache_mode_fast] = &IO_IFS[OCF_IO_FAST_IF],
[ocf_req_cache_mode_d2c] = &IO_IFS[OCF_IO_D2C_IF],
};
/*
 * Map a request cache mode to its IO interface.
 *
 * Fix: reject any value outside the interface map, not just the
 * ocf_req_cache_mode_max sentinel - the original would index
 * cache_mode_io_if_map out of bounds for values past the sentinel.
 *
 * Returns NULL for out-of-range modes; note entries inside the range
 * without an initializer (none currently) would also yield NULL.
 */
const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
{
	if (req_cache_mode < 0 || req_cache_mode >= ocf_req_cache_mode_max)
		return NULL;

	return cache_mode_io_if_map[req_cache_mode];
}
/*
 * Pop the oldest OCF request from a queue's IO list (FIFO order) under
 * the queue spinlock, then allocate its mapping array outside the lock.
 *
 * Returns the request ready for processing, or NULL when the queue is
 * empty or map allocation failed (in which case the request is
 * completed with its error code).
 */
struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
		struct ocf_queue *q)
{
	unsigned long lock_flags;
	struct ocf_request *rq;

	OCF_CHECK_NULL(q);

	/* LOCK */
	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);

	if (list_empty(&q->io_list)) {
		/* No items on the list */
		env_spinlock_unlock_irqrestore(&q->io_list_lock,
				lock_flags);
		return NULL;
	}

	/* Get the first request and remove it from the list */
	rq = list_first_entry(&q->io_list, struct ocf_request, list);

	env_atomic_dec(&q->io_no);
	list_del(&rq->list);

	/* UNLOCK */
	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);

	OCF_CHECK_NULL(rq);

	/* Deferred map allocation - done outside the spinlock */
	if (ocf_rq_alloc_map(rq)) {
		rq->complete(rq, rq->error);
		return NULL;
	}

	return rq;
}
/*
 * Report whether fallback pass-through is engaged: the feature is
 * enabled (threshold != OCF_CACHE_FALLBACK_PT_INACTIVE) and the cache
 * error counter has reached the threshold.
 */
bool ocf_fallback_pt_is_on(ocf_cache_t cache)
{
	int error_count = env_atomic_read(&cache->fallback_pt_error_counter);

	ENV_BUG_ON(error_count < 0);

	if (cache->fallback_pt_error_threshold ==
			OCF_CACHE_FALLBACK_PT_INACTIVE)
		return false;

	return error_count >= cache->fallback_pt_error_threshold;
}
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
/*
 * For the "full" sequential cutoff policy: cutoff engages only when the
 * cache is attached and nearly full (free list at or below the
 * eviction margin).
 */
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
	if (!env_atomic_read(&cache->attached))
		return false;

	return (cache->device->freelist_part->curr_size <= SEQ_CUTOFF_FULL_MARGIN);
}
/*
 * Decide whether an IO at (addr, bytes, dir) should bypass the cache
 * due to sequential cutoff: the stream must continue the previously
 * recorded stream (same direction, starting exactly at the last
 * position) and its accumulated length must reach the core's cutoff
 * threshold.
 */
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
		uint64_t bytes)
{
	ocf_cache_t cache = ocf_core_get_cache(core);

	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);

	switch (policy) {
	case ocf_seq_cutoff_policy_always:
		break;

	case ocf_seq_cutoff_policy_full:
		if (ocf_seq_cutoff_is_on(cache))
			break;
		/* fall through - cache not full, treat as "never" */

	case ocf_seq_cutoff_policy_never:
		return false;

	default:
		ENV_WARN(true, "Invalid sequential cutoff policy!");
		return false;
	}

	if (dir == core->seq_cutoff.rw &&
			core->seq_cutoff.last == addr &&
			core->seq_cutoff.bytes + bytes >=
			ocf_core_get_seq_cutoff_threshold(core)) {
		return true;
	}

	return false;
}
/*
 * Update per-core sequential stream tracking with a new request.
 * A request that does not continue the recorded stream (different
 * position or direction) restarts the accumulated byte counter.
 */
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
	bool continues_stream =
			req->byte_position == core->seq_cutoff.last &&
			req->rw == core->seq_cutoff.rw;

	if (!continues_stream) {
		/* Stream break - restart accounting for new direction */
		core->seq_cutoff.rw = req->rw;
		core->seq_cutoff.bytes = 0;
	}

	/* Record end-of-request position and extend the byte counter */
	core->seq_cutoff.last = req->byte_position + req->byte_length;
	core->seq_cutoff.bytes += req->byte_length;
}
/*
 * Compute the cache mode that will actually be used for an IO, applying
 * in order: pass-through for unaligned IO (when configured), the IO
 * class (partition) mode falling back to the cache-wide mode,
 * sequential cutoff, fallback pass-through, and WB->WT demotion while a
 * flush is in progress.
 */
ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
		ocf_core_t core, struct ocf_io *io)
{
	ocf_cache_mode_t mode;

	if (cache->pt_unaligned_io && !ocf_rq_is_4k(io->addr, io->bytes))
		return ocf_cache_mode_pt;

	mode = ocf_part_get_cache_mode(cache,
			ocf_part_class2id(cache, io->class));
	if (!ocf_cache_mode_is_valid(mode))
		mode = cache->conf_meta->cache_mode;

	if (ocf_seq_cutoff_check(core, io->dir, io->addr, io->bytes))
		mode = ocf_cache_mode_pt;

	if (ocf_fallback_pt_is_on(cache))
		mode = ocf_cache_mode_pt;

	/* Avoid creating new dirty data while a flush is running */
	if (mode == ocf_cache_mode_wb &&
			env_atomic_read(&cache->flush_started))
		mode = ocf_cache_mode_wt;

	return mode;
}
/*
 * Bind a request to the IO interface for the given mode and queue it
 * for asynchronous processing.
 *
 * Returns 0 on success, -EINVAL for an unknown cache mode.
 */
int ocf_engine_hndl_rq(struct ocf_request *rq,
		ocf_req_cache_mode_t req_cache_mode)
{
	ocf_cache_t cache = rq->cache;

	OCF_CHECK_NULL(cache);

	rq->io_if = ocf_get_io_if(req_cache_mode);
	if (!rq->io_if)
		return -EINVAL;

	/* Till OCF engine is not synchronous fully need to push OCF request
	 * to into OCF workers
	 */

	ocf_engine_push_rq_back(rq, true);

	return 0;
}
/*
 * Attempt synchronous fast-path handling of a request.
 *
 * Returns the IO interface's result for read/write, OCF_FAST_PATH_NO
 * for any other direction, or -EINVAL for an unknown cache mode.
 */
int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
		ocf_req_cache_mode_t req_cache_mode)
{
	const struct ocf_io_if *io_if = ocf_get_io_if(req_cache_mode);

	if (!io_if)
		return -EINVAL;

	if (rq->rw == OCF_READ)
		return io_if->read(rq);

	if (rq->rw == OCF_WRITE)
		return io_if->write(rq);

	return OCF_FAST_PATH_NO;
}
static void ocf_engine_hndl_2dc_rq(struct ocf_request *rq)
{
if (OCF_READ == rq->rw)
IO_IFS[OCF_IO_D2C_IF].read(rq);
else if (OCF_WRITE == rq->rw)
IO_IFS[OCF_IO_D2C_IF].write(rq);
else
ENV_BUG();
}
void ocf_engine_hndl_discard_rq(struct ocf_request *rq)
{
if (rq->d2c) {
ocf_engine_hndl_2dc_rq(rq);
return;
}
if (OCF_READ == rq->rw)
IO_IFS[OCF_IO_DISCARD_IF].read(rq);
else if (OCF_WRITE == rq->rw)
IO_IFS[OCF_IO_DISCARD_IF].write(rq);
else
ENV_BUG();
}
/*
 * Queue an "ops" (flush/metadata) request for asynchronous processing,
 * selecting the direct-to-core interface when the request bypasses the
 * cache.
 */
void ocf_engine_hndl_ops_rq(struct ocf_request *rq)
{
	rq->io_if = rq->d2c ?
			&IO_IFS[OCF_IO_D2C_IF] :
			&IO_IFS[OCF_IO_OPS_IF];

	ocf_engine_push_rq_back(rq, true);
}

82
src/engine/cache_engine.h Normal file
View File

@@ -0,0 +1,82 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CACHE_ENGINE_H_
#define __CACHE_ENGINE_H_
struct ocf_thread_priv;
struct ocf_request;
#define LOOKUP_HIT 5
#define LOOKUP_MISS 6
#define LOOKUP_MAPPED 8
/**
 * Request-level cache mode: the user-visible cache modes plus
 * engine-internal dispatch modes.
 */
typedef enum {
	/* modes inherited from user API */
	ocf_req_cache_mode_wt = ocf_cache_mode_wt, /*!< Write-through */
	ocf_req_cache_mode_wb = ocf_cache_mode_wb, /*!< Write-back */
	ocf_req_cache_mode_wa = ocf_cache_mode_wa, /*!< Write-around */
	ocf_req_cache_mode_pt = ocf_cache_mode_pt, /*!< Pass-through */
	ocf_req_cache_mode_wi = ocf_cache_mode_wi, /*!< Write-invalidate */

	/* internal modes */
	ocf_req_cache_mode_fast,
		/*!< Fast path */
	ocf_req_cache_mode_d2c,
		/*!< Direct to Core - pass through to core without
		touching cacheline metadata */

	ocf_req_cache_mode_max,
} ocf_req_cache_mode_t;
/**
 * IO interface - the read/write entry points a request is dispatched
 * through, plus a name used for logging.
 */
struct ocf_io_if {
	/** Handler invoked for OCF_READ requests */
	int (*read)(struct ocf_request *req);

	/** Handler invoked for OCF_WRITE requests */
	int (*write)(struct ocf_request *req);

	/** Human-readable interface name */
	const char *name;
};
ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
ocf_core_t core, struct ocf_io *io);
const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t cache_mode);
/*
 * Resolve a human-readable name of the IO interface serving @cache_mode.
 *
 * NOTE(review): the parameter is ocf_cache_mode_t while ocf_get_io_if()
 * takes ocf_req_cache_mode_t; the user-visible enumerators are defined
 * with matching values, so the implicit conversion is benign for valid
 * user modes - confirm no caller passes internal request modes here.
 */
static inline const char *ocf_get_io_iface_name(ocf_cache_mode_t cache_mode)
{
	const struct ocf_io_if *iface = ocf_get_io_if(cache_mode);

	return iface ? iface->name : "Unknown";
}
/* Check whether @mode is one of the user-visible cache modes. */
static inline bool ocf_cache_mode_is_valid(ocf_cache_mode_t mode)
{
	if (mode < ocf_cache_mode_wt)
		return false;

	return mode < ocf_cache_mode_max;
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
bool ocf_fallback_pt_is_on(ocf_cache_t cache);
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes);
struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
struct ocf_queue *q);
int ocf_engine_hndl_rq(struct ocf_request *rq,
ocf_req_cache_mode_t req_cache_mode);
#define OCF_FAST_PATH_YES 7
#define OCF_FAST_PATH_NO 13
int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
ocf_req_cache_mode_t req_cache_mode);
void ocf_engine_hndl_discard_rq(struct ocf_request *rq);
void ocf_engine_hndl_ops_rq(struct ocf_request *rq);
#endif

105
src/engine/engine_bf.c Normal file
View File

@@ -0,0 +1,105 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "engine_bf.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "bf"
#include "engine_debug.h"
/* Decrements and checks if queue may be unblocked again */
/* Decrements and checks if queue may be unblocked again: clears the
 * blocked flag once the pending backfill count drops below the
 * configured unblock threshold.
 */
static inline void backfill_queue_dec_unblock(struct ocf_cache *cache)
{
	int pending;

	env_atomic_dec(&cache->pending_read_misses_list_count);

	if (!env_atomic_read(&cache->pending_read_misses_list_blocked))
		return;

	pending = env_atomic_read(&cache->pending_read_misses_list_count);
	if (pending < cache->backfill.queue_unblock_size)
		env_atomic_set(&cache->pending_read_misses_list_blocked, 0);
}
/* Bump the pending backfill count; set the blocked flag when the queue
 * reaches its maximum size.
 */
static inline void backfill_queue_inc_block(struct ocf_cache *cache)
{
	int pending = env_atomic_inc_return(
			&cache->pending_read_misses_list_count);

	if (pending >= cache->backfill.max_queue_size)
		env_atomic_set(&cache->pending_read_misses_list_blocked, 1);
}
/*
 * Completion callback for backfill cache writes.
 *
 * Records the error (if any); the last sub-request to complete releases
 * the backfill data buffer and either invalidates the affected lines
 * (on error) or unlocks and frees the request.
 *
 * Fix: use the local `cache` consistently instead of mixing it with
 * `rq->cache` (they are the same object; the mix was an inconsistency).
 */
static void _ocf_backfill_do_io(void *private_data, int error)
{
	struct ocf_request *rq = (struct ocf_request *)private_data;
	struct ocf_cache *cache = rq->cache;

	if (error)
		rq->error = error;

	if (rq->error)
		inc_fallback_pt_error_counter(cache);

	/* Handle callback-caller race to let only one of the two complete the
	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
	 */
	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
		/* We must free the pages we have allocated */
		ctx_data_secure_erase(cache->owner, rq->data);
		ctx_data_munlock(cache->owner, rq->data);
		ctx_data_free(cache->owner, rq->data);
		rq->data = NULL;

		if (rq->error) {
			/* Count a cache write error and invalidate the
			 * mapped lines so stale data is never served
			 */
			env_atomic_inc(&cache->core_obj[rq->core_id].
					counters->cache_errors.write);
			ocf_engine_invalidate(rq);
		} else {
			ocf_rq_unlock(rq);

			/* always free the request at the last point
			 * of the completion path
			 */
			ocf_rq_put(rq);
		}
	}
}
/* Issue the cache writes that back-fill data read from the core. */
static int _ocf_backfill_do(struct ocf_request *rq)
{
	unsigned int io_count;

	backfill_queue_dec_unblock(rq->cache);

	io_count = ocf_engine_io_count(rq);

	/* One completion will arrive per issued cache IO */
	env_atomic_set(&rq->req_remaining, io_count);

	rq->data = rq->cp_data;

	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_WRITE, io_count,
			_ocf_backfill_do_io, rq);

	return 0;
}
/* Backfill uses the same handler for both request directions */
static const struct ocf_io_if _io_if_backfill = {
	.read = _ocf_backfill_do,
	.write = _ocf_backfill_do,
};

/*
 * Queue a backfill (cache fill after a read miss served from core).
 * Increments the pending-backfill counter, which may block further
 * read-miss processing until the queue drains.
 */
void ocf_engine_backfill(struct ocf_request *rq)
{
	backfill_queue_inc_block(rq->cache);
	ocf_engine_push_rq_front_if(rq, &_io_if_backfill, true);
}

11
src/engine/engine_bf.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_BF_H_
#define ENGINE_BF_H_
void ocf_engine_backfill(struct ocf_request *rq);
#endif /* ENGINE_BF_H_ */

621
src/engine/engine_common.c Normal file
View File

@@ -0,0 +1,621 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "engine_common.h"
#define OCF_ENGINE_DEBUG_IO_NAME "common"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cleaner.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"
/*
 * Log an IO error for the request's core; optionally take the whole
 * cache out of the running state for fatal errors.
 */
void ocf_engine_error(struct ocf_request *rq,
		bool stop_cache, const char *msg)
{
	struct ocf_cache *cache = rq->cache;

	if (stop_cache) {
		/* Fatal - the cache can no longer be considered running */
		env_bit_clear(ocf_cache_state_running, &cache->cache_state);
	}

	ocf_core_log(&cache->core_obj[rq->core_id], log_err,
			"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
			BYTES_TO_SECTORS(rq->byte_position), rq->byte_length);
}
/*
 * Look up (core_id, core_line) in the metadata hash table.
 *
 * Fills @entry with the hash bucket and either LOOKUP_HIT plus the
 * matching cache line index, or LOOKUP_MISS with coll_idx set to the
 * invalid sentinel (collision_table_entries).
 */
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
		struct ocf_map_info *entry, ocf_core_id_t core_id,
		uint64_t core_line)
{
	ocf_cache_line_t line;
	ocf_cache_line_t hash_key;

	hash_key = ocf_metadata_hash_func(cache, core_line, core_id);

	/* Initially assume that we have cache miss.
	 * Hash points to proper bucket.
	 */
	entry->hash_key = hash_key;
	entry->status = LOOKUP_MISS;
	entry->coll_idx = cache->device->collision_table_entries;
	entry->core_line = core_line;

	line = ocf_metadata_get_hash(cache, hash_key);

	/* Walk the bucket's collision chain looking for an exact
	 * (core id, core line) match.
	 */
	while (line != cache->device->collision_table_entries) {
		ocf_core_id_t curr_core_id;
		uint64_t curr_core_line;

		ocf_metadata_get_core_info(cache, line, &curr_core_id,
				&curr_core_line);

		if (core_id == curr_core_id && curr_core_line == core_line) {
			entry->coll_idx = line;
			entry->status = LOOKUP_HIT;
			break;
		}

		line = ocf_metadata_get_collision_next(cache, line);
	}
}
/*
 * Verify that a previously looked-up map entry still points at the same
 * (core_id, core_line). Returns 0 when the mapping is intact (or the
 * entry was a miss), -1 when it was remapped in the meantime.
 */
static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
		struct ocf_map_info *entry, ocf_core_id_t core_id)
{
	ocf_core_id_t mapped_core_id;
	uint64_t mapped_core_line;

	if (entry->status == LOOKUP_MISS)
		return 0;

	ENV_BUG_ON(entry->coll_idx >= cache->device->collision_table_entries);

	ocf_metadata_get_core_info(cache, entry->coll_idx, &mapped_core_id,
			&mapped_core_line);

	if (mapped_core_id != core_id)
		return -1;

	if (mapped_core_line != entry->core_line)
		return -1;

	return 0;
}
/*
 * Update per-request info counters (hit/invalid/dirty/re-partition and
 * the sequential flag) based on map entry @entry's lookup status.
 */
void ocf_engine_update_rq_info(struct ocf_cache *cache,
		struct ocf_request *rq, uint32_t entry)
{
	uint8_t start_sector = 0;
	uint8_t end_sector = ocf_line_end_sector(cache);
	struct ocf_map_info *_entry = &(rq->map[entry]);

	/* The first and last core lines may be covered only partially by
	 * the request - narrow the inspected sector range accordingly.
	 */
	if (entry == 0) {
		start_sector = BYTES_TO_SECTORS(rq->byte_position)
				% ocf_line_sectors(cache);
	}

	if (entry == rq->core_line_count - 1) {
		end_sector = BYTES_TO_SECTORS(rq->byte_position +
				rq->byte_length - 1) % ocf_line_sectors(cache);
	}

	/* Handle return value */
	switch (_entry->status) {
	case LOOKUP_HIT:
		if (metadata_test_valid_sec(cache, _entry->coll_idx,
				start_sector, end_sector)) {
			rq->info.hit_no++;
		} else {
			/* Mapped, but the requested sectors are not valid */
			rq->info.invalid_no++;
		}

		/* Check request is dirty */
		if (metadata_test_dirty(cache, _entry->coll_idx)) {
			rq->info.dirty_any++;

			/* Check if cache line is fully dirty */
			if (metadata_test_dirty_all(cache, _entry->coll_idx))
				rq->info.dirty_all++;
		}

		if (rq->part_id != ocf_metadata_get_partition_id(cache,
				_entry->coll_idx)) {
			/*
			 * Need to move this cache line into other partition
			 */
			_entry->re_part = rq->info.re_part = true;
		}

		break;
	case LOOKUP_MISS:
		rq->info.seq_req = false;
		break;
	case LOOKUP_MAPPED:
		break;
	default:
		ENV_BUG();
		break;
	}

	/* Check if cache hit is sequential: consecutive logical lines must
	 * map to consecutive physical cache lines.
	 */
	if (rq->info.seq_req && entry) {
		if (ocf_metadata_map_lg2phy(cache,
				(rq->map[entry - 1].coll_idx)) + 1 !=
				ocf_metadata_map_lg2phy(cache,
				_entry->coll_idx)) {
			rq->info.seq_req = false;
		}
	}
}
/*
 * Traverse the request: look up every covered core line in metadata
 * (no eviction), refresh eviction hotness for hits and update request
 * info counters.
 */
void ocf_engine_traverse(struct ocf_request *rq)
{
	uint32_t i;
	uint64_t core_line;

	struct ocf_cache *cache = rq->cache;
	ocf_core_id_t core_id = rq->core_id;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_rq_clear_info(rq);
	rq->info.seq_req = true;

	for (i = 0, core_line = rq->core_line_first;
			core_line <= rq->core_line_last; core_line++, i++) {

		struct ocf_map_info *entry = &(rq->map[i]);

		ocf_engine_lookup_map_entry(cache, entry, core_id,
				core_line);

		if (entry->status != LOOKUP_HIT) {
			rq->info.seq_req = false;

			/* There is miss then lookup for next map entry */
			OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
					entry->core_line);
			continue;
		}

		OCF_DEBUG_PARAM(cache, "Hit, cache line %u, core line = %llu",
				entry->coll_idx, entry->core_line);

		/* Update eviction (LRU) */
		ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);

		ocf_engine_update_rq_info(cache, rq, i);
	}

	OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
			"Yes" : "No");
}
/*
 * Re-validate an already traversed request: check that each mapped
 * entry still points at its core line (the mapping may have changed
 * while the request waited for locks). Stale entries are flagged
 * invalid.
 *
 * Returns 0 when the whole mapping is still valid, non-zero otherwise.
 */
int ocf_engine_check(struct ocf_request *rq)
{
	int result = 0;
	uint32_t i;
	uint64_t core_line;

	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_rq_clear_info(rq);
	rq->info.seq_req = true;

	for (i = 0, core_line = rq->core_line_first;
			core_line <= rq->core_line_last; core_line++, i++) {

		struct ocf_map_info *entry = &(rq->map[i]);

		if (entry->status == LOOKUP_MISS) {
			rq->info.seq_req = false;
			continue;
		}

		if (_ocf_engine_check_map_entry(cache, entry, rq->core_id)) {
			/* Mapping is invalid */
			entry->invalid = true;
			rq->info.seq_req = false;

			OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
					entry->coll_idx);

			result = -1;
		} else {
			entry->invalid = false;

			OCF_DEBUG_PARAM(cache, "Valid, Cache line %u",
					entry->coll_idx);

			ocf_engine_update_rq_info(cache, rq, i);
		}
	}

	OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
			"Yes" : "No");

	return result;
}
/*
 * Map a single core line to a cache line taken from the free list and
 * wire it into partition, collision and eviction metadata.
 *
 * Sets rq->info.eviction_error when the free list is empty.
 */
static void ocf_engine_map_cache_line(struct ocf_request *rq,
		uint64_t core_line, unsigned int hash_index,
		ocf_cache_line_t *cache_line)
{
	struct ocf_cache *cache = rq->cache;
	ocf_part_id_t part_id = rq->part_id;
	ocf_cleaning_t clean_policy_type;

	if (cache->device->freelist_part->curr_size == 0) {
		/* No free cache line - caller must evict or pass through */
		rq->info.eviction_error = 1;
		return;
	}

	*cache_line = cache->device->freelist_part->head;

	/* add_to_collision_list changes .next_col and other fields for entry
	 * so updated last_cache_line_give must be updated before calling it.
	 */

	ocf_metadata_remove_from_free_list(cache, *cache_line);

	ocf_metadata_add_to_partition(cache, part_id, *cache_line);

	/* Add the block to the corresponding collision list */
	ocf_metadata_add_to_collision(cache, rq->core_id, core_line, hash_index,
			*cache_line);

	ocf_eviction_init_cache_line(cache, *cache_line, part_id);

	/* Update LRU:: Move this node to head of lru list. */
	ocf_eviction_set_hot_cache_line(cache, *cache_line);

	/* Update dirty cache-block list */
	clean_policy_type = cache->conf_meta->cleaning_policy_type;

	ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max);

	if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL)
		cleaning_policy_ops[clean_policy_type].
				init_cache_block(cache, *cache_line);
}
/*
 * Roll back after a mapping (eviction) error: invalidate every cache
 * line this request freshly mapped (status LOOKUP_MAPPED); hits and
 * misses are left untouched.
 */
static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
		struct ocf_request *rq)
{
	uint32_t i;
	struct ocf_map_info *entry;

	for (i = 0; i < rq->core_line_count; i++) {
		entry = &(rq->map[i]);

		switch (entry->status) {
		case LOOKUP_HIT:
		case LOOKUP_MISS:
			break;
		case LOOKUP_MAPPED:
			OCF_DEBUG_RQ(rq, "Canceling cache line %u",
					entry->coll_idx);
			set_cache_line_invalid_no_flush(cache, 0,
					ocf_line_end_sector(cache),
					entry->coll_idx);
			break;
		default:
			ENV_BUG();
			break;
		}
	}
}
/*
 * Map the whole request: run eviction if lines are unmapped, then map
 * every non-hit core line to a cache line. On eviction error partially
 * mapped lines are rolled back and rq->info.eviction_error stays set so
 * the caller can fall back to pass-through.
 */
void ocf_engine_map(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	uint32_t i;
	struct ocf_map_info *entry;
	uint64_t core_line;
	int status = LOOKUP_MAPPED;
	ocf_core_id_t core_id = rq->core_id;

	if (ocf_engine_unmapped_count(rq))
		status = space_managment_evict_do(cache, rq,
				ocf_engine_unmapped_count(rq));

	if (rq->info.eviction_error)
		return;

	ocf_rq_clear_info(rq);
	rq->info.seq_req = true;

	OCF_DEBUG_TRACE(rq->cache);

	for (i = 0, core_line = rq->core_line_first;
			core_line <= rq->core_line_last; core_line++, i++) {
		entry = &(rq->map[i]);

		ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);

		if (entry->status != LOOKUP_HIT) {
			ocf_engine_map_cache_line(rq, entry->core_line,
					entry->hash_key, &entry->coll_idx);

			if (rq->info.eviction_error) {
				/*
				 * Eviction error (mapping error), need to
				 * clean, return and do pass through
				 */
				OCF_DEBUG_RQ(rq, "Eviction ERROR when mapping");
				ocf_engine_map_hndl_error(cache, rq);
				break;
			}

			entry->status = status;
		}

		OCF_DEBUG_PARAM(rq->cache,
				"%s, cache line %u, core line = %llu",
				entry->status == LOOKUP_HIT ? "Hit" : "Map",
				entry->coll_idx, entry->core_line);

		ocf_engine_update_rq_info(cache, rq, i);
	}

	OCF_DEBUG_PARAM(rq->cache, "Sequential - %s", rq->info.seq_req ?
			"Yes" : "No");
}
/*
 * Completion of engine-triggered cleaning: on success clear the dirty
 * counters and resume the request; on failure complete it with the
 * error and release it.
 */
static void _ocf_engine_clean_end(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (!error) {
		/* Lines flushed - the request no longer covers dirty data */
		rq->info.dirty_any = 0;
		rq->info.dirty_all = 0;
		ocf_engine_push_rq_front(rq, true);
		return;
	}

	OCF_DEBUG_RQ(rq, "Cleaning ERROR");
	rq->error |= error;

	/* Stop processing: unlock, complete towards the caller, release */
	ocf_rq_unlock(rq);

	rq->complete(rq, error);

	ocf_rq_put(rq);
}
/*
 * Cleaner getter callback: produce (via @line) consecutive dirty HIT
 * lines from the request map, advancing attribs->getter_item across
 * calls.
 *
 * Returns 0 when a line was produced, -1 when the map is exhausted.
 */
static int _ocf_engine_clean_getter(struct ocf_cache *cache,
		void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
	struct ocf_cleaner_attribs *attribs = getter_context;
	struct ocf_request *rq = attribs->cmpl_context;

	for (; attribs->getter_item < rq->core_line_count;
			attribs->getter_item++) {

		struct ocf_map_info *entry = &rq->map[attribs->getter_item];

		if (entry->status != LOOKUP_HIT)
			continue;

		if (!metadata_test_dirty(cache, entry->coll_idx))
			continue;

		/* Line to be cleaned found, go to next item and return */
		*line = entry->coll_idx;
		attribs->getter_item++;
		return 0;
	}

	return -1;
}
/*
 * Flush the request's dirty cache lines to the core device. On success
 * the request's dirty info is cleared and it is re-queued; on failure
 * it is completed with the error (see _ocf_engine_clean_end).
 */
void ocf_engine_clean(struct ocf_request *rq)
{
	/* Initialize attributes for cleaner */
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = false,
		.cmpl_context = rq,
		.cmpl_fn = _ocf_engine_clean_end,

		.getter = _ocf_engine_clean_getter,
		.getter_context = &attribs,
		.getter_item = 0,

		.count = rq->info.dirty_any,
		.io_queue = rq->io_queue
	};

	/* Start cleaning */
	ocf_cleaner_fire(rq->cache, &attribs);
}
/* Account the request's byte length in the per-partition block stats. */
void ocf_engine_update_block_stats(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_counters_block *blocks;

	blocks = &cache->core_obj[rq->core_id].counters->
			part_counters[rq->part_id].blocks;

	switch (rq->rw) {
	case OCF_READ:
		env_atomic64_add(rq->byte_length, &blocks->read_bytes);
		break;
	case OCF_WRITE:
		env_atomic64_add(rq->byte_length, &blocks->write_bytes);
		break;
	default:
		ENV_BUG();
	}
}
/* Account the request in per-partition request statistics (total and
 * full/partial miss counters).
 */
void ocf_engine_update_request_stats(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_counters_req *reqs;

	if (rq->rw == OCF_READ) {
		reqs = &cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].read_reqs;
	} else if (rq->rw == OCF_WRITE) {
		reqs = &cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].write_reqs;
	} else {
		ENV_BUG();
	}

	env_atomic64_inc(&reqs->total);

	if (rq->info.hit_no == 0)
		env_atomic64_inc(&reqs->full_miss);
	else if (rq->info.hit_no < rq->core_line_count)
		env_atomic64_inc(&reqs->partial_miss);
}
/*
 * Append the request to the tail of its IO queue and kick the queue.
 * Non-internal requests also refresh the cache's last-access timestamp.
 */
void ocf_engine_push_rq_back(struct ocf_request *rq, bool allow_sync)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_queue *q;
	unsigned long flags;

	ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
	q = &cache->io_queues[rq->io_queue];

	INIT_LIST_HEAD(&rq->list);

	env_spinlock_lock_irqsave(&q->io_list_lock, flags);
	list_add_tail(&rq->list, &q->io_list);
	env_atomic_inc(&q->io_no);
	env_spinlock_unlock_irqrestore(&q->io_list_lock, flags);

	if (!rq->info.internal) {
		env_atomic_set(&cache->last_access_ms,
				env_ticks_to_msecs(env_get_tick_count()));
	}

	ctx_queue_kick(cache->owner, q, allow_sync);
}
/*
 * Insert the request at the head of its IO queue and kick the queue.
 * Non-internal requests also refresh the cache's last-access timestamp.
 */
void ocf_engine_push_rq_front(struct ocf_request *rq, bool allow_sync)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_queue *q;
	unsigned long flags;

	ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
	q = &cache->io_queues[rq->io_queue];

	INIT_LIST_HEAD(&rq->list);

	env_spinlock_lock_irqsave(&q->io_list_lock, flags);
	list_add(&rq->list, &q->io_list);
	env_atomic_inc(&q->io_no);
	env_spinlock_unlock_irqrestore(&q->io_list_lock, flags);

	if (!rq->info.internal) {
		env_atomic_set(&cache->last_access_ms,
				env_ticks_to_msecs(env_get_tick_count()));
	}

	ctx_queue_kick(cache->owner, q, allow_sync);
}
/* Attach @io_if to the request and push it to the front of its queue. */
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
		const struct ocf_io_if *io_if,
		bool allow_sync)
{
	rq->error = 0; /* NOTE(review): presumably clears a stale error from
			  a previous processing attempt before re-dispatch -
			  confirm no caller relies on rq->error surviving */
	rq->io_if = io_if;

	ocf_engine_push_rq_front(rq, allow_sync);
}
/*
 * Bump the fallback pass-through error counter; log once when the
 * configured threshold is reached. Counting is disabled when the
 * threshold is OCF_CACHE_FALLBACK_PT_INACTIVE.
 */
void inc_fallback_pt_error_counter(ocf_cache_t cache)
{
	int count;

	ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);

	if (cache->fallback_pt_error_threshold ==
			OCF_CACHE_FALLBACK_PT_INACTIVE)
		return;

	count = env_atomic_inc_return(&cache->fallback_pt_error_counter);
	if (count == cache->fallback_pt_error_threshold) {
		ocf_cache_log(cache, log_info, "Error threshold reached. "
				"Fallback Pass Through activated\n");
	}
}
/*
 * Re-validate a resumed request's mapping under the metadata read lock,
 * then continue with the request's original IO interface (stashed in
 * rq->priv by ocf_engine_on_resume). An inconsistent mapping fails the
 * request with -EINVAL.
 */
static int _ocf_engine_refresh(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	int result;

	OCF_METADATA_LOCK_RD();
	/* Check under metadata RD lock */

	result = ocf_engine_check(rq);

	OCF_METADATA_UNLOCK_RD();

	if (result == 0) {

		/* Refresh successful, can process with original IO interface */
		rq->io_if = rq->priv;

		rq->resume = NULL;
		rq->priv = NULL;

		if (rq->rw == OCF_READ)
			rq->io_if->read(rq);
		else if (rq->rw == OCF_WRITE)
			rq->io_if->write(rq);
		else
			ENV_BUG();
	} else {
		ENV_WARN(true, OCF_PREFIX_SHORT" Inconsistent request");
		rq->error = -EINVAL;

		/* Complete request */
		rq->complete(rq, rq->error);

		/* Release WRITE lock of request */
		ocf_rq_unlock(rq);

		/* Release OCF request */
		ocf_rq_put(rq);
	}

	return 0;
}
/* Interface used to re-check a request's mapping after it resumes */
static const struct ocf_io_if _io_if_refresh = {
	.read = _ocf_engine_refresh,
	.write = _ocf_engine_refresh,
};

/*
 * Resume callback for requests that waited on cache line locks: stash
 * the original IO interface in rq->priv and route the request through
 * _io_if_refresh, which re-validates the mapping before continuing.
 */
void ocf_engine_on_resume(struct ocf_request *rq)
{
	ENV_BUG_ON(rq->priv);
	ENV_BUG_ON(ocf_engine_on_resume != rq->resume);
	OCF_CHECK_NULL(rq->io_if);

	/* Exchange IO interface */
	rq->priv = (void *)rq->io_if;

	OCF_DEBUG_RQ(rq, "On resume");

	ocf_engine_push_rq_front_if(rq, &_io_if_refresh, false);
}

223
src/engine/engine_common.h Normal file
View File

@@ -0,0 +1,223 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_COMMON_H_
#define ENGINE_COMMON_H_
#include "../ocf_request.h"
/**
* @file engine_common.h
* @brief OCF cache engine common module
*/
/**
* @brief Signal and handle OCF request error
*
* @param rq OCF request
* @param stop_cache Indicates if OCF cache engine need to be stopped
* @param msg Error message to be printed into log
*/
void ocf_engine_error(struct ocf_request *rq, bool stop_cache,
const char *msg);
/**
* @brief Check if OCF request is hit
*
* @param rq OCF request
*
* @retval true HIT
* @retval false MISS
*/
static inline bool ocf_engine_is_hit(struct ocf_request *rq)
{
	/* Every core line of the request was found in cache */
	return rq->core_line_count == rq->info.hit_no;
}
/**
* @brief Check if OCF request is miss
*
* @param rq OCF request
*
* @retval true MISS
* @retval false HIT
*/
#define ocf_engine_is_miss(rq) (!ocf_engine_is_hit(rq))
/**
* @brief Check if all cache lines are mapped fully
*
* @param rq OCF request
*
* @retval true request is mapped fully
* @retval false request is not mapped fully and eviction might be run in
* order to complete mapping
*/
static inline bool ocf_engine_is_mapped(struct ocf_request *rq)
{
	/* Hits plus invalid (mapped but stale) entries cover the request */
	return rq->core_line_count == rq->info.hit_no + rq->info.invalid_no;
}
/**
* @brief Check if all cache lines are dirty
*
* @param rq OCF request
*
* @retval true request is dirty fully
* @retval false request is not dirty fully
*/
static inline bool ocf_engine_is_dirty_all(struct ocf_request *rq)
{
	return rq->core_line_count == rq->info.dirty_all;
}
/**
* @brief Get number of mapped cache lines
*
* @param rq OCF request
*
* @return Number of mapped cache lines
*/
static inline uint32_t ocf_engine_mapped_count(struct ocf_request *rq)
{
	uint32_t mapped = rq->info.hit_no;

	mapped += rq->info.invalid_no;

	return mapped;
}
/**
* @brief Get number of unmapped cache lines
*
* @param rq OCF request
*
* @return Number of unmapped cache lines
*/
static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *rq)
{
	uint32_t mapped = rq->info.hit_no + rq->info.invalid_no;

	return rq->core_line_count - mapped;
}
/**
* @brief Get number of IOs to perform cache read or write
*
* @param rq OCF request
*
* @return Count of cache IOs
*/
static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
{
	/* A sequential request can be served with a single cache IO */
	if (rq->info.seq_req)
		return 1;

	return rq->core_line_count;
}
/**
* @brief Clean request (flush dirty data to the core device)
*
* @param rq OCF request
*
* @note After successful cleaning:
* - Dirty status bits in request info will be cleared
* - Request will be pushed front, <B>IO interface need to be set</B>
*
* @note In case of failure:
* - unlock request
* - complete request to the application
* - free request
*/
void ocf_engine_clean(struct ocf_request *rq);
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id,
uint64_t core_line);
/**
* @brief Traverse request in order to lookup cache lines. If there are misses,
* eviction needs to be called. This process is called 'mapping'.
*
* @note This function CALL EVICTION
*
* @param rq OCF request
*/
void ocf_engine_map(struct ocf_request *rq);
/**
* @brief Traverse OCF request (lookup cache)
*
* @note This function DO NOT CALL EVICTION. Only lookup in metadata is
* performed. Main purpose of this function is to check if there is a HIT.
*
* @param rq OCF request
*/
void ocf_engine_traverse(struct ocf_request *rq);
/**
* @brief Check if OCF request mapping is still valid
*
* @note If mapping entries is invalid it will be marked
*
* @param rq OCF request
*
* @retval 0 - OCF request mapping is valid
* @return Non zero - OCF request mapping is invalid and need to call re-mapping
*/
int ocf_engine_check(struct ocf_request *rq);
/**
* @brief Update OCF request info
*
* @param rq OCF request
*/
void ocf_engine_update_rq_info(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry);
/**
* @brief Update OCF request block statistics for an exported object
*
* @param rq OCF request
*/
void ocf_engine_update_block_stats(struct ocf_request *rq);
/**
* @brief Update OCF request statistics for an exported object
* (not applicable to write WI and to read WT)
*
* @param rq OCF request
*/
void ocf_engine_update_request_stats(struct ocf_request *rq);
/**
* @brief Push OCF request to the back of the OCF thread worker queue
*
* @param rq OCF request
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_rq_back(struct ocf_request *rq,
bool allow_sync);
/**
* @brief Push OCF request to the front of the OCF thread worker queue
*
* @param rq OCF request
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_rq_front(struct ocf_request *rq,
bool allow_sync);
/**
* @brief Set interface and push from request to the OCF thread worker queue
*
* @param rq OCF request
* @param io_if IO interface
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
const struct ocf_io_if *io_if,
bool allow_sync);
void inc_fallback_pt_error_counter(ocf_cache_t cache);
void ocf_engine_on_resume(struct ocf_request *rq);
#endif /* ENGINE_COMMON_H_ */

72
src/engine/engine_d2c.c Normal file
View File

@@ -0,0 +1,72 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_d2c.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "d2c"
#include "engine_debug.h"
/*
 * Completion of a direct-to-core IO: account core errors, complete the
 * original request and drop the engine's reference.
 */
static void _ocf_d2c_completion(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	rq->error = error;

	OCF_DEBUG_RQ(rq, "Completion");

	if (error) {
		rq->info.core_error = 1;

		if (OCF_READ == rq->rw) {
			env_atomic_inc(&rq->cache->core_obj[rq->core_id].
					counters->core_errors.read);
		} else {
			env_atomic_inc(&rq->cache->core_obj[rq->core_id].
					counters->core_errors.write);
		}
	}

	/* Complete request */
	rq->complete(rq, rq->error);

	/* Release OCF request */
	ocf_rq_put(rq);
}
/*
 * Direct-to-core IO path: submit the request straight to the core
 * device without touching cache metadata and account it as a
 * pass-through in the per-partition statistics.
 */
int ocf_io_d2c(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw,
			_ocf_d2c_completion, rq);

	ocf_engine_update_block_stats(rq);

	if (rq->rw == OCF_READ) {
		env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].read_reqs.pass_through);
	} else {
		env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].write_reqs.pass_through);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

11
src/engine/engine_d2c.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_2DC_H_
#define ENGINE_2DC_H_
int ocf_io_d2c(struct ocf_request *rq);
#endif /* ENGINE_2DC_H_ */

48
src/engine/engine_debug.h Normal file
View File

@@ -0,0 +1,48 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_DEBUG_H_
#define ENGINE_DEBUG_H_
#ifndef OCF_ENGINE_DEBUG
#define OCF_ENGINE_DEBUG 0
#endif
#if 1 == OCF_ENGINE_DEBUG
#ifndef OCF_ENGINE_DEBUG_IO_NAME
#define OCF_ENGINE_DEBUG_IO_NAME "null"
#endif
#define OCF_DEBUG_PREFIX "[Engine][%s] %s "
#define OCF_DEBUG_LOG(cache, format, ...) \
ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
format"\n", OCF_ENGINE_DEBUG_IO_NAME, __func__, \
##__VA_ARGS__)
#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
##__VA_ARGS__)
#define OCF_DEBUG_RQ(rq, format, ...) \
ocf_cache_log(rq->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
OCF_READ == (rq)->rw ? "RD" : "WR", rq->byte_position, \
rq->byte_length, __func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define OCF_DEBUG_RQ(rq, format, ...)
#endif
#endif /* ENGINE_DEBUG_H_ */

248
src/engine/engine_discard.c Normal file
View File

@@ -0,0 +1,248 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_discard.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG 0
#define OCF_ENGINE_DEBUG_IO_NAME "discard"
#include "engine_debug.h"
static int _ocf_discard_step_do(struct ocf_request *rq);
static int _ocf_discard_step(struct ocf_request *rq);
static int _ocf_discard_flush_cache(struct ocf_request *rq);
static int _ocf_discard_core(struct ocf_request *rq);
static const struct ocf_io_if _io_if_discard_step = {
.read = _ocf_discard_step,
.write = _ocf_discard_step
};
static const struct ocf_io_if _io_if_discard_step_resume = {
.read = _ocf_discard_step_do,
.write = _ocf_discard_step_do
};
static const struct ocf_io_if _io_if_discard_flush_cache = {
.read = _ocf_discard_flush_cache,
.write = _ocf_discard_flush_cache,
};
static const struct ocf_io_if _io_if_discard_core = {
.read = _ocf_discard_core,
.write = _ocf_discard_core
};
/* Complete the discard towards the caller and drop the engine's ref. */
static void _ocf_discard_complete_rq(struct ocf_request *rq, int error)
{
	rq->complete(rq, error);

	ocf_rq_put(rq);
}
/* Completion of the discard issued to the core device - final stage. */
static void _ocf_discard_core_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	OCF_DEBUG_RQ(rq, "Core DISCARD Completion");

	_ocf_discard_complete_rq(rq, error);
}
/* Forward the discard to the core (backend) device. */
static int _ocf_discard_core(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	ocf_submit_obj_discard(&cache->core_obj[rq->core_id].obj, rq,
			_ocf_discard_core_io, rq);

	return 0;
}
/*
 * Cache device flush completion: on success continue with the core
 * discard, on failure record a metadata error and finish the request.
 */
static void _ocf_discard_cache_flush_io_cmpl(void *priv, int error)
{
	struct ocf_request *rq = priv;

	if (error) {
		ocf_metadata_error(rq->cache);
		_ocf_discard_complete_rq(rq, error);
		return;
	}

	rq->io_if = &_io_if_discard_core;
	ocf_engine_push_rq_front(rq, true);
}
/* Flush the cache device to persist metadata changes made by the
 * discard before the discard is forwarded to the core device.
 */
static int _ocf_discard_flush_cache(struct ocf_request *rq)
{
	ocf_submit_obj_flush(&rq->cache->device->obj,
			_ocf_discard_cache_flush_io_cmpl, rq);

	return 0;
}
/*
 * Choose the next stage after a discard step: another step while
 * sectors remain; otherwise a cache flush (skipped for volatile
 * metadata) followed by the core discard.
 */
static void _ocf_discard_finish_step(struct ocf_request *rq)
{
	rq->discard.handled += BYTES_TO_SECTORS(rq->byte_length);

	if (rq->discard.handled < rq->discard.nr_sects)
		rq->io_if = &_io_if_discard_step;
	else if (rq->cache->device->init_mode != ocf_init_mode_metadata_volatile)
		rq->io_if = &_io_if_discard_flush_cache;
	else
		rq->io_if = &_io_if_discard_core;

	ocf_engine_push_rq_front(rq, true);
}
/*
 * Completion of one discard step's metadata IO. Only the final
 * completion (req_remaining reaches 0) continues: it releases the write
 * lock and either fails the request or schedules the next stage.
 */
static void _ocf_discard_step_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	/* Release WRITE lock of request */
	ocf_rq_unlock_wr(rq);

	if (rq->error) {
		ocf_metadata_error(rq->cache);
		_ocf_discard_complete_rq(rq, rq->error);
		return;
	}

	_ocf_discard_finish_step(rq);
}
/*
 * Execute one discard step under the metadata write lock: purge the
 * mapped cache lines and, if any were dirty, flush the metadata
 * asynchronously (each flush adds to req_remaining).
 *
 * Fix: defined with `static` to match the forward declaration at the
 * top of this file - the definition previously omitted the storage
 * class, relying on linkage inheritance from the prior declaration.
 */
static int _ocf_discard_step_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	if (ocf_engine_mapped_count(rq)) {
		/* There are mapped cache line, need to remove them */

		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(rq);

		if (rq->info.flush_metadata) {
			/* Request was dirty and need to flush metadata */
			ocf_metadata_flush_do_asynch(cache, rq,
					_ocf_discard_step_io);
		}

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	OCF_DEBUG_RQ(rq, "Discard");
	_ocf_discard_step_io(rq, 0);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
/* Resume handler - re-queue the request at the front of its queue. */
static void _ocf_discard_on_resume(struct ocf_request *rq)
{
	OCF_DEBUG_RQ(rq, "On resume");
	ocf_engine_push_rq_front(rq, true);
}
/*
 * Process one discard chunk: compute the byte window for this step
 * (bounded by MAX_TRIM_RQ_SIZE), traverse the metadata and - once the
 * request's write lock is held - purge the mapped cache lines.
 *
 * Fix: the OCF_DEBUG_RQ(rq, "NO LOCK") statement was missing its
 * terminating semicolon, which broke the build whenever
 * OCF_ENGINE_DEBUG was enabled for this file.
 */
static int _ocf_discard_step(struct ocf_request *rq)
{
	int lock;
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	/* Advance the window past the sectors already handled */
	rq->byte_position = SECTORS_TO_BYTES(rq->discard.sector +
			rq->discard.handled);
	rq->byte_length = MIN(SECTORS_TO_BYTES(rq->discard.nr_sects -
			rq->discard.handled), MAX_TRIM_RQ_SIZE);
	rq->core_line_first = ocf_bytes_2_lines(cache, rq->byte_position);
	rq->core_line_last =
		ocf_bytes_2_lines(cache, rq->byte_position + rq->byte_length - 1);
	rq->core_line_count = rq->core_line_last - rq->core_line_first + 1;
	rq->io_if = &_io_if_discard_step_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	ENV_BUG_ON(env_memset(rq->map, sizeof(*rq->map) * rq->core_line_count,
			0));

	/* Travers to check if request is mapped fully */
	ocf_engine_traverse(rq);

	if (ocf_engine_mapped_count(rq)) {
		/* Some cache line are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
	} else {
		lock = OCF_LOCK_ACQUIRED;
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

	if (lock >= 0) {
		if (OCF_LOCK_ACQUIRED == lock) {
			_ocf_discard_step_do(rq);
		} else {
			/* WR lock was not acquired, need to wait for resume */
			OCF_DEBUG_RQ(rq, "NO LOCK");
		}
	} else {
		OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
		rq->error |= lock;
		_ocf_discard_finish_step(rq);
	}

	env_cond_resched();

	return 0;
}
/* Entry point of the discard (trim) engine; rejects OCF_READ requests. */
int ocf_discard(struct ocf_request *rq)
{
	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	if (rq->rw != OCF_READ) {
		/* Hold a reference for the duration of the first step */
		ocf_rq_get(rq);

		/* Set resume callback for lock-wait continuation */
		rq->resume = _ocf_discard_on_resume;

		_ocf_discard_step(rq);

		/* Drop the reference taken above */
		ocf_rq_put(rq);
	} else {
		/* Discard must be a write-class request */
		rq->complete(rq, -EINVAL);
	}

	return 0;
}

View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __ENGINE_DISCARD_H__
#define __ENGINE_DISCARD_H__

/* Entry point of the discard (trim) engine; rejects OCF_READ requests. */
int ocf_discard(struct ocf_request *rq);

#endif

235
src/engine/engine_fast.c Normal file
View File

@@ -0,0 +1,235 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_fast.h"
#include "engine_common.h"
#include "engine_pt.h"
#include "engine_wb.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_part.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG 0
#define OCF_ENGINE_DEBUG_IO_NAME "fast"
#include "engine_debug.h"
/* _____ _ ______ _ _____ _ _
* | __ \ | | | ____| | | | __ \ | | | |
* | |__) |___ __ _ __| | | |__ __ _ ___| |_ | |__) |_ _| |_| |__
* | _ // _ \/ _` |/ _` | | __/ _` / __| __| | ___/ _` | __| '_ \
* | | \ \ __/ (_| | (_| | | | | (_| \__ \ |_ | | | (_| | |_| | | |
* |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
*/
static void _ocf_read_fast_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining)) {
/* Not all requests finished */
return;
}
OCF_DEBUG_RQ(rq, "HIT completion");
if (rq->error) {
OCF_DEBUG_RQ(rq, "ERROR");
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.read);
ocf_engine_push_rq_front_pt(rq);
} else {
ocf_rq_unlock(rq);
/* Complete request */
rq->complete(rq, rq->error);
/* Free the request at the last point of the completion path */
ocf_rq_put(rq);
}
}
/*
 * Perform the actual fast-path read once the lock is held; falls back to
 * pass-through if the request turned into a miss while waiting.
 */
static int _ocf_read_fast_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq)) {
		/* It seems that after resume the request is now a MISS, do PT */
		OCF_DEBUG_RQ(rq, "Switching to read PT");
		ocf_read_pt_do(rq);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	/* Submit IO */
	OCF_DEBUG_RQ(rq, "Submit");
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
			ocf_engine_io_count(rq), _ocf_read_fast_io, rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
/* IO interface used when a fast-path read resumes after a lock wait */
static const struct ocf_io_if _io_if_read_fast_resume = {
	.read = _ocf_read_fast_do,
	.write = _ocf_read_fast_do,
};
/*
 * Fast-path read: succeeds only when the request is a full hit under the
 * metadata read lock. Returns OCF_FAST_PATH_YES when the hit was handled
 * (or queued for resume), OCF_FAST_PATH_NO when the caller must fall back
 * to the regular read path.
 */
int ocf_read_fast(struct ocf_request *rq)
{
	bool hit;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_read_fast_resume;

	/*- Metadata RD access -----------------------------------------------*/
	OCF_METADATA_LOCK_RD();

	/* Traverse request to cache if there is hit */
	ocf_engine_traverse(rq);
	hit = ocf_engine_is_hit(rq);
	if (hit) {
		ocf_io_start(rq->io);
		lock = ocf_rq_trylock_rd(rq);
	}

	OCF_METADATA_UNLOCK_RD();

	if (hit) {
		OCF_DEBUG_RQ(rq, "Fast path success");

		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				/* Lock was acquired can perform IO */
				_ocf_read_fast_do(rq);
			}
		} else {
			/* Locking returned an error - complete with it */
			OCF_DEBUG_RQ(rq, "LOCK ERROR");
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		OCF_DEBUG_RQ(rq, "Fast path failure");
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	if (hit)
		return OCF_FAST_PATH_YES;
	else
		return OCF_FAST_PATH_NO;
}
/* __ __ _ _ ______ _ _____ _ _
* \ \ / / (_) | | ____| | | | __ \ | | | |
* \ \ /\ / / __ _| |_ ___ | |__ __ _ ___| |_ | |__) |_ _| |_| |__
* \ \/ \/ / '__| | __/ _ \ | __/ _` / __| __| | ___/ _` | __| '_ \
* \ /\ /| | | | || __/ | | | (_| \__ \ |_ | | | (_| | |_| | | |
* \/ \/ |_| |_|\__\___| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
*/
/* Resume interface for fast-path writes: continue in the WB engine */
static const struct ocf_io_if _io_if_write_fast_resume = {
	.read = ocf_write_wb_do,
	.write = ocf_write_wb_do,
};

/*
 * Fast-path write: succeeds only when every core line of the request is
 * already mapped in cache; the actual write is done by ocf_write_wb_do().
 * Returns OCF_FAST_PATH_YES / OCF_FAST_PATH_NO.
 */
int ocf_write_fast(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_write_fast_resume;

	/*- Metadata RD access -----------------------------------------------*/
	OCF_METADATA_LOCK_RD();

	/* Traverse request to cache if there is hit */
	ocf_engine_traverse(rq);
	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		ocf_io_start(rq->io);
		lock = ocf_rq_trylock_wr(rq);
	}

	OCF_METADATA_UNLOCK_RD();

	if (mapped) {
		if (lock >= 0) {
			OCF_DEBUG_RQ(rq, "Fast path success");

			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				/* Lock was acquired can perform IO */
				ocf_write_wb_do(rq);
			}
		} else {
			/* Locking returned an error - complete with it */
			OCF_DEBUG_RQ(rq, "Fast path lock failure");
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		OCF_DEBUG_RQ(rq, "Fast path failure");
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}

12
src/engine/engine_fast.h Normal file
View File

@@ -0,0 +1,12 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_FAST_H_
#define ENGINE_FAST_H_

/* Fast-path engine entry points; both return OCF_FAST_PATH_YES/NO */
int ocf_read_fast(struct ocf_request *rq);
int ocf_write_fast(struct ocf_request *rq);

#endif /* ENGINE_FAST_H_ */

74
src/engine/engine_inv.c Normal file
View File

@@ -0,0 +1,74 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "inv"
#include "engine_debug.h"
/*
 * Completion of the metadata flush issued during invalidation; releases the
 * request lock and reference once the last outstanding IO finishes.
 */
static void _ocf_invalidate_rq(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error) {
		rq->error = error;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.write);
	}

	/* Only the last completion proceeds past this point */
	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error)
		ocf_engine_error(rq, true, "Failed to flush metadata to cache");

	ocf_rq_unlock(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
}
/*
 * Purge the request's mapped cache lines from metadata and, on atomic cache
 * devices with dirty lines, flush the updated metadata before releasing.
 */
static int _ocf_invalidate_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	ENV_BUG_ON(env_atomic_read(&rq->req_remaining));

	OCF_METADATA_LOCK_WR();
	ocf_purge_map_info(rq);
	OCF_METADATA_UNLOCK_WR();

	env_atomic_inc(&rq->req_remaining);

	if (ocf_data_obj_is_atomic(&cache->device->obj) &&
			rq->info.flush_metadata) {
		/* Metadata flush IO */
		/* NOTE(review): presumably bumps req_remaining for its async
		 * completion - confirm in ocf_metadata_flush_do_asynch() */
		ocf_metadata_flush_do_asynch(cache, rq, _ocf_invalidate_rq);
	}

	/* Drop the reference taken above */
	_ocf_invalidate_rq(rq, 0);

	return 0;
}

/* Invalidation runs identically for read and write requests */
static const struct ocf_io_if _io_if_invalidate = {
	.read = _ocf_invalidate_do,
	.write = _ocf_invalidate_do,
};

/* Queue asynchronous metadata invalidation of the request's cache lines. */
void ocf_engine_invalidate(struct ocf_request *rq)
{
	ocf_engine_push_rq_front_if(rq, &_io_if_invalidate, true);
}

11
src/engine/engine_inv.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_INV_H_
#define ENGINE_INV_H_

/* Queue asynchronous metadata invalidation of the request's cache lines */
void ocf_engine_invalidate(struct ocf_request *rq);

#endif /* ENGINE_INV_H_ */

65
src/engine/engine_ops.c Normal file
View File

@@ -0,0 +1,65 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "engine_ops.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#define OCF_ENGINE_DEBUG_IO_NAME "ops"
#include "engine_debug.h"
static void _ocf_engine_ops_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
/* An error occured */
ocf_engine_error(rq, false, "Core operation failure");
}
/* Complete requests - both to cache and to core*/
rq->complete(rq, rq->error);
/* Release OCF request */
ocf_rq_put(rq);
}
/*
 * Submit a request to both the core and the cache device; the shared
 * completion callback finishes the request once both IOs are done.
 */
int ocf_engine_ops(struct ocf_request *rq)
{
	OCF_DEBUG_TRACE(rq->cache);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* IO to the core device and to the cache device */
	env_atomic_set(&rq->req_remaining, 2);

	/* Submit operation into core device */
	ocf_submit_obj_req(&rq->cache->core_obj[rq->core_id].obj, rq, rq->rw,
			_ocf_engine_ops_io, rq);

	ocf_submit_cache_reqs(rq->cache, rq->map, rq, rq->rw,
			1, _ocf_engine_ops_io, rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

11
src/engine/engine_ops.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CACHE_ENGINE_OPS_H_
#define __CACHE_ENGINE_OPS_H_

/* Submit a request to both core and cache devices (dual-IO operation) */
int ocf_engine_ops(struct ocf_request *rq);

#endif /* __CACHE_ENGINE_OPS_H_ */

181
src/engine/engine_pt.c Normal file
View File

@@ -0,0 +1,181 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_pt.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "pt"
#include "engine_debug.h"
static void _ocf_read_pt_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.read);
}
/* Complete request */
rq->complete(rq, rq->error);
ocf_rq_unlock_rd(rq);
/* Release OCF request */
ocf_rq_put(rq);
}
/* Issue the single core-device read that services a pass-through request. */
static inline void _ocf_read_pt_submit(struct ocf_request *rq)
{
	/* Exactly one core-device IO services the whole request */
	env_atomic_set(&rq->req_remaining, 1);

	OCF_DEBUG_RQ(rq, "Submit");

	/* Core read */
	ocf_submit_obj_req(&rq->cache->core_obj[rq->core_id].obj, rq, OCF_READ,
			_ocf_read_pt_io, rq);
}
/*
 * Execute a pass-through read: if any mapped lines are dirty, kick off
 * cleaning first (the request is re-run afterwards); otherwise submit the
 * read straight to the core device.
 */
int ocf_read_pt_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	if (rq->info.dirty_any) {
		OCF_METADATA_LOCK_RD();

		/* Need to clean, start it */
		ocf_engine_clean(rq);

		OCF_METADATA_UNLOCK_RD();

		/* Do not process now - the request must be cleaned first */
		ocf_rq_put(rq);
		return 0;
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	/* Submit read IO to the core */
	_ocf_read_pt_submit(rq);

	/* Update statistics */
	ocf_engine_update_block_stats(rq);
	env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
			part_counters[rq->part_id].read_reqs.pass_through);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
/* Resume interface: re-run the pass-through read once the lock is granted */
static const struct ocf_io_if _io_if_pt_resume = {
	.read = ocf_read_pt_do,
	.write = ocf_read_pt_do,
};

/*
 * Pass-through read entry point: takes a read lock on any mapped lines, or -
 * for dirty full hits under sequential cut-off - redirects to the WT engine
 * so the data is read from cache.
 */
int ocf_read_pt(struct ocf_request *rq)
{
	bool use_cache = false;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_pt_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/

	/* Traverse request to check if there are mapped cache lines */
	ocf_engine_traverse(rq);

	if (rq->info.seq_cutoff && ocf_engine_is_dirty_all(rq)) {
		use_cache = true;
	} else {
		if (ocf_engine_mapped_count(rq)) {
			/* There are mapped cache line,
			 * lock request for READ access
			 */
			lock = ocf_rq_trylock_rd(rq);
		} else {
			/* No mapped cache lines, no need to get lock */
			lock = OCF_LOCK_ACQUIRED;
		}
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/

	if (use_cache) {
		/*
		 * There is a dirty HIT and sequential cut off is active -
		 * because of this, force reading the data from cache
		 */
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_wt)->read(rq);
	} else {
		if (lock >= 0) {
			if (lock == OCF_LOCK_ACQUIRED) {
				/* Lock acquired perform read off operations */
				ocf_read_pt_do(rq);
			} else {
				/* WR lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			}
		} else {
			/* Locking returned an error - complete with it */
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

/* Re-queue a request at queue front with the pass-through resume interface. */
void ocf_engine_push_rq_front_pt(struct ocf_request *rq)
{
	ocf_engine_push_rq_front_if(rq, &_io_if_pt_resume, true);
}

15
src/engine/engine_pt.h Normal file
View File

@@ -0,0 +1,15 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/* NOTE(review): guard name ENGINE_OFF_H_ does not match the file name
 * (engine_pt.h) - looks like a copy-paste leftover; renaming the macro
 * would be a code change, so only flagging it here.
 */
#ifndef ENGINE_OFF_H_
#define ENGINE_OFF_H_

/* Pass-through read engine entry points */
int ocf_read_pt(struct ocf_request *rq);

int ocf_read_pt_do(struct ocf_request *rq);

void ocf_engine_push_rq_front_pt(struct ocf_request *rq);

#endif /* ENGINE_OFF_H_ */

319
src/engine/engine_rd.c Normal file
View File

@@ -0,0 +1,319 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_rd.h"
#include "engine_pt.h"
#include "engine_inv.h"
#include "engine_bf.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../ocf_def_priv.h"
#define OCF_ENGINE_DEBUG_IO_NAME "rd"
#include "engine_debug.h"
static void _ocf_read_generic_hit_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
OCF_DEBUG_RQ(rq, "HIT completion");
if (rq->error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].
counters->cache_errors.read);
ocf_engine_push_rq_front_pt(rq);
} else {
ocf_rq_unlock(rq);
/* Complete request */
rq->complete(rq, rq->error);
/* Free the request at the last point
* of the completion path
*/
ocf_rq_put(rq);
}
}
}
static void _ocf_read_generic_miss_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
struct ocf_cache *cache = rq->cache;
if (error)
rq->error = error;
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
OCF_DEBUG_RQ(rq, "MISS completion");
if (rq->error) {
/*
* --- Do not submit this request to write-back-thread.
* Stop it here ---
*/
rq->complete(rq, rq->error);
rq->info.core_error = 1;
env_atomic_inc(&cache->core_obj[rq->core_id].
counters->core_errors.read);
ctx_data_free(cache->owner, rq->cp_data);
rq->cp_data = NULL;
/* Invalidate metadata */
ocf_engine_invalidate(rq);
return;
}
/* Copy pages to copy vec, since this is the one needed
* by the above layer
*/
ctx_data_cpy(cache->owner, rq->cp_data, rq->data, 0, 0,
rq->byte_length);
/* Complete request */
rq->complete(rq, rq->error);
ocf_engine_backfill(rq);
}
}
/* Issue cache-device reads for every IO span of a full hit. */
static inline void _ocf_read_generic_submit_hit(struct ocf_request *rq)
{
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));

	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
			ocf_engine_io_count(rq), _ocf_read_generic_hit_io, rq);
}
/*
 * Submit a miss read to the core device. A bounce buffer (cp_data) is
 * allocated for the later cache backfill; on allocation/mlock failure the
 * miss completion is invoked directly with -ENOMEM.
 */
static inline void _ocf_read_generic_submit_miss(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	int ret;

	env_atomic_set(&rq->req_remaining, 1);

	rq->cp_data = ctx_data_alloc(cache->owner,
			BYTES_TO_PAGES(rq->byte_length));
	if (!rq->cp_data)
		goto err_alloc;

	ret = ctx_data_mlock(cache->owner, rq->cp_data);
	if (ret)
		goto err_alloc;

	/* Submit read request to core device. */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ,
			_ocf_read_generic_miss_io, rq);

	return;

err_alloc:
	_ocf_read_generic_miss_io(rq, -ENOMEM);
}
/*
 * Perform the generic read once the lock is held: dirty misses go to
 * cleaning first; otherwise valid bits are set for miss lines and the hit
 * or miss submission path is chosen.
 */
static int _ocf_read_generic_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq) && rq->map->rd_locked) {
		/* Miss can be handled only on write locks.
		 * Need to switch to PT
		 */
		OCF_DEBUG_RQ(rq, "Switching to PT");
		ocf_read_pt_do(rq);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	if (ocf_engine_is_miss(rq)) {
		if (rq->info.dirty_any) {
			OCF_METADATA_LOCK_RD();

			/* Request is dirty need to clean request */
			ocf_engine_clean(rq);

			OCF_METADATA_UNLOCK_RD();

			/* We need to clean request before processing, return */
			ocf_rq_put(rq);
			return 0;
		}

		OCF_METADATA_LOCK_RD();

		/* Set valid status bits map */
		ocf_set_valid_map_info(rq);

		OCF_METADATA_UNLOCK_RD();
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	OCF_DEBUG_RQ(rq, "Submit");

	/* Submit IO */
	if (ocf_engine_is_hit(rq))
		_ocf_read_generic_submit_hit(rq);
	else
		_ocf_read_generic_submit_miss(rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
/* Resume interface for generic reads after a lock wait */
static const struct ocf_io_if _io_if_read_generic_resume = {
	.read = _ocf_read_generic_do,
	.write = _ocf_read_generic_do,
};

/*
 * Generic read entry point: traverses the request, maps missing lines
 * (with eviction) when needed, takes the proper request lock and runs
 * _ocf_read_generic_do(), falling back to pass-through on eviction error.
 */
int ocf_read_generic(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
		/* There are conditions to bypass IO */
		ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_read_generic_resume;

	/*- Metadata RD access -----------------------------------------------*/
	OCF_METADATA_LOCK_RD();

	/* Traverse request to cache if there is hit */
	ocf_engine_traverse(rq);
	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		/* Request is fully mapped, no need to call eviction */
		if (ocf_engine_is_hit(rq)) {
			/* There is a hit, lock request for READ access */
			lock = ocf_rq_trylock_rd(rq);
		} else {
			/* All cache line mapped, but some sectors are not valid
			 * and cache insert will be performed - lock for
			 * WRITE is required
			 */
			lock = ocf_rq_trylock_wr(rq);
		}
	}

	OCF_METADATA_UNLOCK_RD();
	/*- END Metadata RD access -------------------------------------------*/

	if (!mapped) {
		/*- Metadata WR access ---------------------------------------*/
		OCF_METADATA_LOCK_WR();

		/* Now there is exclusive access for metadata. May traverse once
		 * again. If there are misses need to call eviction. This
		 * process is called 'mapping'.
		 */
		ocf_engine_map(rq);

		if (!rq->info.eviction_error) {
			if (ocf_engine_is_hit(rq)) {
				/* After mapping turns out there is hit,
				 * so lock OCF request for read access
				 */
				lock = ocf_rq_trylock_rd(rq);
			} else {
				/* Miss, new cache lines were mapped,
				 * need to lock OCF request for write access
				 */
				lock = ocf_rq_trylock_wr(rq);
			}
		}
		OCF_METADATA_UNLOCK_WR();
		/*- END Metadata WR access -----------------------------------*/
	}

	if (!rq->info.eviction_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				/* Lock was acquired can perform IO */
				_ocf_read_generic_do(rq);
			}
		} else {
			/* Locking returned an error - complete with it */
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		/* Eviction failed - fall back to pass-through */
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

11
src/engine/engine_rd.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_RD_H_
#define ENGINE_RD_H_

/* Generic read engine entry point */
int ocf_read_generic(struct ocf_request *rq);

#endif /* ENGINE_RD_H_ */

92
src/engine/engine_wa.c Normal file
View File

@@ -0,0 +1,92 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wa.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wa"
#include "engine_debug.h"
static void _ocf_read_wa_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
/* Complete request */
rq->complete(rq, rq->error);
OCF_DEBUG_RQ(rq, "Completion");
/* Release OCF request */
ocf_rq_put(rq);
}
/*
 * Write-around: full hits are redirected to write-through, partial hits to
 * write-invalidate, and full misses go straight to the core device.
 */
int ocf_write_wa(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/

	/* Traverse request to check if there are mapped cache lines */
	ocf_engine_traverse(rq);

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/

	if (ocf_engine_is_hit(rq)) {
		ocf_rq_clear(rq);

		/* There is HIT, do WT */
		ocf_get_io_if(ocf_cache_mode_wt)->write(rq);
	} else if (ocf_engine_mapped_count(rq)) {
		ocf_rq_clear(rq);

		/* Partial MISS, do WI */
		ocf_get_io_if(ocf_cache_mode_wi)->write(rq);
	} else {
		/* There is no mapped cache line, write directly into core */
		OCF_DEBUG_RQ(rq, "Submit");

		/* Submit write IO to the core */
		env_atomic_set(&rq->req_remaining, 1);
		/* NOTE(review): completion is named _ocf_read_wa_io but it
		 * handles this write - name looks like a copy-paste leftover */
		ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
				OCF_WRITE, _ocf_read_wa_io, rq);

		/* Update statistics */
		ocf_engine_update_block_stats(rq);
		env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].write_reqs.pass_through);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

11
src/engine/engine_wa.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WA_H_
#define ENGINE_WA_H_

/* Write-around engine entry point */
int ocf_write_wa(struct ocf_request *rq);

#endif /* ENGINE_WA_H_ */

242
src/engine/engine_wb.c Normal file
View File

@@ -0,0 +1,242 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_wb.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wb"
#include "engine_debug.h"
/* IO interface used when a WB write resumes after a lock wait */
static const struct ocf_io_if _io_if_wb_resume = {
	.read = ocf_write_wb_do,
	.write = ocf_write_wb_do,
};

/*
 * Update metadata status bits for a WB write: mark miss lines valid and
 * mark the written lines dirty (recording whether a metadata flush is
 * needed).
 */
static void _ocf_write_wb_update_bits(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq)) {
		OCF_METADATA_LOCK_RD();

		/* Update valid status bits */
		ocf_set_valid_map_info(rq);

		OCF_METADATA_UNLOCK_RD();
	}

	if (!ocf_engine_is_dirty_all(rq)) {
		OCF_METADATA_LOCK_WR();

		/* Set dirty bits, and mark if metadata flushing is required */
		ocf_set_dirty_map_info(rq);

		OCF_METADATA_UNLOCK_WR();
	}
}
static void _ocf_write_wb_io_flush_metadata(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error)
rq->error = error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
}
/*
 * Flush cache metadata if the data write left dirty-bit changes to persist,
 * then finish via _ocf_write_wb_io_flush_metadata().
 */
static int ocf_write_wb_do_flush_metadata(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	if (rq->info.flush_metadata) {
		OCF_DEBUG_RQ(rq, "Flush metadata");
		/* NOTE(review): presumably bumps req_remaining for its async
		 * completion - confirm in ocf_metadata_flush_do_asynch() */
		ocf_metadata_flush_do_asynch(cache, rq,
				_ocf_write_wb_io_flush_metadata);
	}

	/* Drop the reference taken above */
	_ocf_write_wb_io_flush_metadata(rq, 0);

	return 0;
}

/* Metadata flush stage is identical for read and write entry points */
static const struct ocf_io_if _io_if_wb_flush_metadata = {
	.read = ocf_write_wb_do_flush_metadata,
	.write = ocf_write_wb_do_flush_metadata,
};
static void _ocf_write_wb_io(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
rq->error |= error;
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
ocf_engine_error(rq, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wb_flush_metadata,
true);
}
}
/*
 * Submit the WB data writes to the cache device, moving cache lines to the
 * right partition first if re-partitioning was flagged.
 */
static inline void _ocf_write_wb_submit(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));

	/*
	 * 1. Submit data
	 * 2. Wait for completion of data
	 * 3. Then continue processing request (flush metadata)
	 */

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	OCF_DEBUG_RQ(rq, "Submit Data");

	/* Data IO */
	ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
			ocf_engine_io_count(rq), _ocf_write_wb_io, rq);
}
/*
 * Perform the WB write once the request lock is held: update valid/dirty
 * bits, then submit the data to the cache device.
 */
int ocf_write_wb_do(struct ocf_request *rq)
{
	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Update status bits */
	_ocf_write_wb_update_bits(rq);

	/* Submit IO */
	_ocf_write_wb_submit(rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
/*
 * Write-back entry point: map the request (evicting if needed), take the
 * write lock and run ocf_write_wb_do(); on eviction error fall back to
 * pass-through.
 */
int ocf_write_wb(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	/* Not sure if we need this. */
	ocf_rq_get(rq);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_wb_resume;

	/* TODO: Handle fits into dirty */

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	/* Traverse to check if request is mapped fully */
	ocf_engine_traverse(rq);

	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		/* All cache line are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

	if (!mapped) {
		OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/

		/* Now there is exclusive access for metadata. May traverse once
		 * again. If there are misses need to call eviction. This
		 * process is called 'mapping'.
		 */
		ocf_engine_map(rq);

		if (!rq->info.eviction_error) {
			/* Lock request for WRITE access */
			lock = ocf_rq_trylock_wr(rq);
		}

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	if (!rq->info.eviction_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* WR lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				ocf_write_wb_do(rq);
			}
		} else {
			/* Locking returned an error - complete with it */
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		/* Eviction failed - fall back to pass-through */
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

12
src/engine/engine_wb.h Normal file
View File

@@ -0,0 +1,12 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WB_H_
#define ENGINE_WB_H_

/* Write-back engine entry points */
int ocf_write_wb(struct ocf_request *rq);
int ocf_write_wb_do(struct ocf_request *rq);

#endif /* ENGINE_WB_H_ */

190
src/engine/engine_wi.c Normal file
View File

@@ -0,0 +1,190 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wi.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wi"
#include "engine_debug.h"
/* Forward declaration - the metadata stage runs after the core write */
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq);

static const struct ocf_io_if _io_if_wi_flush_metadata = {
	.read = ocf_write_wi_update_and_flush_metadata,
	.write = ocf_write_wi_update_and_flush_metadata,
};
static void _ocf_write_wi_io_flush_metadata(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
rq->error |= error;
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
}
/*
 * After the core write completed: purge the request's mapped lines from
 * metadata and flush metadata asynchronously if any of them were dirty.
 */
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	if (ocf_engine_mapped_count(rq)) {
		/* There are mapped cache line, need to remove them */

		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(rq);

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/

		if (rq->info.flush_metadata) {
			/* Request was dirty and need to flush metadata */
			/* NOTE(review): presumably bumps req_remaining for its
			 * async completion - confirm in the flush helper */
			ocf_metadata_flush_do_asynch(cache, rq,
					_ocf_write_wi_io_flush_metadata);
		}
	}

	/* Drop the reference taken above */
	_ocf_write_wi_io_flush_metadata(rq, 0);

	return 0;
}
static void _ocf_write_wi_core_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wi_flush_metadata,
true);
}
}
/* Issue the pass-through write to the core device and account it. */
static int _ocf_write_wi_do(struct ocf_request *rq)
{
    /* Hold a reference for the duration of submission */
    ocf_rq_get(rq);

    /* Exactly one core IO is outstanding */
    env_atomic_set(&rq->req_remaining, 1);

    OCF_DEBUG_RQ(rq, "Submit");

    /* Submit write IO to the core */
    ocf_submit_obj_req(&rq->cache->core_obj[rq->core_id].obj, rq,
            OCF_WRITE, _ocf_write_wi_core_io, rq);

    /* Statistics: block counters and pass-through write requests */
    ocf_engine_update_block_stats(rq);
    env_atomic64_inc(&rq->cache->core_obj[rq->core_id].counters->
            part_counters[rq->part_id].write_reqs.pass_through);

    ocf_rq_put(rq);

    return 0;
}
/* Resume handler: once the write lock becomes available, re-queue the
 * request at the front of its IO queue so _ocf_write_wi_do() runs. */
static void _ocf_write_wi_on_resume(struct ocf_request *rq)
{
    OCF_DEBUG_RQ(rq, "On resume");
    ocf_engine_push_rq_front(rq, true);
}
/* IO interface used when the request resumes after waiting for the
 * cache line write lock. */
static const struct ocf_io_if _io_if_wi_resume = {
    .read = _ocf_write_wi_do,
    .write = _ocf_write_wi_do,
};
/* Write-invalidate engine entry point: the write goes straight to the
 * core device while cache lines overlapping the request are purged.
 * Returns 0; completion is reported asynchronously via rq->complete. */
int ocf_write_wi(struct ocf_request *rq)
{
    int lock = OCF_LOCK_NOT_ACQUIRED;
    struct ocf_cache *cache = rq->cache;

    OCF_DEBUG_TRACE(rq->cache);

    ocf_io_start(rq->io);

    /* Get OCF request - increase reference counter */
    ocf_rq_get(rq);

    /* Set resume call backs */
    rq->resume = _ocf_write_wi_on_resume;
    rq->io_if = &_io_if_wi_resume;

    OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

    /* Travers to check if request is mapped fully */
    ocf_engine_traverse(rq);

    if (ocf_engine_mapped_count(rq)) {
        /* Some cache line are mapped, lock request for WRITE access */
        lock = ocf_rq_trylock_wr(rq);
    } else {
        /* Nothing mapped - no lines to invalidate, no lock needed */
        lock = OCF_LOCK_ACQUIRED;
    }

    OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

    if (lock >= 0) {
        if (lock == OCF_LOCK_ACQUIRED) {
            _ocf_write_wi_do(rq);
        } else {
            /* WR lock was not acquired, need to wait for resume */
            OCF_DEBUG_RQ(rq, "NO LOCK");
        }
    } else {
        /* Lock error: complete with the error code and drop the
         * reference taken at entry */
        OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
        rq->complete(rq, lock);
        ocf_rq_put(rq);
    }

    /* Put OCF request - decrease reference counter */
    ocf_rq_put(rq);

    return 0;
}

11
src/engine/engine_wi.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WI_H_
#define ENGINE_WI_H_

/* Write-invalidate engine: write to core, purge overlapping cache
 * lines. Returns 0; the request completes asynchronously. */
int ocf_write_wi(struct ocf_request *rq);

#endif /* ENGINE_WI_H_ */

236
src/engine/engine_wt.c Normal file
View File

@@ -0,0 +1,236 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wt.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wt"
#include "engine_debug.h"
/* Common completion for all sub-IOs of a write-through request. The
 * last finisher completes the request. Core errors are propagated to
 * the caller; cache-only errors are masked (the core holds valid data)
 * and the affected lines are invalidated instead. */
static void _ocf_write_wt_io(struct ocf_request *rq)
{
    if (env_atomic_dec_return(&rq->req_remaining))
        return;

    OCF_DEBUG_RQ(rq, "Completion");

    if (rq->error) {
        /* An error occurred - report only core errors to the caller */
        /* Complete request */
        rq->complete(rq, rq->info.core_error ? rq->error : 0);

        /* NOTE(review): the invalidate path presumably drops the write
         * lock and the request reference - confirm in engine_inv.c */
        ocf_engine_invalidate(rq);
    } else {
        /* Unlock request from WRITE access */
        ocf_rq_unlock_wr(rq);

        /* Complete request */
        rq->complete(rq, rq->info.core_error ? rq->error : 0);

        /* Release OCF request */
        ocf_rq_put(rq);
    }
}
static void _ocf_write_wt_cache_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = rq->error ?: error;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
}
_ocf_write_wt_io(rq);
}
static void _ocf_write_wt_core_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
_ocf_write_wt_io(rq);
}
/* Issue all sub-IOs for a write-through request: data to the cache,
 * data to the core, and - for previously dirty lines - a metadata
 * flush. req_remaining is pre-loaded with cache IOs + 1 core IO.
 * NOTE(review): ocf_metadata_flush_do_asynch() presumably adds its own
 * sub-IO count to req_remaining (same pattern as the other engines) -
 * confirm against its implementation. */
static inline void _ocf_write_wt_submit(struct ocf_request *rq)
{
    struct ocf_cache *cache = rq->cache;

    /* Submit IOs */
    OCF_DEBUG_RQ(rq, "Submit");

    /* Calculate how many IOs need to be submited */
    env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); /* Cache IO */
    env_atomic_inc(&rq->req_remaining); /* Core device IO */

    if (rq->info.flush_metadata) {
        /* Metadata flush IO */
        ocf_metadata_flush_do_asynch(cache, rq,
                _ocf_write_wt_cache_io);
    }

    /* To cache */
    ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
            ocf_engine_io_count(rq), _ocf_write_wt_cache_io, rq);

    /* To core */
    ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE,
            _ocf_write_wt_core_io, rq);
}
/* Refresh metadata state for a write-through request before the IOs are
 * issued: mark newly mapped lines valid, mark dirty lines clean (data
 * will hit both cache and core), and migrate lines assigned to a wrong
 * partition. */
static void _ocf_write_wt_update_bits(struct ocf_request *rq)
{
    struct ocf_cache *cache = rq->cache;

    if (ocf_engine_is_miss(rq)) {
        OCF_METADATA_LOCK_RD();

        /* Update valid status bits */
        ocf_set_valid_map_info(rq);

        OCF_METADATA_UNLOCK_RD();
    }

    if (rq->info.dirty_any) {
        OCF_METADATA_LOCK_WR();

        /* Writes goes to SDD and HDD, need to update status bits from
         * dirty to clean
         */
        ocf_set_clean_map_info(rq);

        OCF_METADATA_UNLOCK_WR();
    }

    if (rq->info.re_part) {
        OCF_DEBUG_RQ(rq, "Re-Part");

        OCF_METADATA_LOCK_WR();

        /* Probably some cache lines are assigned into wrong
         * partition. Need to move it to new one
         */
        ocf_part_move(rq);

        OCF_METADATA_UNLOCK_WR();
    }
}
/* Execute a fully mapped and write-locked write-through request:
 * update metadata bits, submit cache/core/metadata IOs, account
 * statistics. */
static int _ocf_write_wt_do(struct ocf_request *rq)
{
    /* Get OCF request - increase reference counter */
    ocf_rq_get(rq);

    /* Update status bits */
    _ocf_write_wt_update_bits(rq);

    /* Submit IO */
    _ocf_write_wt_submit(rq);

    /* Update statistics */
    ocf_engine_update_request_stats(rq);
    ocf_engine_update_block_stats(rq);

    /* Put OCF request - decrease reference counter */
    ocf_rq_put(rq);

    return 0;
}
/* IO interface used when the request resumes after waiting for the
 * cache line write lock. */
static const struct ocf_io_if _io_if_wt_resume = {
    .read = _ocf_write_wt_do,
    .write = _ocf_write_wt_do,
};
/* Write-through engine entry point: map (evicting if necessary) and
 * write-lock all cache lines for the request, then write to cache and
 * core simultaneously. Falls back to pass-through on eviction failure.
 * Returns 0; completion is asynchronous. */
int ocf_write_wt(struct ocf_request *rq)
{
    bool mapped;
    int lock = OCF_LOCK_NOT_ACQUIRED;
    struct ocf_cache *cache = rq->cache;

    ocf_io_start(rq->io);

    /* Get OCF request - increase reference counter */
    ocf_rq_get(rq);

    /* Set resume call backs */
    rq->resume = ocf_engine_on_resume;
    rq->io_if = &_io_if_wt_resume;

    OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

    /* Travers to check if request is mapped fully */
    ocf_engine_traverse(rq);

    mapped = ocf_engine_is_mapped(rq);
    if (mapped) {
        /* All cache line are mapped, lock request for WRITE access */
        lock = ocf_rq_trylock_wr(rq);
    }

    OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

    if (!mapped) {
        OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/

        /* Now there is exclusive access for metadata. May traverse once
         * again. If there are misses need to call eviction. This
         * process is called 'mapping'.
         */
        ocf_engine_map(rq);

        if (!rq->info.eviction_error) {
            /* Lock request for WRITE access */
            lock = ocf_rq_trylock_wr(rq);
        }

        OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
    }

    if (!rq->info.eviction_error) {
        if (lock >= 0) {
            if (lock != OCF_LOCK_ACQUIRED) {
                /* WR lock was not acquired, need to wait for resume */
                OCF_DEBUG_RQ(rq, "NO LOCK");
            } else {
                _ocf_write_wt_do(rq);
            }
        } else {
            /* Lock error: complete with the error code and drop the
             * reference taken at entry */
            OCF_DEBUG_RQ(rq, "LOCK ERROR %d\n", lock);
            rq->complete(rq, lock);
            ocf_rq_put(rq);
        }
    } else {
        /* Eviction failed - retry the request in pass-through mode */
        ocf_rq_clear(rq);
        ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
    }

    /* Put OCF request - decrease reference counter */
    ocf_rq_put(rq);

    return 0;
}

11
src/engine/engine_wt.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WT_H_
#define ENGINE_WT_H_

/* Write-through engine: write simultaneously to cache and core.
 * Returns 0; the request completes asynchronously. */
int ocf_write_wt(struct ocf_request *rq);

#endif /* ENGINE_WT_H_ */

168
src/engine/engine_zero.c Normal file
View File

@@ -0,0 +1,168 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_zero.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "zero"
#include "engine_debug.h"
/* Engine step run after the discard/metadata IOs finished: on success
 * purge the request's mapped cache lines from metadata, then finalize
 * (unlock, complete, release). */
static int ocf_zero_purge(struct ocf_request *rq)
{
    struct ocf_cache *cache = rq->cache;

    if (rq->error) {
        ocf_engine_error(rq, true, "Failed to discard data on cache");
    } else {
        /* There are mapped cache line, need to remove them */

        OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

        /* Remove mapped cache lines from metadata */
        ocf_purge_map_info(rq);

        OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
    }

    ocf_rq_unlock_wr(rq);

    rq->complete(rq, rq->error);
    ocf_rq_put(rq);

    return 0;
}
/* IO interface used to re-queue the request for the purge step. */
static const struct ocf_io_if _io_if_zero_purge = {
    .read = ocf_zero_purge,
    .write = ocf_zero_purge,
};
static void _ocf_zero_io_flush_metadata(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
rq->error = error;
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
ocf_engine_push_rq_front_if(rq, &_io_if_zero_purge, true);
}
/* Mark the sector sub-range of every mapped core line in this request
 * as INVALID in the metadata flush map. Only the first and last lines
 * may be partial; middle lines always span the full cache line. */
static inline void ocf_zero_map_info(struct ocf_request *rq)
{
    uint32_t map_idx = 0;
    uint8_t start_bit;
    uint8_t end_bit;
    struct ocf_map_info *map = rq->map;
    struct ocf_cache *cache = rq->cache;
    uint32_t count = rq->core_line_count;

    /* Purge range on the basis of map info
     *
     * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
     * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
     * |   first  |  Middle  |     |  Middle  |   last   |
     */

    for (map_idx = 0; map_idx < count; map_idx++) {
        if (map[map_idx].status == LOOKUP_MISS)
            continue;

        /* Default: full cache line */
        start_bit = 0;
        end_bit = ocf_line_end_sector(cache);

        if (map_idx == 0) {
            /* First line - may start mid-line */
            start_bit = BYTES_TO_SECTORS(rq->byte_position)
                    % ocf_line_sectors(cache);
        }

        if (map_idx == (count - 1)) {
            /* Last line - may end mid-line */
            end_bit = BYTES_TO_SECTORS(rq->byte_position +
                    rq->byte_length - 1) %
                    ocf_line_sectors(cache);
        }

        ocf_metadata_flush_mark(cache, rq, map_idx, INVALID,
                start_bit, end_bit);
    }
}
/* Perform the zero/discard: mark affected sectors invalid and flush the
 * metadata asynchronously. The purge step follows via the completion
 * callback. */
static int _ocf_zero_do(struct ocf_request *rq)
{
    struct ocf_cache *cache = rq->cache;

    /* Get OCF request - increase reference counter */
    ocf_rq_get(rq);

    /* Mark cache lines for zeroing/discarding */
    ocf_zero_map_info(rq);

    /* Discard marked cache lines */
    /* Base count covers the synthetic completion call below;
     * NOTE(review): ocf_metadata_flush_do_asynch() presumably accounts
     * its own sub-IOs - confirm against its implementation. */
    env_atomic_set(&rq->req_remaining, 1);
    if (rq->info.flush_metadata) {
        /* Request was dirty and need to flush metadata */
        ocf_metadata_flush_do_asynch(cache, rq,
                _ocf_zero_io_flush_metadata);
    }
    /* Drop the base reference */
    _ocf_zero_io_flush_metadata(rq, 0);

    /* Put OCF request - decrease reference counter */
    ocf_rq_put(rq);

    return 0;
}
/* IO interface used to (re-)queue the zeroing work. */
static const struct ocf_io_if _io_if_ocf_zero_do = {
    .read = _ocf_zero_do,
    .write = _ocf_zero_do,
};
/**
 * @brief Zero (discard) a single mapped core line on the cache device
 *
 * @note
 *	- Caller has to have metadata write lock
 *	- Core line has to be mapped
 */
void ocf_engine_zero_line(struct ocf_request *rq)
{
    int lock = OCF_LOCK_NOT_ACQUIRED;

    /* This engine handles exactly one core line per request */
    ENV_BUG_ON(rq->core_line_count != 1);

    /* Traverse to check if request is mapped */
    ocf_engine_traverse(rq);

    ENV_BUG_ON(!ocf_engine_is_mapped(rq));

    rq->resume = ocf_engine_on_resume;
    rq->io_if = &_io_if_ocf_zero_do;

    /* Some cache line are mapped, lock request for WRITE access */
    lock = ocf_rq_trylock_wr(rq);

    if (lock >= 0) {
        /* No contention expected: caller holds the metadata WR lock */
        ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
        ocf_engine_push_rq_front_if(rq, &_io_if_ocf_zero_do, true);
    } else {
        OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
        rq->complete(rq, lock);
        ocf_rq_put(rq);
    }
}

11
src/engine/engine_zero.h Normal file
View File

@@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_ZERO_H_
#define ENGINE_ZERO_H_

/* Zero (discard) a single mapped core line on the cache device.
 * Caller must hold the metadata write lock; the line must be mapped. */
void ocf_engine_zero_line(struct ocf_request *rq);

#endif /* ENGINE_ZERO_H_ */

19
src/eviction/eviction.c Normal file
View File

@@ -0,0 +1,19 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
[ocf_eviction_lru] = {
.init_cline = evp_lru_init_cline,
.rm_cline = evp_lru_rm_cline,
.req_clines = evp_lru_req_clines,
.hot_cline = evp_lru_hot_cline,
.init_evp = evp_lru_init_evp,
.dirty_cline = evp_lru_dirty_cline,
.clean_cline = evp_lru_clean_cline,
.name = "lru",
},
};

56
src/eviction/eviction.h Normal file
View File

@@ -0,0 +1,56 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_EVICTION_POLICY_H__
#define __LAYER_EVICTION_POLICY_H__
#define OCF_PENDING_EVICTION_LIMIT 512UL
#include "ocf/ocf.h"
#include "lru.h"
#include "lru_structs.h"
/* Per-partition eviction policy runtime state; one union member per
 * supported policy. */
struct eviction_policy {
    union {
        struct lru_eviction_policy lru;
    } policy;
};
/* Eviction policy metadata per cache line; packed because it is stored
 * in cache metadata. */
union eviction_policy_meta {
    struct lru_eviction_policy_meta lru;
} __attribute__((packed));
/* the caller must hold the metadata lock for all operations
 *
 * For range operations the caller can:
 * set core_id to -1 to purge the whole cache device
 * set core_id to -2 to purge the whole cache partition
 */
struct eviction_policy_ops {
    /* Prepare a fresh cache line's per-line eviction metadata */
    void (*init_cline)(struct ocf_cache *cache,
            ocf_cache_line_t cline);
    /* Detach a cache line from the policy's structures */
    void (*rm_cline)(struct ocf_cache *cache,
            ocf_cache_line_t cline);
    /* Optional: may further eviction proceed right now?
     * (treated as "always yes" when NULL - see ops.h) */
    bool (*can_evict)(struct ocf_cache *cache);
    /* Free up to cline_no lines; returns how many were evicted */
    uint32_t (*req_clines)(struct ocf_cache *cache,
            uint32_t io_queue, ocf_part_id_t part_id,
            uint32_t cline_no, ocf_core_id_t core_id);
    /* Mark a cache line as recently used */
    void (*hot_cline)(struct ocf_cache *cache,
            ocf_cache_line_t cline);
    /* Initialize per-partition policy state */
    void (*init_evp)(struct ocf_cache *cache,
            ocf_part_id_t part_id);
    /* Line transitioned clean -> dirty */
    void (*dirty_cline)(struct ocf_cache *cache,
            ocf_part_id_t part_id,
            uint32_t cline_no);
    /* Line transitioned dirty -> clean */
    void (*clean_cline)(struct ocf_cache *cache,
            ocf_part_id_t part_id,
            uint32_t cline_no);
    /* Policy name (for logging) */
    const char *name;
};
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
#endif

503
src/eviction/lru.c Normal file
View File

@@ -0,0 +1,503 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
#include "lru.h"
#include "ops.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#include "../mngt/ocf_mngt_common.h"
#include "../engine/engine_zero.h"
#include "../utils/utils_rq.h"
#define OCF_EVICTION_MAX_SCAN 1024
/* -- Start of LRU functions --*/
/* Returns 1 if the given collision_index is the _head_ of
* the LRU list, 0 otherwise.
*/
/* static inline int is_lru_head(unsigned collision_index) {
* return collision_index == lru_list.lru_head;
* }
*/
/* Sentinel tests: an index equal to collision_table_entries (a local
 * variable in each caller) marks the end of an LRU list.
 * Fix: arguments parenthesized so expression arguments compare safely
 * (CERT PRE01-C). */
#define is_lru_head(x) ((x) == collision_table_entries)
#define is_lru_tail(x) ((x) == collision_table_entries)
/* Sets the given collision_index as the new _head_ of the LRU list. */
static inline void update_lru_head(struct ocf_cache *cache,
int partition_id, unsigned int collision_index,
int cline_dirty)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
if (cline_dirty)
part->runtime->eviction.policy.lru.dirty_head = collision_index;
else
part->runtime->eviction.policy.lru.clean_head = collision_index;
}
/* Sets the given collision_index as the new _tail_ of the LRU list. */
static inline void update_lru_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index,
int cline_dirty)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
if (cline_dirty)
part->runtime->eviction.policy.lru.dirty_tail = collision_index;
else
part->runtime->eviction.policy.lru.clean_tail = collision_index;
}
/* Make collision_index both head and tail of the proper LRU list
 * (single-element or emptied list). */
static inline void update_lru_head_tail(struct ocf_cache *cache,
        int partition_id, unsigned int collision_index, int cline_dirty)
{
    update_lru_tail(cache, partition_id, collision_index, cline_dirty);
    update_lru_head(cache, partition_id, collision_index, cline_dirty);
}
/* Adds the given collision_index to the _head_ of the LRU list
 * (the clean or the dirty one, according to cline_dirty). */
static void add_lru_head(struct ocf_cache *cache, int partition_id,
        unsigned int collision_index, int cline_dirty)
{
    unsigned int curr_head_index;
    unsigned int collision_table_entries =
            cache->device->collision_table_entries;
    struct ocf_user_part *part = &cache->user_parts[partition_id];
    union eviction_policy_meta eviction;

    ENV_BUG_ON(!(collision_index < collision_table_entries));

    ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);

    /* First node to be added: it becomes both head and tail, with both
     * links pointing at the sentinel value. */
    if ((cline_dirty && !part->runtime->eviction.policy.lru.has_dirty_nodes) ||
            (!cline_dirty && !part->runtime->eviction.policy.lru.has_clean_nodes)) {
        update_lru_head_tail(cache, partition_id, collision_index, cline_dirty);

        eviction.lru.next = collision_table_entries;
        eviction.lru.prev = collision_table_entries;

        if (cline_dirty)
            part->runtime->eviction.policy.lru.has_dirty_nodes = 1;
        else
            part->runtime->eviction.policy.lru.has_clean_nodes = 1;

        ocf_metadata_set_evicition_policy(cache, collision_index,
                &eviction);
    } else {
        union eviction_policy_meta eviction_curr;

        /* Not the first node: link in front of the current head. */
        curr_head_index = cline_dirty ?
                part->runtime->eviction.policy.lru.dirty_head :
                part->runtime->eviction.policy.lru.clean_head;

        ENV_BUG_ON(!(curr_head_index < collision_table_entries));

        ocf_metadata_get_evicition_policy(cache, curr_head_index,
                &eviction_curr);

        eviction.lru.next = curr_head_index;
        eviction.lru.prev = collision_table_entries;
        eviction_curr.lru.prev = collision_index;

        update_lru_head(cache, partition_id, collision_index, cline_dirty);

        ocf_metadata_set_evicition_policy(cache, curr_head_index,
                &eviction_curr);
        ocf_metadata_set_evicition_policy(cache, collision_index,
                &eviction);
    }
}
/* Deletes the node with the given collision_index from the lru list
 * (clean or dirty, according to cline_dirty).
 *
 * Fix: the single-node case (Case 1) previously called
 * update_lru_head_tail() a second time with identical arguments; the
 * redundant duplicate call is removed. */
static void remove_lru_list(struct ocf_cache *cache, int partition_id,
        unsigned int collision_index, int cline_dirty)
{
    int is_clean_head = 0, is_clean_tail = 0, is_dirty_head = 0, is_dirty_tail = 0;
    uint32_t prev_lru_node, next_lru_node;
    uint32_t collision_table_entries = cache->device->collision_table_entries;
    struct ocf_user_part *part = &cache->user_parts[partition_id];
    union eviction_policy_meta eviction;

    ENV_BUG_ON(!(collision_index < collision_table_entries));

    ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);

    /* Find out if this node is LRU _head_ or LRU _tail_ */
    if (part->runtime->eviction.policy.lru.clean_head == collision_index)
        is_clean_head = 1;
    if (part->runtime->eviction.policy.lru.dirty_head == collision_index)
        is_dirty_head = 1;
    if (part->runtime->eviction.policy.lru.clean_tail == collision_index)
        is_clean_tail = 1;
    if (part->runtime->eviction.policy.lru.dirty_tail == collision_index)
        is_dirty_tail = 1;
    /* A node may belong to the clean or the dirty list, never both */
    ENV_BUG_ON((is_clean_tail || is_clean_head) && (is_dirty_tail || is_dirty_head));

    /* Set prev and next (even if not existent) */
    next_lru_node = eviction.lru.next;
    prev_lru_node = eviction.lru.prev;

    /* Case 1: If we are head AND tail, there is only one node.
     * So unlink node and set that there is no node left in the list.
     */
    if ((is_clean_head && is_clean_tail) || (is_dirty_head && is_dirty_tail)) {
        eviction.lru.next = collision_table_entries;
        eviction.lru.prev = collision_table_entries;

        update_lru_head_tail(cache, partition_id,
                collision_table_entries, cline_dirty);

        if (cline_dirty)
            part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
        else
            part->runtime->eviction.policy.lru.has_clean_nodes = 0;

        ocf_metadata_set_evicition_policy(cache, collision_index,
                &eviction);
    }

    /* Case 2: else if this collision_index is LRU head, but not tail,
     * update head and return
     */
    else if ((!is_clean_tail && is_clean_head) || (!is_dirty_tail && is_dirty_head)) {
        union eviction_policy_meta eviction_next;

        ENV_BUG_ON(!(next_lru_node < collision_table_entries));

        ocf_metadata_get_evicition_policy(cache, next_lru_node,
                &eviction_next);

        update_lru_head(cache, partition_id, next_lru_node, cline_dirty);

        eviction.lru.next = collision_table_entries;
        eviction_next.lru.prev = collision_table_entries;

        ocf_metadata_set_evicition_policy(cache, collision_index,
                &eviction);
        ocf_metadata_set_evicition_policy(cache, next_lru_node,
                &eviction_next);
    }

    /* Case 3: else if this collision_index is LRU tail, but not head,
     * update tail and return
     */
    else if ((is_clean_tail && !is_clean_head) || (is_dirty_tail && !is_dirty_head)) {
        union eviction_policy_meta eviction_prev;

        ENV_BUG_ON(!(prev_lru_node < collision_table_entries));

        update_lru_tail(cache, partition_id, prev_lru_node, cline_dirty);

        ocf_metadata_get_evicition_policy(cache, prev_lru_node,
                &eviction_prev);

        eviction.lru.prev = collision_table_entries;
        eviction_prev.lru.next = collision_table_entries;

        ocf_metadata_set_evicition_policy(cache, collision_index,
                &eviction);
        ocf_metadata_set_evicition_policy(cache, prev_lru_node,
                &eviction_prev);
    }

    /* Case 4: else this collision_index is a middle node. There is no
     * change to the head and the tail pointers.
     */
    else {
        union eviction_policy_meta eviction_prev;
        union eviction_policy_meta eviction_next;

        ENV_BUG_ON(!(next_lru_node < collision_table_entries));
        ENV_BUG_ON(!(prev_lru_node < collision_table_entries));

        ocf_metadata_get_evicition_policy(cache, next_lru_node,
                &eviction_next);
        ocf_metadata_get_evicition_policy(cache, prev_lru_node,
                &eviction_prev);

        /* Update prev and next nodes */
        eviction_prev.lru.next = eviction.lru.next;
        eviction_next.lru.prev = eviction.lru.prev;

        /* Update the given node */
        eviction.lru.next = collision_table_entries;
        eviction.lru.prev = collision_table_entries;

        ocf_metadata_set_evicition_policy(cache, collision_index,
                &eviction);
        ocf_metadata_set_evicition_policy(cache, next_lru_node,
                &eviction_next);
        ocf_metadata_set_evicition_policy(cache, prev_lru_node,
                &eviction_prev);
    }
}
/*-- End of LRU functions*/
/* Reset a cache line's LRU links to the end-of-list sentinel before it
 * joins any list. */
void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
    union eviction_policy_meta meta;
    uint32_t sentinel = cache->device->collision_table_entries;

    ocf_metadata_get_evicition_policy(cache, cline, &meta);
    meta.lru.prev = sentinel;
    meta.lru.next = sentinel;
    ocf_metadata_set_evicition_policy(cache, cline, &meta);
}
/* the caller must hold the metadata lock */
/* Unlink a cache line from its (clean or dirty) LRU list. */
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
    ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
    int dirty = metadata_test_dirty(cache, cline);

    remove_lru_list(cache, part_id, cline, dirty);
}
static void evp_lru_clean_end(void *private_data, int error)
{
env_atomic *cleaning_in_progress = private_data;
env_atomic_set(cleaning_in_progress, 0);
}
/* Cleaner "getter" callback: walk the dirty LRU list backwards from the
 * position saved in attribs->getter_item, skipping lines that are
 * currently locked, and hand out the next dirty line to clean.
 * Returns 0 with *line set, or -1 when the list is exhausted (the
 * sentinel value equals collision_table_entries). */
static int evp_lru_clean_getter(struct ocf_cache *cache,
        void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
    union eviction_policy_meta eviction;
    struct ocf_cleaner_attribs *attribs = getter_context;
    ocf_cache_line_t prev_cline, curr_cline = attribs->getter_item;

    while (curr_cline < cache->device->collision_table_entries) {
        ocf_metadata_get_evicition_policy(cache, curr_cline,
                &eviction);
        prev_cline = eviction.lru.prev;

        /* Prevent evicting already locked items */
        if (ocf_cache_line_is_used(cache, curr_cline)) {
            curr_cline = prev_cline;
            continue;
        }

        /* Everything on the dirty list must in fact be dirty */
        ENV_BUG_ON(!metadata_test_dirty(cache, curr_cline));

        *line = curr_cline;
        /* Remember scan position for the next invocation */
        attribs->getter_item = prev_cline;
        return 0;
    }
    return -1;
}
/* Start a background cleaning run for the partition, targeting up to
 * min(count, 32) lines from the dirty LRU tail. The per-partition
 * atomic acts as a single-run latch, released in evp_lru_clean_end(). */
static void evp_lru_clean(struct ocf_cache *cache, uint32_t io_queue,
        ocf_part_id_t part_id, uint32_t count)
{
    env_atomic *progress = &cache->cleaning[part_id];
    struct ocf_user_part *part = &cache->user_parts[part_id];

    /* Skip while a management operation holds the cache lock */
    if (ocf_mngt_is_cache_locked(cache))
        return;

    if (env_atomic_cmpxchg(progress, 0, 1) == 0) {
        /* Initialize attributes for cleaner */
        struct ocf_cleaner_attribs attribs = {
            .cache_line_lock = true,
            .do_sort = true,

            .cmpl_context = progress,
            .cmpl_fn = evp_lru_clean_end,

            /* attribs references itself so the getter can keep
             * its scan position in getter_item */
            .getter = evp_lru_clean_getter,
            .getter_context = &attribs,
            .getter_item = part->runtime->eviction.policy.lru.dirty_tail,

            .count = count > 32 ? 32 : count,

            .io_queue = io_queue
        };

        ocf_cleaner_fire(cache, &attribs);
    }
}
/* Completion of an eviction-path trim request: drop the global
 * pending-eviction counter checked by evp_lru_can_evict().
 * NOTE(review): the error code is ignored here - presumably best-effort
 * by design; confirm. */
static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
{
    env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
}
/* Build and fire an internal write request that trims (zeroes) a single
 * cache line on an atomic cache device; tracked via the global
 * pending-eviction counter. Allocation failure is silently skipped. */
static void evp_lru_zero_line(struct ocf_cache *cache, uint32_t io_queue,
        ocf_cache_line_t line)
{
    struct ocf_request *req;
    ocf_core_id_t core_id;
    uint64_t core_line;

    ocf_metadata_get_core_info(cache, line, &core_id, &core_line);

    req = ocf_rq_new(cache, core_id, core_line * ocf_line_size(cache),
            ocf_line_size(cache), OCF_WRITE);
    if (!req)
        return;

    req->info.internal = true;
    req->complete = evp_lru_zero_line_complete;
    req->io_queue = io_queue;

    env_atomic_inc(&cache->pending_eviction_clines);

    ocf_engine_zero_line(req);
}
/* Throttle eviction: allow it only while the number of in-flight
 * eviction trims stays below OCF_PENDING_EVICTION_LIMIT. */
bool evp_lru_can_evict(struct ocf_cache *cache)
{
    return env_atomic_read(&cache->pending_eviction_clines) <
            OCF_PENDING_EVICTION_LIMIT;
}
/* the caller must hold the metadata lock */
/* Evict up to cline_no clean cache lines, scanning backwards from the
 * partition's clean LRU tail; returns the number actually invalidated.
 * On atomic devices, lines are trimmed asynchronously and are NOT
 * counted in the return value. If the clean list runs short, background
 * cleaning of dirty lines is kicked off to replenish it. */
uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue,
        ocf_part_id_t part_id, uint32_t cline_no, ocf_core_id_t core_id)
{
    uint32_t i;
    ocf_cache_line_t curr_cline, prev_cline;
    struct ocf_user_part *part = &cache->user_parts[part_id];
    union eviction_policy_meta eviction;

    if (cline_no == 0)
        return 0;

    i = 0;
    curr_cline = part->runtime->eviction.policy.lru.clean_tail;
    /* Find cachelines to be evicted. */
    while (i < cline_no) {
        ENV_BUG_ON(curr_cline > cache->device->collision_table_entries);

        /* Stop when the pending-eviction limit is reached */
        if (!evp_lru_can_evict(cache))
            break;

        /* Sentinel reached - clean list exhausted */
        if (curr_cline == cache->device->collision_table_entries)
            break;

        ocf_metadata_get_evicition_policy(cache, curr_cline,
                &eviction);
        prev_cline = eviction.lru.prev;

        /* Prevent evicting already locked items */
        if (ocf_cache_line_is_used(cache, curr_cline)) {
            curr_cline = prev_cline;
            continue;
        }

        /* Clean list must contain only clean lines */
        ENV_BUG_ON(metadata_test_dirty(cache, curr_cline));

        if (ocf_data_obj_is_atomic(&cache->device->obj)) {
            /* atomic cache, we have to trim cache lines before
             * eviction
             */
            evp_lru_zero_line(cache, io_queue, curr_cline);

        } else {
            set_cache_line_invalid_no_flush(cache, 0,
                    ocf_line_end_sector(cache),
                    curr_cline);

            /* Goto next item. */
            i++;
        }

        curr_cline = prev_cline;
    }

    /* Could not satisfy the demand from the clean list - schedule
     * cleaning of dirty lines for future attempts */
    if (i < cline_no && part->runtime->eviction.policy.lru.dirty_tail !=
            cache->device->collision_table_entries) {
        evp_lru_clean(cache, io_queue, part_id, cline_no - i);
    }

    /* Return number of clines that were really evicted */
    return i;
}
/* the caller must hold the metadata lock */
/* Promote a cache line to the head of its (clean or dirty) LRU list.
 * It is unlinked first only if it is already on a list: either one of
 * its links is set, or it is the sole element (head == tail == cline). */
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
    ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
    struct ocf_user_part *part = &cache->user_parts[part_id];

    uint32_t prev_lru_node, next_lru_node;
    uint32_t collision_table_entries = cache->device->collision_table_entries;
    union eviction_policy_meta eviction;

    int cline_dirty;

    ocf_metadata_get_evicition_policy(cache, cline, &eviction);

    next_lru_node = eviction.lru.next;
    prev_lru_node = eviction.lru.prev;

    cline_dirty = metadata_test_dirty(cache, cline);

    /* Already on a list? (sole element has both links == sentinel) */
    if ((next_lru_node != collision_table_entries) ||
            (prev_lru_node != collision_table_entries) ||
            ((part->runtime->eviction.policy.lru.clean_head == cline) &&
            (part->runtime->eviction.policy.lru.clean_tail == cline)) ||
            ((part->runtime->eviction.policy.lru.dirty_head == cline) &&
            (part->runtime->eviction.policy.lru.dirty_tail == cline))) {
        remove_lru_list(cache, part_id, cline, cline_dirty);
    }

    /* Update LRU */
    add_lru_head(cache, part_id, cline, cline_dirty);
}
/* Reset a partition's LRU state: both lists empty, all anchors at the
 * end-of-list sentinel. */
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id)
{
    struct lru_eviction_policy *lru =
        &cache->user_parts[part_id].runtime->eviction.policy.lru;
    unsigned int sentinel = cache->device->collision_table_entries;

    lru->has_clean_nodes = 0;
    lru->has_dirty_nodes = 0;
    lru->clean_head = sentinel;
    lru->clean_tail = sentinel;
    lru->dirty_head = sentinel;
    lru->dirty_tail = sentinel;
}
/* Line transitioned dirty -> clean: unlink it from the dirty LRU list
 * and push it onto the head of the clean list. */
void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id,
        uint32_t cline)
{
    OCF_METADATA_EVICTION_LOCK();
    remove_lru_list(cache, part_id, cline, 1);
    add_lru_head(cache, part_id, cline, 0);
    OCF_METADATA_EVICTION_UNLOCK();
}
/* Line transitioned clean -> dirty: unlink it from the clean LRU list
 * and push it onto the head of the dirty list. */
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id,
        uint32_t cline)
{
    OCF_METADATA_EVICTION_LOCK();
    remove_lru_list(cache, part_id, cline, 0);
    add_lru_head(cache, part_id, cline, 1);
    OCF_METADATA_EVICTION_UNLOCK();
}

23
src/eviction/lru.h Normal file
View File

@@ -0,0 +1,23 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_H__
#define __EVICTION_LRU_H__

#include "eviction.h"
#include "lru_structs.h"

/* LRU eviction policy operations; see struct eviction_policy_ops in
 * eviction.h for the contract (callers hold the metadata lock). */
void evp_lru_init_cline(struct ocf_cache *cache,
        ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue,
        ocf_part_id_t part_id, uint32_t cline_no,
        ocf_core_id_t core_id);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id);
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);

#endif

View File

@@ -0,0 +1,24 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_STRUCTS_H__
#define __EVICTION_LRU_STRUCTS_H__
/* Per-cache-line LRU links, stored packed in cache metadata. A value
 * equal to collision_table_entries means "no link" (see lru.c). */
struct lru_eviction_policy_meta {
    /* LRU pointers 2*4=8 bytes */
    uint32_t prev;
    uint32_t next;
} __attribute__((packed));
/* Per-partition LRU anchors: separate clean and dirty lists. Head/tail
 * hold collision indices; collision_table_entries is the end-of-list
 * sentinel (see lru.c). */
struct lru_eviction_policy {
    int has_clean_nodes;
    int has_dirty_nodes;
    uint32_t dirty_head;
    uint32_t dirty_tail;
    uint32_t clean_head;
    uint32_t clean_tail;
};
#endif

108
src/eviction/ops.h Normal file
View File

@@ -0,0 +1,108 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef LAYER_EVICTION_POLICY_OPS_H_
#define LAYER_EVICTION_POLICY_OPS_H_
#include "eviction.h"
#include "../metadata/metadata.h"
/**
 * @brief Initialize cache line before adding it into eviction
 *
 * @note This operation is called under WR metadata lock
 */
static inline void ocf_eviction_init_cache_line(struct ocf_cache *cache,
        ocf_cache_line_t line, ocf_part_id_t part_id)
{
    uint8_t type = cache->conf_meta->eviction_policy_type;

    ENV_BUG_ON(type >= ocf_eviction_max);

    /* part_id is currently unused; kept for interface symmetry */
    if (likely(evict_policy_ops[type].init_cline))
        evict_policy_ops[type].init_cline(cache, line);
}
/* Remove a cache line from the active policy's structures, under the
 * eviction lock. */
static inline void ocf_eviction_purge_cache_line(
        struct ocf_cache *cache, ocf_cache_line_t line)
{
    uint8_t type = cache->conf_meta->eviction_policy_type;

    ENV_BUG_ON(type >= ocf_eviction_max);

    if (!evict_policy_ops[type].rm_cline)
        return;

    OCF_METADATA_EVICTION_LOCK();
    evict_policy_ops[type].rm_cline(cache, line);
    OCF_METADATA_EVICTION_UNLOCK();
}
static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
if (likely(evict_policy_ops[type].can_evict))
return evict_policy_ops[type].can_evict(cache);
return true;
}
/* Ask the active policy to free 'clines' cache lines for the given
 * partition; returns how many were actually freed. */
static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
        uint32_t io_queue, ocf_part_id_t part_id, uint32_t clines,
        ocf_core_id_t core_id)
{
    uint8_t type;
    uint32_t result = 0;

    ENV_BUG_ON(core_id >= OCF_CORE_MAX);

    type = cache->conf_meta->eviction_policy_type;

    ENV_BUG_ON(type >= ocf_eviction_max);

    if (likely(evict_policy_ops[type].req_clines)) {
        /*
         * This is called under METADATA WR lock. No need to get
         * eviction lock.
         */
        result = evict_policy_ops[type].req_clines(cache, io_queue,
                part_id, clines, core_id);
    }

    return result;
}
/* Mark a cache line as recently used in the active policy, under the
 * eviction lock. */
static inline void ocf_eviction_set_hot_cache_line(
        struct ocf_cache *cache, ocf_cache_line_t line)
{
    uint8_t type = cache->conf_meta->eviction_policy_type;

    ENV_BUG_ON(type >= ocf_eviction_max);

    if (!evict_policy_ops[type].hot_cline)
        return;

    OCF_METADATA_EVICTION_LOCK();
    evict_policy_ops[type].hot_cline(cache, line);
    OCF_METADATA_EVICTION_UNLOCK();
}
/**
 * @brief Initialize eviction policy state for partition @part_id.
 *
 * Takes the eviction spinlock around the policy's init_evp op.
 */
static inline void ocf_eviction_initialize(struct ocf_cache *cache,
		ocf_part_id_t part_id)
{
	uint8_t type = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(type >= ocf_eviction_max);

	if (likely(evict_policy_ops[type].init_evp)) {
		OCF_METADATA_EVICTION_LOCK();
		evict_policy_ops[type].init_evp(cache, part_id);
		OCF_METADATA_EVICTION_UNLOCK();
	}
}
#endif /* LAYER_EVICTION_POLICY_OPS_H_ */

View File

@@ -0,0 +1,114 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "layer_space_management.h"
#include "utils/utils_allocator.h"
#include "utils/utils_part.h"
#include "concurrency/ocf_concurrency.h"
#include "engine/engine_common.h"
#include "eviction/ops.h"
/* Clamp an eviction request against partition limits.
 *
 * Returns how many cache lines may actually be evicted from @part:
 * 0 when the partition is already at its guaranteed minimum, otherwise
 * @to_evict raised to the OCF_TO_EVICTION_MIN batch size and capped so
 * the partition never shrinks below its configured minimum.
 */
static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
		uint32_t to_evict)
{
	uint32_t curr_size = part->runtime->curr_size;
	uint32_t min_size = part->config->min_size;

	/* Nothing above the guaranteed minimum - nothing to evict */
	if (curr_size <= min_size)
		return 0;

	/* Evict in batches of at least OCF_TO_EVICTION_MIN lines... */
	if (to_evict < OCF_TO_EVICTION_MIN)
		to_evict = OCF_TO_EVICTION_MIN;

	/* ...but never dig below the partition's minimum size */
	if (to_evict > curr_size - min_size)
		to_evict = curr_size - min_size;

	return to_evict;
}
/* Evict up to @evict_cline_no cache lines on behalf of the target
 * partition: scan partitions from the lowest priority upwards, evicting
 * from others first and from the target partition only as a last resort.
 *
 * @return number of cache lines actually evicted
 */
static inline uint32_t ocf_evict_do(struct ocf_cache *cache,
		uint32_t io_queue, const uint32_t evict_cline_no,
		ocf_core_id_t core_id, ocf_part_id_t target_part_id)
{
	uint32_t to_evict = 0, evicted = 0;
	struct ocf_user_part *part;
	struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
	ocf_part_id_t part_id;

	/* For each partition from the lowest priority to highest one */
	for_each_part(cache, part, part_id) {

		if (!ocf_eviction_can_evict(cache))
			goto out;

		/*
		 * Check stop and continue conditions
		 */
		if (target_part->config->priority > part->config->priority) {
			/*
			 * Iterated partition has a higher priority than the
			 * target - do not evict from it (or any later one)
			 */
			break;
		}
		if (!part->config->flags.eviction) {
			/* No more partitions that allow eviction */
			break;
		}

		if (part_id == target_part_id) {
			/* Omit the target; evict from the others first */
			continue;
		}

		if (evicted >= evict_cline_no) {
			/* Evicted the requested number of cache lines, stop */
			goto out;
		}

		/* NOTE(review): the full evict_cline_no is requested from
		 * every partition rather than the remaining
		 * (evict_cline_no - evicted) - presumably deliberate
		 * over-provisioning; confirm against policy semantics.
		 */
		to_evict = ocf_evict_calculate(part, evict_cline_no);
		if (to_evict == 0) {
			/* No cache lines to evict for this partition */
			continue;
		}

		evicted += ocf_eviction_need_space(cache, io_queue,
				part_id, to_evict, core_id);
	}

	if (!ocf_eviction_can_evict(cache))
		goto out;

	if (evicted < evict_cline_no) {
		/* Still short - now we may evict from the target partition */
		to_evict = ocf_evict_calculate(target_part, evict_cline_no);
		if (to_evict) {
			evicted += ocf_eviction_need_space(cache, io_queue,
					target_part_id, to_evict, core_id);
		}
	}

out:
	return evicted;
}
/* Make room for @evict_cline_no cache lines on behalf of @req.
 *
 * @return LOOKUP_MAPPED when the freelist (possibly after eviction) can
 *	satisfy the request; LOOKUP_MISS otherwise, with eviction_error
 *	set on the request
 *
 * NOTE(review): the "managment" typo is part of the public name declared
 * in layer_space_management.h - renaming would break callers.
 */
int space_managment_evict_do(struct ocf_cache *cache,
		struct ocf_request *req, uint32_t evict_cline_no)
{
	uint32_t evicted;

	/* Enough free cache lines already available - nothing to evict */
	if (evict_cline_no <= cache->device->freelist_part->curr_size)
		return LOOKUP_MAPPED;

	/* Only the shortfall beyond the current freelist must be reclaimed */
	evict_cline_no = evict_cline_no - cache->device->freelist_part->curr_size;

	evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no,
			req->core_id, req->part_id);

	if (evict_cline_no <= evicted)
		return LOOKUP_MAPPED;

	req->info.eviction_error |= true;

	return LOOKUP_MISS;
}

View File

@@ -0,0 +1,25 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_SPACE_MANAGEMENT_H__
#define __LAYER_SPACE_MANAGEMENT_H__
#include "ocf_request.h"
#define OCF_TO_EVICTION_MIN 128UL
/*
* Deallocates space from low priority partitions.
*
* Returns -1 on error
* or the destination partition ID for the free buffers
* (it matches label and is part of the object (#core_id) IO group)
*/
int space_managment_evict_do(struct ocf_cache *cache,
struct ocf_request *req, uint32_t evict_cline_no);
int space_management_free(struct ocf_cache *cache, uint32_t count);
#endif

388
src/metadata/metadata.c Normal file
View File

@@ -0,0 +1,388 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#define OCF_METADATA_DEBUG 0
#if 1 == OCF_METADATA_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__)
#else
#define OCF_DEBUG_TRACE(cache)
#endif
/* Initialize the metadata service: set up the metadata IO layer and bind
 * the hash-based metadata interface. On interface init failure the IO
 * layer is torn down again.
 *
 * @return 0 on success, negative error code otherwise
 */
int ocf_metadata_init(struct ocf_cache *cache,
		ocf_cache_line_size_t cache_line_size)
{
	struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *)
			&cache->metadata.iface;
	int ret;

	OCF_DEBUG_TRACE(cache);

	/* Metadata must not have been initialized already */
	ENV_BUG_ON(cache->metadata.iface_priv);

	ret = ocf_metadata_io_init(cache);
	if (ret)
		return ret;

	/* The hash-based implementation is the only one available */
	*iface = *metadata_hash_get_iface();
	ret = cache->metadata.iface.init(cache, cache_line_size);
	if (ret)
		ocf_metadata_io_deinit(cache);

	return ret;
}
/* Initialize the size-dependent (per-cacheline) part of metadata. */
int ocf_metadata_init_variable_size(struct ocf_cache *cache, uint64_t device_size,
		ocf_cache_line_size_t cache_line_size,
		ocf_metadata_layout_t layout)
{
	OCF_DEBUG_TRACE(cache);
	return cache->metadata.iface.init_variable_size(cache, device_size,
			cache_line_size, layout);
}

/* Put all cache lines onto the freelist partition. */
void ocf_metadata_init_freelist_partition(struct ocf_cache *cache)
{
	OCF_DEBUG_TRACE(cache);
	cache->metadata.iface.layout_iface->init_freelist(cache);
}

/* Initialize the hash table used for core-line lookup. */
void ocf_metadata_init_hash_table(struct ocf_cache *cache)
{
	OCF_DEBUG_TRACE(cache);
	cache->metadata.iface.init_hash_table(cache);
}

/* Tear down the metadata interface (if bound) and the metadata IO layer. */
void ocf_metadata_deinit(struct ocf_cache *cache)
{
	OCF_DEBUG_TRACE(cache);

	if (cache->metadata.iface.deinit) {
		cache->metadata.iface.deinit(cache);
	}

	ocf_metadata_io_deinit(cache);
}

/* Tear down the size-dependent part of metadata, if implemented. */
void ocf_metadata_deinit_variable_size(struct ocf_cache *cache)
{
	OCF_DEBUG_TRACE(cache);

	if (cache->metadata.iface.deinit_variable_size)
		cache->metadata.iface.deinit_variable_size(cache);
}
/* Get the metadata memory footprint in bytes. */
size_t ocf_metadata_size_of(struct ocf_cache *cache)
{
	return cache->metadata.iface.size_of(cache);
}

/* Record a metadata error: log once, then stop the cache. */
void ocf_metadata_error(struct ocf_cache *cache)
{
	/* Log only on the first error; later calls just keep the flag set */
	if (cache->device->metadata_error == 0)
		ocf_cache_log(cache, log_err, "Metadata Error\n");

	env_bit_clear(ocf_cache_state_running, &cache->cache_state);
	cache->device->metadata_error = -1;
}

/* Number of cache-device pages occupied by metadata. */
ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache)
{
	return cache->metadata.iface.pages(cache);
}

/* Number of usable cache lines (device lines minus metadata space). */
ocf_cache_line_t
ocf_metadata_get_cachelines_count(struct ocf_cache *cache)
{
	return cache->metadata.iface.cachelines(cache);
}
/* Flush all metadata to the cache device, under the WR metadata lock. */
int ocf_metadata_flush_all(struct ocf_cache *cache)
{
	int result;

	OCF_METADATA_LOCK_WR();
	result = cache->metadata.iface.flush_all(cache);
	OCF_METADATA_UNLOCK_WR();
	return result;
}

/* Flush metadata of a single cache line. */
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line)
{
	cache->metadata.iface.flush(cache, line);
}

/* Load all metadata from the cache device, under the WR metadata lock. */
int ocf_metadata_load_all(struct ocf_cache *cache)
{
	int result;

	OCF_METADATA_LOCK_WR();
	result = cache->metadata.iface.load_all(cache);
	OCF_METADATA_UNLOCK_WR();
	return result;
}

/* Load the subset of metadata required by the recovery procedure. */
int ocf_metadata_load_recovery(struct ocf_cache *cache)
{
	return cache->metadata.iface.load_recovery(cache);
}

/* Mark cache lines of @rq (map entry @map_idx, sectors start..stop) for
 * a later asynchronous metadata flush. */
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
	cache->metadata.iface.flush_mark(cache, rq, map_idx, to_state,
			start, stop);
}

/* Asynchronously flush previously marked cache lines; @complete is
 * invoked when the flush finishes. */
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
		struct ocf_request *rq, ocf_end_t complete)
{
	cache->metadata.iface.flush_do_asynch(cache, rq, complete);
}
/* Sanity-check that every superblock field read by the properties loader
 * lies within the first page, since only that single page is read.
 *
 * @return always 0 (ENV_BUG_ON fires on violation)
 */
static inline int ocf_metadata_check_properties(void)
{
	uint32_t field_offset;

	/* Because metadata basic properties are at the beginning of the super
	 * block, only the first page of the super block is read/written.
	 *
	 * For safety check that the offsets of the metadata properties fall
	 * within the first page of the super block.
	 *
	 * Maybe in the future the super block field order will change and a
	 * field may move beyond the first page of the super block.
	 */
	field_offset = offsetof(struct ocf_superblock_config, line_size);
	ENV_BUG_ON(field_offset >= PAGE_SIZE);

	/* The same checking for magic number */
	field_offset = offsetof(struct ocf_superblock_config, magic_number);
	ENV_BUG_ON(field_offset >= PAGE_SIZE);

	/* The same checking for IO interface type */
	field_offset = offsetof(struct ocf_superblock_config, cache_mode);
	ENV_BUG_ON(field_offset >= PAGE_SIZE);

	/* And the same for version location within superblock structure */
	field_offset = offsetof(struct ocf_superblock_config, metadata_version);
	ENV_BUG_ON(field_offset >= PAGE_SIZE);

	return 0;
}
/* Read the first page of the cache device into @superblock.
 *
 * Allocates a temporary IO object and data buffer; both are released on
 * every exit path. NOTE(review): the error path calls
 * ctx_data_free(ctx, NULL) when the allocation failed - presumably the
 * ctx data ops treat NULL as a no-op; confirm against the ops contract.
 *
 * @return 0 on success, negative error code otherwise
 */
static int ocf_metadata_read_properties(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
		struct ocf_superblock_config *superblock)
{
	ctx_data_t *data;
	struct ocf_io *io;
	int result = 0;

	if (ocf_metadata_check_properties())
		return -EINVAL;

	/* Allocate resources for IO */
	io = ocf_dobj_new_io(cache_obj);
	data = ctx_data_alloc(ctx, 1);

	/* Check allocation result */
	if (!io || !data) {
		ocf_log(ctx, log_err, "Memory allocation error");
		result = -ENOMEM;
		goto out;
	}

	/*
	 * Read first page of cache device in order to recover metadata
	 * properties
	 */
	result = ocf_io_set_data(io, data, 0);
	if (result) {
		ocf_log(ctx, log_err, "Metadata IO configuration error\n");
		result = -EIO;
		goto out;
	}

	ocf_io_configure(io, 0, PAGE_SIZE, OCF_READ, 0, 0);
	result = ocf_submit_io_wait(io);
	if (result) {
		ocf_log(ctx, log_err, "Metadata IO request submit error\n");
		result = -EIO;
		goto out;
	}

	/* Copy the data buffer contents into the super block structure */
	ctx_data_rd_check(ctx, superblock, data,
			PAGE_SIZE);

out:
	if (io)
		ocf_io_put(io);
	ctx_data_free(ctx, data);

	return result;
}
/**
 * @brief Load individual metadata properties from the superblock.
 *
 * Reads the first page of the cache device and validates/extracts the
 * requested fields. Any output pointer may be NULL to skip that field.
 *
 * @param cache_obj object from which to load metadata
 * @param line_size cache line size stored in metadata (optional)
 * @param layout metadata layout stored in metadata (optional)
 * @param cache_mode cache mode stored in metadata (optional)
 * @param shutdown_status dirty shutdown or clean shutdown (optional)
 * @param dirty_flushed whether all dirty data was flushed prior to
 *	closing the cache (optional)
 * @return 0 upon successful completion, negative error code otherwise
 */
int ocf_metadata_load_properties(ocf_data_obj_t cache_obj,
		ocf_cache_line_size_t *line_size,
		ocf_metadata_layout_t *layout,
		ocf_cache_mode_t *cache_mode,
		enum ocf_metadata_shutdown_status *shutdown_status,
		uint8_t *dirty_flushed)
{
	struct ocf_superblock_config *superblock;
	int err_value = 0;

	/* Allocate first page of super block */
	superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!superblock) {
		ocf_cache_log(cache_obj->cache, log_err,
				"Allocation memory error");
		return -ENOMEM;
	}

	/* Fix: the trace macro referenced an undeclared identifier 'cache',
	 * breaking the build whenever OCF_METADATA_DEBUG is enabled */
	OCF_DEBUG_TRACE(cache_obj->cache);

	err_value = ocf_metadata_read_properties(cache_obj->cache->owner,
			cache_obj, superblock);
	if (err_value)
		goto ocf_metadata_load_variant_ERROR;

	if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
		err_value = -ENODATA;
		ocf_cache_log(cache_obj->cache, log_info,
				"Can not detect pre-existing metadata\n");
		goto ocf_metadata_load_variant_ERROR;
	}

	if (METADATA_VERSION() != superblock->metadata_version) {
		err_value = -EBADF;
		ocf_cache_log(cache_obj->cache, log_err,
				"Metadata version mismatch!\n");
		goto ocf_metadata_load_variant_ERROR;
	}

	if (line_size) {
		if (ocf_cache_line_size_is_valid(superblock->line_size)) {
			*line_size = superblock->line_size;
		} else {
			err_value = -EINVAL;
			ocf_cache_log(cache_obj->cache, log_err,
					"ERROR: Invalid cache line size!\n");
		}
	}

	if (layout) {
		if (superblock->metadata_layout >= ocf_metadata_layout_max ||
				superblock->metadata_layout < 0) {
			err_value = -EINVAL;
			ocf_cache_log(cache_obj->cache, log_err,
					"ERROR: Invalid metadata layout!\n");
		} else {
			*layout = superblock->metadata_layout;
		}
	}

	if (cache_mode) {
		if (superblock->cache_mode < ocf_cache_mode_max) {
			*cache_mode = superblock->cache_mode;
		} else {
			ocf_cache_log(cache_obj->cache, log_err,
					"ERROR: Invalid cache mode!\n");
			err_value = -EINVAL;
		}
	}

	if (shutdown_status != NULL) {
		if (superblock->clean_shutdown <= ocf_metadata_clean_shutdown) {
			*shutdown_status = superblock->clean_shutdown;
		} else {
			ocf_cache_log(cache_obj->cache, log_err,
					"ERROR: Invalid shutdown status!\n");
			err_value = -EINVAL;
		}
	}

	if (dirty_flushed != NULL) {
		if (superblock->dirty_flushed <= DIRTY_FLUSHED) {
			*dirty_flushed = superblock->dirty_flushed;
		} else {
			ocf_cache_log(cache_obj->cache, log_err,
					"ERROR: Invalid flush status!\n");
			err_value = -EINVAL;
		}
	}

ocf_metadata_load_variant_ERROR:
	env_free(superblock);
	return err_value;
}
/**
 * @brief Probe the cache device for pre-existing OCF metadata.
 *
 * @param ctx OCF context
 * @param cache_obj data object to probe
 * @param clean_shutdown set true when the cache was shut down cleanly
 *	(optional, may be NULL)
 * @param cache_dirty set true when dirty data was not flushed
 *	(optional, may be NULL)
 * @return 0 when valid metadata was found, -ENODATA when none is present,
 *	-EBADF on metadata version mismatch, other negative code on IO error
 */
int ocf_metadata_probe(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
		bool *clean_shutdown, bool *cache_dirty)
{
	struct ocf_superblock_config *superblock;
	int result = 0;

	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(cache_obj);

	/* Allocate first page of super block */
	superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!superblock) {
		ocf_log(ctx, log_err, "Allocation memory error");
		return -ENOMEM;
	}

	/* Fix: the trace macro referenced an undeclared identifier 'cache',
	 * breaking the build whenever OCF_METADATA_DEBUG is enabled */
	OCF_DEBUG_TRACE(cache_obj->cache);

	result = ocf_metadata_read_properties(ctx, cache_obj, superblock);
	if (result)
		goto ocf_metadata_probe_END;

	if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
		result = -ENODATA;
		goto ocf_metadata_probe_END;
	}

	if (clean_shutdown != NULL) {
		*clean_shutdown = (superblock->clean_shutdown !=
				ocf_metadata_dirty_shutdown);
	}

	if (cache_dirty != NULL)
		*cache_dirty = (superblock->dirty_flushed == DIRTY_NOT_FLUSHED);

	if (METADATA_VERSION() != superblock->metadata_version)
		result = -EBADF;

ocf_metadata_probe_END:
	env_free(superblock);
	return result;
}

336
src/metadata/metadata.h Normal file
View File

@@ -0,0 +1,336 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_H__
#define __METADATA_H__
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
/* Take the spinlock guarding eviction policy structures. */
static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache)
{
	env_spinlock_lock(&cache->metadata.lock.eviction);
}

/* Release the eviction spinlock. */
static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache)
{
	env_spinlock_unlock(&cache->metadata.lock.eviction);
}

/* Convenience wrappers - expect a local variable named 'cache' in scope */
#define OCF_METADATA_EVICTION_LOCK() \
		ocf_metadata_eviction_lock(cache)

#define OCF_METADATA_EVICTION_UNLOCK() \
		ocf_metadata_eviction_unlock(cache)
/* Acquire the collision (main metadata) rwsem.
 * @rw must be OCF_METADATA_WR or OCF_METADATA_RD; anything else is a bug. */
static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw)
{
	if (rw == OCF_METADATA_WR)
		env_rwsem_down_write(&cache->metadata.lock.collision);
	else if (rw == OCF_METADATA_RD)
		env_rwsem_down_read(&cache->metadata.lock.collision);
	else
		ENV_BUG();
}

/* Release the collision rwsem taken with the same @rw mode. */
static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw)
{
	if (rw == OCF_METADATA_WR)
		env_rwsem_up_write(&cache->metadata.lock.collision);
	else if (rw == OCF_METADATA_RD)
		env_rwsem_up_read(&cache->metadata.lock.collision);
	else
		ENV_BUG();
}

/* Try to acquire the collision rwsem without blocking.
 * @return 0 when the lock was taken, -1 otherwise */
static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw)
{
	int result = -1;

	if (rw == OCF_METADATA_WR) {
		result = env_rwsem_down_write_trylock(
				&cache->metadata.lock.collision);
	} else if (rw == OCF_METADATA_RD) {
		result = env_rwsem_down_read_trylock(
				&cache->metadata.lock.collision);
	} else {
		ENV_BUG();
	}

	/* env trylock reports success with a non-zero value */
	if (!result)
		return -1;

	return 0;
}
/* Lock the status-bits rwlock for read or write.
 * @rw must be OCF_METADATA_WR or OCF_METADATA_RD; anything else is a bug. */
static inline void ocf_metadata_status_bits_lock(
		struct ocf_cache *cache, int rw)
{
	if (rw == OCF_METADATA_WR)
		env_rwlock_write_lock(&cache->metadata.lock.status);
	else if (rw == OCF_METADATA_RD)
		env_rwlock_read_lock(&cache->metadata.lock.status);
	else
		ENV_BUG();
}

/* Unlock the status-bits rwlock taken with the same @rw mode. */
static inline void ocf_metadata_status_bits_unlock(
		struct ocf_cache *cache, int rw)
{
	if (rw == OCF_METADATA_WR)
		env_rwlock_write_unlock(&cache->metadata.lock.status);
	else if (rw == OCF_METADATA_RD)
		env_rwlock_read_unlock(&cache->metadata.lock.status);
	else
		ENV_BUG();
}
#define OCF_METADATA_LOCK_RD() \
ocf_metadata_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_UNLOCK_RD() \
ocf_metadata_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_RD_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_WR() \
ocf_metadata_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_LOCK_WR_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_UNLOCK_WR() \
ocf_metadata_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_BITS_LOCK_RD() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_BITS_UNLOCK_RD() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_BITS_LOCK_WR() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_BITS_UNLOCK_WR() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_FLUSH_LOCK() \
ocf_metadata_flush_lock(cache)
#define OCF_METADATA_FLUSH_UNLOCK() \
ocf_metadata_flush_unlock(cache)
#include "metadata_cleaning_policy.h"
#include "metadata_eviction_policy.h"
#include "metadata_partition.h"
#include "metadata_hash.h"
#include "metadata_superblock.h"
#include "metadata_status.h"
#include "metadata_collision.h"
#include "metadata_core.h"
#include "metadata_misc.h"
#define INVALID 0
#define VALID 1
#define CLEAN 2
#define DIRTY 3
/**
* @brief Initialize metadata
*
* @param cache - Cache instance
* @param cache_line_size Cache line size
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_init(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size);
/**
* @brief Initialize per-cacheline metadata
*
* @param cache - Cache instance
* @param device_size - Device size in bytes
* @param cache_line_size Cache line size
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_init_variable_size(struct ocf_cache *cache,
uint64_t device_size, ocf_cache_line_size_t cache_line_size,
ocf_metadata_layout_t layout);
/**
* @brief Initialize collision table
*
* @param cache - Cache instance
*/
void ocf_metadata_init_freelist_partition(struct ocf_cache *cache);
/**
* @brief Initialize hash table
*
* @param cache - Cache instance
*/
void ocf_metadata_init_hash_table(struct ocf_cache *cache);
/**
* @brief De-Initialize metadata
*
* @param cache - Cache instance
*/
void ocf_metadata_deinit(struct ocf_cache *cache);
/**
* @brief De-Initialize per-cacheline metadata
*
* @param cache - Cache instance
*/
void ocf_metadata_deinit_variable_size(struct ocf_cache *cache);
/**
* @brief Get memory footprint
*
* @param cache - Cache instance
* @return 0 - memory footprint
*/
size_t ocf_metadata_size_of(struct ocf_cache *cache);
/**
* @brief Handle metadata error
*
* @param cache - Cache instance
*/
void ocf_metadata_error(struct ocf_cache *cache);
/**
* @brief Get amount of cache lines
*
* @param cache - Cache instance
* @return Amount of cache lines (cache device lines - metadata space)
*/
ocf_cache_line_t
ocf_metadata_get_cachelines_count(struct ocf_cache *cache);
/**
* @brief Get amount of pages required for metadata
*
* @param cache - Cache instance
* @return Pages required for store metadata on cache device
*/
ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache);
/**
* @brief Flush metadata
*
* @param cache
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_flush_all(struct ocf_cache *cache);
/**
* @brief Flush metadata for specified cache line
*
* @param[in] cache - Cache instance
* @param[in] line - cache line which to be flushed
*/
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
/**
* @brief Mark specified cache line to be flushed
*
* @param[in] cache - Cache instance
* @param[in] line - cache line which to be flushed
*/
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/**
* @brief Flush marked cache lines asynchronously
*
* @param cache - Cache instance
* @param queue - I/O queue to which metadata flush should be submitted
* @param remaining - request remaining
* @param complete - flushing request callback
* @param context - context that will be passed into callback
*/
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_end_t complete);
/**
* @brief Load metadata
*
* @param cache - Cache instance
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_load_all(struct ocf_cache *cache);
/**
* @brief Load metadata required for recovery procedure
*
* @param cache Cache instance
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_load_recovery(struct ocf_cache *cache);
/*
* NOTE Hash table is specific for hash table metadata service implementation
* and should be used internally by metadata service.
* At the moment there is no high level metadata interface because of that
* temporary defined in this file.
*/
/* Read the collision-table index stored in hash bucket @index. */
static inline ocf_cache_line_t
ocf_metadata_get_hash(struct ocf_cache *cache, ocf_cache_line_t index)
{
	return cache->metadata.iface.get_hash(cache, index);
}

/* Store collision-table index @line in hash bucket @index. */
static inline void ocf_metadata_set_hash(struct ocf_cache *cache,
		ocf_cache_line_t index, ocf_cache_line_t line)
{
	cache->metadata.iface.set_hash(cache, index, line);
}

/* Flush a single hash-table entry to the cache device. */
static inline void ocf_metadata_flush_hash(struct ocf_cache *cache,
		ocf_cache_line_t index)
{
	cache->metadata.iface.flush_hash(cache, index);
}

/* Number of entries in the hash table. */
static inline ocf_cache_line_t ocf_metadata_entries_hash(
		struct ocf_cache *cache)
{
	return cache->metadata.iface.entries_hash(cache);
}
int ocf_metadata_load_properties(ocf_data_obj_t cache_obj,
ocf_cache_line_size_t *line_size,
ocf_metadata_layout_t *layout,
ocf_cache_mode_t *cache_mode,
enum ocf_metadata_shutdown_status *shutdown_status,
uint8_t *dirty_flushed);
/**
* @brief Validate cache line size
*
* @param size Cache line size
* @return true - cache line size is valid, false - cache line is invalid
*/
static inline bool ocf_metadata_line_size_is_valid(uint32_t size)
{
	/* Accepted sizes are exactly the powers of two in [4 KiB, 64 KiB]:
	 * 4, 8, 16, 32 and 64 KiB - the same set as the original switch.
	 */
	return size >= 4 * KiB && size <= 64 * KiB &&
			(size & (size - 1)) == 0;
}
#endif /* METADATA_H_ */

240
src/metadata/metadata_bit.h Normal file
View File

@@ -0,0 +1,240 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/*******************************************************************************
* Sector mask getter
******************************************************************************/
/* Build a 64-bit mask with bits [start, stop] (inclusive) set. */
static inline uint64_t _get_mask(uint8_t start, uint8_t stop)
{
	uint64_t mask;

	ENV_BUG_ON(start >= 64);
	ENV_BUG_ON(stop >= 64);
	ENV_BUG_ON(stop < start);

	/* Start from all-ones, then shave bits above 'stop' and below
	 * 'start'; shift amounts are < 64 by the checks above. */
	mask = ~0ULL;
	mask >>= start + (63 - stop);
	mask <<= start;

	return mask;
}
#define _get_mask_u8(start, stop) _get_mask(start, stop)
#define _get_mask_u16(start, stop) _get_mask(start, stop)
#define _get_mask_u32(start, stop) _get_mask(start, stop)
#define _get_mask_u64(start, stop) _get_mask(start, stop)
typedef __uint128_t u128;
static inline u128 _get_mask_u128(uint8_t start, uint8_t stop)
{
u128 mask = 0;
ENV_BUG_ON(start >= 128);
ENV_BUG_ON(stop >= 128);
ENV_BUG_ON(stop < start);
mask = ~mask;
mask >>= start + (127 - stop);
mask <<= start;
return mask;
}
/* Declare the packed collision-map entry for a given status-storage
 * width: the shared map header plus one 'valid' and one 'dirty' bitfield
 * of the given type (one bit per sector of the cache line). */
#define ocf_metadata_bit_struct(type) \
struct ocf_metadata_map_##type { \
	struct ocf_metadata_map map; \
	type valid; \
	type dirty; \
} __attribute__((packed))
#define ocf_metadata_bit_func(what, type) \
static bool _ocf_metadata_test_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
const struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
return true; \
} else { \
return false; \
} \
} else { \
if (map[line].what & mask) { \
return true; \
} else { \
return false; \
} \
} \
} \
\
static bool _ocf_metadata_test_out_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
const struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (map[line].what & ~mask) { \
return true; \
} else { \
return false; \
} \
} \
\
static bool _ocf_metadata_clear_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
map[line].what &= ~mask; \
\
if (map[line].what) { \
return true; \
} else { \
return false; \
} \
} \
\
static bool _ocf_metadata_set_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
bool result; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
result = map[line].what ? true : false; \
\
map[line].what |= mask; \
\
return result; \
} \
\
static bool _ocf_metadata_test_and_set_##what##_##type( \
struct ocf_cache *cache, ocf_cache_line_t line, \
uint8_t start, uint8_t stop, bool all) \
{ \
bool test; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
test = true; \
} else { \
test = false; \
} \
} else { \
if (map[line].what & mask) { \
test = true; \
} else { \
test = false; \
} \
} \
\
map[line].what |= mask; \
return test; \
} \
\
static bool _ocf_metadata_test_and_clear_##what##_##type( \
struct ocf_cache *cache, ocf_cache_line_t line, \
uint8_t start, uint8_t stop, bool all) \
{ \
bool test; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
test = true; \
} else { \
test = false; \
} \
} else { \
if (map[line].what & mask) { \
test = true; \
} else { \
test = false; \
} \
} \
\
map[line].what &= ~mask; \
return test; \
} \
/* Per-cacheline status storage variants, one per supported width */
ocf_metadata_bit_struct(u8);
ocf_metadata_bit_struct(u16);
ocf_metadata_bit_struct(u32);
ocf_metadata_bit_struct(u64);
ocf_metadata_bit_struct(u128);

/* Dirty-bit accessor families for each storage width */
ocf_metadata_bit_func(dirty, u8);
ocf_metadata_bit_func(dirty, u16);
ocf_metadata_bit_func(dirty, u32);
ocf_metadata_bit_func(dirty, u64);
ocf_metadata_bit_func(dirty, u128);

/* Valid-bit accessor families for each storage width */
ocf_metadata_bit_func(valid, u8);
ocf_metadata_bit_func(valid, u16);
ocf_metadata_bit_func(valid, u32);
ocf_metadata_bit_func(valid, u64);
ocf_metadata_bit_func(valid, u128);

View File

@@ -0,0 +1,39 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_CLEANING_POLICY_H__
#define __METADATA_CLEANING_POLICY_H__
/*
 * GET - read cleaning policy metadata of a cache line into @policy
 */
static inline void
ocf_metadata_get_cleaning_policy(struct ocf_cache *cache,
		ocf_cache_line_t line, struct cleaning_policy_meta *policy)
{
	cache->metadata.iface.get_cleaning_policy(cache, line, policy);
}

/*
 * SET - store cleaning policy metadata of a cache line from @policy
 */
static inline void
ocf_metadata_set_cleaning_policy(struct ocf_cache *cache,
		ocf_cache_line_t line, struct cleaning_policy_meta *policy)
{
	cache->metadata.iface.set_cleaning_policy(cache, line, policy);
}

/*
 * FLUSH - flush cleaning policy metadata of a cache line to the device
 */
static inline void
ocf_metadata_flush_cleaning_policy(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	cache->metadata.iface.flush_cleaning_policy(cache, line);
}
#endif /* METADATA_CLEANING_POLICY_H_ */

View File

@@ -0,0 +1,88 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_cache_line.h"
/*
 * Insert @cache_line at the head of the collision list of hash bucket
 * @hash and bind it to (@core_id, @core_line).
 */
void ocf_metadata_add_to_collision(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t core_line,
		ocf_cache_line_t hash, ocf_cache_line_t cache_line)
{
	ocf_cache_line_t prev_cache_line = ocf_metadata_get_hash(cache, hash);
	/* line_entries doubles as the "no line" sentinel in collision links */
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
	ocf_cache_line_t hash_entries = cache->device->hash_table_entries;

	ENV_BUG_ON(!(hash < hash_entries));
	ENV_BUG_ON(!(cache_line < line_entries));

	/* Setup new node */
	ocf_metadata_set_core_info(cache, cache_line, core_id,
			core_line);

	/* Update collision info:
	 * - next is set to value from hash table;
	 * - previous is set to collision table entries value (sentinel)
	 */
	ocf_metadata_set_collision_info(cache, cache_line, prev_cache_line,
			line_entries);

	/* Update previous head */
	if (prev_cache_line != line_entries) {
		ocf_metadata_set_collision_prev(cache, prev_cache_line,
				cache_line);
	}

	/* Update hash table: the hash table stores the index of the head
	 * node of each collision list
	 */
	ocf_metadata_set_hash(cache, hash, cache_line);
}
/*
 * Unlink @line from its collision list, fixing up the hash-bucket head
 * when it pointed at @line, and invalidate the line's core mapping.
 *
 * NOTE(review): @part_id is unused here - partition-list removal is
 * presumably handled by the caller; confirm.
 */
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
		ocf_cache_line_t line, ocf_part_id_t part_id)
{
	ocf_core_id_t core_id;
	uint64_t core_sector;
	ocf_cache_line_t hash_father;
	ocf_cache_line_t prev_line, next_line;
	/* line_entries doubles as the "no line" sentinel in collision links */
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
	ocf_cache_line_t hash_entries = cache->device->hash_table_entries;

	ENV_BUG_ON(!(line < line_entries));

	ocf_metadata_get_collision_info(cache, line, &next_line, &prev_line);

	/* Update previous node if any. */
	if (prev_line != line_entries)
		ocf_metadata_set_collision_next(cache, prev_line, next_line);

	/* Update next node if any. */
	if (next_line != line_entries)
		ocf_metadata_set_collision_prev(cache, next_line, prev_line);

	ocf_metadata_get_core_info(cache, line, &core_id, &core_sector);

	/* Update hash table, because if it was pointing to the given node it
	 * must now point to the given's node next
	 */
	hash_father = ocf_metadata_hash_func(cache, core_sector, core_id);
	ENV_BUG_ON(!(hash_father < hash_entries));

	if (ocf_metadata_get_hash(cache, hash_father) == line)
		ocf_metadata_set_hash(cache, hash_father, next_line);

	/* Detach the node and invalidate its core mapping */
	ocf_metadata_set_collision_info(cache, line,
			line_entries, line_entries);

	ocf_metadata_set_core_info(cache, line,
			OCF_CORE_MAX, ULLONG_MAX);
}

View File

@@ -0,0 +1,102 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_COLLISION_H__
#define __METADATA_COLLISION_H__
/**
 * @brief Per-cacheline list linkage: collision list and partition list
 *	(the original @brief was a copy-paste of the map structure's)
 */
struct ocf_metadata_list_info {
	ocf_cache_line_t prev_col;
		/*!< Previous cache line in collision list */
	ocf_cache_line_t next_col;
		/*!< Next cache line in collision list */
	ocf_cache_line_t partition_prev;
		/*!< Previous cache line in the same partition */
	ocf_cache_line_t partition_next;
		/*!< Next cache line in the same partition */
	ocf_part_id_t partition_id : 8;
		/*!< ID of the partition this cache line is assigned to */
} __attribute__((packed));
/**
 * @brief Per-cacheline core mapping
 */
struct ocf_metadata_map {
	uint64_t core_line;
		/*!< Core line address mapped by this structure */

	uint16_t core_id;
		/*!< ID of the core this cache line is assigned to */

	uint8_t status[];
		/*!< Entry status bits, e.g. valid, dirty... */
} __attribute__((packed));
/* Translate a logical (collision-table) index to a physical cache line. */
static inline ocf_cache_line_t ocf_metadata_map_lg2phy(
		struct ocf_cache *cache, ocf_cache_line_t coll_idx)
{
	return cache->metadata.iface.layout_iface->lg2phy(cache,
			coll_idx);
}

/* Translate a physical cache line to a logical (collision-table) index. */
static inline ocf_cache_line_t ocf_metadata_map_phy2lg(
		struct ocf_cache *cache, ocf_cache_line_t cache_line)
{
	return cache->metadata.iface.layout_iface->phy2lg(cache,
			cache_line);
}

/* Set both collision-list links of @line. */
static inline void ocf_metadata_set_collision_info(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_cache_line_t next, ocf_cache_line_t prev)
{
	cache->metadata.iface.set_collision_info(cache, line, next, prev);
}

/* Read both collision-list links of @line. */
static inline void ocf_metadata_get_collision_info(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_cache_line_t *next, ocf_cache_line_t *prev)
{
	cache->metadata.iface.get_collision_info(cache, line, next, prev);
}

/* Set only the "next" collision-list link of @line. */
static inline void ocf_metadata_set_collision_next(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_cache_line_t next)
{
	cache->metadata.iface.set_collision_next(cache, line, next);
}

/* Set only the "prev" collision-list link of @line. */
static inline void ocf_metadata_set_collision_prev(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_cache_line_t prev)
{
	cache->metadata.iface.set_collision_prev(cache, line, prev);
}

/* Read the "next" collision-list link of @line. */
static inline ocf_cache_line_t ocf_metadata_get_collision_next(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_collision_next(cache, line);
}

/* Read the "prev" collision-list link of @line. */
static inline ocf_cache_line_t ocf_metadata_get_collision_prev(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_collision_prev(cache, line);
}
void ocf_metadata_add_to_collision(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t hash, ocf_cache_line_t cache_line);
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id);
#endif /* METADATA_COLLISION_H_ */

View File

@@ -0,0 +1,51 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __METADATA_CORE_H__
#define __METADATA_CORE_H__

/* Record which core device and core sector @line is mapped to. */
static inline void ocf_metadata_set_core_info(struct ocf_cache *cache,
		ocf_cache_line_t line, ocf_core_id_t core_id,
		uint64_t core_sector)
{
	cache->metadata.iface.set_core_info(cache, line, core_id,
			core_sector);
}

/* Read the core id and core sector that @line is mapped to. */
static inline void ocf_metadata_get_core_info(struct ocf_cache *cache,
		ocf_cache_line_t line, ocf_core_id_t *core_id,
		uint64_t *core_sector)
{
	cache->metadata.iface.get_core_info(cache, line, core_id,
			core_sector);
}

/* Read the core id and partition id of @line in a single call. */
static inline void ocf_metadata_get_core_and_part_id(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_core_id_t *core_id, ocf_part_id_t *part_id)
{
	cache->metadata.iface.get_core_and_part_id(cache, line, core_id,
			part_id);
}

/* Get only the core id that @line is mapped to. */
static inline ocf_core_id_t ocf_metadata_get_core_id(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_core_id(cache, line);
}

/* Get only the core sector that @line is mapped to. */
static inline uint64_t ocf_metadata_get_core_sector(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_core_sector(cache, line);
}

/* Get the UUID metadata entry of core @core_id. */
static inline struct ocf_metadata_uuid *ocf_metadata_get_core_uuid(
		struct ocf_cache *cache, ocf_core_id_t core_id)
{
	return cache->metadata.iface.get_core_uuid(cache, core_id);
}

#endif /* __METADATA_CORE_H__ */

View File

@@ -0,0 +1,35 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __METADATA_EVICTION_H__
#define __METADATA_EVICTION_H__

/*
 * GET
 *
 * NOTE(review): "evicition" is misspelled in these public names; kept
 * as-is because callers depend on them.
 */
/* Read the per-line eviction policy metadata into *eviction. */
static inline void ocf_metadata_get_evicition_policy(
		struct ocf_cache *cache, ocf_cache_line_t line,
		union eviction_policy_meta *eviction)
{
	cache->metadata.iface.get_eviction_policy(cache, line, eviction);
}

/*
 * SET
 */
/* Store *eviction as the per-line eviction policy metadata. */
static inline void ocf_metadata_set_evicition_policy(
		struct ocf_cache *cache, ocf_cache_line_t line,
		union eviction_policy_meta *eviction)
{
	cache->metadata.iface.set_eviction_policy(cache, line, eviction);
}

/*
 * FLUSH
 */
/* Flush the per-line eviction policy metadata to the cache device. */
static inline void ocf_metadata_flush_evicition_policy(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	cache->metadata.iface.flush_eviction_policy(cache, line);
}

#endif /* __METADATA_EVICTION_H__ */

2462
src/metadata/metadata_hash.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __METADATA_HASH_H__
#define __METADATA_HASH_H__

/**
 * @file metadata_hash.h
 * @brief Metadata Service - Hash Implementation
 */

#include "../ocf_request.h"

/**
 * @brief Metadata hash elements type
 *
 * Enumerates the on-disk metadata segments. Fixed-size segments come
 * first; segments whose size depends on the cache device size follow
 * metadata_segment_variable_size_start.
 */
enum ocf_metadata_segment {
	metadata_segment_sb_config = 0,	/*!< Super block conf */
	metadata_segment_sb_runtime,	/*!< Super block runtime */
	metadata_segment_reserved,	/*!< Reserved space on disk */
	metadata_segment_core_config,	/*!< Core Config Metadata */
	metadata_segment_core_runtime,	/*!< Core Runtime Metadata */
	metadata_segment_core_uuid,	/*!< Core UUID */
	/* .... new fixed size sections go here */

	metadata_segment_fixed_size_max,
	metadata_segment_variable_size_start = metadata_segment_fixed_size_max,

	/* sections with size dependent on cache device size go here: */
	metadata_segment_cleaning =	/*!< Cleaning policy */
			metadata_segment_variable_size_start,
	metadata_segment_eviction,	/*!< Eviction policy */
	metadata_segment_collision,	/*!< Collision */
	metadata_segment_list_info,	/*!< Collision list info */
	metadata_segment_hash,		/*!< Hash */
	/* .... new variable size sections go here */

	metadata_segment_max,		/*!< MAX */
};

/**
 * @brief Get metadata interface implementation
 *
 * @return metadata interface
 */
const struct ocf_metadata_iface *metadata_hash_get_iface(void);

#endif /* __METADATA_HASH_H__ */

629
src/metadata/metadata_io.c Normal file
View File

@@ -0,0 +1,629 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_allocator.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_IO_DEBUG 0
#if 1 == OCF_METADATA_IO_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
static void metadata_io_write_i_end_asynch(void *private_data, int error);
static int ocf_restart_meta_io(struct ocf_request *req);

/* IO interface used by the metadata updater to resubmit deferred
 * (previously overlapping) metadata write requests. */
static struct ocf_io_if meta_restart_if = {
		.read = ocf_restart_meta_io,
		.write = ocf_restart_meta_io
};
/*
 * Maximum number of metadata pages a single IO to the cache device
 * may cover, derived from the data object's max IO size.
 */
static uint32_t metadata_io_max_page(struct ocf_cache *cache)
{
	uint32_t max_io_sectors = ocf_data_obj_get_max_io_size(
			&cache->device->obj);

	return max_io_sectors / BYTES_TO_SECTORS(PAGE_SIZE);
}
/*
* Iterative read end callback
*/
static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
{
struct metadata_io_request_atomic *meta_atom_req = io->priv1;
OCF_DEBUG_TRACE(ocf_data_obj_get_cache(io->obj));
meta_atom_req->error |= error;
env_completion_complete(&meta_atom_req->complete);
}
/*
 * Iterative read request
 *
 * Synchronously reads the atomic metadata area of the cache device in
 * one-page chunks, calling @hndl with each chunk of sectors read.
 * Returns 0 on success, or the first error encountered.
 */
int metadata_io_read_i_atomic(struct ocf_cache *cache,
		ocf_metadata_atomic_io_event_t hndl)
{
	uint64_t i;
	/* Number of atomic metadata sectors that fit in one 4k page */
	uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
	uint64_t io_sectors_count = cache->device->collision_table_entries *
					ocf_line_sectors(cache);
	uint64_t count, curr_count;
	int result = 0;
	struct ocf_io *io;
	ctx_data_t *data;
	struct metadata_io_request_atomic meta_atom_req;
	unsigned char step = 0;

	OCF_DEBUG_TRACE(cache);

	/* Allocate one 4k page for metadata*/
	data = ctx_data_alloc(cache->owner, 1);
	if (!data)
		return -ENOMEM;

	count = io_sectors_count;
	for (i = 0; i < io_sectors_count; i += curr_count) {
		/* Get sectors count of this IO iteration */
		curr_count = MIN(max_sectors_count, count);

		env_completion_init(&meta_atom_req.complete);
		meta_atom_req.error = 0;

		/* Reset position in data buffer */
		ctx_data_seek(cache->owner, data, ctx_data_seek_begin, 0);

		/* Allocate new IO */
		io = ocf_new_cache_io(cache);
		if (!io) {
			result = -ENOMEM;
			break;
		}

		/* Setup IO */
		ocf_io_configure(io,
				cache->device->metadata_offset +
					SECTORS_TO_BYTES(i),
				SECTORS_TO_BYTES(curr_count),
				OCF_READ, 0, 0);
		ocf_io_set_cmpl(io, &meta_atom_req, NULL,
				metadata_io_read_i_atomic_end);
		result = ocf_io_set_data(io, data, 0);
		if (result) {
			ocf_io_put(io);
			break;
		}

		/* Submit IO */
		ocf_dobj_submit_metadata(io);
		ocf_io_put(io);

		/* Wait for completion of IO */
		env_completion_wait(&meta_atom_req.complete);

		/* Check for error */
		if (meta_atom_req.error) {
			result = meta_atom_req.error;
			break;
		}

		/* Hand the sectors just read to the caller's handler */
		result |= hndl(cache, i, curr_count, data);
		if (result)
			break;

		count -= curr_count;

		/* Yield the CPU periodically during long scans */
		OCF_COND_RESCHED(step, 128);
	}

	/* Memory free */
	ctx_data_free(cache->owner, data);

	return result;
}
/*
 * Resubmit a deferred metadata write request (invoked via
 * meta_restart_if from the metadata updater). Re-fills the data buffer
 * with the latest metadata under the metadata read lock, then issues
 * the cache-device write. Failures are reported through
 * metadata_io_write_i_end_asynch().
 */
static int ocf_restart_meta_io(struct ocf_request *req)
{
	struct ocf_io *io;
	struct metadata_io_request *meta_io_req;
	struct ocf_cache *cache;
	int i;
	int ret;

	cache = req->cache;
	meta_io_req = req->priv;

	/* Fill with the latest metadata. */
	OCF_METADATA_LOCK_RD();
	for (i = 0; i < meta_io_req->count; i++) {
		meta_io_req->on_meta_fill(cache, meta_io_req->data,
				meta_io_req->page + i, meta_io_req->context);
	}
	OCF_METADATA_UNLOCK_RD();

	io = ocf_new_cache_io(cache);
	if (!io) {
		/* Complete the request with an error instead of failing
		 * the caller */
		metadata_io_write_i_end_asynch(meta_io_req, -ENOMEM);
		return 0;
	}

	/* Setup IO */
	ocf_io_configure(io,
			PAGES_TO_BYTES(meta_io_req->page),
			PAGES_TO_BYTES(meta_io_req->count),
			OCF_WRITE, 0, 0);

	ocf_io_set_default_cmpl(io, meta_io_req,
			metadata_io_write_i_end_asynch);
	ret = ocf_io_set_data(io, meta_io_req->data, 0);
	if (ret) {
		ocf_io_put(io);
		metadata_io_write_i_end_asynch(meta_io_req, ret);
		return ret;
	}
	ocf_dobj_submit_io(io);

	return 0;
}
/*
 * Iterative asynchronous write callback
 *
 * Two reference counters govern the teardown: the per-sub-request
 * req_remaining, and the parent request's req_remaining which fires the
 * user completion once all sub-requests are done. Each finished
 * sub-request kicks the metadata updater so deferred overlapping
 * requests can proceed.
 */
static void metadata_io_write_i_end_asynch(void *private_data, int error)
{
	struct metadata_io_request *request = (private_data);
	struct metadata_io_request_asynch *a_req;
	struct ocf_cache *cache;

	OCF_CHECK_NULL(request);
	cache = request->cache;

	a_req = request->asynch;
	OCF_CHECK_NULL(a_req);
	OCF_CHECK_NULL(a_req->on_complete);

	if (error) {
		request->error |= error;
		request->asynch->error |= error;
	}

	/* More completions pending for this sub-request */
	if (env_atomic_dec_return(&request->req_remaining))
		return;

	OCF_DEBUG_PARAM(cache, "Page = %u", request->page);

	ctx_data_free(cache->owner, request->data);
	request->data = NULL;

	if (env_atomic_dec_return(&a_req->req_remaining)) {
		/* Other sub-requests still in flight: mark this one done
		 * and let the updater process deferred requests */
		env_atomic_set(&request->finished, 1);
		ocf_metadata_updater_kick(cache);
		return;
	}

	OCF_DEBUG_MSG(cache, "Asynchronous IO completed");

	/* All IOs have been finished, call IO end callback */
	a_req->on_complete(request->cache, a_req->context, request->error);

	/*
	 * If it's the last request, we mark it as finished
	 * after calling IO end callback
	 */
	env_atomic_set(&request->finished, 1);
	ocf_metadata_updater_kick(cache);
}
/*
 * Record @error on both the parent request and sub-request @i, and
 * release the sub-request's data buffer.
 */
static void metadata_io_req_error(struct ocf_cache *cache,
		struct metadata_io_request_asynch *a_req,
		uint32_t i, int error)
{
	struct metadata_io_request *req = &a_req->reqs[i];

	a_req->error |= error;
	req->error |= error;
	req->count = 0;

	if (req->data)
		ctx_data_free(cache->owner, req->data);
	req->data = NULL;
}
/*
 * Iterative write request asynchronously
 *
 * Splits a @count-page write starting at @page into sub-requests of at
 * most metadata_io_max_page() pages. Each sub-request is submitted
 * immediately unless it overlaps an in-flight metadata write, in which
 * case it is deferred to the metadata updater (resubmitted via
 * meta_restart_if). @compl_hndl is invoked exactly once when all
 * sub-requests have completed (or on early failure).
 */
int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t fill_hndl,
		ocf_metadata_io_hndl_on_write_t compl_hndl)
{
	uint32_t curr_count, written;
	uint32_t max_count = metadata_io_max_page(cache);
	uint32_t io_count = DIV_ROUND_UP(count, max_count);
	uint32_t i, i_fill;
	int error = 0, ret;
	struct ocf_io *io;

	/* Allocation and initialization of asynchronous metadata IO request */
	struct metadata_io_request_asynch *a_req;

	if (count == 0)
		return 0;

	a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO);
	if (!a_req)
		return -OCF_ERR_NO_MEM;

	/* req_remaining gates the completion callback; req_active gates
	 * freeing the request memory (see the end_asynch handler) */
	env_atomic_set(&a_req->req_remaining, io_count);
	env_atomic_set(&a_req->req_active, io_count);
	a_req->on_complete = compl_hndl;
	a_req->context = context;
	a_req->page = page;

	/* Allocate particular requests and initialize them  */
	OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]),
			io_count, &a_req->reqs_limit);
	if (!a_req->reqs) {
		env_free(a_req);
		ocf_cache_log(cache, log_warn,
				"No memory during metadata IO\n");
		return -OCF_ERR_NO_MEM;
	}

	/* IO Requests initialization */
	for (i = 0; i < io_count; i++) {
		env_atomic_set(&(a_req->reqs[i].req_remaining), 1);
		env_atomic_set(&(a_req->reqs[i].finished), 0);
		a_req->reqs[i].asynch = a_req;
	}

	OCF_DEBUG_PARAM(cache, "IO count = %u", io_count);

	i = 0;
	written = 0;
	while (count) {
		/* Get pages count of this IO iteration */
		if (count > max_count)
			curr_count = max_count;
		else
			curr_count = count;

		/* Fill request */
		a_req->reqs[i].cache = cache;
		a_req->reqs[i].context = context;
		a_req->reqs[i].page = page + written;
		a_req->reqs[i].count = curr_count;
		a_req->reqs[i].on_meta_fill = fill_hndl;
		a_req->reqs[i].fl_req.io_if = &meta_restart_if;
		a_req->reqs[i].fl_req.io_queue = queue;
		a_req->reqs[i].fl_req.cache = cache;
		a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
		a_req->reqs[i].fl_req.info.internal = true;

		/*
		 * We don't want to allocate a map for this request in
		 * threads.
		 */
		a_req->reqs[i].fl_req.map = LIST_POISON1;

		INIT_LIST_HEAD(&a_req->reqs[i].list);

		a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count);
		if (!a_req->reqs[i].data) {
			error = -OCF_ERR_NO_MEM;
			metadata_io_req_error(cache, a_req, i, error);
			break;
		}

		/* Issue IO if it is not overlapping with anything else */
		ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]);
		if (ret == 0) {
			/* Allocate new IO */
			io = ocf_new_cache_io(cache);
			if (!io) {
				error = -OCF_ERR_NO_MEM;
				metadata_io_req_error(cache, a_req, i, error);
				break;
			}

			/* Fill the data buffer page by page */
			for (i_fill = 0; i_fill < curr_count; i_fill++) {
				fill_hndl(cache, a_req->reqs[i].data,
						page + written + i_fill,
						context);
			}

			/* Setup IO */
			ocf_io_configure(io,
					PAGES_TO_BYTES(a_req->reqs[i].page),
					PAGES_TO_BYTES(a_req->reqs[i].count),
					OCF_WRITE, 0, 0);

			ocf_io_set_default_cmpl(io, &a_req->reqs[i],
					metadata_io_write_i_end_asynch);
			error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
			if (error) {
				ocf_io_put(io);
				metadata_io_req_error(cache, a_req, i, error);
				break;
			}

			ocf_dobj_submit_io(io);
		}

		count -= curr_count;
		written += curr_count;
		i++;
	}

	if (error == 0) {
		/* No error, return 0 that indicates operation successful */
		return 0;
	}

	OCF_DEBUG_MSG(cache, "ERROR");

	if (i == 0) {
		/*
		 * If no requests were submitted, we just call completion
		 * callback, free memory and return error.
		 */
		compl_hndl(cache, context, error);

		OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
		env_free(a_req);

		return error;
	}

	/*
	 * Decrement total remaining requests by the IOs that were not
	 * triggered. If we reached zero, we need to call completion callback.
	 */
	if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0)
		compl_hndl(cache, context, error);

	/*
	 * Decrement total active requests by the IOs that were not
	 * triggered. If we reached zero, we need to free memory.
	 */
	if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) {
		OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
		env_free(a_req);
	}

	return error;
}
/* Initialize the metadata IO service (delegates to the metadata
 * updater initialization). Returns 0 on success. */
int ocf_metadata_io_init(ocf_cache_t cache)
{
	return ocf_metadata_updater_init(cache);
}
/* Deinitialize the metadata IO service (stops the metadata updater). */
void ocf_metadata_io_deinit(ocf_cache_t cache)
{
	ocf_metadata_updater_stop(cache);
}
/*
 * Completion handler for synchronous iterative metadata IO
 * (metadata_io()). For reads, hands each completed page to the user
 * handler; always frees the IO data buffer, then signals the waiting
 * submitter when the last outstanding IO finishes.
 */
static void metadata_io_end(struct ocf_io *io, int error)
{
	struct metadata_io *mio = io->priv1;
	ctx_data_t *data = ocf_io_get_data(io);
	uint32_t page = BYTES_TO_PAGES(io->addr);
	uint32_t count = BYTES_TO_PAGES(io->bytes);
	struct ocf_cache *cache = mio->cache;
	uint32_t i = 0;

	if (error) {
		mio->error |= error;
		goto out;
	}

	/* On read, pass every page just read to the handler */
	for (i = 0; mio->dir == OCF_READ && i < count; i++) {
		mio->error |= mio->hndl_fn(cache, data, page + i,
				mio->hndl_cntx);
	}

out:
	ctx_data_free(cache->owner, data);
	ocf_io_put(io);

	if (env_atomic_dec_return(&mio->rq_remaining))
		return;

	env_completion_complete(&mio->completion);
}
/*
 * Allocate and submit one metadata IO covering @count pages at offset
 * @written from mio->page. For writes, the data buffer is filled via
 * mio->hndl_fn before submission. On failure, resources are released
 * via the goto-cleanup chain, the error is recorded in mio->error and
 * returned.
 */
static int metadata_submit_io(
		struct ocf_cache *cache,
		struct metadata_io *mio,
		uint32_t count,
		uint32_t written)
{
	ctx_data_t *data;
	struct ocf_io *io;
	int err;
	int i;

	/* Allocate IO */
	io = ocf_new_cache_io(cache);
	if (!io) {
		err = -ENOMEM;
		goto error;
	}

	/* Allocate data buffer for this IO */
	data = ctx_data_alloc(cache->owner, count);
	if (!data) {
		err = -ENOMEM;
		goto put_io;
	}

	/* Fill data */
	for (i = 0; mio->dir == OCF_WRITE && i < count; i++) {
		err = mio->hndl_fn(cache, data,
				mio->page + written + i, mio->hndl_cntx);
		if (err)
			goto free_data;
	}

	/* Setup IO */
	ocf_io_configure(io,
			PAGES_TO_BYTES(mio->page + written),
			PAGES_TO_BYTES(count),
			mio->dir, 0, 0);
	ocf_io_set_cmpl(io, mio, NULL, metadata_io_end);
	err = ocf_io_set_data(io, data, 0);
	if (err)
		goto free_data;

	/* Submit IO */
	env_atomic_inc(&mio->rq_remaining);
	ocf_dobj_submit_io(io);

	return 0;

free_data:
	ctx_data_free(cache->owner, data);
put_io:
	ocf_io_put(io);
error:
	mio->error = err;
	return err;
}
/*
 * Drive a synchronous iterative metadata IO described by @mio: submit
 * page-bounded sub-IOs until the whole range is covered, then wait for
 * all completions and return the accumulated error status.
 */
static int metadata_io(struct metadata_io *mio)
{
	uint32_t max_count = metadata_io_max_page(mio->cache);
	uint32_t this_count, written = 0;
	uint32_t count = mio->count;
	unsigned char step = 0;
	int err;
	struct ocf_cache *cache = mio->cache;

	/* Check direction value correctness */
	switch (mio->dir) {
	case OCF_WRITE:
	case OCF_READ:
		break;
	default:
		return -EINVAL;
	}

	/* Initial reference; balanced by the dec after the loop so the
	 * completion fires even if no IO was submitted */
	env_atomic_set(&mio->rq_remaining, 1);
	env_completion_init(&mio->completion);

	while (count) {
		this_count = MIN(count, max_count);

		err = metadata_submit_io(cache, mio, this_count, written);
		if (err)
			break;

		/* Update counters */
		count -= this_count;
		written += this_count;

		OCF_COND_RESCHED(step, 128);
	}

	if (env_atomic_dec_return(&mio->rq_remaining) == 0)
		env_completion_complete(&mio->completion);

	/* Wait for all IO to be finished */
	env_completion_wait(&mio->completion);

	return mio->error;
}
/*
 * Synchronous iterative write of @count metadata pages starting at
 * @page; @hndl_fn fills each page before it is written.
 */
int metadata_io_write_i(struct ocf_cache *cache,
		uint32_t page, uint32_t count,
		ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
	struct metadata_io mio = { 0 };

	mio.dir = OCF_WRITE;
	mio.cache = cache;
	mio.page = page;
	mio.count = count;
	mio.hndl_fn = hndl_fn;
	mio.hndl_cntx = hndl_cntx;

	return metadata_io(&mio);
}
/*
 * Synchronous iterative read of @count metadata pages starting at
 * @page; @hndl_fn is invoked with each page read.
 */
int metadata_io_read_i(struct ocf_cache *cache,
		uint32_t page, uint32_t count,
		ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
	struct metadata_io mio = { 0 };

	mio.dir = OCF_READ;
	mio.cache = cache;
	mio.page = page;
	mio.count = count;
	mio.hndl_fn = hndl_fn;
	mio.hndl_cntx = hndl_cntx;

	return metadata_io(&mio);
}
/*
 * Fill callback for metadata_io_write(): copy one page from the
 * caller-provided buffer (passed as @context) into @data.
 */
static int metadata_io_write_fill(struct ocf_cache *cache,
		ctx_data_t *data, uint32_t page, void *context)
{
	ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE);

	return 0;
}
/*
 * Write request
 *
 * Synchronously write a single metadata page @page from buffer @data.
 */
int metadata_io_write(struct ocf_cache *cache,
		void *data, uint32_t page)
{
	struct metadata_io mio = { 0 };

	mio.dir = OCF_WRITE;
	mio.cache = cache;
	mio.page = page;
	mio.count = 1;
	mio.hndl_fn = metadata_io_write_fill;
	mio.hndl_cntx = data;

	return metadata_io(&mio);
}

188
src/metadata/metadata_io.h Normal file
View File

@@ -0,0 +1,188 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __METADATA_IO_H__
#define __METADATA_IO_H__

/**
 * @file metadata_io.h
 * @brief Metadata IO utilities
 */

/**
 * @brief Metadata IO event
 *
 * The client of metadata IO service is informed through this event:
 * - on completion of read from cache device
 * - on fill data which will be written into cache device
 *
 * @param data[in,out] Environment data for read or write IO
 * @param page[in] Page which is issued
 * @param context[in] context caller
 *
 * @retval 0 Success
 * @retval Non-zero Error which will be finally returned to the caller
 */
typedef int (*ocf_metadata_io_event_t)(struct ocf_cache *cache,
		ctx_data_t *data, uint32_t page, void *context);

/**
 * @brief Metadata write end callback
 *
 * @param cache - Cache instance
 * @param context - Read context
 * @param error - error
 * @param page - page that was written
 */
typedef void (*ocf_metadata_io_hndl_on_write_t)(struct ocf_cache *cache,
		void *context, int error);

struct metadata_io_request_asynch;

/*
 * IO request context (one sub-request of an asynchronous iterative
 * metadata write)
 */
struct metadata_io_request {
	struct ocf_cache *cache;
	void *context;
	uint32_t page;		/* first page covered by this sub-request */
	uint32_t count;		/* number of pages covered */
	ocf_metadata_io_event_t on_meta_fill;
	env_atomic req_remaining;
	ctx_data_t *data;
	env_completion completion;
	int error;
	struct metadata_io_request_asynch *asynch;	/* parent request */
	env_atomic finished;
	struct ocf_request fl_req;	/* used to defer to the updater */
	struct list_head list;
};

/*
 * IO request context (synchronous atomic metadata read)
 */
struct metadata_io_request_atomic {
	env_completion complete;
	int error;
};

/*
 * Context of a synchronous iterative metadata IO (metadata_io_read_i /
 * metadata_io_write_i)
 */
struct metadata_io {
	int error;
	int dir;
	struct ocf_cache *cache;
	uint32_t page;
	uint32_t count;
	env_completion completion;
	env_atomic rq_remaining;
	ocf_metadata_io_event_t hndl_fn;
	void *hndl_cntx;
};

/*
 * Asynchronous IO request context (parent of metadata_io_request
 * sub-requests)
 */
struct metadata_io_request_asynch {
	struct ocf_cache *cache;
	struct metadata_io_request *reqs;
	void *context;
	int error;
	size_t reqs_limit;
	env_atomic req_remaining;	/* gates the completion callback */
	env_atomic req_active;		/* gates freeing of this struct */
	uint32_t page;
	ocf_metadata_io_hndl_on_write_t on_complete;
};

/**
 * @brief Metadata read end callback
 *
 * @param cache Cache instance
 * @param sector_addr Begin sector of metadata
 * @param sector_no Number of sectors
 * @param data Data environment buffer with atomic metadata
 *
 * @retval 0 Success
 * @retval Non-zero Error which will be finally returned to the caller
 */
typedef int (*ocf_metadata_atomic_io_event_t)(
		struct ocf_cache *cache, uint64_t sector_addr,
		uint32_t sector_no, ctx_data_t *data);

/**
 * @brief Write page request
 *
 * @param cache - Cache instance
 * @param data - Data to be written for specified page
 * @param page - Page of SSD (cache device) where data has to be placed
 * @return 0 - No errors, otherwise error occurred
 */
int metadata_io_write(struct ocf_cache *cache,
		void *data, uint32_t page);

int metadata_io_read_i_atomic(struct ocf_cache *cache,
		ocf_metadata_atomic_io_event_t hndl);

/**
 * @brief Iterative pages write
 *
 * @param cache - Cache instance
 * @param page - Start page of SSD (cache device) where data will be written
 * @param count - Count of pages to be processed
 * @param hndl_fn - Fill callback is called to fill each page with data
 * @param hndl_cntx - Caller context which is passed on fill callback request
 *
 * @return 0 - No errors, otherwise error occurred
 */
int metadata_io_write_i(struct ocf_cache *cache,
		uint32_t page, uint32_t count,
		ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);

/**
 * @brief Iterative pages read
 *
 * @param cache - Cache instance
 * @param page - Start page of SSD (cache device) where data will be read
 * @param count - Count of pages to be processed
 * @param hndl_fn - Callback function is called on each page read completion
 * @param hndl_cntx - Caller context passed during handle function call
 *
 * @return 0 - No errors, otherwise error occurred
 */
int metadata_io_read_i(struct ocf_cache *cache,
		uint32_t page, uint32_t count,
		ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);

/**
 * @brief Iterative asynchronous pages write
 *
 * @param cache - Cache instance
 * @param context - Read context
 * @param page - Start page of SSD (cache device) where data will be written
 * @param count - Count of pages to be processed
 * @param fill - Fill callback
 * @param complete - All IOs completed callback
 *
 * @return 0 - No errors, otherwise error occurred
 */
int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t fill_hndl,
		ocf_metadata_io_hndl_on_write_t compl_hndl);

/**
 * Function for initializing metadata io.
 */
int ocf_metadata_io_init(ocf_cache_t cache);

/**
 * Function for deinitializing metadata io.
 */
void ocf_metadata_io_deinit(ocf_cache_t cache);

#endif /* __METADATA_IO_H__ */

View File

@@ -0,0 +1,126 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_cache_line.h"
/*
 * Decide whether @cache_line matches the (core, core-line range)
 * filter. With a valid @core_id the line must be mapped to that core
 * and fall inside [start_line, end_line]; with OCF_CORE_ID_INVALID any
 * mapped line matches.
 */
static bool _is_cache_line_acting(struct ocf_cache *cache,
		uint32_t cache_line, ocf_core_id_t core_id,
		uint64_t start_line, uint64_t end_line)
{
	ocf_core_id_t mapped_core;
	uint64_t mapped_line;

	ocf_metadata_get_core_info(cache, cache_line,
			&mapped_core, &mapped_line);

	if (core_id == OCF_CORE_ID_INVALID)
		return mapped_core != OCF_CORE_ID_INVALID;

	return mapped_core == core_id &&
			mapped_line >= start_line &&
			mapped_line <= end_line;
}
/*
* Iterates over cache lines that belong to the core device with
* core ID = core_id whose core byte addresses are in the range
* [start_byte, end_byte] and applies actor(cache, cache_line) to all
* matching cache lines
*
* set partition_id to PARTITION_INVALID to not care about partition_id
*
* METADATA lock must be held before calling this function
*/
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor)
{
uint32_t step = 0;
ocf_cache_line_t i, next_i;
uint64_t start_line, end_line;
int ret = 0;
start_line = ocf_bytes_2_lines(cache, start_byte);
end_line = ocf_bytes_2_lines(cache, end_byte);
if (part_id != PARTITION_INVALID) {
for (i = cache->user_parts[part_id].runtime->head;
i != cache->device->collision_table_entries;
i = next_i) {
next_i = ocf_metadata_get_partition_next(cache, i);
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
ret = -EAGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
} else {
for (i = 0; i < cache->device->collision_table_entries; ++i) {
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
ret = -EAGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
}
return ret;
}
/* the caller must hold the relevant cache block concurrency reader lock
 * and the metadata lock
 *
 * Detach @cache_line from its collision and partition lists and return
 * it to the free list.
 */
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
		uint32_t cache_line)
{
	ocf_part_id_t partition_id =
			ocf_metadata_get_partition_id(cache, cache_line);

	ocf_metadata_remove_from_collision(cache, cache_line, partition_id);

	ocf_metadata_remove_from_partition(cache, partition_id, cache_line);

	ocf_metadata_add_to_free_list(cache, cache_line);
}
/* Actor for ocf_metadata_sparse_range(): invalidate every sector of
 * @cache_line without flushing, then clear its dirty status. */
static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
		uint32_t cache_line)
{
	set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache),
			cache_line);

	/*
	 * This is especially for removing inactive core
	 */
	metadata_clear_dirty(cache, cache_line);
}
/* caller must hold metadata lock
 * set core_id to -1 to clean the whole cache device
 *
 * Returns -EAGAIN if some matching cache lines were in use and could
 * not be sparsed (see ocf_metadata_actor), 0 otherwise.
 */
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
		uint64_t start_byte, uint64_t end_byte)
{
	return ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
			start_byte, end_byte, _ocf_metadata_sparse_cache_line);
}

View File

@@ -0,0 +1,30 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __METADATA_MISC_H__
#define __METADATA_MISC_H__

/* Map a (core line number, core id) pair onto a hash table bucket.
 * Multiplying by (core_id + 1) differentiates lines of different cores
 * that share the same core line number. */
static inline ocf_cache_line_t ocf_metadata_hash_func(ocf_cache_t cache,
		uint64_t cache_line_num, ocf_core_id_t core_id)
{
	return (ocf_cache_line_t) ((cache_line_num * (core_id + 1)) %
			cache->device->hash_table_entries);
}

/* Detach @cache_line from collision/partition lists and put it on the
 * free list. Caller must hold the cache line reader lock and the
 * metadata lock. */
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
		ocf_cache_line_t cache_line);

/* Invalidate all cache lines of @core_id in byte range
 * [start_byte, end_byte]; core_id == -1 means the whole cache device.
 * Caller must hold the metadata lock. */
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
		uint64_t start_byte, uint64_t end_byte);

typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
		ocf_cache_line_t cache_line);

/* Apply @actor to each matching cache line; returns -EAGAIN if any
 * matching line was in use. Caller must hold the metadata lock. */
int ocf_metadata_actor(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_core_id_t core_id,
		uint64_t start_byte, uint64_t end_byte,
		ocf_metadata_actor_t actor);

#endif /* __METADATA_MISC_H__ */

View File

@@ -0,0 +1,227 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_part.h"
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void update_partition_head(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line)
{
	cache->user_parts[part_id].runtime->head = line;
}
/* Unlink @cline from the global free list and mark it as belonging to
 * no partition. The value collision_table_entries doubles as the
 * "no node" list terminator. */
void ocf_metadata_remove_from_free_list(struct ocf_cache *cache,
		ocf_cache_line_t cline)
{
	struct ocf_part *free_list = cache->device->freelist_part;
	int is_head, is_tail;
	ocf_part_id_t invalid_part_id = PARTITION_INVALID;
	ocf_cache_line_t prev, next;
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;

	ENV_BUG_ON(cline >= line_entries);

	/* Get Partition info */
	ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev);

	/* Find out if this node is Partition _head_ */
	is_head = (prev == line_entries);
	is_tail = (next == line_entries);

	/* Case 1: If we are head and there is only one node. So unlink node
	 * and set that there is no node left in the list.
	 */
	if (is_head && (free_list->curr_size == 1)) {
		ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
				line_entries, line_entries);
		free_list->head = line_entries;
		free_list->tail = line_entries;
	} else if (is_head) {
		/* Case 2: else if this collision_index is partition list head,
		 * but many nodes, update head and return
		 */
		ENV_BUG_ON(next >= line_entries);

		free_list->head = next;
		ocf_metadata_set_partition_prev(cache, next, line_entries);
		ocf_metadata_set_partition_next(cache, cline, line_entries);
	} else if (is_tail) {
		/* Case 3: else if this cline is partition list tail */
		ENV_BUG_ON(prev >= line_entries);

		free_list->tail = prev;
		ocf_metadata_set_partition_prev(cache, cline, line_entries);
		ocf_metadata_set_partition_next(cache, prev, line_entries);
	} else {
		/* Case 4: else this collision_index is a middle node.
		 * There is no change to the head and the tail pointers.
		 */
		ENV_BUG_ON(next >= line_entries || prev >= line_entries);

		/* Update prev and next nodes */
		ocf_metadata_set_partition_prev(cache, next, prev);
		ocf_metadata_set_partition_next(cache, prev, next);

		/* Update the given node */
		ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
				line_entries, line_entries);
	}

	free_list->curr_size--;
}
/* Append @line to the tail of the global free list and mark it as
 * belonging to no partition. */
void ocf_metadata_add_to_free_list(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	struct ocf_part *free_list = cache->device->freelist_part;
	ocf_cache_line_t tail;
	/* collision_table_entries doubles as the list terminator value */
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
	ocf_part_id_t invalid_part_id = PARTITION_INVALID;

	ENV_BUG_ON(line >= line_entries);

	if (free_list->curr_size == 0) {
		/* List was empty: @line becomes both head and tail */
		free_list->head = line;
		free_list->tail = line;

		ocf_metadata_set_partition_info(cache, line, invalid_part_id,
				line_entries, line_entries);
	} else {
		tail = free_list->tail;

		ENV_BUG_ON(tail >= line_entries);

		ocf_metadata_set_partition_info(cache, line, invalid_part_id,
				line_entries, tail);
		ocf_metadata_set_partition_next(cache, tail, line);

		free_list->tail = line;
	}

	free_list->curr_size++;
}
/* Adds the given collision_index to the _head_ of the Partition list */
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line)
{
	ocf_cache_line_t line_head;
	/* collision_table_entries doubles as the list terminator value */
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[part_id];

	ENV_BUG_ON(!(line < line_entries));

	/* First node to be added. */
	if (!part->runtime->curr_size) {

		update_partition_head(cache, part_id, line);
		ocf_metadata_set_partition_info(cache, line, part_id,
				line_entries, line_entries);

		if (!ocf_part_is_valid(part)) {
			/* Partition becomes non-empty but is not valid -
			 * update list of partitions
			 */
			ocf_part_sort(cache);
		}

	} else {
		/* Not the first node to be added. */
		line_head = part->runtime->head;

		ENV_BUG_ON(!(line_head < line_entries));

		ocf_metadata_set_partition_info(cache, line, part_id,
				line_head, line_entries);

		ocf_metadata_set_partition_prev(cache, line_head, line);

		update_partition_head(cache, part_id, line);
	}

	part->runtime->curr_size++;
}
/* Deletes the node with the given collision_index from the Partition list */
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line)
{
	int is_head, is_tail;
	ocf_cache_line_t prev_line, next_line;
	/* collision_table_entries doubles as the list terminator value */
	uint32_t line_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[part_id];

	ENV_BUG_ON(!(line < line_entries));

	/* Get Partition info */
	ocf_metadata_get_partition_info(cache, line, NULL,
			&next_line, &prev_line);

	/* Find out if this node is Partition _head_ */
	is_head = (prev_line == line_entries);
	is_tail = (next_line == line_entries);

	/* Case 1: If we are head and there is only one node. So unlink node
	 * and set that there is no node left in the list.
	 */
	if (is_head && (part->runtime->curr_size == 1)) {
		ocf_metadata_set_partition_info(cache, line,
				part_id, line_entries, line_entries);

		update_partition_head(cache, part_id, line_entries);

		if (!ocf_part_is_valid(part)) {
			/* Partition becomes empty and is not valid -
			 * update list of partitions
			 */
			ocf_part_sort(cache);
		}

	} else if (is_head) {
		/* Case 2: else if this collision_index is partition list head,
		 * but many nodes, update head and return
		 */
		ENV_BUG_ON(!(next_line < line_entries));
		update_partition_head(cache, part_id, next_line);

		ocf_metadata_set_partition_next(cache, line, line_entries);

		ocf_metadata_set_partition_prev(cache, next_line,
				line_entries);
	} else if (is_tail) {
		/* Case 3: else if this collision_index is partition list tail
		 */
		ENV_BUG_ON(!(prev_line < line_entries));

		ocf_metadata_set_partition_prev(cache, line, line_entries);

		ocf_metadata_set_partition_next(cache, prev_line,
				line_entries);
	} else {
		/* Case 4: else this collision_index is a middle node.
		 * There is no change to the head and the tail pointers.
		 */
		ENV_BUG_ON(!(next_line < line_entries));
		ENV_BUG_ON(!(prev_line < line_entries));

		/* Update prev and next nodes */
		ocf_metadata_set_partition_next(cache, prev_line, next_line);
		ocf_metadata_set_partition_prev(cache, next_line, prev_line);

		/* Update the given node */
		ocf_metadata_set_partition_info(cache, line, part_id,
				line_entries, line_entries);
	}

	part->runtime->curr_size--;
}

View File

@@ -0,0 +1,78 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_PARTITION_H__
#define __METADATA_PARTITION_H__

#include "metadata_partition_structs.h"
#include "../ocf_cache_priv.h"

/* Id of the default partition and sentinel values for "no partition" /
 * "unlimited partition size" */
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1)

/* Get the id of the partition the given cache line belongs to */
static inline ocf_part_id_t ocf_metadata_get_partition_id(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_partition_id(cache, line);
}

/* Get the next cache line on the partition list
 * (collision_table_entries acts as the list terminator) */
static inline ocf_cache_line_t ocf_metadata_get_partition_next(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_partition_next(cache, line);
}

/* Get the previous cache line on the partition list */
static inline ocf_cache_line_t ocf_metadata_get_partition_prev(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	return cache->metadata.iface.get_partition_prev(cache, line);
}

/* Read partition id and both list links for a cache line; part_id may be
 * NULL when the caller is not interested in it (see
 * ocf_metadata_remove_from_partition) */
static inline void ocf_metadata_get_partition_info(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
		ocf_cache_line_t *prev_line)
{
	cache->metadata.iface.get_partition_info(cache, line, part_id,
			next_line, prev_line);
}

/* Set the next link of a cache line on the partition list */
static inline void ocf_metadata_set_partition_next(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_cache_line_t next_line)
{
	cache->metadata.iface.set_partition_next(cache, line, next_line);
}

/* Set the previous link of a cache line on the partition list */
static inline void ocf_metadata_set_partition_prev(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_cache_line_t prev_line)
{
	cache->metadata.iface.set_partition_prev(cache, line, prev_line);
}

/* Set partition id and both list links of a cache line in one call */
static inline void ocf_metadata_set_partition_info(
		struct ocf_cache *cache, ocf_cache_line_t line,
		ocf_part_id_t part_id, ocf_cache_line_t next_line,
		ocf_cache_line_t prev_line)
{
	cache->metadata.iface.set_partition_info(cache, line, part_id,
			next_line, prev_line);
}

/* List maintenance: free list and per-partition cache line lists */
void ocf_metadata_add_to_free_list(struct ocf_cache *cache,
		ocf_cache_line_t cline);

void ocf_metadata_remove_from_free_list(struct ocf_cache *cache,
		ocf_cache_line_t cline);

void ocf_metadata_add_to_partition(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line);

void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line);

#endif /* __METADATA_PARTITION_H__ */

View File

@@ -0,0 +1,50 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_PARTITION_STRUCTS_H__
#define __METADATA_PARTITION_STRUCTS_H__

#include "../utils/utils_list.h"
#include "../cleaning/cleaning.h"
#include "../eviction/eviction.h"

/* Intrusive list descriptor; head/tail are collision indices and
 * collision_table_entries is used as the terminator (e.g. the free list) */
struct ocf_part {
	ocf_cache_line_t head;	/* first cache line on the list */
	ocf_cache_line_t tail;	/* last cache line on the list */
	uint32_t curr_size;	/* number of cache lines on the list */
};

/* Static configuration of a user partition (IO class) */
struct ocf_user_part_config {
	char name[OCF_IO_CLASS_NAME_MAX];
	/* NOTE(review): min_size/max_size units (cache lines vs percent)
	 * are not visible here - confirm against callers */
	uint32_t min_size;
	uint32_t max_size;
	int16_t priority;
	ocf_cache_mode_t cache_mode;
	struct {
		uint8_t valid : 1;
		uint8_t added : 1;
		uint8_t eviction : 1;
			/*!< This bit is set during partition sorting
			 * and means that eviction from this partition
			 * is allowed
			 */
	} flags;
};

/* Runtime (mutable) state of a user partition */
struct ocf_user_part_runtime {
	uint32_t curr_size;	/* current number of cache lines */
	uint32_t head;		/* head of the partition's cache line list */
	struct eviction_policy eviction;
	struct cleaning_policy cleaning;
};

/* A user partition: configuration, runtime state and its entry on the
 * list of valid partitions */
struct ocf_user_part {
	struct ocf_user_part_config *config;
	struct ocf_user_part_runtime *runtime;
	struct ocf_lst_entry lst_valid;
};

#endif /* __METADATA_PARTITION_STRUCTS_H__ */

609
src/metadata/metadata_raw.c Normal file
View File

@@ -0,0 +1,609 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_io.h"
#include "metadata_raw_atomic.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0
#if 1 == OCF_METADATA_RAW_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(log_info, "[Metadata][Raw] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/*******************************************************************************
* Common RAW Implementation
******************************************************************************/
/*
 * Sanity-check that a page number falls inside the RAW container's on-SSD
 * range; BUGs otherwise. Always returns true so it can be wrapped in
 * ENV_BUG_ON(!...) at call sites.
 */
static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
{
	ENV_BUG_ON(page < raw->ssd_pages_offset ||
			page >= raw->ssd_pages_offset + raw->ssd_pages);

	return true;
}
/*******************************************************************************
* RAW RAM Implementation
******************************************************************************/
#define _RAW_RAM_ADDR(raw, line) \
(raw->mem_pool + (((uint64_t)raw->entry_size * (line))))
#define _RAW_RAM_PAGE(raw, line) \
((line) / raw->entries_in_page)
#define _RAW_RAM_PAGE_SSD(raw, line) \
(raw->ssd_pages_offset + _RAW_RAM_PAGE(raw, line))
#define _RAW_RAM_ADDR_PAGE(raw, line) \
(_RAW_RAM_ADDR(raw, \
_RAW_RAM_PAGE(raw, line) * raw->entries_in_page))
#define _RAW_RAM_GET(raw, line, data) \
env_memcpy(data, raw->entry_size, _RAW_RAM_ADDR(raw, (line)), \
raw->entry_size)
#define _RAW_RAM_SET(raw, line, data) \
env_memcpy(_RAW_RAM_ADDR(raw, line), raw->entry_size, \
data, raw->entry_size)
/*
 * RAM Implementation - De-Initialize
 *
 * Releases the private memory pool, if one was allocated. Always succeeds.
 */
static int _raw_ram_deinit(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	OCF_DEBUG_TRACE(cache);

	if (!raw->mem_pool)
		return 0;

	env_vfree(raw->mem_pool);
	raw->mem_pool = NULL;

	return 0;
}
/*
 * RAM Implementation - Initialize
 *
 * Allocates a zeroed memory pool sized to hold the container's full
 * on-SSD footprint (one pool page per SSD page).
 */
static int _raw_ram_init(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	size_t pool_bytes;

	OCF_DEBUG_TRACE(cache);

	/* Widen before multiplying so the byte count cannot truncate */
	pool_bytes = (size_t)raw->ssd_pages * PAGE_SIZE;

	raw->mem_pool_limit = pool_bytes;
	raw->mem_pool = env_vzalloc(pool_bytes);

	return raw->mem_pool ? 0 : -ENOMEM;
}
/*
 * RAM Implementation - Size of
 *
 * Memory footprint of the container in bytes.
 */
static size_t _raw_ram_size_of(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	return (size_t)raw->ssd_pages * PAGE_SIZE;
}
/*
 * RAM Implementation - Size on SSD
 *
 * Number of cache-device pages occupied, rounded up to a 128 KiB boundary.
 */
static uint32_t _raw_ram_size_on_ssd(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	const size_t align_pages = 128 * KiB / PAGE_SIZE;

	return DIV_ROUND_UP(raw->ssd_pages, align_pages) * align_pages;
}
/*
 * RAM Implementation - Checksum
 *
 * CRC32 over the whole memory pool, computed page by page with periodic
 * voluntary rescheduling so long runs do not hog the CPU.
 */
static uint32_t _raw_ram_checksum(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	uint32_t crc = 0;
	uint32_t resched_step = 0;
	uint64_t page;

	for (page = 0; page < raw->ssd_pages; page++) {
		crc = env_crc32(crc, raw->mem_pool + PAGE_SIZE * page,
				PAGE_SIZE);
		OCF_COND_RESCHED(resched_step, 10000);
	}

	return crc;
}
/*
 * RAM Implementation - Get entry
 *
 * Copy one metadata entry out of the memory pool into caller's buffer.
 */
static int _raw_ram_get(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		void *data, uint32_t size)
{
	_raw_bug_on(raw, line, size);

	return _RAW_RAM_GET(raw, line, data);
}
/*
 * RAM Implementation - Read-only entry access
 *
 * Return a const pointer straight into the memory pool.
 */
static const void *_raw_ram_rd_access(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		uint32_t size)
{
	_raw_bug_on(raw, line, size);

	return _RAW_RAM_ADDR(raw, line);
}
/*
 * RAM Implementation - Writable entry access
 * (original comment said "Read only" - copy/paste slip)
 */
static void *_raw_ram_wr_access(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		uint32_t size)
{
	_raw_bug_on(raw, line, size);

	return _RAW_RAM_ADDR(raw, line);
}
/*
 * RAM Implementation - Set entry
 *
 * Copy caller's buffer into the memory pool slot for the given line.
 */
static int _raw_ram_set(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		void *data, uint32_t size)
{
	_raw_bug_on(raw, line, size);

	return _RAW_RAM_SET(raw, line, data);
}
/*
 * RAM Implementation - Flush one entry's backing page to SSD
 *
 * Writes the whole page containing the entry, not just the entry itself.
 */
static int _raw_ram_flush(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
	OCF_DEBUG_PARAM(cache, "Line = %u", line);
	OCF_DEBUG_PARAM(cache, "Page = %llu", _RAW_RAM_PAGE(raw, line));

	_raw_bug_on(raw, line, raw->entry_size);

	return metadata_io_write(cache, _RAW_RAM_ADDR_PAGE(raw, line),
			_RAW_RAM_PAGE_SSD(raw, line));
}
/*
 * RAM Implementation - Load all IO callback
 *
 * Per-page callback for metadata_io_read_i(): copies the used portion of
 * one SSD page into the memory pool and skips the trailing padding.
 */
static int _raw_ram_load_all_io(struct ocf_cache *cache,
		ctx_data_t *data, uint32_t page, void *context)
{
	struct ocf_metadata_raw *raw = context;
	uint32_t used = raw->entry_size * raw->entries_in_page;
	uint32_t page_in_raw;
	ocf_cache_line_t first_line;

	ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
	ENV_BUG_ON(used > PAGE_SIZE);

	page_in_raw = page - raw->ssd_pages_offset;
	first_line = page_in_raw * raw->entries_in_page;

	OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", first_line,
			page_in_raw);

	ctx_data_rd_check(cache->owner, _RAW_RAM_ADDR(raw, first_line),
			data, used);
	ctx_data_seek(cache->owner, data, ctx_data_seek_current,
			PAGE_SIZE - used);

	return 0;
}
/*
 * RAM Implementation - Load all metadata elements from SSD
 *
 * Synchronous iteration over the container's SSD pages; each page is
 * handled by _raw_ram_load_all_io().
 */
static int _raw_ram_load_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	OCF_DEBUG_TRACE(cache);

	return metadata_io_read_i(cache, raw->ssd_pages_offset,
			raw->ssd_pages, _raw_ram_load_all_io, raw);
}
/*
 * RAM Implementation - Flush-all IO callback - fill one page
 *
 * Writes the used portion of one page from the memory pool and zero-fills
 * the remainder of the page.
 */
static int _raw_ram_flush_all_fill(struct ocf_cache *cache,
		ctx_data_t *data, uint32_t page, void *context)
{
	struct ocf_metadata_raw *raw = context;
	uint32_t used = raw->entry_size * raw->entries_in_page;
	uint32_t page_in_raw;
	ocf_cache_line_t first_line;

	ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
	ENV_BUG_ON(used > PAGE_SIZE);

	page_in_raw = page - raw->ssd_pages_offset;
	first_line = page_in_raw * raw->entries_in_page;

	OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", first_line,
			page_in_raw);

	ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, first_line),
			used);
	ctx_data_zero_check(cache->owner, data, PAGE_SIZE - used);

	return 0;
}
/*
 * RAM Implementation - Flush all elements
 *
 * Synchronous write of every SSD page backing this container.
 */
static int _raw_ram_flush_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	OCF_DEBUG_TRACE(cache);

	return metadata_io_write_i(cache, raw->ssd_pages_offset,
			raw->ssd_pages, _raw_ram_flush_all_fill, raw);
}
/*
 * RAM Implementation - Mark to Flush
 *
 * Marks a mapped line of a request for metadata flush on clean/dirty
 * transitions; other state changes do not require flushing here.
 */
static void _raw_ram_flush_mark(struct ocf_cache *cache,
		struct ocf_request *rq, uint32_t map_idx, int to_state,
		uint8_t start, uint8_t stop)
{
	if (to_state != DIRTY && to_state != CLEAN)
		return;

	rq->map[map_idx].flush = true;
	rq->info.flush_metadata = true;
}
/*******************************************************************************
 * RAM RAM Implementation - Do Flush Asynchronously
 ******************************************************************************/
/* Context shared by all sub-IOs of one asynchronous metadata flush; freed
 * when flush_req_cnt drops to zero */
struct _raw_ram_flush_ctx {
	struct ocf_metadata_raw *raw;	/* container being flushed */
	struct ocf_request *rq;		/* request that triggered the flush */
	ocf_metadata_asynch_flush_hndl complete; /* completion callback */
	env_atomic flush_req_cnt;	/* outstanding sub-IO refcount */
	int error;			/* last error reported by a sub-IO */
};
/*
 * Completion handler for one asynchronous flush sub-IO.
 *
 * Records the error (if any), drops one reference on the shared context and,
 * on the last reference, propagates the error to the request, invokes the
 * caller's completion callback and frees the context.
 */
static void _raw_ram_flush_do_asynch_io_complete(struct ocf_cache *cache,
		void *context, int error)
{
	struct _raw_ram_flush_ctx *ctx = context;

	if (error) {
		ctx->error = error;
		ocf_metadata_error(cache);
	}

	/* References: one taken up-front by _raw_ram_flush_do_asynch() plus
	 * one per submitted IO; only the final dropper completes the flush */
	if (env_atomic_dec_return(&ctx->flush_req_cnt))
		return;

	OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");

	/* Call metadata flush completed call back */
	ctx->rq->error |= ctx->error;
	ctx->complete(ctx->rq, ctx->error);

	env_free(ctx);
}
/*
 * RAM Implementation - Asynchronous flush IO callback - fill one page
 *
 * Copies the used portion of a page from the memory pool into the IO data
 * buffer and zero-fills the rest of the page.
 */
static int _raw_ram_flush_do_asynch_fill(struct ocf_cache *cache,
		ctx_data_t *data, uint32_t page, void *context)
{
	struct _raw_ram_flush_ctx *ctx = context;
	struct ocf_metadata_raw *raw;
	uint64_t used;
	uint32_t page_in_raw;
	ocf_cache_line_t first_line;

	ENV_BUG_ON(!ctx);

	raw = ctx->raw;
	ENV_BUG_ON(!raw);

	used = raw->entry_size * raw->entries_in_page;
	ENV_BUG_ON(used > PAGE_SIZE);

	page_in_raw = page - raw->ssd_pages_offset;
	first_line = page_in_raw * raw->entries_in_page;

	OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", first_line,
			page_in_raw);

	ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, first_line),
			used);
	ctx_data_zero_check(cache->owner, data, PAGE_SIZE - used);

	return 0;
}
/*
 * Ascending comparator for uint32_t page numbers (env_sort callback,
 * qsort()-style contract).
 */
int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
{
	uint32_t lhs = *(const uint32_t *)item1;
	uint32_t rhs = *(const uint32_t *)item2;

	if (lhs == rhs)
		return 0;

	return (lhs > rhs) ? 1 : -1;
}
/*
 * Collect the SSD page number of every mapped line marked for flush into
 * pages_tab; *pages_to_flush receives the number of entries written
 * (duplicates are possible and handled later).
 */
static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
		uint32_t *pages_tab, struct ocf_metadata_raw *raw,
		int *pages_to_flush) {
	int count = 0;
	int idx;

	for (idx = 0; idx < rq->core_line_count; idx++) {
		struct ocf_map_info *map = &rq->map[idx];

		if (map->flush)
			pages_tab[count++] = _RAW_RAM_PAGE(raw, map->coll_idx);
	}

	*pages_to_flush = count;
}
/*
 * RAM Implementation - Do Flush Asynchronously
 *
 * Collects the SSD pages of all marked lines, sorts them, coalesces
 * consecutive page numbers (skipping duplicates) into contiguous runs and
 * submits one asynchronous write per run. The shared context holds one
 * extra reference for the duration of submission so it cannot be freed
 * by an early IO completion; that reference is dropped at the end.
 */
static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
		struct ocf_request *rq, struct ocf_metadata_raw *raw,
		ocf_end_t complete)
{
	int result = 0, i;
	uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
	uint32_t *pages_tab;
	int line_no = rq->core_line_count;
	int pages_to_flush;
	uint32_t start_page = 0;
	uint32_t count = 0;
	struct _raw_ram_flush_ctx *ctx;

	ENV_BUG_ON(!complete);

	OCF_DEBUG_TRACE(cache);

	if (!rq->info.flush_metadata) {
		/* Nothing to flush call flush callback */
		complete(rq, 0);
		return 0;
	}

	ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
	if (!ctx) {
		complete(rq, -ENOMEM);
		return -ENOMEM;
	}

	ctx->rq = rq;
	ctx->complete = complete;
	ctx->raw = raw;
	env_atomic_set(&ctx->flush_req_cnt, 1);

	/* Use the on-stack table for small requests, allocate otherwise */
	if (line_no <= MAX_STACK_TAB_SIZE) {
		pages_tab = __pages_tab;
	} else {
		pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
		if (!pages_tab) {
			env_free(ctx);
			complete(rq, -ENOMEM);
			return -ENOMEM;
		}
	}

	/* While sorting in progress keep request remaining equal to 1,
	 * to prevent freeing of asynchronous context
	 */

	__raw_ram_flush_do_asynch_add_pages(rq, pages_tab, raw,
			&pages_to_flush);

	env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
			_raw_ram_flush_do_page_cmp, NULL);

	i = 0;
	while (i < pages_to_flush) {
		start_page = pages_tab[i];
		count = 1;

		/* Grow the run over duplicates and consecutive pages */
		while (true) {
			if ((i + 1) >= pages_to_flush)
				break;

			if (pages_tab[i] == pages_tab[i + 1]) {
				/* duplicate page - skip, run unchanged */
				i++;
				continue;
			}

			if ((pages_tab[i] + 1) != pages_tab[i + 1])
				break;

			i++;
			count++;
		}

		/* One reference per submitted IO */
		env_atomic_inc(&ctx->flush_req_cnt);

		result |= metadata_io_write_i_asynch(cache, rq->io_queue, ctx,
				raw->ssd_pages_offset + start_page, count,
				_raw_ram_flush_do_asynch_fill,
				_raw_ram_flush_do_asynch_io_complete);

		if (result)
			break;

		i++;
	}

	/* Drop the initial reference; completes immediately if all IOs
	 * already finished (or none were submitted) */
	_raw_ram_flush_do_asynch_io_complete(cache, ctx, result);

	if (line_no > MAX_STACK_TAB_SIZE)
		env_free(pages_tab);

	return result;
}
/*******************************************************************************
* RAW Interfaces definitions
******************************************************************************/
#include "metadata_raw_dynamic.h"
#include "metadata_raw_volatile.h"
/* Dispatch table: one raw_iface implementation per RAW container type */
static const struct raw_iface IRAW[metadata_raw_type_max] = {
	/* RAM-resident metadata with full SSD flush/load support */
	[metadata_raw_type_ram] = {
		.init = _raw_ram_init,
		.deinit = _raw_ram_deinit,
		.size_of = _raw_ram_size_of,
		.size_on_ssd = _raw_ram_size_on_ssd,
		.checksum = _raw_ram_checksum,
		.get = _raw_ram_get,
		.set = _raw_ram_set,
		.rd_access = _raw_ram_rd_access,
		.wr_access = _raw_ram_wr_access,
		.flush = _raw_ram_flush,
		.load_all = _raw_ram_load_all,
		.flush_all = _raw_ram_flush_all,
		.flush_mark = _raw_ram_flush_mark,
		.flush_do_asynch = _raw_ram_flush_do_asynch,
	},
	/* On-demand allocated metadata (see metadata_raw_dynamic.h) */
	[metadata_raw_type_dynamic] = {
		.init = raw_dynamic_init,
		.deinit = raw_dynamic_deinit,
		.size_of = raw_dynamic_size_of,
		.size_on_ssd = raw_dynamic_size_on_ssd,
		.checksum = raw_dynamic_checksum,
		.get = raw_dynamic_get,
		.set = raw_dynamic_set,
		.rd_access = raw_dynamic_rd_access,
		.wr_access = raw_dynamic_wr_access,
		.flush = raw_dynamic_flush,
		.load_all = raw_dynamic_load_all,
		.flush_all = raw_dynamic_flush_all,
		.flush_mark = raw_dynamic_flush_mark,
		.flush_do_asynch = raw_dynamic_flush_do_asynch,
	},
	/* RAM storage reused, but flush/load to SSD are no-ops
	 * (see metadata_raw_volatile.h) */
	[metadata_raw_type_volatile] = {
		.init = _raw_ram_init,
		.deinit = _raw_ram_deinit,
		.size_of = _raw_ram_size_of,
		.size_on_ssd = raw_volatile_size_on_ssd,
		.checksum = raw_volatile_checksum,
		.get = _raw_ram_get,
		.set = _raw_ram_set,
		.rd_access = _raw_ram_rd_access,
		.wr_access = _raw_ram_wr_access,
		.flush = raw_volatile_flush,
		.load_all = raw_volatile_load_all,
		.flush_all = raw_volatile_flush_all,
		.flush_mark = raw_volatile_flush_mark,
		.flush_do_asynch = raw_volatile_flush_do_asynch,
	},
	/* RAM storage with atomic-device flush marking/submission */
	[metadata_raw_type_atomic] = {
		.init = _raw_ram_init,
		.deinit = _raw_ram_deinit,
		.size_of = _raw_ram_size_of,
		.size_on_ssd = _raw_ram_size_on_ssd,
		.checksum = _raw_ram_checksum,
		.get = _raw_ram_get,
		.set = _raw_ram_set,
		.rd_access = _raw_ram_rd_access,
		.wr_access = _raw_ram_wr_access,
		.flush = _raw_ram_flush,
		.load_all = _raw_ram_load_all,
		.flush_all = _raw_ram_flush_all,
		.flush_mark = raw_atomic_flush_mark,
		.flush_do_asynch = raw_atomic_flush_do_asynch,
	},
};
/*******************************************************************************
* RAW Top interface implementation
******************************************************************************/
/*
 * Bind the interface matching raw->raw_type and initialize the container.
 */
int ocf_metadata_raw_init(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
	ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);

	raw->iface = &IRAW[raw->raw_type];

	return raw->iface->init(cache, raw);
}
int ocf_metadata_raw_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
int result;
if (!raw->iface)
return 0;
result = raw->iface->deinit(cache, raw);
raw->iface = NULL;
return result;
}
/* Get the container's footprint on the cache device (in pages).
 * NOTE(review): dispatches through the IRAW table directly rather than
 * raw->iface, so it appears usable before ocf_metadata_raw_init() -
 * confirm with callers. */
size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache* cache,
		struct ocf_metadata_raw* raw)
{
	ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
	ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);

	return IRAW[raw->raw_type].size_on_ssd(cache, raw);
}

352
src/metadata/metadata_raw.h Normal file
View File

@@ -0,0 +1,352 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_H__
#define __METADATA_RAW_H__
/**
* @file metadata_raw.h
* @brief Metadata RAW container implementation
*/
/**
 * @brief Metadata raw type
 */
enum ocf_metadata_raw_type {
	/**
	 * @brief Default implementation with support of
	 * flushing to/landing from SSD
	 */
	metadata_raw_type_ram = 0,

	/**
	 * @brief Dynamic implementation, elements are allocated when first
	 * time called
	 */
	metadata_raw_type_dynamic,

	/**
	 * @brief This container does not flush metadata on SSD and does not
	 * support loading from SSD
	 */
	metadata_raw_type_volatile,

	/**
	 * @brief Implementation for atomic device used as cache
	 */
	metadata_raw_type_atomic,

	metadata_raw_type_max, /*!< MAX */

	metadata_raw_type_min = metadata_raw_type_ram /*!< MIN */
};
/**
 * @brief RAW instance descriptor
 */
struct ocf_metadata_raw {
	/**
	 * @name Metadata and RAW types
	 */
	enum ocf_metadata_segment metadata_segment; /*!< Metadata segment */
	enum ocf_metadata_raw_type raw_type; /*!< RAW implementation type */

	/**
	 * @name Metadata elements description
	 */
	uint32_t entry_size; /*!< Size of particular entry */
	uint32_t entries_in_page; /*!< Numbers of entries in one page*/
	uint64_t entries; /*!< Numbers of entries */

	/**
	 * @name Location on cache device description
	 */
	uint64_t ssd_pages_offset; /*!< SSD (Cache device) Page offset */
	uint64_t ssd_pages; /*!< Numbers of pages that are required */

	const struct raw_iface *iface; /*!< RAW container interface*/

	/**
	 * @name Private RAW elements
	 */
	void *mem_pool; /*!< Private memory pool*/

	size_t mem_pool_limit; /*!< Current memory pool size (limit) */

	void *priv; /*!< Private data - context */
};
/**
 * RAW container interface
 *
 * Virtual method table implemented by each RAW container type
 * (RAM, dynamic, volatile, atomic).
 */
struct raw_iface {
	/* Lifecycle: allocate / release the container's private storage */
	int (*init)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	int (*deinit)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	/* Memory footprint of the container, in bytes */
	size_t (*size_of)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	/**
	 * @brief Return size which metadata take on cache device
	 *
	 * @param cache Cache instance
	 * @param raw RAW container of metadata
	 *
	 * @return Number of pages (4 kiB) on cache device
	 */
	uint32_t (*size_on_ssd)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	/* Checksum of the container's content */
	uint32_t (*checksum)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	/* Copy a single entry out of / into the container */
	int (*get)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw, ocf_cache_line_t line,
			void *data, uint32_t size);

	int (*set)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw, ocf_cache_line_t line,
			void *data, uint32_t size);

	/* Direct read-only / writable access to a single entry */
	const void* (*rd_access)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw, ocf_cache_line_t line,
			uint32_t size);

	void* (*wr_access)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw,
			ocf_cache_line_t line, uint32_t size);

	/* Flush a single entry's backing page to the cache device */
	int (*flush)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw, ocf_cache_line_t line);

	/* Bulk load / flush of the whole container */
	int (*load_all)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	int (*flush_all)(struct ocf_cache *cache,
			struct ocf_metadata_raw *raw);

	/* Mark a request's mapped line for flushing on a state transition */
	void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
			uint32_t map_idx, int to_state, uint8_t start,
			uint8_t stop);

	/* Asynchronously flush all lines marked via flush_mark */
	int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *rq,
			struct ocf_metadata_raw *raw,
			ocf_metadata_asynch_flush_hndl complete);
};
/**
* @brief Initialize RAW instance
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
int ocf_metadata_raw_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/**
* @brief De-Initialize RAW instance
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
int ocf_metadata_raw_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/**
* @brief Get memory footprint
*
* @param cache Cache instance
* @param raw RAW descriptor
* @return Memory footprint
*/
static inline size_t ocf_metadata_raw_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
if (!raw->iface)
return 0;
return raw->iface->size_of(cache, raw);
}
/**
* @brief Get SSD footprint
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return Size on SSD
*/
size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache* cache,
struct ocf_metadata_raw* raw);
/**
* @brief Calculate metadata checksum
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @return Checksum
*/
static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache,
struct ocf_metadata_raw* raw)
{
return raw->iface->checksum(cache, raw);
}
/**
* @brief Get specified element of metadata
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be get
* @param data - Data where metadata entry will be copied into
* @param size - Size of data
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size)
{
return raw->iface->get(cache, raw, line, data, size);
}
/**
 * @brief Access specified element of metadata directly for write
 *
 * @param cache - Cache instance
 * @param raw - RAW descriptor
 * @param line - Cache line to be accessed
 * @param size - Size of the entry
 * @return Pointer to the accessed entry, NULL in case of error
 */
static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		uint32_t size)
{
	return raw->iface->wr_access(cache, raw, line, size);
}
/**
 * @brief Access specified element of metadata directly for read only
 *
 * @param cache - Cache instance
 * @param raw - RAW descriptor
 * @param line - Cache line to be accessed
 * @param size - Size of the entry
 * @return Const pointer to the accessed entry, NULL in case of error
 */
static inline const void *ocf_metadata_raw_rd_access(
		struct ocf_cache *cache, struct ocf_metadata_raw *raw,
		ocf_cache_line_t line, uint32_t size)
{
	return raw->iface->rd_access(cache, raw, line, size);
}
/**
 * @brief Set specified element of metadata
 *
 * @param cache - Cache instance
 * @param raw - RAW descriptor
 * @param line - Cache line to be set
 * @param data - Data that will be copied into metadata entry
 * @param size - Size of data
 * @return 0 - Operation success, otherwise error
 */
static inline int ocf_metadata_raw_set(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
		uint32_t size)
{
	return raw->iface->set(cache, raw, line, data, size);
}
/**
* @brief Flush specified element of metadata into SSD
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be flushed
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
return raw->iface->flush(cache, raw, line);
}
/**
 * @brief Load all metadata entries from the cache device
 *
 * @param cache - Cache instance
 * @param raw - RAW descriptor
 * @return 0 - Operation success, otherwise error
 */
static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	return raw->iface->load_all(cache, raw);
}
/**
 * @brief Flush all metadata entries to the cache device
 *
 * @param cache - Cache instance
 * @param raw - RAW descriptor
 * @return 0 - Operation success, otherwise error
 */
static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	return raw->iface->flush_all(cache, raw);
}
static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
raw->iface->flush_mark(cache, rq, map_idx, to_state, start, stop);
}
static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_metadata_asynch_flush_hndl complete)
{
return raw->iface->flush_do_asynch(cache, rq, raw, complete);
}
/*
 * Check if a (descriptor, line, entry size) triple is valid for access:
 * non-NULL descriptor, size matching the entry size, line within range.
 */
static inline bool _raw_is_valid(struct ocf_metadata_raw *raw,
		ocf_cache_line_t line, uint32_t size)
{
	return raw && size == raw->entry_size && line < raw->entries;
}
/* BUG if the (raw, line, size) triple is not valid for access */
static inline void _raw_bug_on(struct ocf_metadata_raw *raw,
		ocf_cache_line_t line, uint32_t size)
{
	ENV_BUG_ON(!_raw_is_valid(raw, line, size));
}

/* Max number of entries collected on the stack before falling back to a
 * heap-allocated table */
#define MAX_STACK_TAB_SIZE 32

/* Ascending comparator for uint32_t page numbers (env_sort callback) */
int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2);

#endif /* __METADATA_RAW_H__ */

View File

@@ -0,0 +1,260 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_raw_atomic.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_ATOMIC_DEBUG 0
#if 1 == OCF_METADATA_RAW_ATOMIC_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/* Context shared by all discard sub-IOs of one atomic metadata flush;
 * freed when flush_req_cnt drops to zero */
struct _raw_atomic_flush_ctx {
	struct ocf_request *rq;		/* request being flushed */
	ocf_metadata_asynch_flush_hndl complete; /* completion callback */
	env_atomic flush_req_cnt;	/* outstanding sub-IO refcount */
};
/*
 * Finalize one discard sub-IO: record the error, drop a reference on the
 * shared context and - on the last reference - report metadata error (if
 * any), invoke the completion callback and free the context.
 */
static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx,
		int error)
{
	if (error)
		ctx->rq->error = error;

	if (env_atomic_dec_return(&ctx->flush_req_cnt))
		return;

	if (ctx->rq->error)
		ocf_metadata_error(ctx->rq->cache);

	/* Call metadata flush completed call back */
	/* Fix: the debug macro takes the cache pointer as its first argument;
	 * the bare identifier 'cache' is not in scope in this function, which
	 * broke the build with OCF_METADATA_RAW_ATOMIC_DEBUG enabled */
	OCF_DEBUG_MSG(ctx->rq->cache, "Asynchronous flushing complete");

	ctx->complete(ctx->rq, ctx->rq->error);

	env_free(ctx);
}
/*
 * IO completion callback: release the IO object, then finalize the sub-IO
 * via the shared flush context.
 */
static void _raw_atomic_io_discard_end(struct ocf_io *io, int error)
{
	struct _raw_atomic_flush_ctx *ctx = io->priv1;

	/* Drop the IO reference before completing the flush bookkeeping */
	ocf_io_put(io);

	_raw_atomic_io_discard_cmpl(ctx, error);
}
/*
 * Submit one discard (or write-zeroes) IO covering [start_addr,
 * start_addr + len) on the cache device. Takes a reference on ctx for the
 * in-flight IO; returns rq->error (0 on successful submission).
 */
static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
		uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
{
	struct ocf_request *rq = context;
	struct ocf_io *io = ocf_new_cache_io(cache);

	if (!io) {
		rq->error = -ENOMEM;
		return rq->error;
	}

	/* Fix: previous message referenced 'start_line', an identifier not in
	 * scope here, which broke the build with debug enabled; it also
	 * mislabeled the values - they are a byte address and a byte length */
	OCF_DEBUG_PARAM(cache, "Addr to flush = %llu, bytes = %u",
			(unsigned long long)start_addr, len);

	env_atomic_inc(&ctx->flush_req_cnt);

	ocf_io_configure(io, start_addr, len, OCF_WRITE, 0, 0);
	ocf_io_set_cmpl(io, ctx, NULL, _raw_atomic_io_discard_end);

	/* Use real discard only when it guarantees zeroed reads; otherwise
	 * fall back to an explicit write-zeroes */
	if (cache->device->obj.features.discard_zeroes)
		ocf_dobj_submit_discard(io);
	else
		ocf_dobj_submit_write_zeroes(io);

	return rq->error;
}
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
if (to_state == INVALID) {
rq->map[map_idx].flush = true;
rq->map[map_idx].start_flush = start;
rq->map[map_idx].stop_flush = stop;
rq->info.flush_metadata = true;
}
}
#define MAX_STACK_TAB_SIZE 32
/*
 * Append the physical cache line number corresponding to a logical line
 * to the collection table, advancing the caller's index.
 */
static inline void _raw_atomic_add_page(struct ocf_cache *cache,
		uint32_t *clines_tab, uint64_t line, int *idx)
{
	clines_tab[(*idx)++] = ocf_metadata_map_lg2phy(cache, line);
}
/*
 * Discard the marked sector sub-range (start_flush..stop_flush, inclusive)
 * of a single mapped cache line within the atomic metadata area.
 */
static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
		struct ocf_request *rq, int map_idx,
		struct _raw_atomic_flush_ctx *ctx)
{
	struct ocf_map_info *map = &rq->map[map_idx];
	uint32_t len = 0;
	uint64_t start_addr;
	int result = 0;

	/* Physical byte address of the first sector to discard:
	 * physical line * line size + metadata area offset + sector offset */
	start_addr = ocf_metadata_map_lg2phy(cache, map->coll_idx);
	start_addr *= ocf_line_size(cache);
	start_addr += cache->device->metadata_offset;
	start_addr += SECTORS_TO_BYTES(map->start_flush);

	/* stop_flush is inclusive, hence the extra sector */
	len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush);
	len += SECTORS_TO_BYTES(1);

	result = _raw_atomic_io_discard_do(cache, rq, start_addr, len, ctx);

	return result;
}
/*
 * Flush (discard) metadata for all cache lines marked in the request map.
 *
 * A single-line request gets one sector-granularity discard. For
 * multi-line requests, partially covered first/last lines are discarded
 * at sector granularity, while fully covered lines are collected,
 * sorted by physical page and coalesced into as few whole-line discards
 * as possible.
 *
 * @param cache - cache instance
 * @param rq - request whose map[].flush entries select lines to flush
 * @param raw - RAW metadata container (unused by this implementation)
 * @param complete - completion callback, must not be NULL
 * @return 0 on success, negative error code otherwise
 */
int raw_atomic_flush_do_asynch(struct ocf_cache *cache,
		struct ocf_request *rq, struct ocf_metadata_raw *raw,
		ocf_end_t complete)
{
	int result = 0, i;
	uint32_t __clines_tab[MAX_STACK_TAB_SIZE];
	uint32_t *clines_tab;
	int clines_to_flush = 0;
	uint32_t len = 0;
	int line_no = rq->core_line_count;
	struct ocf_map_info *map;
	uint64_t start_addr;
	struct _raw_atomic_flush_ctx *ctx;

	ENV_BUG_ON(!complete);

	if (!rq->info.flush_metadata) {
		/* Nothing to flush - signal completion immediately */
		complete(rq, 0);
		return 0;
	}

	ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
	if (!ctx) {
		complete(rq, -ENOMEM);
		return -ENOMEM;
	}

	ctx->rq = rq;
	ctx->complete = complete;
	/* Extra reference held for the duration of submission */
	env_atomic_set(&ctx->flush_req_cnt, 1);

	if (line_no == 1) {
		map = &rq->map[0];
		if (map->flush && map->status != LOOKUP_MISS) {
			result = _raw_atomic_flush_do_asynch_sec(cache, rq,
					0, ctx);
		}
		_raw_atomic_io_discard_cmpl(ctx, result);
		return result;
	}

	if (line_no <= MAX_STACK_TAB_SIZE) {
		clines_tab = __clines_tab;
	} else {
		clines_tab = env_zalloc(sizeof(*clines_tab) * line_no,
				ENV_MEM_NOIO);
		if (!clines_tab) {
			complete(rq, -ENOMEM);
			env_free(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < line_no; i++) {
		map = &rq->map[i];

		if (!map->flush || map->status == LOOKUP_MISS)
			continue;

		if (i == 0 && map->start_flush) {
			/* First line covered partially - sector discard */
			result = _raw_atomic_flush_do_asynch_sec(cache, rq,
					i, ctx);
		} else if (i == (line_no - 1) &&
				map->stop_flush != ocf_line_end_sector(cache)) {
			/* Last line covered partially - sector discard */
			result = _raw_atomic_flush_do_asynch_sec(cache, rq,
					i, ctx);
		} else {
			/* Fully covered line - queue whole-line discard */
			_raw_atomic_add_page(cache, clines_tab, map->coll_idx,
					&clines_to_flush);
		}

		/* BUGFIX: previously the sector-discard result was dropped
		 * here, so e.g. an ENOMEM went unreported and further IO
		 * was still issued. Propagate it like the discard loop does.
		 */
		if (result)
			break;
	}

	if (!result) {
		/* Sort by physical page to generate sequential IO and to
		 * enable coalescing of adjacent lines below.
		 */
		env_sort(clines_tab, clines_to_flush, sizeof(*clines_tab),
				_raw_ram_flush_do_page_cmp, NULL);

		i = 0;
		while (i < clines_to_flush) {
			start_addr = clines_tab[i];
			start_addr *= ocf_line_size(cache);
			start_addr += cache->device->metadata_offset;
			len = ocf_line_size(cache);

			/* Merge runs of physically adjacent lines */
			while (true) {
				if ((i + 1) >= clines_to_flush)
					break;

				if ((clines_tab[i] + 1) != clines_tab[i + 1])
					break;

				i++;
				len += ocf_line_size(cache);
			}

			result |= _raw_atomic_io_discard_do(cache, rq,
					start_addr, len, ctx);

			if (result)
				break;

			i++;
		}
	}

	_raw_atomic_io_discard_cmpl(ctx, result);

	if (line_no > MAX_STACK_TAB_SIZE)
		env_free(clines_tab);

	return result;
}

View File

@@ -0,0 +1,16 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_ATOMIC_H__
#define __METADATA_RAW_ATOMIC_H__
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
int raw_atomic_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete);
#endif /* __METADATA_RAW_ATOMIC_H__ */

View File

@@ -0,0 +1,446 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_raw_dynamic.h"
#include "metadata_io.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0
#if 1 == OCF_METADATA_RAW_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s\n", __func__)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/*******************************************************************************
* Common RAW Implementation
******************************************************************************/
/*
* Check if page is valid for specified RAW descriptor
*/
/*
 * Check if page is valid for specified RAW descriptor.
 *
 * Note: out-of-range pages trigger ENV_BUG_ON rather than a false
 * return - when this function returns at all, it returns true.
 */
static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
{
	ENV_BUG_ON(page < raw->ssd_pages_offset);
	ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));

	return true;
}
/*******************************************************************************
* RAW dynamic Implementation
******************************************************************************/
#define _RAW_DYNAMIC_PAGE(raw, line) \
((line) / raw->entries_in_page)
#define _RAW_DYNAMIC_PAGE_OFFSET(raw, line) \
((line % raw->entries_in_page) * raw->entry_size)
/*
* RAW DYNAMIC control structure
*/
struct _raw_ctrl {
env_mutex lock;
env_atomic count;
void *pages[];
};
/*
 * Return pointer to the entry for 'line', allocating its backing page
 * on first access (lazy allocation). Returns NULL if the page cannot
 * be allocated. Must not be called from atomic/interrupt context when
 * the page is not yet allocated.
 */
static void *_raw_dynamic_get_item(struct ocf_cache *cache,
	struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size)
{
	void *new = NULL;
	struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
	uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);

	ENV_BUG_ON(!_raw_is_valid(raw, line, size));

	OCF_DEBUG_PARAM(cache, "Accessing item %u on page %u", line, page);

	if (!ctrl->pages[page]) {
		/* No page, allocate one, and set*/
		/* This RAW container has some restrictions and need to check
		 * this limitation:
		 * 1. no atomic context when allocation
		 * 2. Only one allocator in time
		 */
		ENV_BUG_ON(env_in_interrupt());

		env_mutex_lock(&ctrl->lock);

		/* Re-check under the lock - another thread may have
		 * allocated the page while we were waiting (double-checked
		 * locking on the pages[] slot).
		 */
		if (ctrl->pages[page]) {
			/* Page has been already allocated, skip allocation */
			goto _raw_dynamic_get_item_SKIP;
		}

		OCF_DEBUG_PARAM(cache, "New page allocation - %u", page);

		new = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
		if (new) {
			ctrl->pages[page] = new;
			env_atomic_inc(&ctrl->count);
		}

_raw_dynamic_get_item_SKIP:

		env_mutex_unlock(&ctrl->lock);
	}

	if (ctrl->pages[page])
		return ctrl->pages[page] + _RAW_DYNAMIC_PAGE_OFFSET(raw, line);

	/* Allocation failed - caller must handle missing entry */
	return NULL;
}
/*
* RAM DYNAMIC Implementation - De-Initialize
*/
/*
 * RAM DYNAMIC Implementation - De-Initialize.
 * Releases every lazily allocated data page and the control structure.
 * Safe to call when the container was never initialized.
 */
int raw_dynamic_deinit(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	struct _raw_ctrl *ctrl = raw->priv;
	uint32_t page;

	if (!ctrl)
		return 0;

	OCF_DEBUG_TRACE(cache);

	/* env_free() tolerates NULL, so unallocated slots are fine */
	for (page = 0; page < raw->ssd_pages; page++)
		env_free(ctrl->pages[page]);

	env_vfree(ctrl);
	raw->priv = NULL;

	return 0;
}
/*
* RAM DYNAMIC Implementation - Initialize
*/
/*
 * RAM DYNAMIC Implementation - Initialize.
 * Allocates the zeroed control structure (with one page-pointer slot
 * per SSD page) and its allocation mutex. Returns 0 on success, -1 on
 * failure.
 */
int raw_dynamic_init(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	struct _raw_ctrl *ctrl;
	size_t alloc_size = sizeof(*ctrl) +
			sizeof(ctrl->pages[0]) * raw->ssd_pages;

	OCF_DEBUG_TRACE(cache);

	/* An entry must fit within a single page */
	if (raw->entry_size > PAGE_SIZE)
		return -1;

	ctrl = env_vmalloc(alloc_size);
	if (!ctrl)
		return -1;

	ENV_BUG_ON(env_memset(ctrl, alloc_size, 0));

	if (env_mutex_init(&ctrl->lock)) {
		env_vfree(ctrl);
		return -1;
	}

	raw->priv = ctrl;

	return 0;
}
/*
* RAW DYNAMIC Implementation - Size of
*/
/*
 * RAW DYNAMIC Implementation - Size of.
 * Memory footprint = allocated data pages + control structure
 * (including the page pointer table).
 */
size_t raw_dynamic_size_of(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	struct _raw_ctrl *ctrl = raw->priv;
	size_t total;

	/* Memory consumed by lazily allocated data pages */
	total = env_atomic_read(&ctrl->count);
	total *= PAGE_SIZE;

	/* Control structure and its flexible pointer array */
	total += sizeof(*ctrl) + sizeof(ctrl->pages[0]) * raw->ssd_pages;

	OCF_DEBUG_PARAM(cache, "Count = %d, Size = %lu",
			env_atomic_read(&ctrl->count), total);

	return total;
}
/*
* RAW DYNAMIC Implementation - Size on SSD
*/
/*
 * Number of SSD pages reserved for this container, rounded up to a
 * 128 KiB boundary (alignment expressed in pages).
 */
uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	const size_t alignment = 128 * KiB / PAGE_SIZE;

	return DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
}
/*
* RAM DYNAMIC Implementation - Checksum
*/
/*
 * CRC32 over all allocated pages of the container. Unallocated pages
 * (logically all-zero) are skipped, i.e. they do not contribute to the
 * checksum. OCF_COND_RESCHED presumably yields the CPU periodically on
 * long scans - confirm against its definition.
 */
uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
	uint64_t i;
	uint32_t step = 0;
	uint32_t crc = 0;

	for (i = 0; i < raw->ssd_pages; i++) {
		if (ctrl->pages[i])
			crc = env_crc32(crc, ctrl->pages[i], PAGE_SIZE);
		OCF_COND_RESCHED(step, 10000);
	}

	return crc;
}
/*
* RAM DYNAMIC Implementation - Get
*/
/*
 * RAM DYNAMIC Implementation - Get.
 * Copies the entry for 'line' into 'data'. On lookup/allocation
 * failure the output buffer is zeroed, a metadata error is reported
 * and -1 is returned.
 */
int raw_dynamic_get(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		void *data, uint32_t size)
{
	void *entry = _raw_dynamic_get_item(cache, raw, line, size);

	if (!entry) {
		ENV_BUG_ON(env_memset(data, size, 0));
		ocf_metadata_error(cache);
		return -1;
	}

	return env_memcpy(data, size, entry, size);
}
/*
* RAM DYNAMIC Implementation - Set
*/
/*
 * RAM DYNAMIC Implementation - Set.
 * Copies 'data' into the entry for 'line', allocating the backing page
 * if needed. Reports a metadata error and returns -1 on failure.
 */
int raw_dynamic_set(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		void *data, uint32_t size)
{
	void *entry = _raw_dynamic_get_item(cache, raw, line, size);

	if (!entry) {
		ocf_metadata_error(cache);
		return -1;
	}

	return env_memcpy(entry, size, data, size);
}
/*
* RAM DYNAMIC Implementation - access
*/
/*
 * Read-only access to the entry for 'line'. May allocate the backing
 * page on first touch; returns NULL on allocation failure.
 */
const void *raw_dynamic_rd_access(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		uint32_t size)
{
	return _raw_dynamic_get_item(cache, raw, line, size);
}
/*
* RAM DYNAMIC Implementation - access
*/
/*
 * Writable access to the entry for 'line'. May allocate the backing
 * page on first touch; returns NULL on allocation failure.
 */
void *raw_dynamic_wr_access(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
		uint32_t size)
{
	return _raw_dynamic_get_item(cache, raw, line, size);
}
/*
 * Flush the single metadata page containing 'line' to the cache device.
 * The page must already be allocated (asserted) - flushing an entry
 * that was never written is a programming error.
 */
int raw_dynamic_flush(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
	uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);
	struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;

	OCF_DEBUG_PARAM(cache, "Line %u, page = %u", line, page);

	ENV_BUG_ON(!ctrl->pages[page]);

	return metadata_io_write(cache, ctrl->pages[page],
			raw->ssd_pages_offset + page);
}
/*
* RAM DYNAMIC Implementation - Load all
*/
#define RAW_DYNAMIC_LOAD_PAGES 128
/*
 * Load all container pages from the cache device in batches of
 * RAW_DYNAMIC_LOAD_PAGES. Pages that read back as all-zero are not
 * materialized in RAM (the container treats a NULL slot as zeros);
 * non-zero pages take ownership of the staging buffer, which is then
 * re-allocated for the next page. Returns 0 on success or a negative
 * error code; partially loaded pages are kept and released later by
 * raw_dynamic_deinit().
 */
int raw_dynamic_load_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
	uint64_t i = 0, i_page = 0;
	uint64_t count = RAW_DYNAMIC_LOAD_PAGES;
	int error = 0, cmp;
	struct ocf_io *io;
	/* data: IO staging area; page: candidate RAM page (ownership may
	 * transfer into ctrl->pages[]); zpage: all-zero reference page
	 * used to detect pages that need no RAM backing.
	 */
	ctx_data_t *data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
	char *page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	char *zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);

	if (!data || !page || !zpage) {
		/* ctx_data_free/env_free tolerate NULL */
		ctx_data_free(cache->owner, data);
		env_free(page);
		env_free(zpage);
		return -ENOMEM;
	}

	OCF_DEBUG_TRACE(cache);

	/* Loading, need to load all metadata, when page is zero set, no need
	 * to allocate space for it
	 */
	while (i < raw->ssd_pages) {
		/* Clamp the final batch to the remaining page count */
		if (i + count > raw->ssd_pages)
			count = raw->ssd_pages - i;

		/* Allocate IO */
		io = ocf_new_cache_io(cache);
		if (!io) {
			error = -ENOMEM;
			break;
		}

		/* Setup IO */
		error = ocf_io_set_data(io, data, 0);
		if (error) {
			ocf_io_put(io);
			break;
		}
		ocf_io_configure(io,
				PAGES_TO_BYTES(raw->ssd_pages_offset + i),
				PAGES_TO_BYTES(count), OCF_READ, 0, 0);

		/* Submit IO */
		error = ocf_submit_io_wait(io);
		ocf_io_put(io);
		io = NULL;

		if (error)
			break;

		/* Reset head of data buffer */
		ctx_data_seek_check(cache->owner, data,
				ctx_data_seek_begin, 0);

		for (i_page = 0; i_page < count; i_page++, i++) {
			if (!page) {
				/* Previous iteration handed its buffer to
				 * ctrl->pages[] - get a fresh one.
				 */
				page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
				if (!page) {
					/* Allocation error */
					error = -ENOMEM;
					break;
				}
			}

			ctx_data_rd_check(cache->owner, page, data, PAGE_SIZE);

			error = env_memcmp(zpage, PAGE_SIZE, page,
					PAGE_SIZE, &cmp);
			if (error)
				break;

			/* When page is zero set, no need to allocate RAM */
			if (cmp == 0) {
				OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
				continue;
			}

			OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);

			/* Transfer buffer ownership to the container */
			ctrl->pages[i] = page;
			page = NULL;

			env_atomic_inc(&ctrl->count);
		}

		if (error)
			break;
	}

	env_free(zpage);
	env_free(page);
	ctx_data_free(cache->owner, data);

	return error;
}
/*
* RAM DYNAMIC Implementation - Flush all
*/
/*
* RAM Implementation - Flush IO callback - Fill page
*/
/*
 * metadata_io_write_i() fill callback: copy the RAM page backing the
 * given SSD page into the IO buffer, or zero-fill when the page was
 * never allocated (NULL slot means logically all-zero).
 */
static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache,
		ctx_data_t *data, uint32_t page, void *context)
{
	struct ocf_metadata_raw *raw = context;
	struct _raw_ctrl *ctrl = raw->priv;
	uint32_t raw_page;

	ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));

	raw_page = page - raw->ssd_pages_offset;

	if (!ctrl->pages[raw_page]) {
		OCF_DEBUG_PARAM(cache, "Zero fill, Page = %u", raw_page);
		/* Page was not allocated before set only zeros */
		ctx_data_zero_check(cache->owner, data, PAGE_SIZE);
		return 0;
	}

	OCF_DEBUG_PARAM(cache, "Page = %u", raw_page);
	ctx_data_wr_check(cache->owner, data, ctrl->pages[raw_page],
			PAGE_SIZE);

	return 0;
}
/*
 * Write the whole container to the cache device; page contents are
 * supplied by _raw_dynamic_flush_all_fill().
 */
int raw_dynamic_flush_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	OCF_DEBUG_TRACE(cache);
	return metadata_io_write_i(cache, raw->ssd_pages_offset,
			raw->ssd_pages, _raw_dynamic_flush_all_fill, raw);
}
/*
* RAM DYNAMIC Implementation - Mark to Flush
*/
/*
 * Flush-marking is not supported by the dynamic RAW container -
 * reaching this function indicates a programming error.
 */
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
	ENV_BUG();
}
/*
* RAM DYNAMIC Implementation - Do flushing asynchronously
*/
/*
 * Asynchronous flush is not supported by the dynamic RAW container -
 * reaching this function indicates a programming error. Returns
 * -ENOSYS (unreachable after ENV_BUG in normal builds).
 */
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache,
		struct ocf_request *rq, struct ocf_metadata_raw *raw,
		ocf_end_t complete)
{
	ENV_BUG();
	return -ENOSYS;
}

View File

@@ -0,0 +1,106 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_DYNAMIC_H__
#define __METADATA_RAW_DYNAMIC_H__
/**
* @file metadata_raw_dynamic.h
* @brief Metadata RAW container implementation for dynamic numbers of elements
*/
/*
* RAW DYNAMIC - Initialize
*/
int raw_dynamic_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - De-Initialize
*/
int raw_dynamic_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Get size of memory footprint of this RAW metadata container
*/
size_t raw_dynamic_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC Implementation - Size on SSD
*/
uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC Implementation - Checksum
*/
uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Get specified entry
*/
int raw_dynamic_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
/*
* RAW DYNAMIC - Set specified entry
*/
int raw_dynamic_set(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
/*
* RAW DYNAMIC - Read only access for specified entry
*/
const void *raw_dynamic_rd_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
/*
* RAW DYNAMIC - Write access for specified entry
*/
void *raw_dynamic_wr_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
/*
* RAW DYNAMIC - Flush specified entry
*/
int raw_dynamic_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line);
/*
* RAW DYNAMIC - Load all metadata of this RAW metadata container
* from cache device
*/
int raw_dynamic_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Flush all metadata of this RAW metadata container
* to cache device
*/
int raw_dynamic_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Mark specified entry to be flushed
*/
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/*
* DYNAMIC Implementation - Do Flush Asynchronously
*/
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete);
#endif /* __METADATA_RAW_DYNAMIC_H__ */

View File

@@ -0,0 +1,74 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_io.h"
#include "metadata_raw_volatile.h"
/*
* RAW volatile Implementation - Size on SSD
*/
/*
 * Volatile metadata is never persisted - it occupies no SSD space.
 */
uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	(void)cache;
	(void)raw;

	return 0;
}
/*
* RAW volatile Implementation - Checksum
*/
/*
 * No on-disk image exists for volatile metadata, so its checksum is
 * a constant zero.
 */
uint32_t raw_volatile_checksum(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	(void)cache;
	(void)raw;

	return 0;
}
/*
* RAW volatile Implementation - Flush specified element to SSD
*/
/*
 * Flushing a volatile entry is a no-op that always succeeds.
 */
int raw_volatile_flush(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
	(void)cache;
	(void)raw;
	(void)line;

	return 0;
}
/*
* RAW volatile Implementation - Load all metadata elements from SSD
*/
/*
 * Volatile metadata has no persistent image to load from - always
 * fails with -ENOTSUP.
 */
int raw_volatile_load_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	(void)cache;
	(void)raw;

	return -ENOTSUP;
}
/*
 * RAW volatile Implementation - Flush all elements
*/
/*
 * Flushing the whole volatile container is a no-op that always
 * succeeds.
 */
int raw_volatile_flush_all(struct ocf_cache *cache,
		struct ocf_metadata_raw *raw)
{
	(void)cache;
	(void)raw;

	return 0;
}
/*
 * RAW volatile Implementation - Mark to Flush
*/
/*
 * Nothing to mark - volatile metadata is never flushed to the device.
 */
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
}
/*
 * RAW volatile Implementation - Do Flush asynchronously
*/
/*
 * No device IO is required for volatile metadata - report success
 * through the completion callback immediately.
 */
int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
		struct ocf_request *rq, struct ocf_metadata_raw *raw,
		ocf_end_t complete)
{
	complete(rq, 0);
	return 0;
}

View File

@@ -0,0 +1,52 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_RAW_VOLATILE_H__
#define __METADATA_RAW_VOLATILE_H__
/*
* RAW volatile Implementation - Size on SSD
*/
uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Checksum
*/
uint32_t raw_volatile_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Flush specified element to SSD
*/
int raw_volatile_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line);
/*
* RAW volatile Implementation - Load all metadata elements from SSD
*/
int raw_volatile_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Flush all elements
*/
int raw_volatile_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);
/*
 * RAW volatile Implementation - Mark to Flush
*/
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/*
 * RAW volatile Implementation - Do Flush asynchronously
*/
int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete);
#endif /* __METADATA_RAW_VOLATILE_H__ */

View File

@@ -0,0 +1,435 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_STATUS_H__
#define __METADATA_STATUS_H__
#include "../ocf_request.h"
/*******************************************************************************
* Dirty
******************************************************************************/
/*
 * Reset the status of a cache line: clear both dirty and valid bits
 * for its full sector range, under the metadata bits write lock.
 */
static inline void metadata_init_status_bits(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	OCF_METADATA_BITS_LOCK_WR();

	cache->metadata.iface.clear_dirty(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end);
	cache->metadata.iface.clear_valid(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end);

	OCF_METADATA_BITS_UNLOCK_WR();
}
/*
 * Test whether the cache line is dirty across its whole sector range.
 * NOTE(review): the trailing 'true' flag appears to select
 * all-sectors semantics (vs any-sector in metadata_test_dirty) -
 * confirm against the iface.test_dirty implementation.
 */
static inline bool metadata_test_dirty_all(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	bool test;

	OCF_METADATA_BITS_LOCK_RD();
	test = cache->metadata.iface.test_dirty(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end, true);
	OCF_METADATA_BITS_UNLOCK_RD();

	return test;
}
/*
 * Test whether the cache line has any dirty sector (trailing flag is
 * 'false', in contrast to metadata_test_dirty_all above).
 */
static inline bool metadata_test_dirty(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	bool test;

	OCF_METADATA_BITS_LOCK_RD();
	test = cache->metadata.iface.test_dirty(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end, false);
	OCF_METADATA_BITS_UNLOCK_RD();

	return test;
}
static inline void metadata_set_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
/*
 * Atomically (under the metadata bits write lock) test and clear the
 * dirty bits of the whole cache line; returns the pre-clear test
 * result as reported by iface.test_and_clear_dirty.
 */
static inline bool metadata_test_and_clear_dirty(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	bool test;

	OCF_METADATA_BITS_LOCK_WR();
	test = cache->metadata.iface.test_and_clear_dirty(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end, false);
	OCF_METADATA_BITS_UNLOCK_WR();

	return test;
}
static inline bool metadata_test_and_set_dirty(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_set_dirty(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, false);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
/*******************************************************************************
* Dirty - Sector Implementation
******************************************************************************/
static inline bool metadata_test_dirty_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_dirty(cache, line,
start, stop, false);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_dirty_all_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_dirty(cache, line,
start, stop, true);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_dirty_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
return metadata_test_dirty_sec(cache, line, pos, pos);
}
static inline bool metadata_test_dirty_out_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_out_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline void metadata_set_dirty_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_dirty_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_set_dirty_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_dirty(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_dirty_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_dirty(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline bool metadata_test_and_clear_dirty_sec(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool test = false;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_clear_dirty(cache, line,
start, stop, false);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
/*
* Marks given cache line's bits as clean
*
* @return true if the cache line was dirty and became clean
* @return false for other cases
*/
static inline bool metadata_clear_dirty_sec_changed(
		struct ocf_cache *cache, ocf_cache_line_t line,
		uint8_t start, uint8_t stop)
{
	bool was_dirty, is_dirty = false;

	OCF_METADATA_BITS_LOCK_WR();

	/* Dirty state of the FULL line before clearing [start, stop] */
	was_dirty = cache->metadata.iface.test_dirty(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end,
			false);

	if (was_dirty) {
		/* is_dirty presumably reports whether any sector remains
		 * dirty after the clear - confirm against iface.clear_dirty
		 */
		is_dirty = cache->metadata.iface.clear_dirty(cache, line,
				start, stop);
	}

	OCF_METADATA_BITS_UNLOCK_WR();

	/* True only for the dirty -> fully-clean transition */
	return was_dirty && !is_dirty;
}
/*
* Marks given cache line's bits as dirty
*
* @return true if the cache line was clean and became dirty
* @return false if the cache line was dirty before marking bits
*/
static inline bool metadata_set_dirty_sec_changed(
		struct ocf_cache *cache, ocf_cache_line_t line,
		uint8_t start, uint8_t stop)
{
	bool was_dirty;

	OCF_METADATA_BITS_LOCK_WR();
	/* was_dirty presumably reports the line's dirty state before the
	 * set - confirm against iface.set_dirty
	 */
	was_dirty = cache->metadata.iface.set_dirty(cache, line, start, stop);
	OCF_METADATA_BITS_UNLOCK_WR();

	/* True only for the clean -> dirty transition */
	return !was_dirty;
}
/*******************************************************************************
* Valid
******************************************************************************/
/*
 * Test whether the cache line has any valid sector (trailing flag is
 * 'false'; metadata_test_valid below passes 'true' for the all-sectors
 * variant - note the flag convention is inverted vs the dirty testers).
 */
static inline bool metadata_test_valid_any(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	bool test;

	OCF_METADATA_BITS_LOCK_RD();
	test = cache->metadata.iface.test_valid(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end, false);
	OCF_METADATA_BITS_UNLOCK_RD();

	return test;
}
/*
 * Test validity of the cache line across its whole sector range
 * (trailing flag 'true' - see metadata_test_valid_any for the
 * any-sector variant).
 */
static inline bool metadata_test_valid(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	bool test;

	OCF_METADATA_BITS_LOCK_RD();
	test = cache->metadata.iface.test_valid(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end, true);
	OCF_METADATA_BITS_UNLOCK_RD();

	return test;
}
static inline void metadata_set_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline bool metadata_test_and_clear_valid(
struct ocf_cache *cache, ocf_cache_line_t line)
{
bool test = false;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_clear_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, true);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
static inline bool metadata_test_and_set_valid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
bool test = false;
OCF_METADATA_BITS_LOCK_WR();
test = cache->metadata.iface.test_and_set_valid(cache, line,
cache->metadata.settings.sector_start,
cache->metadata.settings.sector_end, true);
OCF_METADATA_BITS_UNLOCK_WR();
return test;
}
/*******************************************************************************
* Valid - Sector Implementation
******************************************************************************/
static inline bool metadata_test_valid_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
bool test;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_valid(cache, line,
start, stop, true);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_valid_any_out_sec(
struct ocf_cache *cache, ocf_cache_line_t line,
uint8_t start, uint8_t stop)
{
bool test = false;
OCF_METADATA_BITS_LOCK_RD();
test = cache->metadata.iface.test_out_valid(cache, line,
start, stop);
OCF_METADATA_BITS_UNLOCK_RD();
return test;
}
static inline bool metadata_test_valid_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
return metadata_test_valid_sec(cache, line, pos, pos);
}
/*
* Marks given cache line's bits as valid
*
 * @return true if the cache line was invalid (all bits invalid) before this
 * operation, i.e. the line just became valid
 * @return false if any of the cache line's bits was already valid
*/
static inline bool metadata_set_valid_sec_changed(
		struct ocf_cache *cache, ocf_cache_line_t line,
		uint8_t start, uint8_t stop)
{
	bool was_any_valid;

	OCF_METADATA_BITS_LOCK_WR();
	/* was_any_valid presumably reports whether any sector was valid
	 * before the set - confirm against iface.set_valid
	 */
	was_any_valid = cache->metadata.iface.set_valid(cache, line,
			start, stop);
	OCF_METADATA_BITS_UNLOCK_WR();

	/* True only when the line was fully invalid before the set */
	return !was_any_valid;
}
static inline void metadata_clear_valid_sec(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_valid(cache, line, start, stop);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_clear_valid_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.clear_valid(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
static inline void metadata_set_valid_sec_one(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t pos)
{
OCF_METADATA_BITS_LOCK_WR();
cache->metadata.iface.set_valid(cache, line, pos, pos);
OCF_METADATA_BITS_UNLOCK_WR();
}
/*
* Marks given cache line's bits as invalid
*
* @return true if any of the cache line's bits was valid and the cache line
* became invalid (all bits invalid) after the operation
* @return false in other cases
*/
static inline bool metadata_clear_valid_sec_changed(
		struct ocf_cache *cache, ocf_cache_line_t line,
		uint8_t start, uint8_t stop, bool *is_valid)
{
	bool was_any_valid;

	OCF_METADATA_BITS_LOCK_WR();

	/* Validity of the FULL line before clearing [start, stop] */
	was_any_valid = cache->metadata.iface.test_valid(cache, line,
			cache->metadata.settings.sector_start,
			cache->metadata.settings.sector_end, false);

	/* Out-param: presumably whether any sector remains valid after
	 * the clear - confirm against iface.clear_valid
	 */
	*is_valid = cache->metadata.iface.clear_valid(cache, line,
			start, stop);

	OCF_METADATA_BITS_UNLOCK_WR();

	/* True only for the valid -> fully-invalid transition */
	return was_any_valid && !*is_valid;
}
#endif /* __METADATA_STATUS_H__ */

View File

@@ -0,0 +1,491 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_STRUCTS_H__
#define __METADATA_STRUCTS_H__
#include "../eviction/eviction.h"
#include "../cleaning/cleaning.h"
#include "../ocf_request.h"
/**
 * @file metadata_structs.h
* @brief Metadata private structures
*/
/**
* @brief Metadata shutdown status
*/
enum ocf_metadata_shutdown_status {
ocf_metadata_clean_shutdown = 1, /*!< OCF shutdown graceful*/
ocf_metadata_dirty_shutdown = 0, /*!< Dirty OCF shutdown*/
ocf_metadata_detached = 2, /*!< Cache device detached */
};
/**
* @brief Asynchronous metadata request completed
*
* @param cache - Cache instance
* @param error - Indicates operation result, 0 - Finished successfully
* @param line - cache line for which completion is signaled
* @param context - Context of metadata request
*/
typedef void (*ocf_metadata_asynch_hndl)(struct ocf_cache *cache,
int error, ocf_cache_line_t line, void *context);
typedef void (*ocf_metadata_asynch_flush_hndl)(void *context, int error);
/*
* Metadata cache line location on pages interface
*/
struct ocf_metadata_layout_iface {
/**
* @brief Initialize freelist partition
*
* @param cache - Cache instance
*/
void (*init_freelist)(struct ocf_cache *cache);
/**
* This function is mapping collision index to appropriate cache line
* (logical cache line to physical one mapping).
*
* It is necessary because we want to generate sequential workload with
* data to cache device.
* Our collision list, for example, looks:
* 0 3 6 9
* 1 4 7 10
* 2 5 8
* All collision index in each column is on the same page
* on cache device. We don't want send request x times to the same
* page. To don't do it we use collision index by row, but in this
* case we can't use collision index directly as cache line,
* because we will generate non sequential workload (we will write
* pages: 0 -> 3 -> 6 ...). To map collision index in correct way
* we use this function.
*
* After use this function, collision index in the above array
* corresponds with below cache line:
* 0 1 2 3
* 4 5 6 7
* 8 9 10
*
* @param cache - cache instance
* @param idx - index in collision list
* @return mapped cache line
*/
ocf_cache_line_t (*lg2phy)(struct ocf_cache *cache,
ocf_cache_line_t coll_idx);
/**
* @brief Map physical cache line on cache device to logical one
* @note This function is the inverse of map_coll_idx_to_cache_line
*
* @param cache Cache instance
* @param phy Physical cache line of cache device
* @return Logical cache line
*/
ocf_cache_line_t (*phy2lg)(struct ocf_cache *cache,
ocf_cache_line_t phy);
};
/**
 * OCF Metadata interface - function-pointer table implemented by the
 * concrete metadata back-ends (e.g. hash variant). All callbacks take the
 * owning cache instance as their first argument.
 */
struct ocf_metadata_iface {
	/**
	 * @brief Initialize metadata
	 *
	 * @param cache - Cache instance
	 * @param cache_line_size - Cache line size
	 * @return 0 - Operation success otherwise failure
	 */
	int (*init)(struct ocf_cache *cache,
			ocf_cache_line_size_t cache_line_size);

	/**
	 * @brief Initialize variable size metadata sections
	 *
	 * @param cache - Cache instance
	 * @param device_size - Cache size in bytes
	 * @param cache_line_size - Cache line size
	 * @param layout - Metadata layout
	 * @return 0 - Operation success otherwise failure
	 */
	int (*init_variable_size)(struct ocf_cache *cache, uint64_t device_size,
			ocf_cache_line_size_t cache_line_size,
			ocf_metadata_layout_t layout);

	/**
	 * @brief Metadata cache line location on pages interface
	 */
	const struct ocf_metadata_layout_iface *layout_iface;

	/**
	 * @brief Initialize hash table
	 *
	 * @param cache - Cache instance
	 */
	void (*init_hash_table)(struct ocf_cache *cache);

	/**
	 * @brief De-Initialize metadata
	 *
	 * @param cache - Cache instance
	 */
	void (*deinit)(struct ocf_cache *cache);

	/**
	 * @brief De-Initialize variable size metadata segments
	 *
	 * @param cache - Cache instance
	 */
	void (*deinit_variable_size)(struct ocf_cache *cache);

	/**
	 * @brief Get memory footprint
	 *
	 * @param cache - Cache instance
	 * @return Memory footprint of the metadata service
	 */
	size_t (*size_of)(struct ocf_cache *cache);

	/**
	 * @brief Get amount of pages required for metadata
	 *
	 * @param cache - Cache instance
	 * @return Pages required to store metadata on cache device
	 */
	ocf_cache_line_t (*pages)(struct ocf_cache *cache);

	/**
	 * @brief Get amount of cache lines
	 *
	 * @param cache - Cache instance
	 * @return Amount of cache lines (cache device lines - metadata space)
	 */
	ocf_cache_line_t (*cachelines)(struct ocf_cache *cache);

	/**
	 * @brief Load metadata from cache device
	 *
	 * @param[in] cache - Cache instance
	 * @return 0 - Operation success otherwise failure
	 */
	int (*load_all)(struct ocf_cache *cache);

	/**
	 * @brief Load metadata during recovery procedure
	 *
	 * @param[in] cache - Cache instance
	 * @return 0 - Operation success otherwise failure
	 */
	int (*load_recovery)(struct ocf_cache *cache);

	/**
	 * @brief Flush metadata onto cache device
	 *
	 * @param[in] cache - Cache instance
	 * @return 0 - Operation success otherwise failure
	 */
	int (*flush_all)(struct ocf_cache *cache);

	/**
	 * @brief Flush metadata for specified cache line
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - Cache line to be flushed
	 */
	void (*flush)(struct ocf_cache *cache, ocf_cache_line_t line);

	/**
	 * @brief Mark cache lines metadata to be flushed
	 *
	 * NOTE(review): parameter names suggest sectors [start..stop] of map
	 * entry @map_idx in @rq are marked for transition to @to_state -
	 * confirm against the back-end implementations.
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] rq - Request whose mapping is being marked
	 * @param[in] map_idx - Index within the request map
	 * @param[in] to_state - Target state of the marked sectors
	 * @param[in] start - First sector to mark
	 * @param[in] stop - Last sector to mark
	 */
	void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
			uint32_t map_idx, int to_state, uint8_t start,
			uint8_t stop);

	/**
	 * @brief Flush marked cache lines asynchronously
	 *
	 * @param cache - Cache instance
	 * @param rq - Request whose marked metadata should be written out
	 * @param complete - flushing completion callback
	 */
	void (*flush_do_asynch)(struct ocf_cache *cache,
			struct ocf_request *rq, ocf_end_t complete);

	/* TODO Provide documentation below */

	/* Super-block shutdown status and persistence helpers */
	enum ocf_metadata_shutdown_status (*get_shutdown_status)(
			struct ocf_cache *cache);
	int (*set_shutdown_status)(struct ocf_cache *cache,
			enum ocf_metadata_shutdown_status shutdown_status);
	int (*load_superblock)(struct ocf_cache *cache);
	int (*flush_superblock)(struct ocf_cache *cache);
	uint64_t (*get_reserved_lba)(struct ocf_cache *cache);

	/**
	 * @brief Get eviction policy
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - cache line for which eviction policy is requested
	 * @param[out] eviction_policy - Eviction policy metadata
	 */
	void (*get_eviction_policy)(struct ocf_cache *cache,
			ocf_cache_line_t line,
			union eviction_policy_meta *eviction_policy);

	/**
	 * @brief Set eviction policy
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - cache line for which eviction policy is stored
	 * @param[in] eviction_policy - Eviction policy values to be stored in
	 * metadata service
	 */
	void (*set_eviction_policy)(struct ocf_cache *cache,
			ocf_cache_line_t line,
			union eviction_policy_meta *eviction_policy);

	/**
	 * @brief Flush eviction policy for given cache line
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - Cache line for which flushing has to be performed
	 */
	void (*flush_eviction_policy)(struct ocf_cache *cache,
			ocf_cache_line_t line);

	/**
	 * @brief Get cleaning policy
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - cache line for which cleaning policy is requested
	 * @param[out] cleaning_policy - Cleaning policy metadata
	 */
	void (*get_cleaning_policy)(struct ocf_cache *cache,
			ocf_cache_line_t line,
			struct cleaning_policy_meta *cleaning_policy);

	/**
	 * @brief Set cleaning policy
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - cache line for which cleaning policy is stored
	 * @param[in] cleaning_policy - Cleaning policy values which will be
	 * stored in metadata service
	 */
	void (*set_cleaning_policy)(struct ocf_cache *cache,
			ocf_cache_line_t line,
			struct cleaning_policy_meta *cleaning_policy);

	/**
	 * @brief Flush cleaning policy for given cache line
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] line - Cache line for which flushing has to be performed
	 */
	void (*flush_cleaning_policy)(struct ocf_cache *cache,
			ocf_cache_line_t line);

	/**
	 * @brief Get hash table for specified index
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] index - Hash table index
	 * @return Cache line value under specified hash table index
	 */
	ocf_cache_line_t (*get_hash)(struct ocf_cache *cache,
			ocf_cache_line_t index);

	/**
	 * @brief Set hash table value for specified index
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] index - Hash table index
	 * @param[in] line - Cache line value to be set under specified hash
	 * table index
	 */
	void (*set_hash)(struct ocf_cache *cache,
			ocf_cache_line_t index, ocf_cache_line_t line);

	/**
	 * @brief Flush hash table entry for specified index
	 *
	 * @param[in] cache - Cache instance
	 * @param[in] index - Hash table index
	 */
	void (*flush_hash)(struct ocf_cache *cache,
			ocf_cache_line_t index);

	/**
	 * @brief Get number of hash table entries
	 *
	 * @param[in] cache - Cache instance
	 * @return Hash table entries
	 */
	ocf_cache_line_t (*entries_hash)(struct ocf_cache *cache);

	/* TODO Provide documentation below */

	/* Core mapping info (core id + core LBA) per cache line */
	void (*set_core_info)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_core_id_t core_id,
			uint64_t core_sector);
	void (*get_core_info)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_core_id_t *core_id,
			uint64_t *core_sector);
	ocf_core_id_t (*get_core_id)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	uint64_t (*get_core_sector)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	void (*get_core_and_part_id)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_core_id_t *core_id,
			ocf_part_id_t *part_id);
	struct ocf_metadata_uuid *(*get_core_uuid)(
			struct ocf_cache *cache, ocf_core_id_t core_id);

	/* Collision (hash chain) list linkage per cache line */
	void (*set_collision_info)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_cache_line_t next,
			ocf_cache_line_t prev);
	void (*get_collision_info)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_cache_line_t *next,
			ocf_cache_line_t *prev);
	void (*set_collision_next)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_cache_line_t next);
	void (*set_collision_prev)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_cache_line_t prev);
	ocf_cache_line_t (*get_collision_next)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	ocf_cache_line_t (*get_collision_prev)(struct ocf_cache *cache,
			ocf_cache_line_t line);

	/* Partition (IO class) membership list linkage per cache line */
	ocf_part_id_t (*get_partition_id)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	ocf_cache_line_t (*get_partition_next)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	ocf_cache_line_t (*get_partition_prev)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	void (*get_partition_info)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_part_id_t *part_id,
			ocf_cache_line_t *next_line,
			ocf_cache_line_t *prev_line);
	void (*set_partition_next)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_cache_line_t next_line);
	void (*set_partition_prev)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_cache_line_t prev_line);
	void (*set_partition_info)(struct ocf_cache *cache,
			ocf_cache_line_t line, ocf_part_id_t part_id,
			ocf_cache_line_t next_line, ocf_cache_line_t prev_line);

	/* Direct (read-only / writable) access to per-line status bits */
	const struct ocf_metadata_status*
	(*rd_status_access)(struct ocf_cache *cache,
			ocf_cache_line_t line);
	struct ocf_metadata_status*
	(*wr_status_access)(struct ocf_cache *cache,
			ocf_cache_line_t line);

	/* Per-sector dirty bit operations on range [start..stop] of a line;
	 * 'all' selects whether every sector (true) or any sector (false)
	 * must satisfy the test - TODO confirm against implementations */
	bool (*test_dirty)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
	bool (*test_out_dirty)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop);
	bool (*clear_dirty)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop);
	bool (*set_dirty)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop);
	bool (*test_and_set_dirty)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
	bool (*test_and_clear_dirty)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);

	/* Per-sector valid bit operations, mirroring the dirty set above */
	bool (*test_valid)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
	bool (*test_out_valid)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop);
	bool (*clear_valid)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop);
	bool (*set_valid)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop);
	bool (*test_and_set_valid)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
	bool (*test_and_clear_valid)(struct ocf_cache *cache,
			ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
};
/* Per-cache-line addressing parameters (presumably derived from the
 * configured cache line size at metadata init - TODO confirm). */
struct ocf_cache_line_settings {
	ocf_cache_line_size_t size;	/* cache line size */
	uint64_t sector_count;		/* sectors per line - TODO confirm */
	uint64_t sector_start;		/* first sector index - TODO confirm */
	uint64_t sector_end;		/* last sector index - TODO confirm */
};
/**
 * @brief Metadata control structure
 */
struct ocf_metadata {
	const struct ocf_metadata_iface iface;
	/*!< Metadata service interface */

	void *iface_priv;
	/*!< Private data of metadata service interface */

	const struct ocf_cache_line_settings settings;
	/*!< Cache line configuration */

	bool is_volatile;
	/*!< true if metadata used in volatile mode (RAM only) */

	struct {
		env_rwsem collision; /*!< lock for collision table */
		env_rwlock status; /*!< Fast lock for status bits */
		env_spinlock eviction; /*!< Fast lock for eviction policy */
	} lock;
	/*!< Metadata locks (granularity documented per-field above) */
};
#define OCF_METADATA_RD 0
#define OCF_METADATA_WR 1
#endif /* __METADATA_STRUCTS_H__ */

View File

@@ -0,0 +1,93 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_SUPERBLOCK_H__
#define __METADATA_SUPERBLOCK_H__
#define CACHE_MAGIC_NUMBER 0x187E1CA6
/**
 * @brief OCF cache metadata configuration superblock
 *
 * Persistent, on-device cache configuration. Field order is part of the
 * on-disk format - see the warnings below before reordering anything.
 */
struct ocf_superblock_config {
	/** WARNING: Metadata probe disregards metadata version when
	 * checking if the cache is dirty - position of next two fields
	 * shouldn't change!! */
	uint8_t clean_shutdown;		/* shutdown status marker */
	uint8_t dirty_flushed;		/* whether dirty data was flushed */

	uint32_t magic_number;		/* expected: CACHE_MAGIC_NUMBER */
	uint32_t metadata_version;

	/* Currently set cache mode */
	ocf_cache_mode_t cache_mode;

	ocf_cache_line_t cachelines;
	uint32_t valid_parts_no;

	ocf_cache_line_size_t line_size;
	ocf_metadata_layout_t metadata_layout;
	uint32_t core_obj_count;
	/* One bit per possible core object id */
	unsigned long valid_object_bitmap[(OCF_CORE_MAX /
			(sizeof(unsigned long) * 8)) + 1];

	ocf_cleaning_t cleaning_policy_type;
	struct cleaning_policy_config cleaning[CLEANING_POLICY_TYPE_MAX];

	ocf_eviction_t eviction_policy_type;

	/* Current core sequence number */
	ocf_core_id_t curr_core_seq_no;

	struct ocf_user_part_config user_parts[OCF_IO_CLASS_MAX + 1];

	/*
	 * Checksum for each metadata region.
	 * This field has to be the last one!
	 */
	uint32_t checksum[metadata_segment_max];
};
/**
 * @brief OCF cache metadata runtime superblock
 *
 * Runtime (rebuildable) counterpart of the configuration superblock.
 */
struct ocf_superblock_runtime {
	struct ocf_part freelist_part;		/* free cache line list */

	struct ocf_user_part_runtime user_parts[OCF_IO_CLASS_MAX + 1];

	uint32_t cleaning_thread_access;
};
/* Record the shutdown status through the metadata interface.
 * Returns 0 on success, non-zero on failure. */
static inline int ocf_metadata_set_shutdown_status(
		struct ocf_cache *cache,
		enum ocf_metadata_shutdown_status shutdown_status)
{
	return cache->metadata.iface.set_shutdown_status(cache,
			shutdown_status);
}
/* Load the superblock from the cache device via the metadata interface.
 * Returns 0 on success, non-zero on failure. */
static inline int ocf_metadata_load_superblock(struct ocf_cache *cache)
{
	return cache->metadata.iface.load_superblock(cache);
}
/*
 * Persist the superblock to the cache device. A cache without an attached
 * device has nothing to flush, so that case reports success immediately.
 */
static inline
int ocf_metadata_flush_superblock(struct ocf_cache *cache)
{
	return cache->device ?
			cache->metadata.iface.flush_superblock(cache) : 0;
}
/* Return the LBA reserved by the metadata back-end (see iface docs). */
static inline uint64_t ocf_metadata_get_reserved_lba(
		struct ocf_cache *cache)
{
	return cache->metadata.iface.get_reserved_lba(cache);
}
#endif /* __METADATA_SUPERBLOCK_H__ */

View File

@@ -0,0 +1,152 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "metadata_updater_priv.h"
#include "../ocf_priv.h"
#include "../engine/engine_common.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "../utils/utils_io.h"
#include "../utils/utils_allocator.h"
/**
 * @brief Initialize the metadata updater for a cache.
 *
 * Sets up the flush synchronizer (in-progress / pending request lists and
 * their lock) and hands the updater over to the context, which typically
 * starts its worker.
 *
 * @param cache OCF cache instance
 * @return 0 on success, non-zero error code otherwise
 */
int ocf_metadata_updater_init(ocf_cache_t cache)
{
	ocf_metadata_updater_t mu = &cache->metadata_updater;
	struct ocf_metadata_io_syncher *syncher = &mu->syncher;
	int result;

	INIT_LIST_HEAD(&syncher->in_progress_head);
	INIT_LIST_HEAD(&syncher->pending_head);

	/* Fix: the env_mutex_init() result was previously ignored - a failed
	 * mutex initialization must not be silently accepted. */
	result = env_mutex_init(&syncher->lock);
	if (result)
		return result;

	return ctx_metadata_updater_init(cache->owner, mu);
}
/* Ask the context to wake the updater worker for this cache. */
void ocf_metadata_updater_kick(ocf_cache_t cache)
{
	ctx_metadata_updater_kick(cache->owner, &cache->metadata_updater);
}
/* Ask the context to stop the updater worker for this cache. */
void ocf_metadata_updater_stop(ocf_cache_t cache)
{
	ctx_metadata_updater_stop(cache->owner, &cache->metadata_updater);
}
/* Attach context-private data to the updater. */
void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv)
{
	OCF_CHECK_NULL(mu);
	mu->priv = priv;
}
/* Retrieve the context-private data previously set on the updater. */
void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu)
{
	OCF_CHECK_NULL(mu);
	return mu->priv;
}
/* Recover the owning cache from an embedded updater pointer. */
ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu)
{
	OCF_CHECK_NULL(mu);
	return container_of(mu, struct ocf_cache, metadata_updater);
}
/*
 * Walk the in-progress list, reaping finished metadata I/O requests:
 * each finished request is unlinked and, when it was the last outstanding
 * part of its parent asynch request, the parent's request array and the
 * parent itself are freed.
 *
 * When @new_req is non-NULL, additionally report whether it overlaps (by
 * metadata page range) any still-running request.
 *
 * NOTE(review): both call sites hold syncher->lock around this call -
 * it must not be called unlocked.
 *
 * Returns 1 if @new_req overlaps an in-progress request, 0 otherwise.
 */
static int _metadata_updater_iterate_in_progress(ocf_cache_t cache,
		struct metadata_io_request *new_req)
{
	struct metadata_io_request_asynch *a_req;
	struct ocf_metadata_io_syncher *syncher =
			&cache->metadata_updater.syncher;
	struct metadata_io_request *curr, *temp;

	list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) {
		if (env_atomic_read(&curr->finished)) {
			a_req = curr->asynch;
			ENV_BUG_ON(!a_req);

			list_del(&curr->list);

			/* Last active sub-request releases the parent */
			if (env_atomic_dec_return(&a_req->req_active) == 0) {
				OCF_REALLOC_DEINIT(&a_req->reqs,
						&a_req->reqs_limit);
				env_free(a_req);
			}
			continue;
		}
		if (new_req) {
			/* If request specified, check if overlap occurs. */
			if (ocf_io_overlaps(new_req->page, new_req->count,
					curr->page, curr->count)) {
				return 1;
			}
		}
	}

	return 0;
}
/*
 * Queue a metadata I/O request. Requests overlapping in-flight metadata
 * pages are parked on the pending list for deferred execution; everything
 * else goes straight onto the in-progress list.
 *
 * Returns 0 when the request may proceed now, 1 when it was deferred.
 */
int metadata_updater_check_overlaps(ocf_cache_t cache,
		struct metadata_io_request *req)
{
	struct ocf_metadata_io_syncher *syncher =
			&cache->metadata_updater.syncher;
	struct list_head *target;
	int overlap;

	env_mutex_lock(&syncher->lock);

	overlap = _metadata_updater_iterate_in_progress(cache, req);

	/* Either add it to in-progress list or pending list for deferred
	 * execution.
	 */
	target = overlap ? &syncher->pending_head : &syncher->in_progress_head;
	list_add_tail(&req->list, target);

	env_mutex_unlock(&syncher->lock);

	return overlap;
}
/*
 * Updater worker body: drain the pending list.
 *
 * For each pending request, overlaps are re-checked against in-progress
 * I/O (which also reaps finished requests); non-overlapping requests are
 * moved to the in-progress list and their flush requests pushed to the
 * front of the engine queue. With nothing pending, finished in-progress
 * requests are still reaped so their memory is reclaimed.
 *
 * NOTE(review): syncher->lock is dropped and re-acquired inside the
 * list_for_each_entry_safe() walk; the cached 'temp' pointer could go
 * stale if another thread modifies the pending list while unlocked -
 * confirm that only a single updater thread runs per cache.
 *
 * @return always 0
 */
uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
{
	struct metadata_io_request *curr, *temp;
	struct ocf_metadata_io_syncher *syncher;
	ocf_cache_t cache;
	int ret;

	OCF_CHECK_NULL(mu);

	cache = ocf_metadata_updater_get_cache(mu);
	syncher = &cache->metadata_updater.syncher;

	env_mutex_lock(&syncher->lock);
	if (list_empty(&syncher->pending_head)) {
		/*
		 * If pending list is empty, we iterate over in progress
		 * list to free memory used by finished requests.
		 */
		_metadata_updater_iterate_in_progress(cache, NULL);
		env_mutex_unlock(&syncher->lock);
		env_cond_resched();
		return 0;
	}
	list_for_each_entry_safe(curr, temp, &syncher->pending_head, list) {
		ret = _metadata_updater_iterate_in_progress(cache, curr);
		if (ret == 0) {
			/* Move to in-progress list and kick the workers */
			list_move_tail(&curr->list, &syncher->in_progress_head);
		}
		env_mutex_unlock(&syncher->lock);
		if (ret == 0)
			ocf_engine_push_rq_front(&curr->fl_req, true);
		env_cond_resched();
		env_mutex_lock(&syncher->lock);
	}
	env_mutex_unlock(&syncher->lock);

	return 0;
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_UPDATER_PRIV_H__
#define __METADATA_UPDATER_PRIV_H__
#include "../ocf_def_priv.h"
#include "metadata_io.h"
struct ocf_metadata_updater {
	/* Metadata flush synchronizer context */
	struct ocf_metadata_io_syncher {
		/* Requests currently being written out */
		struct list_head in_progress_head;
		/* Requests deferred because they overlap in-progress ones */
		struct list_head pending_head;
		/* Protects both lists above */
		env_mutex lock;
	} syncher;

	/* Context-private data (ocf_metadata_updater_{set,get}_priv) */
	void *priv;
};
/* Queue @req; returns 1 (request deferred to the pending list) when it
 * overlaps an in-flight metadata request, 0 when it may proceed now. */
int metadata_updater_check_overlaps(ocf_cache_t cache,
		struct metadata_io_request *req);

/* Updater lifecycle - see metadata_updater.c for details */
int ocf_metadata_updater_init(struct ocf_cache *cache);
void ocf_metadata_updater_kick(struct ocf_cache *cache);
void ocf_metadata_updater_stop(struct ocf_cache *cache);
#endif /* __METADATA_UPDATER_PRIV_H__ */

2121
src/mngt/ocf_mngt_cache.c Normal file

File diff suppressed because it is too large Load Diff

448
src/mngt/ocf_mngt_common.c Normal file
View File

@@ -0,0 +1,448 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../ocf_ctx_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_device.h"
#include "../eviction/ops.h"
#include "../ocf_logger_priv.h"
#include "../ocf_queue_priv.h"
/* Close the core's data object if it is currently open. */
int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id)
{
	struct ocf_core *core = &cache->core_obj[core_id];

	if (!core->opened)
		return -OCF_ERR_CORE_IN_INACTIVE_STATE;

	ocf_data_obj_close(&core->obj);
	core->opened = false;

	return 0;
}
/* Detach a core from the currently configured cleaning policy. */
void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
		int core_id)
{
	ocf_cleaning_t clean_pol_type;

	OCF_METADATA_LOCK_WR();

	/* Only an opened core with a policy providing remove_core needs it */
	clean_pol_type = cache->conf_meta->cleaning_policy_type;
	if (cache->core_obj[core_id].opened &&
			cleaning_policy_ops[clean_pol_type].remove_core)
		cleaning_policy_ops[clean_pol_type].remove_core(cache, core_id);

	OCF_METADATA_UNLOCK_WR();
}
/* Deinitialize core metadata in attached metadata */
void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id)
{
	int retry = 1;
	uint64_t core_size = 0;
	ocf_cleaning_t clean_pol_type;
	ocf_data_obj_t core;

	core = &cache->core_obj[core_id].obj;

	/* Zero length means the size could not be read - purge everything */
	core_size = ocf_data_obj_get_length(core);
	if (!core_size)
		core_size = ~0ULL;

	OCF_METADATA_LOCK_WR();

	clean_pol_type = cache->conf_meta->cleaning_policy_type;
	/* Retry until both the cleaning-policy purge and the metadata sparse
	 * succeed; the metadata lock is dropped between attempts so other
	 * users can make progress. */
	while (retry) {
		retry = 0;
		if (cleaning_policy_ops[clean_pol_type].purge_range) {
			retry = cleaning_policy_ops[clean_pol_type].purge_range(cache,
					core_id, 0, core_size);
		}

		if (!retry) {
			/* Remove from collision_table and Partition. Put in FREELIST */
			retry = ocf_metadata_sparse_range(cache, core_id, 0,
					core_size);
		}

		if (retry) {
			OCF_METADATA_UNLOCK_WR();
			env_msleep(100);
			OCF_METADATA_LOCK_WR();
		}
	}

	OCF_METADATA_UNLOCK_WR();
}
/* Mark core as removed in metadata */
void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id)
{
	OCF_METADATA_LOCK_WR();

	/* In metadata mark data this core was removed from cache */
	cache->core_conf_meta[core_id].added = false;

	/* Clear UUID of core */
	ocf_uuid_core_clear(cache, &cache->core_obj[core_id]);
	/* Invalidate the sequence number so the slot cannot be matched */
	cache->core_conf_meta[core_id].seq_no = OCF_SEQ_NO_INVALID;

	OCF_METADATA_UNLOCK_WR();
}
/* Deinit in-memory structures related to this core */
void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id)
{
	env_free(cache->core_obj[core_id].counters);
	cache->core_obj[core_id].counters = NULL;

	env_bit_clear(core_id, cache->conf_meta->valid_object_bitmap);

	/* Note the short-circuit: the inactive counter is decremented only
	 * for cores that were NOT opened; when the last inactive core goes
	 * away the cache stops being "incomplete". */
	if (!cache->core_obj[core_id].opened &&
			--cache->ocf_core_inactive_count == 0) {
		env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
	}

	cache->conf_meta->core_obj_count--;
}
/**
 * @brief Wait for the end of asynchronous cleaning
 *
 * @param cache OCF cache instance
 * @param timeout_ms Timeout for waiting in milliseconds
 * @note When timeout is less than zero it means wait forever
 *
 * @retval 0 cleaning finished
 * @retval non-zero timeout and cleaning still in progress
 */
static int _ocf_cleaning_wait_for_finish(struct ocf_cache *cache,
		const int32_t timeout_ms)
{
	struct ocf_user_part *part;
	ocf_part_id_t part_id;
	int64_t time_left = timeout_ms;
	/* A detached cache device cannot have cleaning in flight */
	bool active = ocf_cache_is_device_attached(cache);

	while (active) {
		/* Poll every partition's cleaning counter under the lock */
		active = false;
		OCF_METADATA_LOCK_WR();
		for_each_part(cache, part, part_id) {
			if (env_atomic_read(&cache->cleaning[part_id])) {
				active = true;
				break;
			}
		}
		OCF_METADATA_UNLOCK_WR();

		if (!active)
			break;

		env_msleep(20);
		if (timeout_ms >= 0) {
			time_left -= 20;
			if (time_left <= 0)
				break;
		}
	}

	return active ? -EBUSY : 0;
}
/* Drop a cache reference; the last put tears the cache down. */
void ocf_mngt_cache_put(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);

	if (env_atomic_dec_return(&cache->ref_count) == 0) {
		/* Last reference gone - free queues, metadata and the
		 * cache structure itself */
		ocf_free_queues(cache);
		ocf_metadata_deinit(cache);
		env_vfree(cache);
	}
}
/*
 * Look up a cache by id on the context list and take a reference on it.
 *
 * Returns 0 with *cache set on success, -OCF_ERR_INVAL for an id out of
 * range, -OCF_ERR_CACHE_NOT_EXIST when no initialized cache with that id
 * exists.
 */
int ocf_mngt_cache_get(ocf_ctx_t ocf_ctx, ocf_cache_id_t id, ocf_cache_t *cache)
{
	int error = 0;
	struct ocf_cache *instance = NULL;
	struct ocf_cache *iter = NULL;

	OCF_CHECK_NULL(ocf_ctx);
	OCF_CHECK_NULL(cache);

	*cache = NULL;

	if ((id < OCF_CACHE_ID_MIN) || (id > OCF_CACHE_ID_MAX)) {
		/* Cache id out of range */
		return -OCF_ERR_INVAL;
	}

	/* Lock caches list */
	env_mutex_lock(&ocf_ctx->lock);

	list_for_each_entry(iter, &ocf_ctx->caches, list) {
		if (iter->cache_id == id) {
			instance = iter;
			break;
		}
	}

	if (instance) {
		/* if cache is either fully initialized or during recovery */
		if (instance->valid_ocf_cache_device_t) {
			/* Increase reference counter */
			env_atomic_inc(&instance->ref_count);
		} else {
			/* Cache not initialized yet */
			instance = NULL;
		}
	}

	env_mutex_unlock(&ocf_ctx->lock);

	if (!instance)
		error = -OCF_ERR_CACHE_NOT_EXIST;
	else
		*cache = instance;

	return error;
}
/*
 * A cache counts as locked when the management rwsem is held or when at
 * least one waiter is queued on it.
 */
bool ocf_mngt_is_cache_locked(ocf_cache_t cache)
{
	return env_rwsem_is_locked(&cache->lock) ||
			env_atomic_read(&cache->lock_waiter);
}
/* Release the write management lock and the reference taken by lock(). */
void ocf_mngt_cache_unlock(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	env_rwsem_up_write(&cache->lock);
	ocf_mngt_cache_put(cache);
}
/* Release the read management lock and the reference taken by lock(). */
void ocf_mngt_cache_read_unlock(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	env_rwsem_up_read(&cache->lock);
	ocf_mngt_cache_put(cache);
}
/*
 * Take the cache management lock (read when @read, otherwise write).
 *
 * A cache reference is held for the lifetime of the lock; on any failure
 * path the reference is dropped again. Fails when the cache is stopping
 * or when asynchronous cleaning does not settle within 60 seconds.
 *
 * Returns 0 on success, negative OCF error code otherwise.
 */
int _ocf_mngt_cache_lock(ocf_cache_t cache, bool read)
{
	int ret;

	/* Increment reference counter */
	env_atomic_inc(&cache->ref_count);

	/* Flag a pending waiter so ocf_mngt_is_cache_locked() sees us */
	env_atomic_inc(&cache->lock_waiter);
	if (read)
		ret = env_rwsem_down_read_interruptible(&cache->lock);
	else
		ret = env_rwsem_down_write_interruptible(&cache->lock);
	env_atomic_dec(&cache->lock_waiter);

	if (ret) {
		ocf_mngt_cache_put(cache);
		return ret;
	}

	if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
		/* Cache already stopping, do not allow any operation */
		ret = -OCF_ERR_CACHE_NOT_EXIST;
		goto unlock;
	}

	/* Return, when asynchronous cleaning is finished */
	if (_ocf_cleaning_wait_for_finish(cache, 60 * 1000)) {
		/* Because of some reasons, asynchronous cleaning still active,
		 * cannot continue
		 */
		ret = -OCF_ERR_CACHE_IN_USE;
		goto unlock;
	}

	return 0;

unlock:
	if (read)
		ocf_mngt_cache_read_unlock(cache);
	else
		ocf_mngt_cache_unlock(cache);

	return ret;
}
/* Take the management lock for writing. See _ocf_mngt_cache_lock(). */
int ocf_mngt_cache_lock(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return _ocf_mngt_cache_lock(cache, false);
}
/* Take the management lock for reading. See _ocf_mngt_cache_lock(). */
int ocf_mngt_cache_read_lock(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return _ocf_mngt_cache_lock(cache, true);
}
/* Take a reference on the cache when it is usable (fully initialized or
 * during recovery); return NULL otherwise. */
static ocf_cache_t _ocf_mngt_cache_try_get(ocf_cache_t cache)
{
	if (!cache->valid_ocf_cache_device_t)
		return NULL;

	/* Usable - pin it with a reference */
	env_atomic_inc(&cache->ref_count);
	return cache;
}
/*
 * Snapshot the context's cache list, taking a reference on every usable
 * cache (initialized or in recovery).
 *
 * On success *list/*size describe an env_vmalloc'ed array the caller must
 * release: put every entry, then env_vfree() the array. *list is NULL and
 * *size is 0 when no usable cache exists.
 */
static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list,
		uint32_t *size)
{
	int result = 0;
	uint32_t count = 0, i = 0;
	struct ocf_cache *iter, *this;

	*list = NULL;
	*size = 0;

	env_mutex_lock(&ocf_ctx->lock);

	/* First pass: size the array */
	list_for_each_entry(iter, &ocf_ctx->caches, list) {
		count++;
	}

	if (!count)
		goto END;

	*list = env_vmalloc(sizeof((*list)[0]) * count);
	if (*list == NULL) {
		result = -ENOMEM;
		goto END;
	}

	/* Second pass: collect only the caches we could pin; i may end up
	 * smaller than count */
	list_for_each_entry(iter, &ocf_ctx->caches, list) {
		this = _ocf_mngt_cache_try_get(iter);
		if (this) {
			(*list)[i] = this;
			i++;
		}
	}

	if (i) {
		/* Update size if cache list */
		*size = i;
	} else {
		env_vfree(*list);
		*list = NULL;
	}

END:
	env_mutex_unlock(&ocf_ctx->lock);
	return result;
}
/*
 * Invoke @visitor on every usable cache, in list order. Iteration stops
 * at the first non-zero visitor result, which is then returned. All
 * references taken for the snapshot are released before returning.
 */
int ocf_mngt_cache_visit(ocf_ctx_t ocf_ctx, ocf_mngt_cache_visitor_t visitor,
		void *cntx)
{
	ocf_cache_t *caches;
	uint32_t count, idx;
	int ret;

	OCF_CHECK_NULL(ocf_ctx);
	OCF_CHECK_NULL(visitor);

	ret = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &caches, &count);
	if (ret)
		return ret;

	if (count == 0)
		return 0;

	/* Walk the snapshot until the visitor asks to stop */
	for (idx = 0; idx < count; idx++) {
		ret = visitor(caches[idx], cntx);
		if (ret)
			break;
	}

	/* Drop the references taken by the snapshot */
	for (idx = 0; idx < count; idx++)
		ocf_mngt_cache_put(caches[idx]);

	env_vfree(caches);
	return ret;
}
/*
 * Like ocf_mngt_cache_visit(), but walks the snapshot in reverse order.
 * Iteration stops at the first non-zero visitor result, which is then
 * returned; all snapshot references are released before returning.
 */
int ocf_mngt_cache_visit_reverse(ocf_ctx_t ocf_ctx,
		ocf_mngt_cache_visitor_t visitor, void *cntx)
{
	ocf_cache_t *caches;
	uint32_t count, idx;
	int ret;

	OCF_CHECK_NULL(ocf_ctx);
	OCF_CHECK_NULL(visitor);

	ret = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &caches, &count);
	if (ret)
		return ret;

	if (count == 0)
		return 0;

	/* Walk back-to-front until the visitor asks to stop */
	for (idx = count; idx-- > 0;) {
		ret = visitor(caches[idx], cntx);
		if (ret)
			break;
	}

	/* Drop the references taken by the snapshot */
	for (idx = 0; idx < count; idx++)
		ocf_mngt_cache_put(caches[idx]);

	env_vfree(caches);
	return ret;
}
/* Poll (500 ms interval) until no OCF requests remain allocated. */
void ocf_mngt_wait_for_io_finish(ocf_cache_t cache)
{
	while (ocf_rq_get_allocated(cache))
		env_msleep(500);
}

View File

@@ -0,0 +1,35 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_MNGT_COMMON_H__
#define __OCF_MNGT_COMMON_H__
/* Close the core's data object if open */
int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id);

/* Mark the core as removed in (super-block) metadata */
void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id);

/* Tear down in-memory structures of a removed core */
void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id);

/* Purge the core's cache lines from attached metadata */
void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id);

/* Detach the core from the active cleaning policy */
void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
		int core_id);

/* Thread entry points - definitions not visible in this file */
int _ocf_cleaning_thread(void *priv);
int cache_mng_thread_io_requests(void *data);

bool ocf_mngt_cache_is_dirty(ocf_cache_t cache);

/* Poll until no OCF requests remain allocated */
void ocf_mngt_wait_for_io_finish(ocf_cache_t cache);

int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
		ocf_part_id_t part_id, const char *name, uint32_t min_size,
		uint32_t max_size, uint8_t priority, bool valid);

/* True when the management rwsem is held or has waiters */
bool ocf_mngt_is_cache_locked(ocf_cache_t cache);
#endif /* __OCF_MNGT_COMMON_H__ */

480
src/mngt/ocf_mngt_core.c Normal file
View File

@@ -0,0 +1,480 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_device.h"
#include "../ocf_stats_priv.h"
#include "../ocf_def_priv.h"
/* Allocate the next core sequence number, or OCF_SEQ_NO_INVALID when the
 * sequence space is exhausted. */
static ocf_seq_no_t _ocf_mngt_get_core_seq_no(ocf_cache_t cache)
{
	if (cache->conf_meta->curr_core_seq_no == OCF_SEQ_NO_MAX)
		return OCF_SEQ_NO_INVALID;

	return ++cache->conf_meta->curr_core_seq_no;
}
/*
 * Try to (re)activate an inactive core already present in cache metadata:
 * verify the data object type, open the object and require a non-zero
 * length. On success the core is marked opened and, when it was the last
 * inactive one, the "incomplete" cache state is cleared.
 *
 * On failure *core is NULL and a negative OCF error code is returned.
 */
static int _ocf_mngt_cache_try_add_core(ocf_cache_t cache, ocf_core_t *core,
		struct ocf_mngt_core_config *cfg)
{
	int result = 0;
	struct ocf_core *core_obj;
	ocf_data_obj_t obj;

	core_obj = &cache->core_obj[cfg->core_id];
	obj = &core_obj->obj;

	if (ocf_ctx_get_data_obj_type_id(cache->owner, obj->type) !=
			cfg->data_obj_type) {
		result = -OCF_ERR_INVAL_DATA_OBJ_TYPE;
		goto error_out;
	}

	result = ocf_data_obj_open(obj);
	if (result)
		goto error_out;

	if (!ocf_data_obj_get_length(obj)) {
		result = -OCF_ERR_CORE_NOT_AVAIL;
		goto error_after_open;
	}

	cache->core_obj[cfg->core_id].opened = true;

	/* Last inactive core reactivated - cache is complete again */
	if (!(--cache->ocf_core_inactive_count))
		env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);

	*core = core_obj;
	return 0;

error_after_open:
	ocf_data_obj_close(obj);
error_out:
	*core = NULL;
	return result;
}
/*
 * Add a brand new core to the cache: set up its UUID and data object,
 * open it, register it with the cleaning policy, allocate statistics,
 * assign a sequence number and persist the updated super-block. Cleanup
 * on failure unwinds in reverse order via the goto chain below.
 *
 * On failure *core is NULL and a negative OCF error code is returned.
 */
static int _ocf_mngt_cache_add_core(ocf_cache_t cache, ocf_core_t *core,
		struct ocf_mngt_core_config *cfg)
{
	int result = 0;
	struct ocf_core *core_obj;
	ocf_data_obj_t obj;
	ocf_seq_no_t core_sequence_no;
	ocf_cleaning_t clean_type;
	uint64_t length;

	core_obj = &cache->core_obj[cfg->core_id];
	obj = &core_obj->obj;

	core_obj->obj.cache = cache;

	/* Set uuid */
	ocf_uuid_core_set(cache, core_obj, &cfg->uuid);

	obj->type = ocf_ctx_get_data_obj_type(cache->owner, cfg->data_obj_type);
	if (!obj->type) {
		result = -OCF_ERR_INVAL_DATA_OBJ_TYPE;
		goto error_out;
	}

	if (cfg->user_metadata.data && cfg->user_metadata.size > 0) {
		result = ocf_core_set_user_metadata_raw(core_obj,
				cfg->user_metadata.data,
				cfg->user_metadata.size);
		if (result)
			goto error_out;
	}

	result = ocf_data_obj_open(obj);
	if (result)
		goto error_out;

	length = ocf_data_obj_get_length(obj);
	if (!length) {
		result = -OCF_ERR_CORE_NOT_AVAIL;
		goto error_after_open;
	}

	cache->core_conf_meta[cfg->core_id].length = length;

	/* Cleaning policy registration happens only with a device attached */
	clean_type = cache->conf_meta->cleaning_policy_type;
	if (ocf_cache_is_device_attached(cache) &&
			cleaning_policy_ops[clean_type].add_core) {
		result = cleaning_policy_ops[clean_type].add_core(cache,
				cfg->core_id);
		if (result)
			goto error_after_open;
	}

	/* When adding new core to cache, allocate stat counters */
	core_obj->counters =
		env_zalloc(sizeof(*core_obj->counters), ENV_MEM_NORMAL);
	if (!core_obj->counters) {
		result = -OCF_ERR_NO_MEM;
		goto error_after_clean_pol;
	}

	/* When adding new core to cache, reset all core/cache statistics */
	ocf_stats_init(core_obj);
	env_atomic_set(&cache->core_runtime_meta[cfg->core_id].
			cached_clines, 0);
	env_atomic_set(&cache->core_runtime_meta[cfg->core_id].
			dirty_clines, 0);
	env_atomic64_set(&cache->core_runtime_meta[cfg->core_id].
			dirty_since, 0);

	/* In metadata mark data this core was added into cache */
	env_bit_set(cfg->core_id, cache->conf_meta->valid_object_bitmap);
	cache->core_conf_meta[cfg->core_id].added = true;
	cache->core_obj[cfg->core_id].opened = true;

	/* Set default cache parameters for sequential */
	cache->core_conf_meta[cfg->core_id].seq_cutoff_policy =
		ocf_seq_cutoff_policy_default;
	cache->core_conf_meta[cfg->core_id].seq_cutoff_threshold =
		cfg->seq_cutoff_threshold;

	/* Add core sequence number for atomic metadata matching */
	core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
	if (core_sequence_no == OCF_SEQ_NO_INVALID) {
		result = -OCF_ERR_TOO_MANY_CORES;
		goto error_after_counters_allocation;
	}
	cache->core_conf_meta[cfg->core_id].seq_no = core_sequence_no;

	/* Update super-block with core device addition */
	if (ocf_metadata_flush_superblock(cache)) {
		result = -OCF_ERR_WRITE_CACHE;
		goto error_after_counters_allocation;
	}

	/* Increase value of added cores */
	cache->conf_meta->core_obj_count++;

	*core = core_obj;
	return 0;

error_after_counters_allocation:
	env_bit_clear(cfg->core_id, cache->conf_meta->valid_object_bitmap);
	cache->core_conf_meta[cfg->core_id].added = false;
	cache->core_obj[cfg->core_id].opened = false;

	/* An error when flushing metadata, try restore for safety reason
	 * previous metadata sate on cache device.
	 * But if that fails too, we are scr**ed... or maybe:
	 * TODO: Handle situation when we can't flush metadata by
	 * trying to flush all the dirty data and switching to non-wb
	 * cache mode.
	 */
	ocf_metadata_flush_superblock(cache);

	env_free(core_obj->counters);
	core_obj->counters = NULL;

error_after_clean_pol:
	/* NOTE(review): add_core above is invoked only when the device is
	 * attached, but this unwind calls remove_core unconditionally -
	 * confirm remove_core is safe for a detached device. */
	if (cleaning_policy_ops[clean_type].remove_core)
		cleaning_policy_ops[clean_type].remove_core(cache, cfg->core_id);

error_after_open:
	ocf_data_obj_close(obj);
error_out:
	ocf_uuid_core_clear(cache, core_obj);
	*core = NULL;
	return result;
}
/*
 * Return the index of the first (least significant) zero bit in @word.
 *
 * Replaces the previous x86-only inline assembly ("rep; bsf" on the
 * complemented word) with portable C so the code builds on any
 * architecture and compiler.
 *
 * The caller must guarantee @word contains at least one zero bit; the
 * old BSF-based version was equally undefined for an all-ones input.
 */
static unsigned long _ffz(unsigned long word)
{
	unsigned long bit = 0;

	while (word & 1UL) {
		word >>= 1;
		bit++;
	}

	return bit;
}
/*
 * Find the lowest free (zero) bit in @bitmap, treating core id 0
 * specially: 0 is returned only when no other id is free.
 *
 * @param bitmap bitmap of allocated core ids, one bit per id
 * @param size   number of valid bits in @bitmap
 * @return first free core id != 0 if any; else 0 when id 0 is free;
 *         else @size when the bitmap is completely full
 */
static unsigned long _ocf_mngt_find_first_free_core(const unsigned long *bitmap,
		unsigned long size)
{
	unsigned long i;
	unsigned long ret = size;
	/* check core 0 availability */
	bool zero_core_free = !(*bitmap & 0x1UL);

	/* check if any core id is free except 0 */
	for (i = 0; i * sizeof(unsigned long) * 8 < size; i++) {
		/* Fix: the mask must be unsigned long, not unsigned long
		 * long. With the wider type, bitmap[i] was zero-extended
		 * before the bitwise NOT, so ~(bitmap[i] | mask) was
		 * non-zero even for a completely full word on platforms
		 * where long is 32-bit, and the value passed to _ffz()
		 * was silently truncated. */
		unsigned long ignore_mask = (i == 0) ? 1UL : 0UL;

		if (~(bitmap[i] | ignore_mask)) {
			ret = MIN(size, i * sizeof(unsigned long) * 8 +
					_ffz(bitmap[i] | ignore_mask));
			break;
		}
	}

	/* return 0 only if no other core is free */
	if (ret == size && zero_core_free)
		return 0;

	return ret;
}
/*
 * Look for an inactive core in cache metadata matching the data object
 * type and UUID from @cfg. Returns its id, or OCF_CORE_MAX if absent.
 */
static int __ocf_mngt_lookup_core_uuid(ocf_cache_t cache,
		struct ocf_mngt_core_config *cfg)
{
	int idx;

	for (idx = 0; idx < OCF_CORE_MAX; idx++) {
		ocf_core_t candidate = &cache->core_obj[idx];

		if (!env_bit_test(idx, cache->conf_meta->valid_object_bitmap))
			continue;

		/* Already opened cores are not eligible */
		if (candidate->opened)
			continue;

		if (ocf_ctx_get_data_obj_type_id(cache->owner,
				candidate->obj.type) != cfg->data_obj_type)
			continue;

		/* UUIDs are compared up to the shorter of the two lengths */
		if (env_strncmp(candidate->obj.uuid.data, cfg->uuid.data,
				min(candidate->obj.uuid.size, cfg->uuid.size)))
			continue;

		return idx;
	}

	return OCF_CORE_MAX;
}
/*
 * Resolve the core id for try_add mode: the core must already exist in
 * cache metadata and, if the caller supplied an id, it must agree with
 * the one found by UUID lookup.
 */
static int __ocf_mngt_try_find_core_id(ocf_cache_t cache,
		struct ocf_mngt_core_config *cfg, ocf_core_id_t tmp_core_id)
{
	if (tmp_core_id == OCF_CORE_MAX) {
		/* FIXME: uuid.data could be not NULL-terminated ANSI string */
		ocf_cache_log(cache, log_err, "Core with uuid %s not found in "
				"cache metadata\n", (char *) cfg->uuid.data);
		return -OCF_ERR_CORE_NOT_AVAIL;
	}

	if (cfg->core_id != tmp_core_id) {
		ocf_cache_log(cache, log_err,
				"Given core id doesn't match with metadata\n");
		return -OCF_ERR_CORE_NOT_AVAIL;
	}

	cfg->core_id = tmp_core_id;
	return 0;
}
/*
 * Resolve the core id for a brand new core: it must NOT already exist
 * in metadata; an unspecified id is replaced with the lowest free one,
 * an explicit id is validated for range and availability.
 */
static int __ocf_mngt_find_core_id(ocf_cache_t cache,
		struct ocf_mngt_core_config *cfg, ocf_core_id_t tmp_core_id)
{
	if (tmp_core_id != OCF_CORE_MAX) {
		ocf_cache_log(cache, log_err,
				"Core ID already added as inactive with id:"
				" %hu.\n", tmp_core_id);
		return -OCF_ERR_CORE_NOT_AVAIL;
	}

	if (cfg->core_id == OCF_CORE_MAX) {
		ocf_cache_log(cache, log_debug, "Core ID is unspecified - "
				"will set first available number\n");

		/* A free slot is guaranteed: core_obj_count was checked
		 * against OCF_CORE_MAX by the caller. */
		cfg->core_id = _ocf_mngt_find_first_free_core(
				cache->conf_meta->valid_object_bitmap,
				OCF_CORE_MAX);
		return 0;
	}

	if (cfg->core_id > OCF_CORE_MAX) {
		ocf_cache_log(cache, log_err,
				"Core ID exceeds maximum of %d.\n",
				OCF_CORE_MAX);
		return -OCF_ERR_CORE_NOT_AVAIL;
	}

	/* Explicit id requested - make sure it is not taken yet */
	if (env_bit_test(cfg->core_id,
			cache->conf_meta->valid_object_bitmap)) {
		ocf_cache_log(cache, log_debug,
				"Core ID already allocated: %d.\n",
				cfg->core_id);
		return -OCF_ERR_CORE_NOT_AVAIL;
	}

	return 0;
}
/*
 * Determine the target core id for @cfg, dispatching to the try_add
 * (core must exist) or add (core must not exist) variant.
 */
static int _ocf_mngt_find_core_id(ocf_cache_t cache,
		struct ocf_mngt_core_config *cfg)
{
	ocf_core_id_t found;

	/* Reject early when the cache is already fully populated */
	if (cache->conf_meta->core_obj_count >= OCF_CORE_MAX)
		return -OCF_ERR_TOO_MANY_CORES;

	found = __ocf_mngt_lookup_core_uuid(cache, cfg);

	return cfg->try_add ?
			__ocf_mngt_try_find_core_id(cache, cfg, found) :
			__ocf_mngt_find_core_id(cache, cfg, found);
}
/*
 * Add (or re-attach with try_add) a core to @cache without taking the
 * management lock - the caller must hold it. On success *core points
 * at the added core object.
 */
int ocf_mngt_cache_add_core_nolock(ocf_cache_t cache, ocf_core_t *core,
		struct ocf_mngt_core_config *cfg)
{
	char name_buf[OCF_CORE_NAME_SIZE];
	int rc;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(core);

	rc = _ocf_mngt_find_core_id(cache, cfg);
	if (rc)
		return rc;

	/* Either copy the user-provided name or derive one from the id */
	if (cfg->name) {
		rc = env_strncpy(name_buf, sizeof(name_buf), cfg->name,
				cfg->name_size);
		if (rc)
			return rc;
	} else {
		rc = snprintf(name_buf, sizeof(name_buf), "%hu",
				cfg->core_id);
		if (rc < 0)
			return rc;
	}

	rc = ocf_core_set_name(&cache->core_obj[cfg->core_id], name_buf,
			sizeof(name_buf));
	if (rc)
		return rc;

	ocf_cache_log(cache, log_debug, "Inserting core %s\n", name_buf);

	rc = cfg->try_add ?
			_ocf_mngt_cache_try_add_core(cache, core, cfg) :
			_ocf_mngt_cache_add_core(cache, core, cfg);

	if (!rc) {
		ocf_core_log(*core, log_info, "Successfully added\n");
		return rc;
	}

	if (rc == -OCF_ERR_CORE_NOT_AVAIL) {
		ocf_cache_log(cache, log_err, "Core %s is zero size\n",
				name_buf);
	}
	ocf_cache_log(cache, log_err, "Adding core %s failed\n", name_buf);

	return rc;
}
/*
 * Public entry point: add a core under the cache management lock.
 */
int ocf_mngt_cache_add_core(ocf_cache_t cache, ocf_core_t *core,
		struct ocf_mngt_core_config *cfg)
{
	int rc;

	OCF_CHECK_NULL(cache);

	/* Serialize management operations on this cache */
	rc = ocf_mngt_cache_lock(cache);
	if (rc)
		return rc;

	rc = ocf_mngt_cache_add_core_nolock(cache, core, cfg);

	ocf_mngt_cache_unlock(cache);

	return rc;
}
/*
 * Remove or detach a core.
 *
 * @detach true: only close the core device; its metadata stays so it
 * can be re-attached later, and the cache is marked incomplete.
 * @detach false: fully tear down per-core state and persist removal.
 */
static int _ocf_mngt_cache_remove_core(ocf_core_t core, bool detach)
{
	struct ocf_cache *cache = core->obj.cache;
	ocf_core_id_t core_id = ocf_core_get_id(core);
	int result;

	if (detach) {
		result = cache_mng_core_close(cache, core_id);
		if (!result) {
			cache->ocf_core_inactive_count++;
			env_bit_set(ocf_cache_state_incomplete,
					&cache->cache_state);
		}
		return result;
	}

	/* Full removal - tear down everything */
	if (ocf_cache_is_device_attached(cache)) {
		cache_mng_core_deinit_attached_meta(cache, core_id);
		cache_mng_core_remove_from_cleaning_pol(cache, core_id);
	}

	cache_mng_core_remove_from_meta(cache, core_id);
	cache_mng_core_remove_from_cache(cache, core_id);
	cache_mng_core_close(cache, core_id);

	/* Persist core removal in the superblock */
	ocf_metadata_flush_superblock(cache);

	return 0;
}
/*
 * Remove/detach core @core_id without taking the management lock -
 * the caller must hold it.
 */
int ocf_mngt_cache_remove_core_nolock(ocf_cache_t cache, ocf_core_id_t core_id,
		bool detach)
{
	ocf_core_t core;
	const char *name;
	int rc;

	OCF_CHECK_NULL(cache);

	rc = ocf_core_get(cache, core_id, &core);
	if (rc < 0)
		return -OCF_ERR_CORE_NOT_AVAIL;

	ocf_core_log(core, log_debug, "Removing core\n");

	/* Grab the name before the core object is torn down */
	name = ocf_core_get_name(core);

	rc = _ocf_mngt_cache_remove_core(core, detach);
	if (!rc) {
		ocf_cache_log(cache, log_info, "Core %s successfully removed\n",
				name);
	} else {
		ocf_cache_log(cache, log_err, "Removing core %s failed\n",
				name);
	}

	return rc;
}
/*
 * Public entry point: remove/detach a core under the management lock.
 */
int ocf_mngt_cache_remove_core(ocf_cache_t cache, ocf_core_id_t core_id,
		bool detach)
{
	int rc;

	OCF_CHECK_NULL(cache);

	rc = ocf_mngt_cache_lock(cache);
	if (rc)
		return rc;

	rc = ocf_mngt_cache_remove_core_nolock(cache, core_id, detach);

	ocf_mngt_cache_unlock(cache);

	return rc;
}

View File

@@ -0,0 +1,123 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../ocf_core_priv.h"
#include "../ocf_ctx_priv.h"
/*
 * Initialize the context-wide core pool to an empty list.
 */
void ocf_mngt_core_pool_init(ocf_ctx_t ctx)
{
	OCF_CHECK_NULL(ctx);

	INIT_LIST_HEAD(&ctx->core_pool.core_pool_head);
}
/*
 * Return the number of data objects currently held in the core pool.
 */
int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx)
{
	int result;

	OCF_CHECK_NULL(ctx);

	/* Snapshot the counter under the context lock */
	env_mutex_lock(&ctx->lock);
	result = ctx->core_pool.core_pool_count;
	env_mutex_unlock(&ctx->lock);

	return result;
}
/*
 * Create and open a data object for @uuid/@type and add it to the
 * core pool. The pool takes ownership of the object on success.
 */
int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type)
{
	ocf_data_obj_t new_obj;
	int rc;

	OCF_CHECK_NULL(ctx);

	rc = ocf_ctx_data_obj_create(ctx, &new_obj, uuid, type);
	if (rc)
		return rc;

	rc = ocf_data_obj_open(new_obj);
	if (rc) {
		/* Could not open the device - drop the object */
		ocf_data_obj_deinit(new_obj);
		return rc;
	}

	env_mutex_lock(&ctx->lock);
	list_add(&new_obj->core_pool_item, &ctx->core_pool.core_pool_head);
	ctx->core_pool.core_pool_count++;
	env_mutex_unlock(&ctx->lock);

	return rc;
}
/*
 * Call @visitor for each pooled object's UUID, under the context lock.
 * Iteration stops at the first non-zero visitor result, which is
 * returned to the caller.
 */
int ocf_mngt_core_pool_visit(ocf_ctx_t ctx,
		int (*visitor)(ocf_uuid_t, void *), void *visitor_ctx)
{
	ocf_data_obj_t iter;
	int rc = 0;

	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(visitor);

	env_mutex_lock(&ctx->lock);
	list_for_each_entry(iter, &ctx->core_pool.core_pool_head,
			core_pool_item) {
		rc = visitor(&iter->uuid, visitor_ctx);
		if (rc)
			break;
	}
	env_mutex_unlock(&ctx->lock);

	return rc;
}
/*
 * Find a pooled data object matching @type and @uuid.
 *
 * UUIDs are compared up to the shorter of the two sizes. Returns the
 * matching object, or NULL when none is found.
 *
 * NOTE(review): unlike ocf_mngt_core_pool_visit(), this walks the pool
 * list without taking ctx->lock - presumably callers serialize access
 * themselves; confirm against call sites.
 */
ocf_data_obj_t ocf_mngt_core_pool_lookup(ocf_ctx_t ctx, ocf_uuid_t uuid,
		ocf_data_obj_type_t type)
{
	ocf_data_obj_t sobj;

	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(uuid);
	OCF_CHECK_NULL(uuid->data);

	list_for_each_entry(sobj, &ctx->core_pool.core_pool_head,
			core_pool_item) {
		if (sobj->type == type && !env_strncmp(sobj->uuid.data,
				uuid->data, min(sobj->uuid.size, uuid->size))) {
			return sobj;
		}
	}

	return NULL;
}
/*
 * Unlink @obj from the core pool and release it. Assumes the object's
 * device is already closed (see ocf_mngt_core_pool_close_and_remove).
 */
void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(obj);

	/* Unlink under the lock, then free outside of it */
	env_mutex_lock(&ctx->lock);
	ctx->core_pool.core_pool_count--;
	list_del(&obj->core_pool_item);
	env_mutex_unlock(&ctx->lock);

	ocf_data_obj_deinit(obj);
}
/*
 * Close @obj's underlying device, then drop it from the core pool.
 */
void ocf_mngt_core_pool_close_and_remove(ocf_ctx_t ctx, ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(obj);

	ocf_data_obj_close(obj);
	ocf_mngt_core_pool_remove(ctx, obj);
}
/*
 * Drain the core pool, closing and releasing every pooled object.
 */
void ocf_mngt_core_pool_deinit(ocf_ctx_t ctx)
{
	ocf_data_obj_t curr, next;

	OCF_CHECK_NULL(ctx);

	/* Safe iteration - each entry is unlinked while walking */
	list_for_each_entry_safe(curr, next, &ctx->core_pool.core_pool_head,
			core_pool_item) {
		ocf_mngt_core_pool_close_and_remove(ctx, curr);
	}
}

803
src/mngt/ocf_mngt_flush.c Normal file
View File

@@ -0,0 +1,803 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../cleaning/cleaning.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../ocf_def_priv.h"
/*
 * Enter flushing state: take the flush mutex (serializing flush
 * operations per cache), mark flushing as started, then wait until all
 * requests already counted as pending-dirty have drained.
 */
static inline void _ocf_mngt_begin_flush(struct ocf_cache *cache)
{
	env_mutex_lock(&cache->flush_mutex);

	env_atomic_set(&cache->flush_started, 1);

	/* Block until no dirty requests are in flight */
	env_waitqueue_wait(cache->pending_dirty_wq,
			!env_atomic_read(&cache->pending_dirty_requests));
}
/*
 * Leave flushing state and allow the next flush operation to proceed.
 */
static inline void _ocf_mngt_end_flush(struct ocf_cache *cache)
{
	env_atomic_set(&cache->flush_started, 0);

	env_mutex_unlock(&cache->flush_mutex);
}
/*
 * Return true when any added core still has dirty cache lines.
 */
bool ocf_mngt_cache_is_dirty(ocf_cache_t cache)
{
	uint32_t core;

	for (core = 0; core < OCF_CORE_MAX; core++) {
		if (!cache->core_conf_meta[core].added)
			continue;

		if (env_atomic_read(
				&cache->core_runtime_meta[core].dirty_clines))
			return true;
	}

	return false;
}
/************************FLUSH CORE CODE**************************************/
/* Returns:
* 0 if OK and tbl & num is filled:
* * tbl - table with sectors&cacheline
* * num - number of items in this table.
* other value means error.
* NOTE:
* Table is not sorted.
*/
static int _ocf_mngt_get_sectors(struct ocf_cache *cache, int core_id,
		struct flush_data **tbl, uint32_t *num)
{
	struct flush_data *entries;
	ocf_core_id_t line_core;
	uint64_t core_line;
	uint32_t line, found, to_find;

	to_find = env_atomic_read(&cache->core_runtime_meta[core_id].
			dirty_clines);
	if (!to_find) {
		/* Nothing dirty - return an empty table */
		*num = 0;
		*tbl = NULL;
		return 0;
	}

	entries = env_vmalloc(to_find * sizeof(**tbl));
	if (!entries)
		return -OCF_ERR_NO_MEM;

	/* Scan the collision table for this core's dirty, unlocked lines */
	for (line = 0, found = 0;
			line < cache->device->collision_table_entries;
			line++) {
		ocf_metadata_get_core_info(cache, line, &line_core,
				&core_line);

		if (line_core != core_id)
			continue;
		if (!metadata_test_valid_any(cache, line))
			continue;
		if (!metadata_test_dirty(cache, line))
			continue;
		if (ocf_cache_line_is_used(cache, line))
			continue;

		/* It's core_id cacheline and it's valid and it's dirty! */
		entries[found].cache_line = line;
		entries[found].core_line = core_line;
		entries[found].core_id = line_core;
		found++;

		/* All known dirty lines collected - stop early */
		if (found == to_find)
			break;
	}

	ocf_core_log(&cache->core_obj[core_id], log_debug,
			"%u dirty cache lines to clean\n", found);

	if (to_find != found) {
		ocf_cache_log(cache, log_debug, "Wrong number of dirty "
				"blocks for flushing core %s (%u!=%u)\n",
				cache->core_obj[core_id].name, found, to_find);
	}

	*tbl = entries;
	*num = found;
	return 0;
}
/*
 * Release a flush table allocated by _ocf_mngt_get_sectors().
 */
static void _ocf_mngt_free_sectors(void *tbl)
{
	env_vfree(tbl);
}
/*
 * Build a flush container (dirty-line table) for every core in @cache.
 *
 * On success *fctbl/*fcnum describe the containers; *fcnum == 0 means
 * there is nothing to flush (no cores or no dirty lines) and no table
 * is left allocated. Returns 0 or -OCF_ERR_NO_MEM.
 *
 * Fix: the per-core env_vmalloc() of flush_data was previously
 * unchecked, leading to a NULL dereference in the fill pass on
 * allocation failure; now the whole allocation is rolled back.
 */
static int _ocf_mngt_get_flush_containers(ocf_cache_t cache,
		struct flush_container **fctbl, uint32_t *fcnum)
{
	struct flush_container *fc;
	struct flush_container *curr;
	uint32_t *core_revmap;
	uint32_t num;
	uint64_t core_line;
	ocf_core_id_t core_id;
	uint32_t i, j, dirty = 0;
	int step = 0;

	/*
	 * TODO: Create containers for each physical device, not for
	 * each core. Cores can be partitions of single device.
	 */
	num = cache->conf_meta->core_obj_count;
	if (num == 0) {
		*fcnum = 0;
		return 0;
	}

	core_revmap = env_vzalloc(sizeof(*core_revmap) * OCF_CORE_MAX);
	if (!core_revmap)
		return -OCF_ERR_NO_MEM;

	/* TODO: Alloc flush_containers and data tables in single allocation */
	fc = env_vzalloc(sizeof(**fctbl) * num);
	if (!fc) {
		env_vfree(core_revmap);
		return -OCF_ERR_NO_MEM;
	}

	/* First pass: count dirty lines per core and allocate data tables */
	for (i = 0, j = 0; i < OCF_CORE_MAX; i++) {
		if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap))
			continue;

		fc[j].core_id = i;
		core_revmap[i] = j;

		/* Check for dirty blocks */
		fc[j].count = env_atomic_read(&cache->
				core_runtime_meta[i].dirty_clines);
		dirty += fc[j].count;

		if (fc[j].count) {
			fc[j].flush_data = env_vmalloc(fc[j].count *
					sizeof(*fc[j].flush_data));
			if (!fc[j].flush_data) {
				/* Roll back everything allocated so far */
				uint32_t k;

				for (k = 0; k < j; k++)
					env_vfree(fc[k].flush_data);
				env_vfree(fc);
				env_vfree(core_revmap);
				return -OCF_ERR_NO_MEM;
			}
		}

		if (++j == cache->conf_meta->core_obj_count)
			break;
	}

	if (!dirty) {
		env_vfree(core_revmap);
		env_vfree(fc);
		*fcnum = 0;
		return 0;
	}

	/* Second pass: fill per-core tables from the collision table */
	for (i = 0, j = 0; i < cache->device->collision_table_entries; i++) {
		ocf_metadata_get_core_info(cache, i, &core_id, &core_line);

		if (!metadata_test_valid_any(cache, i))
			continue;
		if (!metadata_test_dirty(cache, i))
			continue;
		if (ocf_cache_line_is_used(cache, i))
			continue;

		curr = &fc[core_revmap[core_id]];

		ENV_BUG_ON(curr->iter >= curr->count);

		/* It's core_id cacheline and it's valid and it's dirty! */
		curr->flush_data[curr->iter].cache_line = i;
		curr->flush_data[curr->iter].core_line = core_line;
		curr->flush_data[curr->iter].core_id = core_id;
		curr->iter++;

		j++;
		/* stop if all cachelines were found */
		if (j == dirty)
			break;

		OCF_COND_RESCHED(step, 1000000)
	}

	if (dirty != j) {
		/* Dirty counters raced with ongoing I/O - trust what was
		 * actually collected. */
		ocf_cache_log(cache, log_debug, "Wrong number of dirty "
				"blocks (%u!=%u)\n", j, dirty);

		for (i = 0; i < num; i++)
			fc[i].count = fc[i].iter;
	}

	for (i = 0; i < num; i++)
		fc[i].iter = 0;

	env_vfree(core_revmap);
	*fctbl = fc;
	*fcnum = num;
	return 0;
}
/*
 * Release a container table (and each container's data table) built
 * by _ocf_mngt_get_flush_containers().
 */
static void _ocf_mngt_free_flush_containers(struct flush_container *fctbl,
		uint32_t num)
{
	uint32_t idx;

	for (idx = 0; idx < num; idx++)
		env_vfree(fctbl[idx].flush_data);

	env_vfree(fctbl);
}
/*
* OCF will try to guess disk speed etc. and adjust flushing block
* size accordingly, however these bounds shall be respected regardless
* of disk speed, cache line size configured etc.
*/
#define OCF_MNG_FLUSH_MIN (4*MiB / ocf_line_size(cache))
#define OCF_MNG_FLUSH_MAX (100*MiB / ocf_line_size(cache))
/*
 * Issue the next portion of a container's dirty lines for cleaning.
 *
 * The portion size adapts to observed throughput: the previous portion
 * took (ticks2 - ticks1), and flush_portion is rescaled so that one
 * portion lasts roughly one second, rounded down to a multiple of 1024
 * lines and clamped to [OCF_MNG_FLUSH_MIN, OCF_MNG_FLUSH_MAX].
 */
static void _ocf_mngt_flush_portion(struct flush_container *fc)
{
	ocf_cache_t cache = fc->cache;
	uint64_t flush_portion_div;
	uint32_t curr_count;

	flush_portion_div = env_ticks_to_msecs(fc->ticks2 - fc->ticks1);
	if (unlikely(!flush_portion_div))
		flush_portion_div = 1;

	/* Scale the portion to ~1000 ms of work based on the last round */
	fc->flush_portion = fc->flush_portion * 1000 / flush_portion_div;
	fc->flush_portion &= ~0x3ffULL;

	/* regardless those calculations, limit flush portion to be
	 * between OCF_MNG_FLUSH_MIN and OCF_MNG_FLUSH_MAX
	 */
	fc->flush_portion = MIN(fc->flush_portion, OCF_MNG_FLUSH_MAX);
	fc->flush_portion = MAX(fc->flush_portion, OCF_MNG_FLUSH_MIN);

	/* Never issue past the end of the data table */
	curr_count = MIN(fc->count - fc->iter, fc->flush_portion);

	ocf_cleaner_do_flush_data_async(fc->cache,
			&fc->flush_data[fc->iter],
			curr_count, &fc->attribs);

	fc->iter += curr_count;
}
/*
 * Completion callback for one flushed portion.
 *
 * Records the finish timestamp, latches the first non-zero error,
 * marks the container ready for its next portion and wakes the
 * coordinator loop in _ocf_mngt_flush_containers().
 */
static void _ocf_mngt_flush_end(void *private_data, int error)
{
	struct flush_container *fc = private_data;

	fc->ticks2 = env_get_tick_count();

	/* Keep only the first error reported by any portion */
	env_atomic_cmpxchg(fc->error, 0, error);

	env_atomic_set(&fc->completed, 1);
	env_atomic_inc(fc->progress);
	env_waitqueue_wake_up(fc->wq);
}
/*
 * Coordinate asynchronous flushing of @fcnum containers.
 *
 * Each container flushes one portion at a time; when a portion
 * completes, _ocf_mngt_flush_end() marks the container completed and
 * wakes this loop to issue the next portion. The metadata write lock
 * (held by the caller) is dropped while waiting so completions can
 * make progress. Returns 0, the first portion error, or
 * -OCF_ERR_FLUSHING_INTERRUPTED.
 */
static int _ocf_mngt_flush_containers(ocf_cache_t cache,
		struct flush_container *fctbl, uint32_t fcnum,
		bool allow_interruption)
{
	uint32_t fc_to_flush;
	env_waitqueue wq;
	env_atomic progress; /* incremented each time flushing of a portion of a
				container is completed */
	env_atomic error;
	ocf_core_t core;
	bool interrupt = false;
	int i;

	if (fcnum == 0)
		return 0;

	env_waitqueue_init(&wq);

	/* Sort data. Smallest sectors first (0...n). */
	ocf_cleaner_sort_flush_containers(fctbl, fcnum);

	env_atomic_set(&error, 0);

	/* Wire every container to the shared completion state */
	for (i = 0; i < fcnum; i++) {
		fctbl[i].attribs.cache_line_lock = true;
		fctbl[i].attribs.metadata_locked = true;
		fctbl[i].attribs.cmpl_context = &fctbl[i];
		fctbl[i].attribs.cmpl_fn = _ocf_mngt_flush_end;
		fctbl[i].attribs.io_queue = 0;
		fctbl[i].cache = cache;
		fctbl[i].progress = &progress;
		fctbl[i].error = &error;
		fctbl[i].wq = &wq;
		fctbl[i].flush_portion = OCF_MNG_FLUSH_MIN;
		fctbl[i].ticks1 = 0;
		fctbl[i].ticks2 = UINT_MAX;
		/* Start as "completed" so the first pass issues a portion */
		env_atomic_set(&fctbl[i].completed, 1);
	}

	for (fc_to_flush = fcnum; fc_to_flush > 0;) {
		env_atomic_set(&progress, 0);

		for (i = 0; i < fcnum; i++) {
			/* Skip containers whose portion is still in flight */
			if (!env_atomic_read(&fctbl[i].completed))
				continue;

			core = &cache->core_obj[fctbl[i].core_id];
			env_atomic_set(&core->flushed, fctbl[i].iter);
			env_atomic_set(&fctbl[i].completed, 0);

			/* Finished, interrupted or failed - retire it */
			if (fctbl[i].iter == fctbl[i].count || interrupt ||
					env_atomic_read(&error)) {
				fc_to_flush--;
				continue;
			}

			_ocf_mngt_flush_portion(&fctbl[i]);
		}

		if (fc_to_flush) {
			/* Drop the metadata lock while portions are in
			 * flight so their completions can proceed */
			ocf_metadata_unlock(cache, OCF_METADATA_WR);
			env_cond_resched();
			env_waitqueue_wait(wq, env_atomic_read(&progress));
			ocf_metadata_lock(cache, OCF_METADATA_WR);
		}

		if (cache->flushing_interrupted && !interrupt) {
			if (allow_interruption) {
				interrupt = true;
				ocf_cache_log(cache, log_info,
						"Flushing interrupted by "
						"user\n");
			} else {
				ocf_cache_log(cache, log_err,
						"Cannot interrupt flushing\n");
			}
		}
	}

	return interrupt ? -OCF_ERR_FLUSHING_INTERRUPTED :
			env_atomic_read(&error);
}
/*
 * Flush all dirty lines of a single core using one flush container.
 * Takes the metadata write lock for the duration of the operation.
 */
static int _ocf_mngt_flush_core(ocf_core_t core, bool allow_interruption)
{
	ocf_cache_t cache = core->obj.cache;
	ocf_core_id_t core_id = ocf_core_get_id(core);
	struct flush_container fc;
	int result;

	ocf_metadata_lock(cache, OCF_METADATA_WR);

	/* Collect this core's dirty lines into a single container */
	result = _ocf_mngt_get_sectors(cache, core_id,
			&fc.flush_data, &fc.count);
	if (result) {
		ocf_core_log(core, log_err, "Flushing operation aborted, "
				"no memory\n");
		goto out;
	}

	fc.core_id = core_id;
	fc.iter = 0;

	result = _ocf_mngt_flush_containers(cache, &fc, 1, allow_interruption);

	_ocf_mngt_free_sectors(fc.flush_data);

out:
	ocf_metadata_unlock(cache, OCF_METADATA_WR);
	return result;
}
/*
 * Flush dirty data of all cores in one coordinated pass. Takes the
 * metadata write lock for the duration of the operation.
 */
static int _ocf_mngt_flush_all_cores(ocf_cache_t cache, bool allow_interruption)
{
	struct flush_container *containers = NULL;
	uint32_t container_num = 0;
	int result;

	ocf_metadata_lock(cache, OCF_METADATA_WR);

	/* Get all 'dirty' sectors for all cores */
	result = _ocf_mngt_get_flush_containers(cache, &containers,
			&container_num);
	if (result) {
		ocf_cache_log(cache, log_err, "Flushing operation aborted, "
				"no memory\n");
		goto out;
	}

	result = _ocf_mngt_flush_containers(cache, containers, container_num,
			allow_interruption);

	_ocf_mngt_free_flush_containers(containers, container_num);

out:
	ocf_metadata_unlock(cache, OCF_METADATA_WR);
	return result;
}
/**
* Flush all the dirty data stored on cache (all the cores attached to it)
* @param cache cache instance to which operation applies
* @param allow_interruption whenever to allow interruption of flushing process.
* if set to 0, all requests to interrupt flushing will be ignored
*/
/*
 * Repeatedly flush all cores until no dirty data remains (new writes
 * may re-dirty lines during a pass) or an error/interrupt occurs, then
 * reset per-core flush progress counters.
 */
static int _ocf_mng_cache_flush_nolock(ocf_cache_t cache, bool interruption)
{
	int result = 0;
	int i, j;

	env_atomic_set(&cache->flush_in_progress, 1);
	cache->flushing_interrupted = 0;

	do {
		env_cond_resched();
		result = _ocf_mngt_flush_all_cores(cache, interruption);
		if (result) {
			/* Cleaning error */
			break;
		}
	} while (ocf_mngt_cache_is_dirty(cache));

	env_atomic_set(&cache->flush_in_progress, 0);

	/* Reset per-core flush progress counters */
	for (i = 0, j = 0; i < OCF_CORE_MAX; i++) {
		if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap))
			continue;

		env_atomic_set(&cache->core_obj[i].flushed, 0);

		if (++j == cache->conf_meta->core_obj_count)
			break;
	}

	return result;
}
/*
 * Flush all dirty data of @cache. Caller must hold the cache lock;
 * the flush mutex is taken internally.
 */
int ocf_mngt_cache_flush_nolock(ocf_cache_t cache, bool interruption)
{
	int result;

	OCF_CHECK_NULL(cache);

	/* Flushing requires complete metadata for every core */
	if (ocf_cache_is_incomplete(cache)) {
		ocf_cache_log(cache, log_err, "Cannot flush cache - "
				"cache is in incomplete state\n");
		return -OCF_ERR_CACHE_IN_INCOMPLETE_STATE;
	}

	ocf_cache_log(cache, log_info, "Flushing cache\n");

	_ocf_mngt_begin_flush(cache);
	result = _ocf_mng_cache_flush_nolock(cache, interruption);
	_ocf_mngt_end_flush(cache);

	if (!result)
		ocf_cache_log(cache, log_info, "Flushing cache completed\n");

	return result;
}
/*
 * Flush a single core until it has no dirty lines, an interruption is
 * requested, or the core device reports a write error.
 */
static int _ocf_mng_core_flush_nolock(ocf_core_t core, bool interruption)
{
	struct ocf_cache *cache = core->obj.cache;
	ocf_core_id_t core_id = ocf_core_get_id(core);
	int result;

	cache->flushing_interrupted = 0;

	do {
		env_cond_resched();
		result = _ocf_mngt_flush_core(core, interruption);
		if (result == -OCF_ERR_FLUSHING_INTERRUPTED ||
				result == -OCF_ERR_WRITE_CORE)
			break;
	} while (env_atomic_read(&cache->core_runtime_meta[core_id].
			dirty_clines));

	env_atomic_set(&core->flushed, 0);

	return result;
}
/*
 * Flush a single core by id. Caller must hold the cache lock; the
 * flush mutex is taken internally. Inactive cores are rejected.
 */
int ocf_mngt_core_flush_nolock(ocf_cache_t cache, ocf_core_id_t id,
		bool interruption)
{
	ocf_core_t core;
	int result;

	OCF_CHECK_NULL(cache);

	result = ocf_core_get(cache, id, &core);
	if (result < 0)
		return -OCF_ERR_CORE_NOT_AVAIL;

	/* Detached (inactive) cores cannot be flushed */
	if (!core->opened) {
		ocf_core_log(core, log_err, "Cannot flush - core is in "
				"inactive state\n");
		return -OCF_ERR_CORE_IN_INACTIVE_STATE;
	}

	ocf_core_log(core, log_info, "Flushing\n");

	_ocf_mngt_begin_flush(cache);
	result = _ocf_mng_core_flush_nolock(core, interruption);
	_ocf_mngt_end_flush(cache);

	if (!result)
		ocf_cache_log(cache, log_info, "Flushing completed\n");

	return result;
}
/*
 * Public entry point: flush the whole cache under a read lock (flush
 * does not modify cache configuration).
 */
int ocf_mngt_cache_flush(ocf_cache_t cache, bool interruption)
{
	int result;

	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	/* A caching device must be attached for flush to make sense */
	if (!ocf_cache_is_device_attached(cache)) {
		result = -OCF_ERR_INVAL;
		goto unlock;
	}

	result = ocf_mngt_cache_flush_nolock(cache, interruption);

unlock:
	ocf_mngt_cache_read_unlock(cache);
	return result;
}
/*
 * Public entry point: flush one core under a cache read lock.
 */
int ocf_mngt_core_flush(ocf_cache_t cache, ocf_core_id_t id, bool interruption)
{
	int result;

	/* lock read only */
	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	/* A caching device must be attached for flush to make sense */
	if (!ocf_cache_is_device_attached(cache)) {
		result = -OCF_ERR_INVAL;
		goto unlock;
	}

	result = ocf_mngt_core_flush_nolock(cache, id, interruption);

unlock:
	ocf_mngt_cache_read_unlock(cache);
	return result;
}
/*
 * Purge a core: flush all of its dirty data, then invalidate (sparse)
 * its entire mapped address range.
 *
 * Fix: on ocf_core_get() failure the function previously released the
 * cache lock with ocf_mngt_cache_unlock() although it had taken the
 * READ lock - now the matching ocf_mngt_cache_read_unlock() is used.
 */
int ocf_mngt_core_purge(ocf_cache_t cache, ocf_core_id_t core_id, bool interruption)
{
	int result = 0;
	uint64_t core_size = ~0ULL;
	ocf_core_t core;

	OCF_CHECK_NULL(cache);

	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	result = ocf_core_get(cache, core_id, &core);
	if (result < 0) {
		ocf_mngt_cache_read_unlock(cache);
		return -OCF_ERR_CORE_NOT_AVAIL;
	}

	/* Zero-length device means length unknown - sparse everything */
	core_size = ocf_data_obj_get_length(&cache->core_obj[core_id].obj);
	core_size = core_size ?: ~0ULL;

	_ocf_mngt_begin_flush(cache);

	ocf_core_log(core, log_info, "Purging\n");

	result = _ocf_mng_core_flush_nolock(core, interruption);
	if (result)
		goto err;

	/* Invalidate the whole core address range */
	OCF_METADATA_LOCK_WR();
	result = ocf_metadata_sparse_range(cache, core_id, 0,
			core_size);
	OCF_METADATA_UNLOCK_WR();

err:
	_ocf_mngt_end_flush(cache);
	ocf_mngt_cache_read_unlock(cache);

	return result;
}
/*
 * Purge the whole cache: flush all dirty data, then invalidate every
 * mapped cache line of every core.
 */
int ocf_mngt_cache_purge(ocf_cache_t cache, bool interruption)
{
	int result;

	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	_ocf_mngt_begin_flush(cache);

	ocf_cache_log(cache, log_info, "Purging\n");

	/* Flush first so no dirty data is lost by the invalidation */
	result = _ocf_mng_cache_flush_nolock(cache, interruption);
	if (result)
		goto err;

	OCF_METADATA_LOCK_WR();
	result = ocf_metadata_sparse_range(cache, OCF_CORE_ID_INVALID, 0,
			~0ULL);
	OCF_METADATA_UNLOCK_WR();

err:
	_ocf_mngt_end_flush(cache);
	ocf_mngt_cache_read_unlock(cache);

	return result;
}
/*
 * Request interruption of an ongoing flush. The flag is polled by the
 * flushing loop between portions.
 */
int ocf_mngt_cache_flush_interrupt(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);

	ocf_cache_log(cache, log_alert, "Flushing interrupt\n");

	cache->flushing_interrupted = 1;

	return 0;
}
/*
 * Switch the cache's cleaning policy to @type.
 *
 * The old policy is deinitialized before the new one is initialized;
 * if the new policy's initialization fails, the policy falls back to
 * nop and -OCF_ERR_INVAL is returned. Any change is persisted to the
 * superblock (a flush failure is logged but does not fail the call).
 */
int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type)
{
	ocf_cleaning_t old_type;
	int ret;

	if (type < 0 || type >= ocf_cleaning_max)
		return -OCF_ERR_INVAL;

	ret = ocf_mngt_cache_lock(cache);
	if (ret)
		return ret;

	old_type = cache->conf_meta->cleaning_policy_type;

	if (type == old_type) {
		/* No-op change - report success without touching metadata */
		ocf_cache_log(cache, log_info, "Cleaning policy %s is already "
				"set\n", cleaning_policy_ops[old_type].name);
		goto out;
	}

	ocf_metadata_lock(cache, OCF_METADATA_WR);

	/* Tear down the old policy before bringing up the new one */
	if (cleaning_policy_ops[old_type].deinitialize)
		cleaning_policy_ops[old_type].deinitialize(cache);

	if (cleaning_policy_ops[type].initialize) {
		if (cleaning_policy_ops[type].initialize(cache, 1)) {
			/*
			 * If initialization of new cleaning policy failed,
			 * we set cleaning policy to nop.
			 */
			type = ocf_cleaning_nop;
			ret = -OCF_ERR_INVAL;
		}
	}

	cache->conf_meta->cleaning_policy_type = type;

	if (type != old_type) {
		/*
		 * If operation was successfull or cleaning policy changed,
		 * we need to flush superblock.
		 */
		if (ocf_metadata_flush_superblock(cache)) {
			ocf_cache_log(cache, log_err,
					"Failed to flush superblock! Changes "
					"in cache config are not persistent!\n");
		}
	}

	ocf_cache_log(cache, log_info, "Changing cleaning policy from "
			"%s to %s\n", cleaning_policy_ops[old_type].name,
			cleaning_policy_ops[type].name);

	ocf_metadata_unlock(cache, OCF_METADATA_WR);

out:
	ocf_mngt_cache_unlock(cache);
	return ret;
}
/*
 * Read the currently configured cleaning policy into *type.
 */
int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type)
{
	int result;

	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	/* Snapshot under the cache read lock */
	*type = cache->conf_meta->cleaning_policy_type;

	ocf_mngt_cache_read_unlock(cache);

	return 0;
}
/*
 * Set a runtime parameter of cleaning policy @type and persist the
 * change to the superblock on success.
 */
int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type,
		uint32_t param_id, uint32_t param_value)
{
	int result;

	if (type < 0 || type >= ocf_cleaning_max)
		return -OCF_ERR_INVAL;

	/* Policy must support runtime parameters */
	if (!cleaning_policy_ops[type].set_cleaning_param)
		return -OCF_ERR_INVAL;

	result = ocf_mngt_cache_lock(cache);
	if (result)
		return result;

	ocf_metadata_lock(cache, OCF_METADATA_WR);

	result = cleaning_policy_ops[type].set_cleaning_param(cache,
			param_id, param_value);

	if (result == 0) {
		/* Persist the successful change; a flush failure is logged
		 * but does not fail the call */
		if (ocf_metadata_flush_superblock(cache)) {
			ocf_cache_log(cache, log_err,
					"Failed to flush superblock! Changes "
					"in cache config are not persistent!\n");
		}
	}

	ocf_metadata_unlock(cache, OCF_METADATA_WR);

	ocf_mngt_cache_unlock(cache);

	return result;
}
/*
 * Read a runtime parameter of cleaning policy @type into *param_value.
 */
int ocf_mngt_cache_cleaning_get_param(ocf_cache_t cache, ocf_cleaning_t type,
		uint32_t param_id, uint32_t *param_value)
{
	int result;

	if (type < 0 || type >= ocf_cleaning_max)
		return -OCF_ERR_INVAL;

	/* Policy must expose runtime parameters */
	if (!cleaning_policy_ops[type].get_cleaning_param)
		return -OCF_ERR_INVAL;

	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	result = cleaning_policy_ops[type].get_cleaning_param(cache,
			param_id, param_value);

	ocf_mngt_cache_read_unlock(cache);

	return result;
}

View File

@@ -0,0 +1,273 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../eviction/ops.h"
#include "ocf_env.h"
/*
 * Sum the configured minimum sizes of all valid partitions.
 */
static uint64_t _ocf_mngt_count_parts_min_size(struct ocf_cache *cache)
{
	struct ocf_user_part *curr;
	ocf_part_id_t id;
	uint64_t total = 0;

	for_each_part(cache, curr, id) {
		if (ocf_part_is_valid(curr))
			total += curr->config->min_size;
	}

	return total;
}
/*
 * Register partition @part_id with the given name, size bounds and
 * priority, mark it valid/invalid per @valid and insert it into the
 * sorted partition list.
 */
int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
		ocf_part_id_t part_id, const char *name, uint32_t min_size,
		uint32_t max_size, uint8_t priority, bool valid)
{
	uint32_t name_cap;

	/* Validate arguments before touching partition state */
	if (!name || part_id >= OCF_IO_CLASS_MAX)
		return -OCF_ERR_INVAL;

	if (cache->user_parts[part_id].config->flags.valid)
		return -OCF_ERR_INVAL;

	if (max_size > PARTITION_SIZE_MAX)
		return -OCF_ERR_INVAL;

	if (env_strnlen(name, OCF_IO_CLASS_NAME_MAX) >=
			OCF_IO_CLASS_NAME_MAX) {
		ocf_cache_log(cache, log_info,
				"Name of the partition is too long\n");
		return -OCF_ERR_INVAL;
	}

	name_cap = sizeof(cache->user_parts[part_id].config->name);
	if (env_strncpy(cache->user_parts[part_id].config->name, name_cap,
			name, name_cap))
		return -OCF_ERR_INVAL;

	cache->user_parts[part_id].config->min_size = min_size;
	cache->user_parts[part_id].config->max_size = max_size;
	cache->user_parts[part_id].config->priority = priority;
	cache->user_parts[part_id].config->cache_mode = ocf_cache_mode_max;

	/* Make the partition visible and keep the list priority-sorted */
	ocf_part_set_valid(cache, part_id, valid);
	ocf_lst_add(&cache->lst_part, part_id);
	ocf_part_sort(cache);

	cache->user_parts[part_id].config->flags.added = 1;

	return 0;
}
/*
 * Update a partition's min/max size. @max is silently clamped to
 * PARTITION_SIZE_MAX; the sum of all minimum sizes must stay below
 * the cache's capacity.
 */
static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
		ocf_part_id_t part_id, uint32_t min, uint32_t max)
{
	struct ocf_user_part *target = &cache->user_parts[part_id];

	if (min > max)
		return -OCF_ERR_INVAL;

	/* Reject configurations where the sum of all min_sizes would
	 * exceed the cache size */
	if (_ocf_mngt_count_parts_min_size(cache) + min
			>= cache->device->collision_table_entries)
		return -OCF_ERR_INVAL;

	if (max > PARTITION_SIZE_MAX)
		max = PARTITION_SIZE_MAX;

	target->config->min_size = min;
	target->config->max_size = max;

	return 0;
}
/*
 * Apply a single IO class (partition) configuration change.
 *
 * Behavior depends on the target partition and cfg->name:
 *  - PARTITION_DEFAULT: may only be updated, never removed;
 *  - non-empty name: add a new IO class or update an existing one;
 *  - empty name: remove (invalidate) an existing IO class.
 * Called with the cache management lock held; the metadata write lock
 * is taken for the duration of the update.
 */
static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
		const struct ocf_mngt_io_class_config *cfg)
{
	int result = -1;
	struct ocf_user_part *dest_part;
	ocf_part_id_t part_id = cfg->class_id;
	const char *name = cfg->name;
	int16_t prio = cfg->prio;
	ocf_cache_mode_t cache_mode = cfg->cache_mode;
	uint32_t min = cfg->min_size;
	uint32_t max = cfg->max_size;

	OCF_CHECK_NULL(cache->device);

	OCF_METADATA_LOCK_WR();

	dest_part = &cache->user_parts[part_id];

	/* Only partitions registered at cache start can be configured */
	if (!ocf_part_is_added(dest_part)) {
		ocf_cache_log(cache, log_info, "Setting IO class, id: %u, "
				"name: '%s' [ ERROR ]\n", part_id, dest_part->config->name);
		OCF_METADATA_UNLOCK_WR();
		return -OCF_ERR_INVAL;
	}

	if (part_id == PARTITION_DEFAULT) {
		/* Special behavior for default partition */
		if (!name[0]) {
			/* Removing of default partition is not allowed */
			ocf_cache_log(cache, log_info,
					"Cannot remove unclassified IO class, "
					"id: %u [ ERROR ]\n", part_id);
			OCF_METADATA_UNLOCK_WR();
			return 0;
		}

		/* Try set partition size */
		if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
			ocf_cache_log(cache, log_info,
					"Setting IO class size, id: %u, name: '%s' "
					"[ ERROR ]\n", part_id, dest_part->config->name);
			OCF_METADATA_UNLOCK_WR();
			return -OCF_ERR_INVAL;
		}
		ocf_part_set_prio(cache, dest_part, prio);
		ocf_part_sort(cache);
		dest_part->config->cache_mode = cache_mode;

		ocf_cache_log(cache, log_info,
				"Updating Unclassified IO class, id: "
				"%u [ OK ]\n", part_id);

		OCF_METADATA_UNLOCK_WR();
		return 0;
	}

	if (name[0]) {
		/* Setting (add new or update existing IO class) */
		result = env_strncpy(dest_part->config->name, sizeof(dest_part->config->name), name,
				sizeof(dest_part->config->name));
		if (result) {
			OCF_METADATA_UNLOCK_WR();
			return result;
		}

		/* Try set partition size */
		if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
			ocf_cache_log(cache, log_info,
					"Setting IO class size, id: %u, name: '%s' "
					"[ ERROR ]\n", part_id, dest_part->config->name);
			OCF_METADATA_UNLOCK_WR();
			return -OCF_ERR_INVAL;
		}

		if (ocf_part_is_valid(dest_part)) {
			/* Updating existing */
			ocf_cache_log(cache, log_info, "Updating existing IO "
					"class, id: %u, name: '%s' [ OK ]\n",
					part_id, dest_part->config->name);
		} else {
			/* Adding new */
			ocf_part_set_valid(cache, part_id, true);
			ocf_cache_log(cache, log_info, "Adding new IO class, "
					"id: %u, name: '%s' [ OK ]\n", part_id,
					dest_part->config->name);
		}

		ocf_part_set_prio(cache, dest_part, prio);
		dest_part->config->cache_mode = cache_mode;
		result = 0;
	} else {
		/* Clearing (empty name requests removal) */
		if (ocf_part_is_valid(dest_part)) {
			/* Removing */
			result = 0;
			ocf_part_set_valid(cache, part_id, false);
			ocf_cache_log(cache, log_info,
					"Removing IO class, id: %u [ %s ]\n",
					part_id, result ? "ERROR" : "OK");
		} else {
			/* Does not exist */
			result = -OCF_ERR_IO_CLASS_NOT_EXIST;
		}
	}

	/* Keep the partition list ordered by priority */
	ocf_part_sort(cache);

	OCF_METADATA_UNLOCK_WR();

	return result;
}
/*
 * Sanity-check an IO class configuration before it is applied.
 */
static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
		const struct ocf_mngt_io_class_config *cfg)
{
	if (cfg->class_id >= OCF_IO_CLASS_MAX)
		return -OCF_ERR_INVAL;

	/* TODO(r.baldyga): ocf_cache_mode_max is allowed for compatibility
	 * with OCF 3.1 kernel adapter (upgrade in flight) and casadm.
	 * Forbid ocf_cache_mode_max after fixing these problems.
	 */
	if (cfg->cache_mode < ocf_cache_mode_none ||
			cfg->cache_mode > ocf_cache_mode_max)
		return -OCF_ERR_INVAL;

	if (!ocf_part_is_name_valid(cfg->name)) {
		ocf_cache_log(cache, log_info,
				"The name of the partition is not valid\n");
		return -OCF_ERR_INVAL;
	}

	if (!ocf_part_is_prio_valid(cfg->prio)) {
		ocf_cache_log(cache, log_info,
				"Invalid value of the partition priority\n");
		return -OCF_ERR_INVAL;
	}

	return 0;
}
/*
 * Validate @cfg and apply the IO class configuration under the cache
 * management lock.
 *
 * Fix: the result of _ocf_mngt_io_class_configure() is now propagated
 * to the caller - previously the function always returned 0, silently
 * swallowing configuration errors.
 */
int ocf_mngt_io_class_configure(ocf_cache_t cache,
		const struct ocf_mngt_io_class_config *cfg)
{
	int result;

	OCF_CHECK_NULL(cache);

	result = _ocf_mngt_io_class_validate_cfg(cache, cfg);
	if (result)
		return result;

	result = ocf_mngt_cache_lock(cache);
	if (result)
		return result;

	result = _ocf_mngt_io_class_configure(cache, cfg);

	ocf_mngt_cache_unlock(cache);

	return result;
}

29
src/mngt/ocf_mngt_misc.c Normal file
View File

@@ -0,0 +1,29 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../ocf_ctx_priv.h"
/*
 * Count cache instances registered within the given OCF context.
 * Walks the context cache list under the context lock.
 */
uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx)
{
	uint32_t n = 0;
	struct ocf_cache *iter;

	OCF_CHECK_NULL(ctx);

	env_mutex_lock(&ctx->lock);

	/* list.h offers no size helper, so count by iteration. */
	list_for_each_entry(iter, &ctx->caches, list) {
		n++;
	}

	env_mutex_unlock(&ctx->lock);

	return n;
}

219
src/ocf_cache.c Normal file
View File

@@ -0,0 +1,219 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_cache_line.h"
#include "ocf_priv.h"
#include "ocf_cache_priv.h"
/* Return the bottom data object of the cache, or NULL when detached. */
ocf_data_obj_t ocf_cache_get_data_object(ocf_cache_t cache)
{
	if (!ocf_cache_is_device_attached(cache))
		return NULL;

	return &cache->device->obj;
}
/* Return the numeric id of the cache instance. */
ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache)
{
	ocf_cache_id_t id;

	OCF_CHECK_NULL(cache);

	id = cache->cache_id;
	return id;
}
/* Set the cache name; env_strncpy bounds the copy and reports failure. */
int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size)
{
	OCF_CHECK_NULL(cache);
	return env_strncpy(cache->name, sizeof(cache->name), src, src_size);
}
/* Return the cache name (owned by the cache, do not free). */
const char *ocf_cache_get_name(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return cache->name;
}
/* Test the 'incomplete' bit of the cache state. */
bool ocf_cache_is_incomplete(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return env_bit_test(ocf_cache_state_incomplete, &cache->cache_state);
}
/* Test the 'running' bit of the cache state. */
bool ocf_cache_is_running(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return env_bit_test(ocf_cache_state_running, &cache->cache_state);
}
/* Non-zero 'attached' counter means a cache device is attached. */
bool ocf_cache_is_device_attached(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);

	return env_atomic_read(&cache->attached);
}
/* Return the configured cache mode from the superblock metadata. */
ocf_cache_mode_t ocf_cache_get_mode(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return cache->conf_meta->cache_mode;
}
/*
 * Convert a dirty-since tick timestamp into "seconds dirty".
 * A zero timestamp means never dirtied and yields 0.
 */
static uint32_t _calc_dirty_for(uint64_t dirty_since)
{
	uint64_t elapsed_ms;

	if (!dirty_since)
		return 0;

	elapsed_ms = env_ticks_to_msecs(env_get_tick_count() - dirty_since);
	return elapsed_ms / 1000;
}
/*
 * Fill @info with a snapshot of cache statistics and configuration.
 *
 * Walks every valid core accumulating occupancy/dirty counters; taken
 * under the cache management read lock for the duration of the walk.
 *
 * Returns 0 on success, -OCF_ERR_INVAL for NULL @info, or the error
 * from ocf_mngt_cache_read_lock().
 */
int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
{
	uint32_t i;
	uint32_t cache_occupancy_total = 0;
	uint32_t dirty_blocks_total = 0;
	uint32_t initial_dirty_blocks_total = 0;
	uint32_t flushed_total = 0;
	uint32_t curr_dirty_cnt;
	uint64_t dirty_since = 0;	/* oldest per-core dirty timestamp */
	uint32_t init_dirty_cnt;
	uint64_t core_dirty_since;
	uint32_t dirty_blocks_inactive = 0;
	uint32_t cache_occupancy_inactive = 0;
	int result;

	OCF_CHECK_NULL(cache);

	if (!info)
		return -OCF_ERR_INVAL;

	result = ocf_mngt_cache_read_lock(cache);
	if (result)
		return result;

	ENV_BUG_ON(env_memset(info, sizeof(*info), 0));

	/* Device-dependent fields are only meaningful when attached. */
	info->attached = ocf_cache_is_device_attached(cache);
	if (info->attached) {
		info->data_obj_type = ocf_ctx_get_data_obj_type_id(cache->owner,
				cache->device->obj.type);
		info->size = cache->conf_meta->cachelines;
	}
	info->core_count = cache->conf_meta->core_obj_count;
	info->cache_mode = ocf_cache_get_mode(cache);

	/* iterate through all possibly valid core objcts, as list of
	 * valid objects may be not continuous
	 */
	for (i = 0; i != OCF_CORE_MAX; ++i) {
		if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap))
			continue;

		/* If current dirty blocks exceeds saved initial dirty
		 * blocks then update the latter
		 */
		curr_dirty_cnt = env_atomic_read(&cache->
				core_runtime_meta[i].dirty_clines);
		init_dirty_cnt = env_atomic_read(&cache->
				core_runtime_meta[i].initial_dirty_clines);
		if (init_dirty_cnt &&
				(curr_dirty_cnt > init_dirty_cnt)) {
			env_atomic_set(
				&cache->core_runtime_meta[i].
					initial_dirty_clines,
				env_atomic_read(&cache->
					core_runtime_meta[i].dirty_clines));
		}
		cache_occupancy_total += env_atomic_read(&cache->
				core_runtime_meta[i].cached_clines);
		dirty_blocks_total += env_atomic_read(&(cache->
				core_runtime_meta[i].dirty_clines));
		initial_dirty_blocks_total += env_atomic_read(&(cache->
				core_runtime_meta[i].initial_dirty_clines));

		/* Cores whose backing device is closed count as inactive. */
		if (!cache->core_obj[i].opened) {
			cache_occupancy_inactive += env_atomic_read(&cache->
				core_runtime_meta[i].cached_clines);
			dirty_blocks_inactive += env_atomic_read(&(cache->
				core_runtime_meta[i].dirty_clines));
		}

		/* Track the oldest (smallest) dirty timestamp seen. */
		core_dirty_since = env_atomic64_read(&cache->
				core_runtime_meta[i].dirty_since);
		if (core_dirty_since) {
			dirty_since = (dirty_since ?
				MIN(dirty_since, core_dirty_since) :
				core_dirty_since);
		}

		flushed_total += env_atomic_read(
				&cache->core_obj[i].flushed);
	}

	info->dirty = dirty_blocks_total;
	info->dirty_initial = initial_dirty_blocks_total;
	info->occupancy = cache_occupancy_total;
	info->dirty_for = _calc_dirty_for(dirty_since);
	info->metadata_end_offset = ocf_cache_is_device_attached(cache) ?
			cache->device->metadata_offset_line : 0;
	info->state = cache->cache_state;
	info->inactive.occupancy = cache_occupancy_inactive;
	info->inactive.dirty = dirty_blocks_inactive;

	/* Flushed count is only reported while a flush is in progress. */
	info->flushed = (env_atomic_read(&cache->flush_in_progress)) ?
			flushed_total : 0;
	info->fallback_pt.status = ocf_fallback_pt_is_on(cache);
	info->fallback_pt.error_counter =
		env_atomic_read(&cache->fallback_pt_error_counter);
	info->eviction_policy = cache->conf_meta->eviction_policy_type;
	info->cleaning_policy = cache->conf_meta->cleaning_policy_type;
	info->metadata_footprint = ocf_cache_is_device_attached(cache) ?
			ocf_metadata_size_of(cache) : 0;
	info->cache_line_size = ocf_line_size(cache);

	ocf_mngt_cache_read_unlock(cache);

	return 0;
}
/* Return the UUID of the attached cache device, NULL when detached. */
const struct ocf_data_obj_uuid *ocf_cache_get_uuid(ocf_cache_t cache)
{
	ocf_data_obj_t obj;

	if (!ocf_cache_is_device_attached(cache))
		return NULL;

	obj = ocf_cache_get_data_object(cache);
	return ocf_data_obj_get_uuid(obj);
}
/* Return the registered type id of the attached cache device.
 * 0xff marks "no type" when no device is attached. */
uint8_t ocf_cache_get_type_id(ocf_cache_t cache)
{
	ocf_data_obj_t obj;

	if (!ocf_cache_is_device_attached(cache))
		return 0xff;

	obj = ocf_cache_get_data_object(cache);
	return ocf_ctx_get_data_obj_type_id(ocf_cache_get_ctx(cache),
			ocf_data_obj_get_type(obj));
}
/* Return the cache line size; thin wrapper over the internal helper. */
ocf_cache_line_size_t ocf_cache_get_line_size(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return ocf_line_size(cache);
}
/* Convert a byte count to cache lines via the internal helper. */
uint64_t ocf_cache_bytes_2_lines(ocf_cache_t cache, uint64_t bytes)
{
	OCF_CHECK_NULL(cache);
	return ocf_bytes_2_lines(cache, bytes);
}
/* Return the number of core objects; maintained in the superblock. */
uint32_t ocf_cache_get_core_count(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return cache->conf_meta->core_obj_count;
}
/* Return the OCF context that owns this cache. */
ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return cache->owner;
}

223
src/ocf_cache_priv.h Normal file
View File

@@ -0,0 +1,223 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CACHE_PRIV_H__
#define __OCF_CACHE_PRIV_H__
#include "ocf/ocf.h"
#include "ocf_env.h"
#include "ocf_data_obj_priv.h"
#include "ocf_core_priv.h"
#include "metadata/metadata_structs.h"
#include "metadata/metadata_partition_structs.h"
#include "metadata/metadata_updater_priv.h"
#include "utils/utils_list.h"
#include "ocf_stats_priv.h"
#include "cleaning/cleaning.h"
#include "ocf_logger_priv.h"
#define DIRTY_FLUSHED 1
#define DIRTY_NOT_FLUSHED 0
/* On-disk (packed) representation of a data object UUID: length + bytes. */
struct ocf_metadata_uuid {
	uint32_t size;
	uint8_t data[OCF_DATA_OBJ_UUID_MAX_SIZE];
} __packed;
#define OCF_CORE_USER_DATA_SIZE 64
/* Persistent per-core configuration metadata (stored in the superblock). */
struct ocf_core_meta_config {
	/* Core device type - presumably the registered data object type
	 * id; confirm against metadata/add-core code */
	uint8_t type;
	/* This bit means that object was added into cache */
	uint32_t added : 1;
	/* Core sequence number used to correlate cache lines with cores
	 * when recovering from atomic device */
	ocf_seq_no_t seq_no;
	/* Sequential cutoff threshold (in bytes) */
	uint32_t seq_cutoff_threshold;
	/* Sequential cutoff policy */
	ocf_seq_cutoff_policy seq_cutoff_policy;
	/* core object size in bytes */
	uint64_t length;
	/* Opaque area for adapter/user payload, accessed via
	 * ocf_core_set/get_user_metadata() */
	uint8_t user_data[OCF_CORE_USER_DATA_SIZE];
};
/* Per-core runtime counters (occupancy / dirtiness bookkeeping). */
struct ocf_core_meta_runtime {
	/* Number of blocks from that objects that currently are cached
	 * on the caching device.
	 */
	env_atomic cached_clines;
	/* Dirty cache lines belonging to this core */
	env_atomic dirty_clines;
	/* Dirty count captured at load; bumped when current dirtiness
	 * exceeds it (see ocf_cache_get_info()) */
	env_atomic initial_dirty_clines;
	/* Tick timestamp of when the core first became dirty; 0 = clean */
	env_atomic64 dirty_since;
	struct {
		/* clines within lru list (?) */
		env_atomic cached_clines;
		/* dirty clines assigned to this specific partition within
		 * cache device
		 */
		env_atomic dirty_clines;
	} part_counters[OCF_IO_CLASS_MAX];
};
/**
 * @brief Initialization mode of cache instance
 */
enum ocf_mngt_cache_init_mode {
	/**
	 * @brief Set up an SSD as new caching device
	 */
	ocf_init_mode_init,

	/**
	 * @brief Set up an SSD as new caching device without saving cache
	 * metadata on SSD.
	 *
	 * When using this initialization mode, after shutdown, loading cache
	 * is not possible
	 */
	ocf_init_mode_metadata_volatile,

	/**
	 * @brief Load pre-existing SSD cache state and set all parameters
	 * to previous configurations
	 */
	ocf_init_mode_load,
};
/* Cache device - state tied to the attached caching SSD. */
struct ocf_cache_device {
	/* Bottom data object backing the cache */
	struct ocf_data_obj obj;

	/* First cache line past the on-device metadata area */
	ocf_cache_line_t metadata_offset_line;

	/* Hash Table contains contains pointer to the entry in
	 * Collision Table so it actually contains collision Table
	 * indexes.
	 * Invalid entry is collision_table_entries.
	 */
	unsigned int hash_table_entries;
	unsigned int collision_table_entries;

	int metadata_error;
	/*!< This field indicates that an error during metadata IO
	 * occurred
	 */

	/* Metadata area offset - NOTE(review): presumably in bytes while
	 * metadata_offset_line is in cache lines; confirm */
	uint64_t metadata_offset;

	/* Partition holding free (unmapped) cache lines */
	struct ocf_part *freelist_part;

	/* Cache line concurrency (locking) state */
	struct {
		struct ocf_cache_concurrency *cache;
	} concurrency;

	/* How this instance was brought up (init / volatile / load) */
	enum ocf_mngt_cache_init_mode init_mode;

	/* Runtime portion of the superblock metadata */
	struct ocf_superblock_runtime *runtime_meta;
};
/* Top-level runtime state of a single cache instance. */
struct ocf_cache {
	/* Owning OCF context */
	ocf_ctx_t owner;

	/* Node in the context-wide list of caches */
	struct list_head list;
	/* set to make valid */
	uint8_t valid_ocf_cache_device_t;
	/* unset running to not serve any more I/O requests */
	unsigned long cache_state;

	env_atomic ref_count;

	/* Persistent superblock configuration */
	struct ocf_superblock_config *conf_meta;

	/* Attached cache device state; meaningful only when attached */
	struct ocf_cache_device *device;

	/* IO class (partition) list and per-class state */
	struct ocf_lst lst_part;
	struct ocf_user_part user_parts[OCF_IO_CLASS_MAX + 1];

	struct ocf_metadata metadata;

	ocf_eviction_t eviction_policy_init;

	int cache_id;

	char name[OCF_CACHE_NAME_SIZE];

	/* In-flight request accounting and drain waitqueues */
	env_atomic pending_requests;

	env_atomic pending_cache_requests;
	env_waitqueue pending_cache_wq;

	/* Dirty (WB) requests in flight; waited on during flush */
	env_atomic pending_dirty_requests;
	env_waitqueue pending_dirty_wq;

	/* Fallback pass-through error accounting */
	uint32_t fallback_pt_error_threshold;
	env_atomic fallback_pt_error_counter;

	env_atomic pending_read_misses_list_blocked;
	env_atomic pending_read_misses_list_count;

	env_atomic last_access_ms;

	env_atomic pending_eviction_clines;

	/* IO queues array, io_queues_no entries */
	struct ocf_queue *io_queues;
	uint32_t io_queues_no;

	uint16_t ocf_core_inactive_count;
	struct ocf_core core_obj[OCF_CORE_MAX];
	struct ocf_core_meta_config *core_conf_meta;
	struct ocf_core_meta_runtime *core_runtime_meta;

	/* Flush progress flags (see ocf_cache_get_info()) */
	env_atomic flush_in_progress;
	env_atomic flush_started;

	/* 1 if cache device attached, 0 otherwise */
	env_atomic attached;

	env_atomic cleaning[OCF_IO_CLASS_MAX];
	struct ocf_cleaner cleaner;
	struct ocf_metadata_updater metadata_updater;

	/* Management lock guarding cache-wide operations */
	env_rwsem lock;
	env_atomic lock_waiter;
	/*!< most of the time this variable is set to 0, unless user requested
	 *!< interruption of flushing process via ioctl/
	 */
	int flushing_interrupted;
	env_mutex flush_mutex;

	struct {
		uint32_t max_queue_size;
		uint32_t queue_unblock_size;
	} backfill;

	bool pt_unaligned_io;

	bool use_submit_io_fast;

	/* Private state of the active cleaning policy */
	void *cleaning_policy_context;
};
#define ocf_cache_log_prefix(cache, lvl, prefix, fmt, ...) \
ocf_log_prefix(ocf_cache_get_ctx(cache), lvl, "[Cache %s] ", \
prefix fmt, ocf_cache_get_name(cache), ##__VA_ARGS__)
#define ocf_cache_log(cache, lvl, fmt, ...) \
ocf_cache_log_prefix(cache, lvl, "", fmt, ##__VA_ARGS__)
#define ocf_cache_log_rl(cache) \
ocf_log_rl(ocf_cache_get_ctx(cache))
#endif /* __OCF_CACHE_PRIV_H__ */

652
src/ocf_core.c Normal file
View File

@@ -0,0 +1,652 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_priv.h"
#include "ocf_core_priv.h"
#include "ocf_io_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_rq.h"
#include "utils/utils_part.h"
#include "utils/utils_device.h"
#include "ocf_request.h"
/* Return the cache this core belongs to (via the embedded data object). */
ocf_cache_t ocf_core_get_cache(ocf_core_t core)
{
	OCF_CHECK_NULL(core);
	return core->obj.cache;
}
/* Return the front data object embedded in the core. */
ocf_data_obj_t ocf_core_get_data_object(ocf_core_t core)
{
	OCF_CHECK_NULL(core);
	return &core->obj;
}
/*
 * Return the id of a core. Cores live in a flat array inside the cache
 * descriptor, so the id is simply the element offset.
 */
ocf_core_id_t ocf_core_get_id(ocf_core_t core)
{
	OCF_CHECK_NULL(core);

	return core - core->obj.cache->core_obj;
}
/* Set the core name; env_strncpy bounds the copy and reports failure. */
int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size)
{
	OCF_CHECK_NULL(core);
	OCF_CHECK_NULL(src);
	return env_strncpy(core->name, sizeof(core->name), src, src_size);
}
/* Return the core name (owned by the core, do not free). */
const char *ocf_core_get_name(ocf_core_t core)
{
	OCF_CHECK_NULL(core);
	return core->name;
}
/* A core with its backing device open is active, otherwise inactive. */
ocf_core_state_t ocf_core_get_state(ocf_core_t core)
{
	OCF_CHECK_NULL(core);

	if (core->opened)
		return ocf_core_state_active;

	return ocf_core_state_inactive;
}
/*
 * Check whether @id refers to a core actually added to @cache:
 * it must be within the allowed range and set in the valid-object bitmap.
 */
bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id)
{
	OCF_CHECK_NULL(cache);

	return id >= OCF_CORE_ID_MIN && id <= OCF_CORE_ID_MAX &&
			env_bit_test(id, cache->conf_meta->valid_object_bitmap);
}
/*
 * Look up the core with given id. On success stores the handle in @core
 * and returns 0; returns -OCF_ERR_CORE_NOT_AVAIL for an invalid id.
 */
int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
{
	OCF_CHECK_NULL(cache);
	if (!ocf_core_is_valid(cache, id))
		return -OCF_ERR_CORE_NOT_AVAIL;
	*core = &cache->core_obj[id];
	return 0;
}
/*
 * Replace the core UUID and persist it in the superblock.
 * A UUID identical to the current one is a no-op success.
 * Returns 0, the env_memcmp/set error, or -OCF_ERR_WRITE_CACHE when
 * flushing the superblock fails.
 */
int ocf_core_set_uuid(ocf_core_t core, const struct ocf_data_obj_uuid *uuid)
{
	struct ocf_cache *cache;
	struct ocf_data_obj_uuid *old_uuid;
	int result;
	int diff;

	OCF_CHECK_NULL(core);
	OCF_CHECK_NULL(uuid);
	OCF_CHECK_NULL(uuid->data);

	cache = core->obj.cache;
	old_uuid = &ocf_core_get_data_object(core)->uuid;

	result = env_memcmp(old_uuid->data, old_uuid->size,
			uuid->data, uuid->size, &diff);
	if (result)
		return result;

	/* Nothing to do when the new UUID matches the current one. */
	if (diff == 0)
		return 0;

	result = ocf_uuid_core_set(cache, core, uuid);
	if (result)
		return result;

	/* Persist the change right away. */
	if (ocf_metadata_flush_superblock(cache))
		return -OCF_ERR_WRITE_CACHE;

	return 0;
}
/* Mark the IO as producing dirty data and bump the cache-wide counter
 * of pending dirty requests (drained during flush). */
static inline void inc_dirty_rq_counter(struct ocf_core_io *core_io,
		ocf_cache_t cache)
{
	core_io->dirty = 1;
	env_atomic_inc(&cache->pending_dirty_requests);
}
/*
 * Undo inc_dirty_rq_counter() for a dirty IO. When the last pending
 * dirty request completes, wake waiters on the dirty-drain waitqueue.
 * No-op for IOs that were never marked dirty.
 */
static inline void dec_counter_if_rq_was_dirty(struct ocf_core_io *core_io,
		ocf_cache_t cache)
{
	int remaining;

	if (!core_io->dirty)
		return;

	remaining = env_atomic_dec_return(&cache->pending_dirty_requests);
	ENV_BUG_ON(remaining < 0);

	core_io->dirty = 0;

	if (remaining == 0)
		env_waitqueue_wake_up(&cache->pending_dirty_wq);
}
/* *** CORE IO *** */
/* Map a generic ocf_io back to its enclosing core IO structure. */
static inline struct ocf_core_io *ocf_io_to_core_io(struct ocf_io *io)
{
	return container_of(io, struct ocf_core_io, base);
}
/* Take a reference on a core IO; counter must stay positive. */
static void ocf_core_io_get(struct ocf_io *io)
{
	struct ocf_core_io *core_io;

	OCF_CHECK_NULL(io);

	core_io = ocf_io_to_core_io(io);

	/* After taking a reference the counter must be at least one. */
	ENV_BUG_ON(env_atomic_inc_return(&core_io->ref_counter) < 1);
}
/* Drop a reference on a core IO; free it when the count hits zero. */
static void ocf_core_io_put(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_cache_t cache;
	int refs;

	OCF_CHECK_NULL(io);

	core_io = ocf_io_to_core_io(io);

	refs = env_atomic_dec_return(&core_io->ref_counter);
	ENV_BUG_ON(refs < 0);

	if (refs > 0)
		return;

	/* Last reference gone - release the IO back to its allocator. */
	cache = ocf_core_get_cache(core_io->core);
	core_io->data = NULL;
	env_allocator_del(cache->owner->resources.core_io_allocator, core_io);
}
/*
 * Attach a data buffer to a core IO. Core IOs accept only a complete
 * buffer starting at offset zero; anything else is -EINVAL.
 */
static int ocf_core_io_set_data(struct ocf_io *io,
		ctx_data_t *data, uint32_t offset)
{
	struct ocf_core_io *core_io;

	OCF_CHECK_NULL(io);

	if (!data || offset != 0)
		return -EINVAL;

	core_io = ocf_io_to_core_io(io);
	core_io->data = data;

	return 0;
}
static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
{
struct ocf_core_io *core_io;
OCF_CHECK_NULL(io);
core_io = ocf_io_to_core_io(io);
return core_io->data;
}
/* Return the sequential cutoff threshold (bytes) for this core. */
uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
{
	ocf_cache_t cache = ocf_core_get_cache(core);

	/* Per-core configuration lives in a cache-wide metadata array. */
	return cache->core_conf_meta[ocf_core_get_id(core)].seq_cutoff_threshold;
}
/* Return the sequential cutoff policy for this core. */
ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
{
	ocf_cache_t cache = ocf_core_get_cache(core);

	/* Per-core configuration lives in a cache-wide metadata array. */
	return cache->core_conf_meta[ocf_core_get_id(core)].seq_cutoff_policy;
}
/* Virtual operations backing IOs issued to core objects. */
const struct ocf_io_ops ocf_core_io_ops = {
	.set_data = ocf_core_io_set_data,
	.get_data = ocf_core_io_get_data,
	.get = ocf_core_io_get,
	.put = ocf_core_io_put,
};
/*
 * Copy @size bytes of user payload into the core's metadata slot
 * (in memory only - no superblock flush). -EINVAL when @size exceeds
 * the slot capacity.
 */
int ocf_core_set_user_metadata_raw(ocf_core_t core, void *data, size_t size)
{
	ocf_cache_t cache = ocf_core_get_cache(core);
	uint32_t core_id = ocf_core_get_id(core);

	if (size > OCF_CORE_USER_DATA_SIZE)
		return -EINVAL;

	env_memcpy(cache->core_conf_meta[core_id].user_data,
			OCF_CORE_USER_DATA_SIZE, data, size);

	return 0;
}
/*
 * Store user payload in the core metadata and persist the superblock.
 * Returns 0, the raw-set error, or -OCF_ERR_WRITE_CACHE on flush failure.
 */
int ocf_core_set_user_metadata(ocf_core_t core, void *data, size_t size)
{
	int result;

	OCF_CHECK_NULL(core);
	OCF_CHECK_NULL(data);

	result = ocf_core_set_user_metadata_raw(core, data, size);
	if (result)
		return result;

	/* Make the change durable right away. */
	if (ocf_metadata_flush_superblock(ocf_core_get_cache(core)))
		return -OCF_ERR_WRITE_CACHE;

	return 0;
}
/*
 * Copy the core's user payload into @data. The caller's buffer must
 * not exceed the slot size; otherwise -EINVAL.
 * NOTE(review): the copy length is the full slot size while the
 * destination bound is @size - presumably env_memcpy clamps to the
 * destination; confirm env_memcpy semantics.
 */
int ocf_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
{
	ocf_cache_t cache;
	uint32_t core_id;

	OCF_CHECK_NULL(core);

	cache = ocf_core_get_cache(core);
	core_id = ocf_core_get_id(core);

	if (size > sizeof(cache->core_conf_meta[core_id].user_data))
		return -EINVAL;

	env_memcpy(data, size, cache->core_conf_meta[core_id].user_data,
			OCF_CORE_USER_DATA_SIZE);

	return 0;
}
/* *** OCF API *** */
/*
 * Validate a user-submitted IO before processing.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int ocf_validate_io(struct ocf_core_io *core_io)
{
	ocf_cache_t cache = ocf_core_get_cache(core_io->core);
	struct ocf_io *io = &core_io->base;
	uint64_t obj_length;

	/* Target object and vtable are mandatory. */
	if (!io->obj || !io->ops)
		return -EINVAL;

	/* The IO must fit entirely within the core object. */
	obj_length = ocf_data_obj_get_length(io->obj);
	if (io->addr >= obj_length || io->addr + io->bytes > obj_length)
		return -EINVAL;

	if (io->class >= OCF_IO_CLASS_MAX)
		return -EINVAL;

	if (io->dir != OCF_READ && io->dir != OCF_WRITE)
		return -EINVAL;

	if (io->io_queue >= cache->io_queues_no)
		return -EINVAL;

	/* Completion callback is mandatory. */
	if (!io->end)
		return -EINVAL;

	return 0;
}
/* Request completion callback: finish the user IO, release the dirty
 * accounting taken at submit time, and drop the submit-time reference. */
static void ocf_req_complete(struct ocf_request *req, int error)
{
	/* Complete IO */
	ocf_io_end(req->io, error);

	dec_counter_if_rq_was_dirty(ocf_io_to_core_io(req->io), req->cache);

	/* Invalidate OCF IO, it is not valid after completion */
	ocf_core_io_put(req->io);
	req->io = NULL;
}
/*
 * Allocate a fresh IO targeting @core. The IO starts with a single
 * reference held by the caller. Returns NULL on allocation failure.
 */
struct ocf_io *ocf_new_io(ocf_core_t core)
{
	struct ocf_core_io *core_io;
	ocf_cache_t cache;

	OCF_CHECK_NULL(core);

	cache = ocf_core_get_cache(core);
	if (!cache)
		return NULL;

	core_io = env_allocator_new(cache->owner->resources.core_io_allocator);
	if (!core_io)
		return NULL;

	core_io->base.obj = ocf_core_get_data_object(core);
	core_io->base.ops = &ocf_core_io_ops;
	core_io->core = core;
	env_atomic_set(&core_io->ref_counter, 1);

	return &core_io->base;
}
/*
 * Submit an IO through the cache engine using @cache_mode
 * (ocf_cache_mode_none resolves the effective per-IO mode).
 *
 * Returns 0 once the IO is accepted (completion is reported via
 * io->end, possibly with an error) or -EINVAL for a malformed IO.
 */
int ocf_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
{
	struct ocf_core_io *core_io;
	ocf_req_cache_mode_t req_cache_mode;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	if (!io)
		return -EINVAL;

	core_io = ocf_io_to_core_io(io);

	ret = ocf_validate_io(core_io);
	if (ret < 0)
		return ret;

	core = core_io->core;
	cache = ocf_core_get_cache(core);

	/* Reject IO once the cache stopped serving requests. */
	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return 0;
	}

	/* TODO: instead of casting ocf_cache_mode_t to ocf_req_cache_mode_t
	   we can resolve IO interface here and get rid of the latter. */
	req_cache_mode = cache_mode;
	if (cache_mode == ocf_cache_mode_none)
		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
	if (req_cache_mode == ocf_req_cache_mode_wb) {
		inc_dirty_rq_counter(core_io, cache);

		//Double cache mode check prevents sending WB request
		//while flushing is performed.
		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
		if (req_cache_mode != ocf_req_cache_mode_wb)
			dec_counter_if_rq_was_dirty(core_io, cache);
	}

	/* With a single valid partition all IO maps to class 0. */
	if (cache->conf_meta->valid_parts_no <= 1)
		io->class = 0;

	core_io->req = ocf_rq_new(cache, ocf_core_get_id(core),
			io->addr, io->bytes, io->dir);
	if (!core_io->req) {
		dec_counter_if_rq_was_dirty(core_io, cache);
		io->end(io, -ENOMEM);
		return 0;
	}

	/* d2c (direct-to-core) requests bypass the cache engine modes. */
	if (core_io->req->d2c)
		req_cache_mode = ocf_req_cache_mode_d2c;

	core_io->req->io_queue = io->io_queue;
	core_io->req->part_id = ocf_part_class2id(cache, io->class);
	core_io->req->data = core_io->data;
	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;

	ocf_seq_cutoff_update(core, core_io->req);
	ocf_core_update_stats(core, io);

	/* Reference dropped by ocf_req_complete() on completion. */
	ocf_core_io_get(io);
	ret = ocf_engine_hndl_rq(core_io->req, req_cache_mode);
	if (ret) {
		dec_counter_if_rq_was_dirty(core_io, cache);
		ocf_rq_put(core_io->req);
		io->end(io, ret);
	}

	return 0;
}
/*
 * Attempt the fast (fully-mapped) submission path for an IO.
 *
 * Returns 0 when the fast path was taken (completion via io->end),
 * -EINVAL for a malformed IO, or -EIO when the fast path does not
 * apply and the caller should fall back to the regular submit path.
 */
int ocf_submit_io_fast(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_req_cache_mode_t req_cache_mode;
	struct ocf_request *req;
	ocf_core_t core;
	ocf_cache_t cache;
	int fast;
	int ret;

	if (!io)
		return -EINVAL;

	core_io = ocf_io_to_core_io(io);

	ret = ocf_validate_io(core_io);
	if (ret < 0)
		return ret;

	core = core_io->core;
	cache = ocf_core_get_cache(core);

	/* Reject IO once the cache stopped serving requests. */
	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return 0;
	}

	req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
	if (req_cache_mode == ocf_req_cache_mode_wb) {
		inc_dirty_rq_counter(core_io, cache);

		//Double cache mode check prevents sending WB request
		//while flushing is performed.
		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
		if (req_cache_mode != ocf_req_cache_mode_wb)
			dec_counter_if_rq_was_dirty(core_io, cache);
	}

	/* Decide whether this mode is eligible for the fast path. */
	switch (req_cache_mode) {
	case ocf_req_cache_mode_pt:
		return -EIO;
	case ocf_req_cache_mode_wb:
		req_cache_mode = ocf_req_cache_mode_fast;
		break;
	default:
		if (cache->use_submit_io_fast)
			break;
		if (io->dir == OCF_WRITE)
			return -EIO;

		req_cache_mode = ocf_req_cache_mode_fast;
	}

	/* With a single valid partition all IO maps to class 0. */
	if (cache->conf_meta->valid_parts_no <= 1)
		io->class = 0;

	core_io->req = ocf_rq_new_extended(cache, ocf_core_get_id(core),
			io->addr, io->bytes, io->dir);
	// We need additional pointer to req in case completion arrives before
	// we leave this function and core_io is freed
	req = core_io->req;

	if (!req) {
		dec_counter_if_rq_was_dirty(core_io, cache);
		io->end(io, -ENOMEM);
		return 0;
	}
	if (req->d2c) {
		/* Direct-to-core requests never take the fast path. */
		dec_counter_if_rq_was_dirty(core_io, cache);
		ocf_rq_put(req);
		return -EIO;
	}

	req->io_queue = io->io_queue;
	req->part_id = ocf_part_class2id(cache, io->class);
	req->data = core_io->data;
	req->complete = ocf_req_complete;
	req->io = io;

	ocf_core_update_stats(core, io);
	ocf_core_io_get(io);

	fast = ocf_engine_hndl_fast_rq(req, req_cache_mode);
	if (fast != OCF_FAST_PATH_NO) {
		ocf_seq_cutoff_update(core, req);
		return 0;
	}

	/* Fast path refused - roll back and signal the caller to retry. */
	dec_counter_if_rq_was_dirty(core_io, cache);
	ocf_core_io_put(io);
	ocf_rq_put(req);
	return -EIO;
}
/*
 * Submit a flush request for the core targeted by @io.
 * Returns 0 once accepted (completion via io->end) or -EINVAL for a
 * malformed IO.
 */
int ocf_submit_flush(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_cache_t cache;
	ocf_core_t core;
	int result;

	if (!io)
		return -EINVAL;

	core_io = ocf_io_to_core_io(io);

	result = ocf_validate_io(core_io);
	if (result < 0)
		return result;

	core = core_io->core;
	cache = ocf_core_get_cache(core);

	/* No new IO once the cache has stopped running. */
	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return 0;
	}

	core_io->req = ocf_rq_new(cache, ocf_core_get_id(core),
			io->addr, io->bytes, io->dir);
	if (!core_io->req) {
		ocf_io_end(io, -ENOMEM);
		return 0;
	}

	core_io->req->io_queue = io->io_queue;
	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;
	core_io->req->data = core_io->data;

	/* Reference dropped by ocf_req_complete() on completion. */
	ocf_core_io_get(io);
	ocf_engine_hndl_ops_rq(core_io->req);

	return 0;
}
/*
 * Submit a discard request for the range described by @io.
 * Returns 0 once accepted (completion via io->end) or -EINVAL for a
 * malformed IO.
 */
int ocf_submit_discard(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_cache_t cache;
	ocf_core_t core;
	int result;

	if (!io)
		return -EINVAL;

	core_io = ocf_io_to_core_io(io);

	result = ocf_validate_io(core_io);
	if (result < 0)
		return result;

	core = core_io->core;
	cache = ocf_core_get_cache(core);

	/* No new IO once the cache has stopped running. */
	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return 0;
	}

	/* Discards are modeled as write-direction requests. */
	core_io->req = ocf_rq_new_discard(cache, ocf_core_get_id(core),
			io->addr, io->bytes, OCF_WRITE);
	if (!core_io->req) {
		ocf_io_end(io, -ENOMEM);
		return 0;
	}

	core_io->req->io_queue = io->io_queue;
	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;
	core_io->req->data = core_io->data;

	/* Reference dropped by ocf_req_complete() on completion. */
	ocf_core_io_get(io);
	ocf_engine_hndl_discard_rq(core_io->req);

	return 0;
}
/*
 * Invoke @visitor for every valid core (optionally only opened ones).
 * Iteration stops at the first non-zero visitor result, which is
 * returned to the caller; 0 means all cores were visited.
 */
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
		bool only_opened)
{
	ocf_core_id_t id;
	int result = 0;

	OCF_CHECK_NULL(cache);

	if (!visitor)
		return -OCF_ERR_INVAL;

	for (id = 0; id < OCF_CORE_MAX; id++) {
		/* Skip slots with no core added. */
		if (!env_bit_test(id, cache->conf_meta->valid_object_bitmap))
			continue;

		/* Optionally restrict iteration to active cores. */
		if (only_opened && !cache->core_obj[id].opened)
			continue;

		result = visitor(&cache->core_obj[id], cntx);
		if (result)
			break;
	}

	return result;
}

56
src/ocf_core_priv.h Normal file
View File

@@ -0,0 +1,56 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CORE_PRIV_H__
#define __OCF_CORE_PRIV_H__
#include "ocf/ocf.h"
#include "ocf_env.h"
#include "ocf_data_obj_priv.h"
/* Context wrapped around every IO submitted to a core object. */
struct ocf_core_io {
	/* Base IO handed out to the user; recovered via container_of() */
	struct ocf_io base;
	/* Core object this IO targets */
	ocf_core_t core;
	/* Reference counter; IO is freed when it drops to zero */
	env_atomic ref_counter;
	bool dirty;
	/*!< Indicates if io leaves dirty data */
	/* Back-end request built for this IO at submit time */
	struct ocf_request *req;
	/* User data buffer attached via the set_data op */
	ctx_data_t *data;
};
/* Runtime representation of a single core (backend) device. */
struct ocf_core {
	char name[OCF_CORE_NAME_SIZE];

	/* Front data object exposed for this core */
	struct ocf_data_obj obj;

	/* State used to detect sequential IO streams for cutoff */
	struct {
		uint64_t last;
		uint64_t bytes;
		int rw;
	} seq_cutoff;

	/* Flushed-lines counter reported by ocf_cache_get_info() while
	 * a flush is in progress */
	env_atomic flushed;

	/* This bit means that object is open*/
	uint32_t opened : 1;

	/* Per-core statistics counters */
	struct ocf_counters_core *counters;
};
bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id);
int ocf_core_set_user_metadata_raw(ocf_core_t core, void *data, size_t size);
#define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) \
ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, "[Core %s] ", \
prefix fmt, ocf_core_get_name(core), ##__VA_ARGS__)
#define ocf_core_log(core, lvl, fmt, ...) \
ocf_core_log_prefix(core, lvl, "", fmt, ##__VA_ARGS__)
#endif /* __OCF_CORE_PRIV_H__ */

196
src/ocf_ctx.c Normal file
View File

@@ -0,0 +1,196 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_ctx_priv.h"
#include "ocf_priv.h"
#include "ocf_data_obj_priv.h"
#include "ocf_utils.h"
#include "ocf_logger_priv.h"
/*
*
*/
/*
 * Register bottom data object operations under @type_id.
 *
 * Returns 0 on success, -EINVAL for NULL arguments, an out-of-range or
 * already-occupied type id, or a failed type init.
 *
 * Fix: the error log message was missing its trailing newline, unlike
 * every other log in this file - lines would run together in the log.
 */
int ocf_ctx_register_data_obj_type(ocf_ctx_t ctx, uint8_t type_id,
		const struct ocf_data_obj_properties *properties)
{
	int result = 0;

	if (!ctx || !properties)
		return -EINVAL;

	env_mutex_lock(&ctx->lock);

	/* Slot must be in range and not already taken. */
	if (type_id >= OCF_DATA_OBJ_TYPE_MAX || ctx->data_obj_type[type_id]) {
		env_mutex_unlock(&ctx->lock);
		result = -EINVAL;
		goto err;
	}

	ocf_data_obj_type_init(&ctx->data_obj_type[type_id], properties);
	if (!ctx->data_obj_type[type_id])
		result = -EINVAL;

	env_mutex_unlock(&ctx->lock);

	if (result)
		goto err;

	ocf_log(ctx, log_debug, "'%s' data object operations registered\n",
			properties->name);
	return 0;

err:
	ocf_log(ctx, log_err, "Failed to register data object operations '%s'\n",
			properties->name);
	return result;
}
/*
*
*/
/* Unregister the data object type at @type_id; silently ignores ids
 * that are out of range or were never registered. */
void ocf_ctx_unregister_data_obj_type(ocf_ctx_t ctx, uint8_t type_id)
{
	OCF_CHECK_NULL(ctx);

	env_mutex_lock(&ctx->lock);

	if (type_id < OCF_DATA_OBJ_TYPE_MAX && ctx->data_obj_type[type_id]) {
		ocf_data_obj_type_deinit(ctx->data_obj_type[type_id]);
		ctx->data_obj_type[type_id] = NULL;
	}

	env_mutex_unlock(&ctx->lock);
}
/*
*
*/
/* Look up a registered data object type; NULL for out-of-range ids. */
ocf_data_obj_type_t ocf_ctx_get_data_obj_type(ocf_ctx_t ctx, uint8_t type_id)
{
	OCF_CHECK_NULL(ctx);

	return (type_id < OCF_DATA_OBJ_TYPE_MAX) ?
			ctx->data_obj_type[type_id] : NULL;
}
/*
*
*/
/* Reverse lookup: linear scan of the registry for @type's id.
 * Returns -1 when the type is not registered. */
int ocf_ctx_get_data_obj_type_id(ocf_ctx_t ctx, ocf_data_obj_type_t type)
{
	int id;

	OCF_CHECK_NULL(ctx);

	for (id = 0; id < OCF_DATA_OBJ_TYPE_MAX; ++id) {
		if (ctx->data_obj_type[id] == type)
			return id;
	}

	return -1;
}
/*
*
*/
/* Create a data object of the registered type @type_id.
 * Returns -EINVAL for an out-of-range id, otherwise delegates to the
 * generic constructor. */
int ocf_ctx_data_obj_create(ocf_ctx_t ctx, ocf_data_obj_t *obj,
		struct ocf_data_obj_uuid *uuid, uint8_t type_id)
{
	OCF_CHECK_NULL(ctx);

	if (type_id >= OCF_DATA_OBJ_TYPE_MAX)
		return -EINVAL;

	return ocf_data_obj_create(obj, ctx->data_obj_type[type_id], uuid);
}
/*
*
*/
/*
 * Install the context logger. A logger may be set only once; a second
 * attempt fails with -EINVAL. The logger's optional open() callback is
 * invoked before installation and may veto it with an error.
 */
int ocf_ctx_set_logger(ocf_ctx_t ctx, const struct ocf_logger *logger)
{
	int result = 0;

	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(logger);

	env_mutex_lock(&ctx->lock);

	if (ctx->logger) {
		result = -EINVAL;
		goto unlock;
	}

	if (logger->open) {
		result = logger->open(logger);
		if (result)
			goto unlock;
	}

	ctx->logger = logger;

unlock:
	env_mutex_unlock(&ctx->lock);
	return result;
}
/*
*
*/
/*
 * Create and initialize an OCF context bound to the adapter ops.
 * On success stores the new context in *ctx and returns 0; returns
 * -ENOMEM when any allocation or init step fails.
 */
int ocf_ctx_init(ocf_ctx_t *ctx, const struct ocf_ctx_ops *ops)
{
	struct ocf_ctx *new_ctx;

	OCF_CHECK_NULL(ctx);
	OCF_CHECK_NULL(ops);

	new_ctx = env_zalloc(sizeof(*new_ctx), ENV_MEM_NORMAL);
	if (!new_ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_ctx->caches);

	if (env_mutex_init(&new_ctx->lock)) {
		env_free(new_ctx);
		return -ENOMEM;
	}

	new_ctx->ctx_ops = ops;

	/* NOTE(review): this failure path does not tear the mutex down -
	 * presumably env mutexes need no explicit destroy; confirm. */
	if (ocf_utils_init(new_ctx)) {
		env_free(new_ctx);
		return -ENOMEM;
	}

	*ctx = new_ctx;
	return 0;
}
/*
*
*/
/*
 * Tear down an OCF context. Fails with -EEXIST while any cache
 * instance still exists; on success closes the logger (if it provides
 * a close callback) and frees the context.
 */
int ocf_ctx_exit(ocf_ctx_t ctx)
{
	int busy;

	OCF_CHECK_NULL(ctx);

	/* Refuse teardown while caches are still registered. */
	env_mutex_lock(&ctx->lock);
	busy = !list_empty(&ctx->caches);
	env_mutex_unlock(&ctx->lock);

	if (busy)
		return -EEXIST;

	ocf_utils_deinit(ctx);

	if (ctx->logger && ctx->logger->close)
		ctx->logger->close(ctx->logger);

	env_free(ctx);

	return 0;
}

189
src/ocf_ctx_priv.h Normal file
View File

@@ -0,0 +1,189 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CTX_PRIV_H__
#define __OCF_CTX_PRIV_H__
#include "ocf_env.h"
#include "ocf/ocf_logger.h"
#include "ocf_logger_priv.h"
#define OCF_DATA_OBJ_TYPE_MAX 8
/**
* @brief OCF main control structure
*/
/**
 * @brief OCF main control structure
 */
struct ocf_ctx {
	/* Environment operations supplied by the adapter */
	const struct ocf_ctx_ops *ctx_ops;
	/* Logger installed via ocf_ctx_set_logger() */
	const struct ocf_logger *logger;
	/* Registered bottom data object types, indexed by type id */
	struct ocf_data_obj_type *data_obj_type[OCF_DATA_OBJ_TYPE_MAX];
	/* Protects the caches list, the type registry and the logger */
	env_mutex lock;
	/* All cache instances created within this context */
	struct list_head caches;
	struct {
		struct list_head core_pool_head;
		int core_pool_count;
	} core_pool;
	struct {
		/* Allocator for struct ocf_request objects */
		struct ocf_rq_allocator *rq;
		/* Allocator for core IO objects */
		env_allocator *core_io_allocator;
	} resources;
};
#define ocf_log_prefix(ctx, lvl, prefix, fmt, ...) \
ocf_log_raw(ctx->logger, lvl, OCF_PREFIX_SHORT prefix fmt, ##__VA_ARGS__)
#define ocf_log(ctx, lvl, fmt, ...) \
ocf_log_prefix(ctx, lvl, "", fmt, ##__VA_ARGS__)
#define ocf_log_rl(ctx) \
ocf_log_raw_rl(ctx->logger, __func__)
#define ocf_log_stack_trace(ctx) \
ocf_log_stack_trace_raw(ctx->logger)
/**
* @name Environment data buffer operations wrappers
* @{
*/
/* Allocate a data buffer of @pages pages via the adapter ops. */
static inline void *ctx_data_alloc(ocf_ctx_t ctx, uint32_t pages)
{
	return ctx->ctx_ops->data_alloc(pages);
}

/* Free a buffer obtained from ctx_data_alloc(). */
static inline void ctx_data_free(ocf_ctx_t ctx, ctx_data_t *data)
{
	ctx->ctx_ops->data_free(data);
}

/* Pin the buffer in memory; returns the adapter's error code. */
static inline int ctx_data_mlock(ocf_ctx_t ctx, ctx_data_t *data)
{
	return ctx->ctx_ops->data_mlock(data);
}

/* Undo ctx_data_mlock(). */
static inline void ctx_data_munlock(ocf_ctx_t ctx, ctx_data_t *data)
{
	ctx->ctx_ops->data_munlock(data);
}

/* Copy @size bytes out of the data buffer; returns bytes transferred. */
static inline uint32_t ctx_data_rd(ocf_ctx_t ctx, void *dst,
		ctx_data_t *src, uint32_t size)
{
	return ctx->ctx_ops->data_rd(dst, src, size);
}

/* Copy @size bytes into the data buffer; returns bytes transferred. */
static inline uint32_t ctx_data_wr(ocf_ctx_t ctx, ctx_data_t *dst,
		const void *src, uint32_t size)
{
	return ctx->ctx_ops->data_wr(dst, src, size);
}

/* ctx_data_rd() variant that BUGs on a short transfer. */
static inline void ctx_data_rd_check(ocf_ctx_t ctx, void *dst,
		ctx_data_t *src, uint32_t size)
{
	uint32_t read = ctx_data_rd(ctx, dst, src, size);

	ENV_BUG_ON(read != size);
}

/* ctx_data_wr() variant that BUGs on a short transfer. */
static inline void ctx_data_wr_check(ocf_ctx_t ctx, ctx_data_t *dst,
		const void *src, uint32_t size)
{
	uint32_t written = ctx_data_wr(ctx, dst, src, size);

	ENV_BUG_ON(written != size);
}

/* Zero-fill @size bytes of the buffer; returns bytes zeroed. */
static inline uint32_t ctx_data_zero(ocf_ctx_t ctx, ctx_data_t *dst,
		uint32_t size)
{
	return ctx->ctx_ops->data_zero(dst, size);
}

/* ctx_data_zero() variant that BUGs on a short transfer. */
static inline void ctx_data_zero_check(ocf_ctx_t ctx, ctx_data_t *dst,
		uint32_t size)
{
	uint32_t zerored = ctx_data_zero(ctx, dst, size);

	ENV_BUG_ON(zerored != size);
}

/* Move the buffer position; returns bytes actually seeked. */
static inline uint32_t ctx_data_seek(ocf_ctx_t ctx, ctx_data_t *dst,
		ctx_data_seek_t seek, uint32_t size)
{
	return ctx->ctx_ops->data_seek(dst, seek, size);
}

/* ctx_data_seek() variant that BUGs on a short seek. */
static inline void ctx_data_seek_check(ocf_ctx_t ctx, ctx_data_t *dst,
		ctx_data_seek_t seek, uint32_t size)
{
	uint32_t bytes = ctx_data_seek(ctx, dst, seek, size);

	ENV_BUG_ON(bytes != size);
}

/* Buffer-to-buffer copy; returns bytes copied (adapter-defined). */
static inline uint64_t ctx_data_cpy(ocf_ctx_t ctx, ctx_data_t *dst, ctx_data_t *src,
		uint64_t to, uint64_t from, uint64_t bytes)
{
	return ctx->ctx_ops->data_cpy(dst, src, to, from, bytes);
}
/*
 * Securely erase the buffer contents via the adapter ops.
 *
 * Fix: `return expr;` in a function returning void is a constraint
 * violation in ISO C (C11 6.8.6.4), even when expr has void type -
 * call the op and fall off the end instead.
 */
static inline void ctx_data_secure_erase(ocf_ctx_t ctx, ctx_data_t *dst)
{
	ctx->ctx_ops->data_secure_erase(dst);
}
static inline int ctx_queue_init(ocf_ctx_t ctx, ocf_queue_t queue)
{
return ctx->ctx_ops->queue_init(queue);
}
static inline void ctx_queue_kick(ocf_ctx_t ctx, ocf_queue_t queue,
bool allow_sync)
{
if (allow_sync && ctx->ctx_ops->queue_kick_sync)
ctx->ctx_ops->queue_kick_sync(queue);
else
ctx->ctx_ops->queue_kick(queue);
}
static inline void ctx_queue_stop(ocf_ctx_t ctx, ocf_queue_t queue)
{
ctx->ctx_ops->queue_stop(queue);
}
/* Start the adapter-side cleaner context. */
static inline int ctx_cleaner_init(ocf_ctx_t ctx, ocf_cleaner_t cleaner)
{
	int result = ctx->ctx_ops->cleaner_init(cleaner);

	return result;
}

/* Stop the adapter-side cleaner context. */
static inline void ctx_cleaner_stop(ocf_ctx_t ctx, ocf_cleaner_t cleaner)
{
	ctx->ctx_ops->cleaner_stop(cleaner);
}
/* Start the adapter-side metadata updater service. */
static inline int ctx_metadata_updater_init(ocf_ctx_t ctx,
		ocf_metadata_updater_t mu)
{
	int result = ctx->ctx_ops->metadata_updater_init(mu);

	return result;
}

/* Signal the metadata updater that work is pending. */
static inline void ctx_metadata_updater_kick(ocf_ctx_t ctx,
		ocf_metadata_updater_t mu)
{
	ctx->ctx_ops->metadata_updater_kick(mu);
}

/* Stop the metadata updater service. */
static inline void ctx_metadata_updater_stop(ocf_ctx_t ctx,
		ocf_metadata_updater_t mu)
{
	ctx->ctx_ops->metadata_updater_stop(mu);
}
/**
* @}
*/
#endif /* __OCF_CTX_PRIV_H__ */

247
src/ocf_data_obj.c Normal file
View File

@@ -0,0 +1,247 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "ocf_priv.h"
#include "ocf_data_obj_priv.h"
#include "ocf_io_priv.h"
#include "ocf_env.h"
/*
* This is io allocator dedicated for bottom devices.
* Out IO structure looks like this:
* --------------> +-------------------------+
* | OCF is aware | |
* | of this part. | struct ocf_io_meta |
* | | |
* | +-------------------------+ <----------------
* | | | Bottom adapter |
* | | struct ocf_io | is aware of |
* | | | this part. |
* --------------> +-------------------------+ |
* | | |
* | Bottom adapter specific | |
* | data structure. | |
* | | |
* +-------------------------+ <----------------
*/
/*
 * Total size of one bottom-adapter IO allocation: the OCF-private
 * struct ocf_io_meta header, the public struct ocf_io, plus 'size'
 * bytes of adapter-private context.
 *
 * The argument is parenthesized so that expression arguments expand
 * safely (standard macro hygiene).
 */
#define OCF_IO_ALLOCATOR_TOTAL_SIZE(size) \
		(sizeof(struct ocf_io_meta) + sizeof(struct ocf_io) + (size))

/* Create an allocator for IOs carrying 'size' bytes of adapter context. */
static env_allocator *ocf_io_allocator_create(uint32_t size, const char *name)
{
	return env_allocator_create(OCF_IO_ALLOCATOR_TOTAL_SIZE(size), name);
}
/* Tear down an IO allocator created by ocf_io_allocator_create(). */
static void ocf_io_allocator_destroy(env_allocator *allocator)
{
	env_allocator_destroy(allocator);
}
/*
 * Allocate a new IO and return a pointer to its struct ocf_io part,
 * which lives immediately after the OCF-private struct ocf_io_meta
 * header. Returns NULL on allocation failure.
 *
 * Offset arithmetic is done on char * - arithmetic on void * is a GNU
 * extension, not ISO C.
 */
static struct ocf_io *ocf_io_allocator_new(env_allocator *allocator)
{
	char *data = env_allocator_new(allocator);

	if (!data)
		return NULL;

	return (struct ocf_io *)(data + sizeof(struct ocf_io_meta));
}

/* Release an IO obtained from ocf_io_allocator_new(); NULL is a no-op. */
static void ocf_io_allocator_del(env_allocator *allocator, struct ocf_io *io)
{
	if (!io)
		return;

	/* Step back to the true allocation start (the ocf_io_meta header). */
	env_allocator_del(allocator, (char *)io - sizeof(struct ocf_io_meta));
}
/*
* Data object type
*/
/*
 * Register a data object (backend) type described by 'properties'.
 *
 * Validates that all mandatory callbacks are present, and allocates a
 * dedicated IO allocator sized for the type's io_context_size.
 *
 * On success stores the new type in *type and returns 0; on failure
 * returns a negative error code and leaves *type untouched.
 * The caller retains ownership of 'properties'; only a pointer to it
 * is stored.
 */
int ocf_data_obj_type_init(struct ocf_data_obj_type **type,
		const struct ocf_data_obj_properties *properties)
{
	const struct ocf_data_obj_ops *ops = &properties->ops;
	struct ocf_data_obj_type *new_type;
	int ret;

	/* All mandatory backend callbacks must be provided. */
	if (!ops->new_io || !ops->submit_io || !ops->open || !ops->close ||
			!ops->get_max_io_size || !ops->get_length) {
		return -EINVAL;
	}

	/* Atomic-write capable backends must also handle metadata IO. */
	if (properties->caps.atomic_writes && !ops->submit_metadata)
		return -EINVAL;

	new_type = env_zalloc(sizeof(**type), ENV_MEM_NORMAL);
	if (!new_type)
		return -OCF_ERR_NO_MEM;

	new_type->allocator = ocf_io_allocator_create(
			properties->io_context_size, properties->name);
	if (!new_type->allocator) {
		/*
		 * Use the OCF error code for consistency with the
		 * env_zalloc() failure path above (was -ENOMEM).
		 */
		ret = -OCF_ERR_NO_MEM;
		goto err;
	}

	new_type->properties = properties;

	*type = new_type;

	return 0;

err:
	env_free(new_type);
	return ret;
}
/* Unregister a data object type: free its IO allocator, then the type. */
void ocf_data_obj_type_deinit(struct ocf_data_obj_type *type)
{
	ocf_io_allocator_destroy(type->allocator);
	env_free(type);
}
/*
* Data object
*/
/* Backend type (ops table) of the object. */
ocf_data_obj_type_t ocf_data_obj_get_type(ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	return obj->type;
}

/* Adapter-private context previously stored with set_priv(). */
void *ocf_data_obj_get_priv(ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	return obj->priv;
}

/* Attach adapter-private context to the object. */
void ocf_data_obj_set_priv(ocf_data_obj_t obj, void *priv)
{
	OCF_CHECK_NULL(obj);

	obj->priv = priv;
}

/* Object identity; the returned pointer stays owned by the object. */
const struct ocf_data_obj_uuid *ocf_data_obj_get_uuid(
		ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	return &obj->uuid;
}

/* Device length in bytes, as reported by the backend's get_length op. */
uint64_t ocf_data_obj_get_length(ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	return obj->type->properties->ops.get_length(obj);
}

/* Cache associated with this object (set externally - NOTE(review):
 * may be NULL for a detached object; confirm against owner code). */
ocf_cache_t ocf_data_obj_get_cache(ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	return obj->cache;
}
/*
 * Initialize a data object in place.
 *
 * When 'uuid' is NULL the object carries no identity. Otherwise the
 * UUID is either duplicated (uuid_copy == true, object owns the copy)
 * or referenced (caller keeps ownership of the buffer).
 *
 * Returns 0 on success, -OCF_ERR_INVAL on bad arguments, or
 * -OCF_ERR_NO_MEM when duplicating the UUID fails.
 */
int ocf_data_obj_init(ocf_data_obj_t obj, ocf_data_obj_type_t type,
		struct ocf_data_obj_uuid *uuid, bool uuid_copy)
{
	if (!obj || !type)
		return -OCF_ERR_INVAL;

	obj->type = type;

	if (!uuid) {
		/* Anonymous object - nothing more to record. */
		obj->uuid_copy = false;
		return 0;
	}

	obj->uuid_copy = uuid_copy;

	if (!uuid_copy) {
		/* Reference only; caller keeps ownership of the buffer. */
		obj->uuid.data = uuid->data;
	} else {
		/* Take a private copy of the UUID string. */
		obj->uuid.data = env_strdup(uuid->data, ENV_MEM_NORMAL);
		if (!obj->uuid.data)
			return -OCF_ERR_NO_MEM;
	}

	obj->uuid.size = uuid->size;

	return 0;
}
/*
 * Release resources held by the object: frees the UUID buffer only
 * when it was duplicated in ocf_data_obj_init() (uuid_copy == true).
 * Does not free the object itself - see ocf_data_obj_destroy().
 */
void ocf_data_obj_deinit(ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	if (obj->uuid_copy && obj->uuid.data) {
		env_free(obj->uuid.data);
		/* Guard against double free if deinit runs twice. */
		obj->uuid.data = NULL;
	}
}
/*
 * Allocate and initialize a new data object. The UUID is always
 * duplicated, so the returned object owns its identity. On success
 * stores the object in *obj and returns 0; on failure returns a
 * negative error code and frees any partial allocation.
 */
int ocf_data_obj_create(ocf_data_obj_t *obj, ocf_data_obj_type_t type,
		struct ocf_data_obj_uuid *uuid)
{
	ocf_data_obj_t new_obj;
	int result;

	OCF_CHECK_NULL(obj);

	new_obj = env_zalloc(sizeof(*new_obj), ENV_MEM_NORMAL);
	if (!new_obj)
		return -OCF_ERR_NO_MEM;

	result = ocf_data_obj_init(new_obj, type, uuid, true);
	if (result) {
		env_free(new_obj);
		return result;
	}

	*obj = new_obj;

	return 0;
}
/* Counterpart of ocf_data_obj_create(): deinit the object, then free it. */
void ocf_data_obj_destroy(ocf_data_obj_t obj)
{
	OCF_CHECK_NULL(obj);

	ocf_data_obj_deinit(obj);
	env_free(obj);
}
/* Allocate an IO bound to 'obj' from the type's dedicated allocator;
 * returns NULL when the allocator is exhausted. */
struct ocf_io *ocf_data_obj_new_io(ocf_data_obj_t obj)
{
	struct ocf_io *new_io;

	OCF_CHECK_NULL(obj);

	new_io = ocf_io_allocator_new(obj->type->allocator);
	if (!new_io)
		return NULL;

	new_io->obj = obj;

	return new_io;
}

/* Return an IO obtained from ocf_data_obj_new_io() to its allocator. */
void ocf_data_obj_del_io(struct ocf_io* io)
{
	OCF_CHECK_NULL(io);

	ocf_io_allocator_del(io->obj->type->allocator, io);
}
/*
 * Return the bottom-adapter-private context of an IO, which lives
 * directly after struct ocf_io in the allocation (see the layout
 * diagram at the top of this file).
 *
 * The offset is computed on char * - arithmetic on void * is a GNU
 * extension, not ISO C.
 */
void *ocf_data_obj_get_data_from_io(struct ocf_io* io)
{
	return (char *)io + sizeof(struct ocf_io);
}

115
src/ocf_data_obj_priv.h Normal file
View File

@@ -0,0 +1,115 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_DATA_OBJ_PRIV_H__
#define __OCF_DATA_OBJ_PRIV_H__
#include "ocf_env.h"
#include "ocf_io_priv.h"
/*
 * Runtime representation of a registered data object (backend) type:
 * the user-supplied properties plus a dedicated IO allocator sized for
 * this type's io_context_size (see ocf_data_obj_type_init()).
 */
struct ocf_data_obj_type {
	const struct ocf_data_obj_properties *properties;
	env_allocator *allocator;
};

/* A single data object (storage device) instance. */
struct ocf_data_obj {
	ocf_data_obj_type_t type;	/* backend type (ops table) */
	struct ocf_data_obj_uuid uuid;	/* object identity */
	bool uuid_copy;		/* true if uuid.data is owned and freed on deinit */
	void *priv;		/* adapter-private context (get/set_priv) */
	ocf_cache_t cache;	/* associated cache - set externally */
	struct list_head core_pool_item;	/* linkage in the core pool list */
	struct {
		unsigned discard_zeroes:1;
		/* true if reading discarded pages returns 0 */
	} features;
};
int ocf_data_obj_type_init(struct ocf_data_obj_type **type,
const struct ocf_data_obj_properties *properties);
void ocf_data_obj_type_deinit(struct ocf_data_obj_type *type);
static inline struct ocf_io *ocf_dobj_new_io(ocf_data_obj_t obj)
{
ENV_BUG_ON(!obj->type->properties->ops.new_io);
return obj->type->properties->ops.new_io(obj);
}
static inline void ocf_dobj_submit_io(struct ocf_io *io)
{
ENV_BUG_ON(!io->obj->type->properties->ops.submit_io);
io->obj->type->properties->ops.submit_io(io);
}
/*
 * Submit a flush request to the backend.
 *
 * submit_flush is an optional callback: when the backend does not
 * provide it, the flush completes immediately with success. The
 * previous ENV_BUG_ON(!...submit_flush) contradicted this fallback and
 * made the graceful path unreachable, so it has been removed.
 *
 * TODO(rbaldyga): Maybe we should supply function for checking
 * submit_flush availability and return -ENOTSUPP here?
 */
static inline void ocf_dobj_submit_flush(struct ocf_io *io)
{
	if (!io->obj->type->properties->ops.submit_flush)
		ocf_io_end(io, 0);
	else
		io->obj->type->properties->ops.submit_flush(io);
}
/*
 * Submit a discard request to the backend.
 *
 * submit_discard is an optional callback: when the backend does not
 * provide it, the discard completes immediately with success. The
 * previous ENV_BUG_ON(!...submit_discard) contradicted this fallback
 * and made the graceful path unreachable, so it has been removed.
 *
 * TODO(rbaldyga): Maybe we should supply function for checking
 * submit_discard availability and return -ENOTSUPP here?
 */
static inline void ocf_dobj_submit_discard(struct ocf_io *io)
{
	if (!io->obj->type->properties->ops.submit_discard)
		ocf_io_end(io, 0);
	else
		io->obj->type->properties->ops.submit_discard(io);
}
static inline void ocf_dobj_submit_metadata(struct ocf_io *io)
{
ENV_BUG_ON(!io->obj->type->properties->ops.submit_metadata);
io->obj->type->properties->ops.submit_metadata(io);
}
static inline void ocf_dobj_submit_write_zeroes(struct ocf_io *io)
{
ENV_BUG_ON(!io->obj->type->properties->ops.submit_write_zeroes);
io->obj->type->properties->ops.submit_write_zeroes(io);
}
/* Open the underlying device; open is a mandatory callback. */
static inline int ocf_data_obj_open(ocf_data_obj_t obj)
{
	const struct ocf_data_obj_ops *ops = &obj->type->properties->ops;

	ENV_BUG_ON(!ops->open);

	return ops->open(obj);
}

/* Close the underlying device; close is a mandatory callback. */
static inline void ocf_data_obj_close(ocf_data_obj_t obj)
{
	const struct ocf_data_obj_ops *ops = &obj->type->properties->ops;

	ENV_BUG_ON(!ops->close);

	ops->close(obj);
}

/* Largest single IO the backend accepts. */
static inline unsigned int ocf_data_obj_get_max_io_size(ocf_data_obj_t obj)
{
	const struct ocf_data_obj_ops *ops = &obj->type->properties->ops;

	ENV_BUG_ON(!ops->get_max_io_size);

	return ops->get_max_io_size(obj);
}

/* Nonzero when the backend advertises atomic-write capability. */
static inline int ocf_data_obj_is_atomic(ocf_data_obj_t obj)
{
	return obj->type->properties->caps.atomic_writes;
}
#endif /*__OCF_DATA_OBJ_PRIV_H__ */

58
src/ocf_def_priv.h Normal file
View File

@@ -0,0 +1,58 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_DEF_PRIV_H__
#define __OCF_DEF_PRIV_H__
#include "ocf/ocf.h"
#include "ocf_env.h"
/*
 * Unit conversion helpers. Every use of the argument is parenthesized
 * so expression arguments (e.g. a + b) expand correctly - previously
 * the uint64_t cast in BYTES_TO_PAGES()/PAGES_TO_BYTES() bound only to
 * the first token of an unparenthesized argument.
 */
#define BYTES_TO_SECTORS(x)	((x) >> ENV_SECTOR_SHIFT)
#define SECTORS_TO_BYTES(x)	((x) << ENV_SECTOR_SHIFT)

#define BYTES_TO_PAGES(x)	((((uint64_t)(x)) + (PAGE_SIZE - 1)) / PAGE_SIZE)
#define PAGES_TO_BYTES(x)	(((uint64_t)(x)) * PAGE_SIZE)
/* Integer ceiling division: (x + y - 1) / y. GCC statement-expression
 * form so each argument is evaluated exactly once. */
#ifndef DIV_ROUND_UP
# define DIV_ROUND_UP(x, y) \
	({ \
		__typeof__ (x) __x = (x); \
		__typeof__ (y) __y = (y); \
		(__x + __y - 1) / __y; \
	})
#endif
/* Single-evaluation MAX/MIN. The statement-expression temporaries
 * avoid the double-evaluation hazard of the naive ternary macro
 * (e.g. MAX(i++, j) evaluating i++ twice). */
#ifndef MAX
# define MAX(x,y) \
	({ \
		__typeof__ (x) __x = (x); \
		__typeof__ (y) __y = (y); \
		__x > __y ? __x : __y; \
	})
#endif
#ifndef MIN
#define MIN(x,y) \
	({ \
		__typeof__ (x) __x = (x); \
		__typeof__ (y) __y = (y); \
		__x < __y ? __x : __y; \
	})
#endif
/* Metadata version number packed from the OCF release numbers. */
#define METADATA_VERSION() ((OCF_VERSION_MAIN << 16) + \
		(OCF_VERSION_MAJOR << 8) + OCF_VERSION_MINOR)

/*
 * Call conditional reschedule every 'iterations' calls.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement; the previous bare "if { }" form was a dangling-else
 * hazard when used inside an if/else without braces.
 */
#define OCF_COND_RESCHED(cnt, iterations) \
	do { \
		if (unlikely(++(cnt) == (iterations))) { \
			env_cond_resched(); \
			(cnt) = 0; \
		} \
	} while (0)

/* Call conditional reschedule with default interval */
#define OCF_COND_RESCHED_DEFAULT(cnt) OCF_COND_RESCHED(cnt, 1000000)
#endif

Some files were not shown because too many files have changed in this diff Show More