Initial commit
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
735
src/cleaning/acp.c
Normal file
735
src/cleaning/acp.c
Normal file
@@ -0,0 +1,735 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
|
||||
#include "ocf/ocf.h"
|
||||
#include "../ocf_cache_priv.h"
|
||||
#include "cleaning.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../utils/utils_cleaner.h"
|
||||
#include "../utils/utils_cache_line.h"
|
||||
#include "../utils/utils_rq.h"
|
||||
#include "../cleaning/acp.h"
|
||||
#include "../engine/engine_common.h"
|
||||
#include "../concurrency/ocf_cache_concurrency.h"
|
||||
#include "cleaning_priv.h"
|
||||
|
||||
#define OCF_ACP_DEBUG 0
|
||||
|
||||
#if 1 == OCF_ACP_DEBUG
|
||||
|
||||
#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
|
||||
|
||||
#define OCF_DEBUG_LOG(cache, format, ...) \
|
||||
ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
|
||||
format"\n", __func__, __LINE__, ##__VA_ARGS__)
|
||||
|
||||
#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
|
||||
|
||||
#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
|
||||
|
||||
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
|
||||
##__VA_ARGS__)
|
||||
|
||||
#define ACP_DEBUG_INIT(acp) acp->checksum = 0
|
||||
#define ACP_DEBUG_BEGIN(acp, cache_line) acp->checksum ^= cache_line
|
||||
#define ACP_DEBUG_END(acp, cache_line) acp->checksum ^= cache_line
|
||||
#define ACP_DEBUG_CHECK(acp) ENV_BUG_ON(acp->checksum)
|
||||
#else
|
||||
#define OCF_DEBUG_PREFIX
|
||||
#define OCF_DEBUG_LOG(cache, format, ...)
|
||||
#define OCF_DEBUG_TRACE(cache)
|
||||
#define OCF_DEBUG_MSG(cache, msg)
|
||||
#define OCF_DEBUG_PARAM(cache, format, ...)
|
||||
#define ACP_DEBUG_INIT(acp)
|
||||
#define ACP_DEBUG_BEGIN(acp, cache_line)
|
||||
#define ACP_DEBUG_END(acp, cache_line)
|
||||
#define ACP_DEBUG_CHECK(acp)
|
||||
#endif
|
||||
|
||||
#define ACP_CHUNK_SIZE (100 * MiB)
|
||||
|
||||
/* minimal time to chunk cleaning after error */
|
||||
#define ACP_CHUNK_CLEANING_BACKOFF_TIME 5
|
||||
|
||||
/* time to sleep when nothing to clean in ms */
|
||||
#define ACP_BACKOFF_TIME_MS 1000
|
||||
|
||||
#define ACP_MAX_BUCKETS 11
|
||||
|
||||
/* Upper thresholds for buckets in percent dirty pages. First bucket should have
|
||||
* threshold=0 - it isn't cleaned and we don't want dirty chunks staying dirty
|
||||
* forever. Last bucket also should stay at 100 for obvious reasons */
|
||||
static const uint16_t ACP_BUCKET_DEFAULTS[ACP_MAX_BUCKETS] = { 0, 10, 20, 30, 40,
|
||||
50, 60, 70, 80, 90, 100 };
|
||||
|
||||
struct acp_flush_context {
|
||||
/* number of cache lines in flush */
|
||||
uint64_t size;
|
||||
/* chunk_for error handling */
|
||||
struct acp_chunk_info *chunk;
|
||||
/* cache lines to flush */
|
||||
struct flush_data data[OCF_ACP_MAX_FLUSH_MAX_BUFFERS];
|
||||
/* flush error code */
|
||||
int error;
|
||||
};
|
||||
|
||||
struct acp_state {
|
||||
/* currently cleaned chunk */
|
||||
struct acp_chunk_info *chunk;
|
||||
|
||||
/* cache line iterator within current chunk */
|
||||
unsigned iter;
|
||||
|
||||
/* true if there are cache lines to process
|
||||
* current chunk */
|
||||
bool in_progress;
|
||||
};
|
||||
|
||||
struct acp_chunk_info {
|
||||
struct list_head list;
|
||||
uint64_t chunk_id;
|
||||
uint64_t next_cleaning_timestamp;
|
||||
ocf_core_id_t core_id;
|
||||
uint16_t num_dirty;
|
||||
uint8_t bucket_id;
|
||||
};
|
||||
|
||||
struct acp_bucket {
|
||||
struct list_head chunk_list;
|
||||
uint16_t threshold; /* threshold in clines */
|
||||
};
|
||||
|
||||
struct acp_context {
|
||||
env_rwsem chunks_lock;
|
||||
|
||||
/* number of chunks per core */
|
||||
uint64_t num_chunks[OCF_CORE_MAX];
|
||||
|
||||
/* per core array of all chunks */
|
||||
struct acp_chunk_info *chunk_info[OCF_CORE_MAX];
|
||||
|
||||
struct acp_bucket bucket_info[ACP_MAX_BUCKETS];
|
||||
|
||||
/* total number of chunks in cache */
|
||||
uint64_t chunks_total;
|
||||
|
||||
/* structure to keep track of I/O in progress */
|
||||
struct acp_flush_context flush;
|
||||
|
||||
/* cleaning state persistent over subsequent calls to
|
||||
perform_cleaning */
|
||||
struct acp_state state;
|
||||
|
||||
#if 1 == OCF_ACP_DEBUG
|
||||
/* debug only */
|
||||
uint64_t checksum;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct acp_core_line_info
|
||||
{
|
||||
ocf_cache_line_t cache_line;
|
||||
ocf_core_id_t core_id;
|
||||
uint64_t core_line;
|
||||
};
|
||||
|
||||
#define ACP_LOCK_CHUNKS_RD() env_rwsem_down_read(&acp->chunks_lock)
|
||||
|
||||
#define ACP_UNLOCK_CHUNKS_RD() env_rwsem_up_read(&acp->chunks_lock)
|
||||
|
||||
#define ACP_LOCK_CHUNKS_WR() env_rwsem_down_write(&acp->chunks_lock)
|
||||
|
||||
#define ACP_UNLOCK_CHUNKS_WR() env_rwsem_up_write(&acp->chunks_lock)
|
||||
|
||||
/* Return the ACP context stored in the cache's generic cleaning-policy
 * context pointer. Valid only after cleaning_policy_acp_initialize(). */
static struct acp_context *_acp_get_ctx_from_cache(struct ocf_cache *cache)
{
	return cache->cleaning_policy_context;
}
|
||||
|
||||
/* Read the per-cache-line cleaning-policy metadata into @policy_meta and
 * return a pointer to its ACP-specific part. The returned pointer aliases
 * @policy_meta; write changes back with _acp_meta_set(). */
static struct acp_cleaning_policy_meta* _acp_meta_get(
		struct ocf_cache *cache, uint32_t cache_line,
		struct cleaning_policy_meta *policy_meta)
{
	ocf_metadata_get_cleaning_policy(cache, cache_line, policy_meta);
	return &policy_meta->meta.acp;
}

/* Store back per-cache-line cleaning-policy metadata previously obtained
 * (and possibly modified) via _acp_meta_get(). */
static void _acp_meta_set(struct ocf_cache *cache, uint32_t cache_line,
		struct cleaning_policy_meta *policy_meta)
{
	ocf_metadata_set_cleaning_policy(cache, cache_line, policy_meta);
}
|
||||
|
||||
/* Resolve a cache line to its backing (core_id, core_line) pair by
 * querying the collision metadata; returned by value. */
static struct acp_core_line_info _acp_core_line_info(struct ocf_cache *cache,
		ocf_cache_line_t cache_line)
{
	struct acp_core_line_info acp_core_line_info = {.cache_line = cache_line, };
	ocf_metadata_get_core_info(cache, cache_line, &acp_core_line_info.core_id,
			&acp_core_line_info.core_line);
	return acp_core_line_info;
}
|
||||
|
||||
/* Map a cache line to the ACP chunk descriptor covering its backing-core
 * address range. Chunk id = byte offset of the core line / ACP_CHUNK_SIZE. */
static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct acp_core_line_info core_line =
		_acp_core_line_info(cache, cache_line);
	uint64_t chunk_id;

	chunk_id = core_line.core_line * ocf_line_size(cache) / ACP_CHUNK_SIZE;

	return &acp->chunk_info[core_line.core_id][chunk_id];
}
|
||||
|
||||
/* Iterate over core ids of cores actually added to the cache; skips
 * unused slots in the OCF_CORE_MAX-sized core table. */
#define for_each_core(cache, iter) \
	for (iter = 0; iter < OCF_CORE_MAX; iter++) \
		if (cache->core_conf_meta[iter].added)

/* Tear down ACP per-core state (chunk tables, bucket membership) for
 * every added core. Used on deinit and on partial-init rollback. */
static void _acp_remove_cores(struct ocf_cache *cache)
{
	int i;

	for_each_core(cache, i)
		cleaning_policy_acp_remove_core(cache, i);
}
|
||||
|
||||
static int _acp_load_cores(struct ocf_cache *cache)
|
||||
{
|
||||
int i;
|
||||
int err = 0;
|
||||
|
||||
for_each_core(cache, i) {
|
||||
OCF_DEBUG_PARAM(cache, "loading core %i\n", i);
|
||||
err = cleaning_policy_acp_add_core(cache, i);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
||||
if (err)
|
||||
_acp_remove_cores(cache);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Reset ACP per-line metadata for @cache_line to the clean state.
 * Does not touch chunk dirty counters or bucket membership. */
void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct cleaning_policy_meta policy_meta;
	struct acp_cleaning_policy_meta *acp_meta;

	/* TODO: acp meta is going to be removed soon */
	acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
	acp_meta->dirty = 0;
	_acp_meta_set(cache, cache_line, &policy_meta);
}
|
||||
|
||||
/* Release all ACP resources: per-core chunk tables first, then the
 * top-level context itself. Safe to call after a partially failed init
 * (env_vfree on the context pointer; cores already rolled back). */
void cleaning_policy_acp_deinitialize(struct ocf_cache *cache)
{
	_acp_remove_cores(cache);

	env_vfree(cache->cleaning_policy_context);
	cache->cleaning_policy_context = NULL;
}
|
||||
|
||||
/* Rebuild ACP runtime state from persisted collision metadata after a
 * cache load: reset every mapped line's ACP meta, then re-mark lines
 * that are dirty so chunk counters and buckets reflect reality. */
static void _acp_rebuild(struct ocf_cache *cache)
{
	ocf_cache_line_t cline;
	ocf_core_id_t cline_core_id;
	uint32_t step = 0;

	for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
		ocf_metadata_get_core_and_part_id(cache, cline, &cline_core_id,
				NULL);

		/* yield periodically - this loop walks the whole collision table */
		OCF_COND_RESCHED_DEFAULT(step);

		/* OCF_CORE_MAX means the line is not mapped to any core */
		if (cline_core_id == OCF_CORE_MAX)
			continue;

		cleaning_policy_acp_init_cache_block(cache, cline);

		if (!metadata_test_dirty(cache, cline))
			continue;

		/* re-account the dirty line in its chunk/bucket */
		cleaning_policy_acp_set_hot_cache_line(cache, cline);
	}

	ocf_cache_log(cache, log_info, "Finished rebuilding ACP metadata\n");
}
|
||||
|
||||
/* Write default ACP tunables (wake-up time, flush batch size) into the
 * cache's persisted cleaning-policy configuration blob. */
void cleaning_policy_acp_setup(struct ocf_cache *cache)
{
	struct acp_cleaning_policy_config *config;

	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	config->thread_wakeup_time = OCF_ACP_DEFAULT_WAKE_UP;
	config->flush_max_buffers = OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS;
}
|
||||
|
||||
/* Allocate and initialize the ACP context: bucket thresholds, per-core
 * chunk tables (if cores already exist) and runtime state rebuilt from
 * metadata. Returns 0 or -OCF_ERR_NO_MEM / error from core load.
 * @init_metadata is currently unused by this policy. */
int cleaning_policy_acp_initialize(struct ocf_cache *cache,
		int init_metadata)
{
	struct acp_context *acp;
	int err, i;

	/* bug if max chunk number would overflow dirty_no array type */
#if defined (BUILD_BUG_ON)
	BUILD_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
			1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
#else
	ENV_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
			1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
#endif

	/* double initialization is a programming error */
	ENV_BUG_ON(cache->cleaning_policy_context);

	cache->cleaning_policy_context = env_vzalloc(sizeof(struct acp_context));
	if (!cache->cleaning_policy_context) {
		ocf_cache_log(cache, log_err, "acp context allocation error\n");
		return -OCF_ERR_NO_MEM;
	}
	acp = cache->cleaning_policy_context;

	env_rwsem_init(&acp->chunks_lock);

	/* bucket i holds chunks with up to threshold[i] dirty lines;
	 * thresholds are ACP_BUCKET_DEFAULTS percentages of a full chunk */
	for (i = 0; i < ACP_MAX_BUCKETS; i++) {
		INIT_LIST_HEAD(&acp->bucket_info[i].chunk_list);
		acp->bucket_info[i].threshold =
			((ACP_CHUNK_SIZE/ocf_line_size(cache)) *
			 ACP_BUCKET_DEFAULTS[i]) / 100;
	}

	if (cache->conf_meta->core_obj_count > 0) {
		err = _acp_load_cores(cache);
		if (err) {
			/* rollback: frees context and clears the pointer */
			cleaning_policy_acp_deinitialize(cache);
			return err;
		}
	}

	/* populate dirty counters / buckets from on-disk metadata */
	_acp_rebuild(cache);

	return 0;
}
|
||||
|
||||
/* Set a runtime ACP tunable. @param_id selects the knob; @param_value is
 * range-checked by OCF_CLEANING_CHECK_PARAM (which rejects out-of-range
 * values). Returns 0 on success, -OCF_ERR_INVAL for unknown param_id. */
int cleaning_policy_acp_set_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t param_value)
{
	struct acp_cleaning_policy_config *config;

	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	switch (param_id) {
	case ocf_acp_wake_up_time:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ACP_MIN_WAKE_UP,
				OCF_ACP_MAX_WAKE_UP,
				"thread_wakeup_time");
		config->thread_wakeup_time = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
				"wake-up time: %d\n", config->thread_wakeup_time);
		break;
	case ocf_acp_flush_max_buffers:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ACP_MIN_FLUSH_MAX_BUFFERS,
				OCF_ACP_MAX_FLUSH_MAX_BUFFERS,
				"flush_max_buffers");
		config->flush_max_buffers = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread max "
				"buffers flushed per iteration: %d\n",
				config->flush_max_buffers);
		break;
	default:
		return -OCF_ERR_INVAL;
	}

	return 0;
}
|
||||
|
||||
/* Read a runtime ACP tunable into *param_value.
 * Returns 0 on success, -OCF_ERR_INVAL for an unknown param_id
 * (in which case *param_value is left untouched). */
int cleaning_policy_acp_get_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t *param_value)
{
	struct acp_cleaning_policy_config *config =
		(void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;

	switch (param_id) {
	case ocf_acp_wake_up_time:
		*param_value = config->thread_wakeup_time;
		return 0;
	case ocf_acp_flush_max_buffers:
		*param_value = config->flush_max_buffers;
		return 0;
	default:
		return -OCF_ERR_INVAL;
	}
}
|
||||
|
||||
|
||||
/* Attempt to read-lock the cache line backing (core_id, core_line) if it
 * is currently mapped and dirty. Performed under the metadata read lock
 * so the lookup and the trylock see a consistent mapping.
 * Returns the locked collision index, or collision_table_entries (the
 * out-of-band "invalid line" sentinel) when the line is a miss, clean,
 * or the trylock fails. */
static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
		uint32_t core_id, uint64_t core_line)
{
	struct ocf_map_info info;
	bool locked = false;

	OCF_METADATA_LOCK_RD();

	ocf_engine_lookup_map_entry(cache, &info, core_id,
			core_line);

	if (info.status == LOOKUP_HIT &&
			metadata_test_dirty(cache, info.coll_idx) &&
			ocf_cache_line_try_lock_rd(cache, info.coll_idx)) {
		locked = true;
	}

	OCF_METADATA_UNLOCK_RD();

	return locked ? info.coll_idx : cache->device->collision_table_entries;
}
|
||||
|
||||
/* React to a failed flush of the current chunk: stamp the chunk with a
 * backoff deadline (now + ACP_CHUNK_CLEANING_BACKOFF_TIME) so it is
 * skipped by candidate selection for a while, and log (rate-limited). */
static void _acp_handle_flush_error(struct ocf_cache *cache,
		struct acp_context *acp)
{
	struct acp_flush_context *flush = &acp->flush;

	flush->chunk->next_cleaning_timestamp = env_get_tick_count() +
			env_secs_to_ticks(ACP_CHUNK_CLEANING_BACKOFF_TIME);

	/* ocf_cache_log_rl() rate-limits this error message */
	if (ocf_cache_log_rl(cache)) {
		ocf_core_log(&cache->core_obj[flush->chunk->core_id],
				log_err, "Cleaning error (%d) in range"
				" <%llu; %llu) backing off for %u seconds\n",
				flush->error,
				flush->chunk->chunk_id * ACP_CHUNK_SIZE,
				(flush->chunk->chunk_id * ACP_CHUNK_SIZE) +
						ACP_CHUNK_SIZE,
				ACP_CHUNK_CLEANING_BACKOFF_TIME);
	}
}
|
||||
|
||||
/* Called after the flush request completed: drop the read locks taken by
 * _acp_trylock_dirty() for every flushed line and, on error, start the
 * chunk's backoff window. */
static void _acp_flush_end(
		struct ocf_cache *cache,
		struct acp_context *acp)
{
	struct acp_flush_context *flush = &acp->flush;
	int i;

	for (i = 0; i < flush->size; i++) {
		ocf_cache_line_unlock_rd(cache, flush->data[i].cache_line);
		/* balances ACP_DEBUG_BEGIN in _acp_clean; checksum must
		 * return to zero once all lines are unlocked */
		ACP_DEBUG_END(acp, flush->data[i].cache_line);
	}

	if (flush->error)
		_acp_handle_flush_error(cache, acp);
}
|
||||
|
||||
/* Synchronously flush the lines collected in @flush to the core device.
 * Lines are already read-locked and pre-sorted by core line (collected
 * in ascending order), hence cache_line_lock/do_sort are false. */
static void _acp_flush(struct ocf_cache *cache, struct acp_context *acp,
		uint32_t io_queue, struct acp_flush_context *flush)
{
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = false,
		.metadata_locked = false,
		.do_sort = false,
		.io_queue = io_queue,
	};

	flush->error = ocf_cleaner_do_flush_data(cache, flush->data,
			flush->size, &attribs);

	/* releases line locks and handles a possible error */
	_acp_flush_end(cache, acp);
}
|
||||
|
||||
static inline bool _acp_can_clean_chunk(struct ocf_cache *cache,
|
||||
struct acp_chunk_info *chunk)
|
||||
{
|
||||
/* Check if core device is opened and if timeout after cleaning error
|
||||
* expired or wasn't set in the first place */
|
||||
return (cache->core_obj[chunk->core_id].opened &&
|
||||
(chunk->next_cleaning_timestamp > env_get_tick_count() ||
|
||||
!chunk->next_cleaning_timestamp));
|
||||
}
|
||||
|
||||
/* Pick the dirtiest eligible chunk: scan buckets from highest dirty
 * percentage down, returning the first chunk that passes
 * _acp_can_clean_chunk(). Returns NULL when nothing is cleanable.
 * Takes the chunks read lock for the duration of the scan. */
static struct acp_chunk_info *_acp_get_cleaning_candidate(
		struct ocf_cache *cache)
{
	int i;
	struct acp_chunk_info *cur;
	struct acp_context *acp = cache->cleaning_policy_context;

	ACP_LOCK_CHUNKS_RD();

	/* go through all buckets in descending order, excluding bucket 0 which
	 * is supposed to contain all clean chunks */
	for (i = ACP_MAX_BUCKETS - 1; i > 0; i--) {
		list_for_each_entry(cur, &acp->bucket_info[i].chunk_list, list) {
			if (_acp_can_clean_chunk(cache, cur)) {
				ACP_UNLOCK_CHUNKS_RD();
				return cur;
			}
		}
	}

	ACP_UNLOCK_CHUNKS_RD();
	return NULL;
}
|
||||
|
||||
/* Sentinel returned by _acp_clean() when the chunk has been fully scanned. */
#define CHUNK_FINISHED -1

/* clean at most 'flush_max_buffers' cache lines from given chunk, starting
 * at given cache line.
 * Collects dirty, lockable lines from @chunk (beginning at intra-chunk
 * index @start) into acp->flush and issues one synchronous flush.
 * Returns CHUNK_FINISHED when the chunk scan reached its end, otherwise
 * the intra-chunk index to resume from on the next call. */
static int _acp_clean(struct ocf_cache *cache, uint32_t io_queue,
		struct acp_chunk_info *chunk, unsigned start,
		uint32_t flush_max_buffers)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	size_t lines_per_chunk = ACP_CHUNK_SIZE /
			ocf_line_size(cache);
	uint64_t first_core_line = chunk->chunk_id * lines_per_chunk;
	unsigned i;

	OCF_DEBUG_PARAM(cache, "lines per chunk %llu chunk %llu "
			"first_core_line %llu\n",
			(uint64_t)lines_per_chunk,
			chunk->chunk_id,
			first_core_line);

	/* checksum tracks locked-but-not-yet-unlocked lines (debug only) */
	ACP_DEBUG_INIT(acp);

	acp->flush.size = 0;
	acp->flush.chunk = chunk;
	for (i = start; i < lines_per_chunk && acp->flush.size < flush_max_buffers ; i++) {
		uint64_t core_line = first_core_line + i;
		ocf_cache_line_t cache_line;

		/* returns collision_table_entries when the line is a miss,
		 * clean, or could not be locked - skip those */
		cache_line = _acp_trylock_dirty(cache, chunk->core_id, core_line);
		if (cache_line == cache->device->collision_table_entries)
			continue;

		acp->flush.data[acp->flush.size].core_id = chunk->core_id;
		acp->flush.data[acp->flush.size].core_line = core_line;
		acp->flush.data[acp->flush.size].cache_line = cache_line;
		acp->flush.size++;
		ACP_DEBUG_BEGIN(acp, cache_line);
	}

	if (acp->flush.size > 0) {
		/* synchronous; unlocks lines via _acp_flush_end() */
		_acp_flush(cache, acp, io_queue, &acp->flush);
	}

	/* all locks must have been released by now */
	ACP_DEBUG_CHECK(acp);

	return (i == lines_per_chunk) ? CHUNK_FINISHED : i;
}
|
||||
|
||||
/* Return codes of _acp_clean_iteration(). */
#define NOTHING_TO_CLEAN 0
#define MORE_TO_CLEAN 1

/* Clean at most 'flush_max_buffers' cache lines from current or newly
 * selected chunk.
 * Cleaning position is persisted in acp->state across calls, so a large
 * chunk is processed over several wake-ups. Returns NOTHING_TO_CLEAN
 * when no eligible chunk exists, MORE_TO_CLEAN otherwise.
 * NOTE(review): state->iter is unsigned while _acp_clean() may return
 * CHUNK_FINISHED (-1); the comparison below relies on the usual
 * arithmetic conversions making (unsigned)-1 == CHUNK_FINISHED hold. */
static int _acp_clean_iteration(struct ocf_cache *cache, uint32_t io_queue,
		uint32_t flush_max_buffers)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct acp_state *state = &acp->state;

	if (!state->in_progress) {
		/* get next chunk to clean */
		state->chunk = _acp_get_cleaning_candidate(cache);

		if (!state->chunk) {
			/* nothing to clean */
			return NOTHING_TO_CLEAN;
		}

		/* new cleaning cycle - reset state */
		state->iter = 0;
		state->in_progress = true;
	}

	state->iter = _acp_clean(cache, io_queue, state->chunk, state->iter,
			flush_max_buffers);

	if (state->iter == CHUNK_FINISHED) {
		/* reached end of chunk - reset state */
		state->in_progress = false;
	}

	return MORE_TO_CLEAN;
}
|
||||
|
||||
int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
|
||||
uint32_t io_queue)
|
||||
{
|
||||
struct acp_cleaning_policy_config *config;
|
||||
int ret;
|
||||
|
||||
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
|
||||
|
||||
if (NOTHING_TO_CLEAN == _acp_clean_iteration(cache, io_queue,
|
||||
config->flush_max_buffers)) {
|
||||
ret = ACP_BACKOFF_TIME_MS;
|
||||
} else {
|
||||
ret = config->thread_wakeup_time;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Move @chunk between adjacent buckets when its dirty-line count crosses
 * a bucket threshold. At most one step per call; callers invoke this on
 * every single-line dirty/clean transition, so one step always suffices.
 * Caller must hold the chunks write lock. */
static void _acp_update_bucket(struct acp_context *acp,
		struct acp_chunk_info *chunk)
{
	struct acp_bucket *bucket = &acp->bucket_info[chunk->bucket_id];

	if (chunk->num_dirty > bucket->threshold) {
		/* top bucket's threshold is 100% - can never be exceeded */
		ENV_BUG_ON(chunk->bucket_id == ACP_MAX_BUCKETS - 1);

		chunk->bucket_id++;
		/* buckets are stored in array, move up one bucket.
		 * No overflow here. ENV_BUG_ON made sure of no incrementation on
		 * last bucket */
		bucket++;

		/* tail: newly-dirtier chunks cleaned after older ones */
		list_move_tail(&chunk->list, &bucket->chunk_list);
	} else if (chunk->bucket_id &&
			chunk->num_dirty <= (bucket - 1)->threshold) {
		chunk->bucket_id--;
		/* move down one bucket, we made sure we won't underflow */
		bucket--;

		list_move(&chunk->list, &bucket->chunk_list);
	}
}
|
||||
|
||||
/* Notify ACP that @cache_line became dirty: mark its per-line ACP meta,
 * bump the owning chunk's dirty counter (only on a clean->dirty
 * transition) and re-bucket the chunk. Takes the chunks write lock. */
void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct cleaning_policy_meta policy_meta;
	struct acp_cleaning_policy_meta *acp_meta;
	struct acp_chunk_info *chunk;

	ACP_LOCK_CHUNKS_WR();

	acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
	chunk = _acp_get_chunk(cache, cache_line);

	if (!acp_meta->dirty) {
		/* clean -> dirty transition: account it exactly once */
		acp_meta->dirty = 1;
		_acp_meta_set(cache, cache_line, &policy_meta);
		chunk->num_dirty++;
	}

	_acp_update_bucket(acp, chunk);

	ACP_UNLOCK_CHUNKS_WR();
}
|
||||
|
||||
/* Notify ACP that @cache_line was invalidated/cleaned: clear its per-line
 * ACP meta, decrement the owning chunk's dirty counter (only on a
 * dirty->clean transition) and re-bucket the chunk.
 * NOTE(review): unlike set_hot_cache_line() this does not take the
 * chunks write lock - presumably the caller provides the necessary
 * exclusion; confirm against call sites. */
void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
		uint32_t cache_line)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	struct cleaning_policy_meta policy_meta;
	struct acp_cleaning_policy_meta *acp_meta;
	struct acp_chunk_info *chunk;

	acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
	chunk = _acp_get_chunk(cache, cache_line);

	if (acp_meta->dirty) {
		acp_meta->dirty = 0;
		_acp_meta_set(cache, cache_line, &policy_meta);
		chunk->num_dirty--;
	}

	_acp_update_bucket(acp, chunk);
}
|
||||
|
||||
/* Purge ACP state for every cache line of @core_id mapped within the
 * byte range [start_byte, end_byte]; delegates the iteration to
 * ocf_metadata_actor(), applying purge_block per matching line. */
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
		int core_id, uint64_t start_byte, uint64_t end_byte)
{
	return ocf_metadata_actor(cache, PARTITION_INVALID,
			core_id, start_byte, end_byte,
			cleaning_policy_acp_purge_block);
}
|
||||
|
||||
/* Tear down ACP state for @core_id: abort any in-progress cleaning of
 * one of its chunks, unlink all its chunks from their buckets and free
 * the per-core chunk table.
 * NOTE(review): acp->state is reset before the chunks write lock is
 * taken - presumably the cleaner thread is already stopped/quiesced at
 * this point; confirm against callers. */
void cleaning_policy_acp_remove_core(ocf_cache_t cache,
		ocf_core_id_t core_id)
{
	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
	uint64_t i;

	/* accounting invariant: totals can never go negative */
	ENV_BUG_ON(acp->chunks_total < acp->num_chunks[core_id]);

	if (acp->state.in_progress && acp->state.chunk->core_id == core_id) {
		/* the chunk being cleaned belongs to this core - drop it */
		acp->state.in_progress = false;
		acp->state.iter = 0;
		acp->state.chunk = NULL;
	}

	ACP_LOCK_CHUNKS_WR();

	for (i = 0; i < acp->num_chunks[core_id]; i++)
		list_del(&acp->chunk_info[core_id][i].list);

	acp->chunks_total -= acp->num_chunks[core_id];
	acp->num_chunks[core_id] = 0;

	env_vfree(acp->chunk_info[core_id]);
	acp->chunk_info[core_id] = NULL;

	ACP_UNLOCK_CHUNKS_WR();
}
|
||||
|
||||
int cleaning_policy_acp_add_core(ocf_cache_t cache,
|
||||
ocf_core_id_t core_id)
|
||||
{
|
||||
uint64_t core_size = cache->core_conf_meta[core_id].length;
|
||||
uint64_t num_chunks = DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
|
||||
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
|
||||
int i;
|
||||
|
||||
OCF_DEBUG_PARAM(cache, "%s core_id %llu num_chunks %llu\n",
|
||||
__func__, (uint64_t)core_id, (uint64_t) num_chunks);
|
||||
|
||||
ACP_LOCK_CHUNKS_WR();
|
||||
|
||||
ENV_BUG_ON(acp->chunk_info[core_id]);
|
||||
|
||||
acp->chunk_info[core_id] =
|
||||
env_vzalloc(num_chunks * sizeof(acp->chunk_info[0][0]));
|
||||
|
||||
if (!acp->chunk_info[core_id]) {
|
||||
ACP_UNLOCK_CHUNKS_WR();
|
||||
OCF_DEBUG_PARAM(cache, "failed to allocate acp tables\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
OCF_DEBUG_PARAM(cache, "successfully allocated acp tables\n");
|
||||
|
||||
/* increment counters */
|
||||
acp->num_chunks[core_id] = num_chunks;
|
||||
acp->chunks_total += num_chunks;
|
||||
|
||||
for (i = 0; i < acp->num_chunks[core_id]; i++) {
|
||||
/* fill in chunk metadata and add to the clean bucket */
|
||||
acp->chunk_info[core_id][i].core_id = core_id;
|
||||
acp->chunk_info[core_id][i].chunk_id = i;
|
||||
list_add(&acp->chunk_info[core_id][i].list,
|
||||
&acp->bucket_info[0].chunk_list);
|
||||
}
|
||||
|
||||
ACP_UNLOCK_CHUNKS_WR();
|
||||
|
||||
return 0;
|
||||
}
|
45
src/cleaning/acp.h
Normal file
45
src/cleaning/acp.h
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#ifndef __LAYER_CLEANING_POLICY_AGGRESSIVE_H__

#define __LAYER_CLEANING_POLICY_AGGRESSIVE_H__

#include "cleaning.h"

/* Write default ACP tunables into the cache's persisted config. */
void cleaning_policy_acp_setup(struct ocf_cache *cache);

/* Allocate and initialize ACP runtime state; 0 or negative error. */
int cleaning_policy_acp_initialize(struct ocf_cache *cache,
		int init_metadata);

/* Free all ACP runtime state. */
void cleaning_policy_acp_deinitialize(struct ocf_cache *cache);

/* Run one cleaning iteration; returns the next wake-up delay in ms. */
int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
		uint32_t io_queue);

/* Reset ACP per-line metadata for a cache line to the clean state. */
void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
		uint32_t cache_line);

/* Account a clean->dirty transition of a cache line. */
void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
		uint32_t cache_line);

/* Account a dirty->clean / invalidation of a cache line. */
void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
		uint32_t cache_line);

/* Purge ACP state for a core's byte range; 0 or negative error. */
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
		int core_id, uint64_t start_byte, uint64_t end_byte);

/* Set a runtime tunable; 0 or -OCF_ERR_INVAL. */
int cleaning_policy_acp_set_cleaning_param(struct ocf_cache *cache,
		uint32_t param_id, uint32_t param_value);

/* Read a runtime tunable; 0 or -OCF_ERR_INVAL. */
int cleaning_policy_acp_get_cleaning_param(struct ocf_cache *cache,
		uint32_t param_id, uint32_t *param_value);

/* Create / destroy per-core ACP state. */
int cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id);

void cleaning_policy_acp_remove_core(ocf_cache_t cache,
		ocf_core_id_t core_id);

#endif
23
src/cleaning/acp_structs.h
Normal file
23
src/cleaning/acp_structs.h
Normal file
@@ -0,0 +1,23 @@
|
||||
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#ifndef __CLEANING_AGGRESSIVE_STRUCTS_H__
#define __CLEANING_AGGRESSIVE_STRUCTS_H__

#include "../utils/utils_cleaner.h"

/* TODO: remove acp metadata */
/* Per-cache-line ACP metadata: a single dirty flag mirroring the
 * generic dirty bit, used for transition accounting. */
struct acp_cleaning_policy_meta {
	uint8_t dirty : 1;
};

/* cleaning policy per partition metadata */
/* Persisted ACP configuration (see OCF_ACP_DEFAULT_* for defaults). */
struct acp_cleaning_policy_config {
	uint32_t thread_wakeup_time; /* in milliseconds*/
	uint32_t flush_max_buffers; /* in lines */
};

#endif
802
src/cleaning/alru.c
Normal file
802
src/cleaning/alru.c
Normal file
@@ -0,0 +1,802 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
|
||||
#include "ocf/ocf.h"
|
||||
#include "../ocf_cache_priv.h"
|
||||
#include "cleaning.h"
|
||||
#include "alru.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../utils/utils_cleaner.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_allocator.h"
|
||||
#include "../concurrency/ocf_cache_concurrency.h"
|
||||
#include "../ocf_def_priv.h"
|
||||
#include "cleaning_priv.h"
|
||||
|
||||
#define is_alru_head(x) (x == collision_table_entries)
|
||||
#define is_alru_tail(x) (x == collision_table_entries)
|
||||
|
||||
#define OCF_CLEANING_DEBUG 0
|
||||
|
||||
#if 1 == OCF_CLEANING_DEBUG
|
||||
|
||||
#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
|
||||
|
||||
#define OCF_DEBUG_LOG(cache, format, ...) \
|
||||
ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
|
||||
format"\n", __func__, __LINE__, ##__VA_ARGS__)
|
||||
|
||||
#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
|
||||
|
||||
#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
|
||||
|
||||
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
|
||||
##__VA_ARGS__)
|
||||
|
||||
#else
|
||||
#define OCF_DEBUG_PREFIX
|
||||
#define OCF_DEBUG_LOG(cache, format, ...)
|
||||
#define OCF_DEBUG_TRACE(cache)
|
||||
#define OCF_DEBUG_MSG(cache, msg)
|
||||
#define OCF_DEBUG_PARAM(cache, format, ...)
|
||||
#endif
|
||||
|
||||
/* Identifies a flush candidate: a cache line together with its backing
 * core id and core sector. */
struct flush_merge_struct {
	ocf_cache_line_t cache_line;
	ocf_core_id_t core_id;
	uint64_t core_sector;
};
|
||||
|
||||
/* -- Start of ALRU functions -- */
|
||||
|
||||
|
||||
/* Sets the given collision_index as the new _head_ of the ALRU list. */
static inline void update_alru_head(struct ocf_cache *cache,
		int partition_id, unsigned int collision_index)
{
	struct ocf_user_part *part = &cache->user_parts[partition_id];

	part->runtime->cleaning.policy.alru.lru_head = collision_index;
}

/* Sets the given collision_index as the new _tail_ of the ALRU list. */
static inline void update_alru_tail(struct ocf_cache *cache,
		int partition_id, unsigned int collision_index)
{
	struct ocf_user_part *part = &cache->user_parts[partition_id];

	part->runtime->cleaning.policy.alru.lru_tail = collision_index;
}

/* Sets the given collision_index as the new _head_ and _tail_
 * of the ALRU list (single-element or empty-sentinel case).
 */
static inline void update_alru_head_tail(struct ocf_cache *cache,
		int partition_id, unsigned int collision_index)
{
	update_alru_head(cache, partition_id, collision_index);
	update_alru_tail(cache, partition_id, collision_index);
}
|
||||
|
||||
|
||||
/* Adds the given collision_index to the _head_ of the ALRU list.
 * The list is doubly linked through per-line cleaning metadata
 * (lru_next/lru_prev), with collision_table_entries acting as the NULL
 * sentinel. The line's timestamp is set to the current time in seconds.
 * The line is expected to be dirty and valid (warned otherwise). */
static void add_alru_head(struct ocf_cache *cache, int partition_id,
		unsigned int collision_index)
{
	unsigned int curr_head_index;
	unsigned int collision_table_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[partition_id];
	struct cleaning_policy_meta policy;

	ENV_BUG_ON(!(collision_index < collision_table_entries));

	ENV_BUG_ON(env_atomic_read(
			&part->runtime->cleaning.policy.alru.size) < 0);

	ENV_WARN_ON(!metadata_test_dirty(cache, collision_index));
	ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index));

	/* First node to be added/ */
	if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
		update_alru_head_tail(cache, partition_id, collision_index);

		/* both links are the sentinel: single-element list */
		ocf_metadata_get_cleaning_policy(cache, collision_index,
				&policy);
		policy.meta.alru.lru_next = collision_table_entries;
		policy.meta.alru.lru_prev = collision_table_entries;
		policy.meta.alru.timestamp = env_ticks_to_secs(
				env_get_tick_count());
		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
	} else {
		/* Not the first node to be added. */

		curr_head_index = part->runtime->cleaning.policy.alru.lru_head;

		ENV_BUG_ON(!(curr_head_index < collision_table_entries));

		/* link new node in front of the current head */
		ocf_metadata_get_cleaning_policy(cache, collision_index,
				&policy);
		policy.meta.alru.lru_next = curr_head_index;
		policy.meta.alru.lru_prev = collision_table_entries;
		policy.meta.alru.timestamp = env_ticks_to_secs(
				env_get_tick_count());
		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);

		/* back-link old head to the new node */
		ocf_metadata_get_cleaning_policy(cache, curr_head_index,
				&policy);
		policy.meta.alru.lru_prev = collision_index;
		ocf_metadata_set_cleaning_policy(cache, curr_head_index,
				&policy);

		update_alru_head(cache, partition_id, collision_index);
	}

	env_atomic_inc(&part->runtime->cleaning.policy.alru.size);
}
|
||||
|
||||
/* Deletes the node with the given collision_index from the ALRU list */
/*
 * The list is a doubly-linked list over per-line cleaning metadata;
 * a link equal to collision_table_entries is the NULL sentinel.
 * Handles four cases: sole node, head, tail, and middle node.
 */
static void remove_alru_list(struct ocf_cache *cache, int partition_id,
		unsigned int collision_index)
{
	uint32_t prev_lru_node, next_lru_node;
	uint32_t collision_table_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[partition_id];
	struct alru_cleaning_policy *cleaning_policy =
		&part->runtime->cleaning.policy.alru;
	struct cleaning_policy_meta policy;

	ENV_BUG_ON(!(collision_index < collision_table_entries));

	/* Removing from an empty list indicates corrupted accounting. */
	if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
		ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
				"from empty ALRU Cleaning Policy queue!\n");
		ENV_BUG();
	}

	ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);

	/* Set prev and next (even if non existent) */
	next_lru_node = policy.meta.alru.lru_next;
	prev_lru_node = policy.meta.alru.lru_prev;

	/* Check if entry is not part of the ALRU list */
	if ((next_lru_node == collision_table_entries) &&
			(prev_lru_node == collision_table_entries) &&
			(cleaning_policy->lru_head != collision_index) &&
			(cleaning_policy->lru_tail != collision_index)) {
		return;
	}

	/* Case 0: If we are head AND tail, there is only one node. So unlink
	 * node and set that there is no node left in the list.
	 */
	if (cleaning_policy->lru_head == collision_index &&
			cleaning_policy->lru_tail == collision_index) {
		policy.meta.alru.lru_next = collision_table_entries;
		policy.meta.alru.lru_prev = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);

		/* Head and tail both become the NULL sentinel. */
		update_alru_head_tail(cache, partition_id,
				collision_table_entries);
	}

	/* Case 1: else if this collision_index is ALRU head, but not tail,
	 * update head and return
	 */
	else if ((cleaning_policy->lru_tail != collision_index) &&
			(cleaning_policy->lru_head == collision_index)) {
		struct cleaning_policy_meta next_policy;

		ENV_BUG_ON(!(next_lru_node < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, next_lru_node,
				&next_policy);

		update_alru_head(cache, partition_id, next_lru_node);

		policy.meta.alru.lru_next = collision_table_entries;
		next_policy.meta.alru.lru_prev = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
		ocf_metadata_set_cleaning_policy(cache, next_lru_node,
				&next_policy);
	}

	/* Case 2: else if this collision_index is ALRU tail, but not head,
	 * update tail and return
	 */
	else if ((cleaning_policy->lru_head != collision_index) &&
			(cleaning_policy->lru_tail == collision_index)) {
		struct cleaning_policy_meta prev_policy;

		ENV_BUG_ON(!(prev_lru_node < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
				&prev_policy);

		update_alru_tail(cache, partition_id, prev_lru_node);

		policy.meta.alru.lru_prev = collision_table_entries;
		prev_policy.meta.alru.lru_next = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
		ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
				&prev_policy);
	}

	/* Case 3: else this collision_index is a middle node. There is no
	 * change to the head and the tail pointers.
	 */
	else {
		struct cleaning_policy_meta next_policy;
		struct cleaning_policy_meta prev_policy;

		ENV_BUG_ON(!(next_lru_node < collision_table_entries));
		ENV_BUG_ON(!(prev_lru_node < collision_table_entries));

		ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
				&prev_policy);
		ocf_metadata_get_cleaning_policy(cache, next_lru_node,
				&next_policy);

		/* Update prev and next nodes */
		prev_policy.meta.alru.lru_next = policy.meta.alru.lru_next;
		next_policy.meta.alru.lru_prev = policy.meta.alru.lru_prev;

		/* Update the given node */
		policy.meta.alru.lru_next = collision_table_entries;
		policy.meta.alru.lru_prev = collision_table_entries;

		ocf_metadata_set_cleaning_policy(cache, collision_index,
				&policy);
		ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
				&prev_policy);
		ocf_metadata_set_cleaning_policy(cache, next_lru_node,
				&next_policy);
	}

	env_atomic_dec(&part->runtime->cleaning.policy.alru.size);
}
|
||||
|
||||
static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
unsigned int collision_index)
|
||||
{
|
||||
uint32_t prev_lru_node, next_lru_node;
|
||||
uint32_t collision_table_entries = cache->device->collision_table_entries;
|
||||
struct ocf_user_part *part = &cache->user_parts[partition_id];
|
||||
struct alru_cleaning_policy *cleaning_policy =
|
||||
&part->runtime->cleaning.policy.alru;
|
||||
struct cleaning_policy_meta policy;
|
||||
|
||||
ENV_BUG_ON(!(collision_index < collision_table_entries));
|
||||
|
||||
ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);
|
||||
|
||||
next_lru_node = policy.meta.alru.lru_next;
|
||||
prev_lru_node = policy.meta.alru.lru_prev;
|
||||
|
||||
return cleaning_policy->lru_tail == collision_index ||
|
||||
cleaning_policy->lru_head == collision_index ||
|
||||
next_lru_node != collision_table_entries ||
|
||||
prev_lru_node != collision_table_entries;
|
||||
}
|
||||
|
||||
|
||||
/* -- End of ALRU functions -- */
|
||||
|
||||
void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
|
||||
uint32_t cache_line)
|
||||
{
|
||||
struct cleaning_policy_meta policy;
|
||||
|
||||
ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
|
||||
|
||||
policy.meta.alru.timestamp = 0;
|
||||
policy.meta.alru.lru_prev = cache->device->collision_table_entries;
|
||||
policy.meta.alru.lru_next = cache->device->collision_table_entries;
|
||||
|
||||
ocf_metadata_set_cleaning_policy(cache, cache_line, &policy);
|
||||
}
|
||||
|
||||
void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
|
||||
uint32_t cache_line)
|
||||
{
|
||||
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
|
||||
cache_line);
|
||||
|
||||
remove_alru_list(cache, part_id, cache_line);
|
||||
}
|
||||
|
||||
/*
 * Metadata-actor callback: unlink the line from the ALRU list,
 * but only if it is actually on the list (the actor may visit
 * clean lines that were never added).
 */
static void __cleaning_policy_alru_purge_cache_block_any(
		struct ocf_cache *cache, uint32_t cache_line)
{
	ocf_part_id_t pid = ocf_metadata_get_partition_id(cache, cache_line);

	if (!is_on_alru_list(cache, pid, cache_line))
		return;

	remove_alru_list(cache, pid, cache_line);
}
|
||||
|
||||
/*
 * Purge ALRU tracking for every line of the given core within
 * [start_byte, end_byte], across all partitions. OR-accumulates the
 * metadata-actor results and returns the combined status.
 */
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
		uint64_t start_byte, uint64_t end_byte)
{
	struct ocf_user_part *part;
	ocf_part_id_t id;
	int result = 0;

	for_each_part(cache, part, id) {
		/* Empty per-partition list - nothing to purge there. */
		if (env_atomic_read(
				&part->runtime->cleaning.policy.alru.size) == 0)
			continue;

		result |= ocf_metadata_actor(cache, id, core_id,
				start_byte, end_byte,
				__cleaning_policy_alru_purge_cache_block_any);
	}

	return result;
}
|
||||
|
||||
/*
 * Mark a dirty cache line as recently written ("hot"): move it to the
 * head of its partition's ALRU list, refreshing its timestamp via
 * the add path.
 */
void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
		uint32_t cache_line)
{
	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
			cache_line);
	struct ocf_user_part *part = &cache->user_parts[part_id];

	uint32_t prev_lru_node, next_lru_node;
	uint32_t collision_table_entries = cache->device->collision_table_entries;
	struct cleaning_policy_meta policy;

	/* Only dirty, valid lines are expected on the cleaning list. */
	ENV_WARN_ON(!metadata_test_dirty(cache, cache_line));
	ENV_WARN_ON(!metadata_test_valid_any(cache, cache_line));

	ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
	next_lru_node = policy.meta.alru.lru_next;
	prev_lru_node = policy.meta.alru.lru_prev;

	/* Unlink first if the line is already on the list: it has a live
	 * link, or it is the sole element (head AND tail with sentinel
	 * links on both sides). */
	if ((next_lru_node != collision_table_entries) ||
			(prev_lru_node != collision_table_entries) ||
			((part->runtime->cleaning.policy.
				alru.lru_head == cache_line) &&
			(part->runtime->cleaning.policy.
				alru.lru_tail == cache_line)))
		remove_alru_list(cache, part_id, cache_line);

	add_alru_head(cache, part_id, cache_line);
}
|
||||
|
||||
static void _alru_rebuild(struct ocf_cache *cache)
|
||||
{
|
||||
struct ocf_user_part *part;
|
||||
ocf_part_id_t part_id;
|
||||
ocf_core_id_t core_id;
|
||||
ocf_cache_line_t cline;
|
||||
uint32_t step = 0;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
/* ALRU initialization */
|
||||
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
|
||||
part->runtime->cleaning.policy.alru.lru_head =
|
||||
cache->device->collision_table_entries;
|
||||
part->runtime->cleaning.policy.alru.lru_tail =
|
||||
cache->device->collision_table_entries;
|
||||
cache->device->runtime_meta->cleaning_thread_access = 0;
|
||||
}
|
||||
|
||||
for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
|
||||
ocf_metadata_get_core_and_part_id(cache, cline, &core_id,
|
||||
NULL);
|
||||
|
||||
OCF_COND_RESCHED_DEFAULT(step);
|
||||
|
||||
if (core_id == OCF_CORE_MAX)
|
||||
continue;
|
||||
|
||||
cleaning_policy_alru_init_cache_block(cache, cline);
|
||||
|
||||
if (!metadata_test_dirty(cache, cline))
|
||||
continue;
|
||||
|
||||
cleaning_policy_alru_set_hot_cache_line(cache, cline);
|
||||
}
|
||||
}
|
||||
|
||||
static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache,
|
||||
struct ocf_user_part *part, int init_metadata)
|
||||
{
|
||||
|
||||
if (init_metadata) {
|
||||
/* ALRU initialization */
|
||||
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
|
||||
part->runtime->cleaning.policy.alru.lru_head =
|
||||
cache->device->collision_table_entries;
|
||||
part->runtime->cleaning.policy.alru.lru_tail =
|
||||
cache->device->collision_table_entries;
|
||||
}
|
||||
|
||||
cache->device->runtime_meta->cleaning_thread_access = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cleaning_policy_alru_setup(struct ocf_cache *cache)
|
||||
{
|
||||
struct alru_cleaning_policy_config *config;
|
||||
|
||||
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
|
||||
|
||||
config->thread_wakeup_time = OCF_ALRU_DEFAULT_WAKE_UP;
|
||||
config->stale_buffer_time = OCF_ALRU_DEFAULT_STALENESS_TIME;
|
||||
config->flush_max_buffers = OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS;
|
||||
config->activity_threshold = OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD;
|
||||
}
|
||||
|
||||
int cleaning_policy_alru_initialize(struct ocf_cache *cache, int init_metadata)
|
||||
{
|
||||
struct ocf_user_part *part;
|
||||
ocf_part_id_t part_id;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
cleaning_policy_alru_initialize_part(cache,
|
||||
part, init_metadata);
|
||||
}
|
||||
|
||||
if (init_metadata)
|
||||
_alru_rebuild(cache);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Set one ALRU tunable. Returns 0 on success or -OCF_ERR_INVAL for an
 * unknown param_id. NOTE: OCF_CLEANING_CHECK_PARAM embeds an early
 * 'return -OCF_ERR_INVAL' for out-of-range values.
 */
int cleaning_policy_alru_set_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t param_value)
{
	struct alru_cleaning_policy_config *config;

	/* ALRU config is stored in the generic per-policy blob. */
	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	switch (param_id) {
	case ocf_alru_wake_up_time:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_WAKE_UP,
				OCF_ALRU_MAX_WAKE_UP,
				"thread_wakeup_time");
		config->thread_wakeup_time = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
				"wake-up time: %d\n", config->thread_wakeup_time);
		break;
	case ocf_alru_stale_buffer_time:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_STALENESS_TIME,
				OCF_ALRU_MAX_STALENESS_TIME,
				"stale_buffer_time");
		config->stale_buffer_time = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
				"staleness time: %d\n", config->stale_buffer_time);
		break;
	case ocf_alru_flush_max_buffers:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_FLUSH_MAX_BUFFERS,
				OCF_ALRU_MAX_FLUSH_MAX_BUFFERS,
				"flush_max_buffers");
		config->flush_max_buffers = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread max "
				"buffers flushed per iteration: %d\n",
				config->flush_max_buffers);
		break;
	case ocf_alru_activity_threshold:
		OCF_CLEANING_CHECK_PARAM(cache, param_value,
				OCF_ALRU_MIN_ACTIVITY_THRESHOLD,
				OCF_ALRU_MAX_ACTIVITY_THRESHOLD,
				"activity_threshold");
		config->activity_threshold = param_value;
		ocf_cache_log(cache, log_info, "Write-back flush thread "
				"activity time threshold: %d\n",
				config->activity_threshold);
		break;
	default:
		return -OCF_ERR_INVAL;
	}

	return 0;
}
|
||||
|
||||
/*
 * Read one ALRU tunable into *param_value. Returns 0 on success,
 * -OCF_ERR_INVAL for an unknown param_id.
 */
int cleaning_policy_alru_get_cleaning_param(ocf_cache_t cache,
		uint32_t param_id, uint32_t *param_value)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	if (param_id == ocf_alru_wake_up_time)
		*param_value = cfg->thread_wakeup_time;
	else if (param_id == ocf_alru_stale_buffer_time)
		*param_value = cfg->stale_buffer_time;
	else if (param_id == ocf_alru_flush_max_buffers)
		*param_value = cfg->flush_max_buffers;
	else if (param_id == ocf_alru_activity_threshold)
		*param_value = cfg->activity_threshold;
	else
		return -OCF_ERR_INVAL;

	return 0;
}
|
||||
|
||||
static inline uint32_t compute_timestamp(
|
||||
const struct alru_cleaning_policy_config *config)
|
||||
{
|
||||
unsigned long time;
|
||||
|
||||
time = env_get_tick_count();
|
||||
time -= env_secs_to_ticks(config->stale_buffer_time);
|
||||
time = env_ticks_to_secs(time);
|
||||
|
||||
return (uint32_t) time;
|
||||
}
|
||||
|
||||
static int check_for_io_activity(struct ocf_cache *cache,
|
||||
struct alru_cleaning_policy_config *config)
|
||||
{
|
||||
unsigned int now, last;
|
||||
|
||||
now = env_ticks_to_msecs(env_get_tick_count());
|
||||
last = env_atomic_read(&cache->last_access_ms);
|
||||
|
||||
if ((now - last) < config->activity_threshold)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cmp_ocf_user_parts(const void *p1, const void *p2) {
|
||||
const struct ocf_user_part *t1 = *(const struct ocf_user_part**)p1;
|
||||
const struct ocf_user_part *t2 = *(const struct ocf_user_part**)p2;
|
||||
|
||||
if (t1->config->priority > t2->config->priority)
|
||||
return 1;
|
||||
else if (t1->config->priority < t2->config->priority)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* env_sort swap callback: exchange two pointer-sized slots.
 * 'size' is required by the callback signature but unused - slots are
 * always single pointers here. */
static void swp_ocf_user_part(void *part1, void *part2, int size) {
	void **slot_a = (void **)part1;
	void **slot_b = (void **)part2;
	void *held = *slot_a;

	*slot_a = *slot_b;
	*slot_b = held;
}
|
||||
|
||||
static void get_parts_sorted(struct ocf_user_part **parts,
|
||||
struct ocf_cache *cache) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
|
||||
parts[i] = &cache->user_parts[i];
|
||||
|
||||
env_sort(parts, OCF_IO_CLASS_MAX, sizeof(struct ocf_user_part*),
|
||||
cmp_ocf_user_parts, swp_ocf_user_part);
|
||||
}
|
||||
|
||||
/*
 * Decide whether cleaning should be postponed: true while fewer than
 * thread_wakeup_time seconds have passed since the cleaner's last
 * progress mark. *delta receives the elapsed seconds either way.
 */
static int clean_later(ocf_cache_t cache, uint32_t *delta)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
	uint32_t now_s = env_ticks_to_secs(env_get_tick_count());

	*delta = now_s - cache->device->runtime_meta->cleaning_thread_access;

	return (*delta <= cfg->thread_wakeup_time) ? true : false;
}
|
||||
|
||||
static void get_block_to_flush(struct flush_data* dst,
|
||||
ocf_cache_line_t cache_line, struct ocf_cache* cache)
|
||||
{
|
||||
ocf_core_id_t core_id;
|
||||
uint64_t core_line;
|
||||
|
||||
ocf_metadata_get_core_info(cache, cache_line,
|
||||
&core_id, &core_line);
|
||||
|
||||
dst->cache_line = cache_line;
|
||||
dst->core_id = core_id;
|
||||
dst->core_line = core_line;
|
||||
}
|
||||
|
||||
static int more_blocks_to_flush(struct ocf_cache *cache,
|
||||
ocf_cache_line_t cache_line, uint32_t last_access)
|
||||
{
|
||||
struct cleaning_policy_meta policy;
|
||||
|
||||
if (cache_line >= cache->device->collision_table_entries)
|
||||
return false;
|
||||
|
||||
ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
|
||||
|
||||
if (policy.meta.alru.timestamp >= last_access)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int block_is_busy(struct ocf_cache *cache,
|
||||
ocf_cache_line_t cache_line)
|
||||
{
|
||||
ocf_core_id_t core_id;
|
||||
uint64_t core_line;
|
||||
|
||||
ocf_metadata_get_core_info(cache, cache_line,
|
||||
&core_id, &core_line);
|
||||
|
||||
if (!cache->core_obj[core_id].opened)
|
||||
return true;
|
||||
|
||||
if (ocf_cache_line_is_used(cache, cache_line))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * Collect up to clines_no flush candidates from the partition's ALRU
 * list, walking from the tail (oldest) towards the head via lru_prev.
 * Busy lines are skipped but the walk continues. Returns the number of
 * entries written into dst.
 */
static int get_data_to_flush(struct flush_data *dst, uint32_t clines_no,
		struct ocf_cache *cache, struct ocf_user_part *part)
{
	struct alru_cleaning_policy_config *config;
	/* Zero-initialized: the debug log below previously read
	 * policy.meta.alru.timestamp before any metadata fetch -
	 * an uninitialized read (UB) in OCF_ACP_DEBUG builds. */
	struct cleaning_policy_meta policy = {0};
	ocf_cache_line_t cache_line;
	int to_flush = 0;
	uint32_t last_access;

	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;

	cache_line = part->runtime->cleaning.policy.alru.lru_tail;

	last_access = compute_timestamp(config);

	OCF_DEBUG_PARAM(cache, "Last access=%u, timestamp=%u rel=%d",
			last_access, policy.meta.alru.timestamp,
			policy.meta.alru.timestamp < last_access);

	while (to_flush < clines_no &&
			more_blocks_to_flush(cache, cache_line, last_access)) {
		if (!block_is_busy(cache, cache_line)) {
			get_block_to_flush(&dst[to_flush], cache_line, cache);
			to_flush++;
		}

		/* Step towards the head (newer entries). */
		ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
		cache_line = policy.meta.alru.lru_prev;
	}

	OCF_DEBUG_PARAM(cache, "Collected items_to_clean=%u", to_flush);

	return to_flush;
}
|
||||
|
||||
/*
 * Gather stale lines from 'part' and hand them to the cleaner for
 * flushing. When nothing qualifies, stamp cleaning_thread_access so
 * clean_later() can throttle subsequent attempts. Returns the number
 * of lines handed to the cleaner.
 */
static int perform_flushing(int clines_no, struct ocf_cache *cache, uint32_t io_queue,
		struct flush_data *flush_data, struct ocf_user_part *part)
{
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = true,
		.metadata_locked = true,
		.do_sort = true,
		.io_queue = io_queue
	};
	int to_clean = get_data_to_flush(flush_data, clines_no, cache, part);

	if (to_clean <= 0) {
		/* Update timestamp only if there are no items to be cleaned */
		cache->device->runtime_meta->cleaning_thread_access =
				env_ticks_to_secs(env_get_tick_count());
		return to_clean;
	}

	ocf_cleaner_do_flush_data(cache, flush_data, to_clean, &attribs);

	return to_clean;
}
|
||||
|
||||
/*
 * Gate the cleaning pass: refuse while user IO is active, while the
 * wake-up interval since the last pass has not elapsed, or when
 * flush_max_buffers is 0 (cleaning disabled).
 */
static int is_cleanup_possible(ocf_cache_t cache)
{
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
	uint32_t delta;

	if (check_for_io_activity(cache, cfg)) {
		OCF_DEBUG_PARAM(cache, "IO activity detected");
		return false;
	}

	if (clean_later(cache, &delta)) {
		OCF_DEBUG_PARAM(cache,
				"Cleaning policy configured to clean later "
				"delta=%u wake_up=%u", delta,
				cfg->thread_wakeup_time);
		return false;
	}

	//Cleaning policy configured to not clean anything
	return (cfg->flush_max_buffers != 0) ? true : false;
}
|
||||
|
||||
/*
 * One cleaning pass over a single partition: take the metadata write
 * lock (try-lock - bail out if contended), allocate the flush array,
 * flush, release. Returns the number of lines flushed (0 on any
 * early-out). NOTE(review): OCF_METADATA_LOCK_WR_TRY/UNLOCK_WR appear
 * to reference the local 'cache' implicitly - confirm macro contract.
 */
static int cleanup(struct ocf_cache *cache, uint32_t clines_no,
		struct ocf_user_part *part, uint32_t io_queue)
{
	struct flush_data *flush_data;
	size_t flush_data_limit;
	int flushed_blocks = 0;

	if (!is_cleanup_possible(cache))
		return flushed_blocks;

	/* Try-lock: never block IO for background cleaning. */
	if (OCF_METADATA_LOCK_WR_TRY())
		return flushed_blocks;

	OCF_REALLOC_INIT(&flush_data, &flush_data_limit);
	OCF_REALLOC(&flush_data, sizeof(flush_data[0]), clines_no,
			&flush_data_limit);

	if (!flush_data) {
		OCF_METADATA_UNLOCK_WR();
		ocf_cache_log(cache, log_warn, "No memory to allocate flush "
				"data for ALRU cleaning policy");
		return flushed_blocks;
	}

	flushed_blocks = perform_flushing(clines_no, cache, io_queue,
			flush_data, part);

	/* Unlock before freeing the scratch array. */
	OCF_METADATA_UNLOCK_WR();

	OCF_REALLOC_DEINIT(&flush_data, &flush_data_limit);

	return flushed_blocks;
}
|
||||
|
||||
/*
 * Top-level ALRU cleaning entry point. Spends a budget of
 * flush_max_buffers lines across IO classes, visiting the
 * priority-sorted array from the last slot towards the first.
 * Returns the requested delay (ms) before the next invocation:
 * a full wake-up period when the budget was not exhausted, else 0.
 */
int cleaning_alru_perform_cleaning(ocf_cache_t cache, uint32_t io_queue)
{
	struct ocf_user_part *parts[OCF_IO_CLASS_MAX];
	struct alru_cleaning_policy_config *cfg = (void *)
			&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
	uint32_t budget;
	int idx;

	get_parts_sorted(parts, cache);

	budget = cfg->flush_max_buffers;

	for (idx = OCF_IO_CLASS_MAX - 1; idx >= 0; idx--) {
		budget -= cleanup(cache, budget, parts[idx], io_queue);

		if (budget == 0)
			break;
	}

	/* Budget left over - the cache is (mostly) clean; back off. */
	if (budget > 0)
		return cfg->thread_wakeup_time * 1000;

	return 0;
}
|
30
src/cleaning/alru.h
Normal file
30
src/cleaning/alru.h
Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
#ifndef __LAYER_CLEANING_POLICY_ALRU_H__
|
||||
|
||||
#define __LAYER_CLEANING_POLICY_ALRU_H__
|
||||
|
||||
#include "cleaning.h"
|
||||
#include "alru_structs.h"
|
||||
|
||||
void cleaning_policy_alru_setup(struct ocf_cache *cache);
|
||||
int cleaning_policy_alru_initialize(struct ocf_cache *cache,
|
||||
int init_metadata);
|
||||
void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
|
||||
uint32_t cache_line);
|
||||
void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
|
||||
uint32_t cache_line);
|
||||
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
|
||||
uint64_t start_byte, uint64_t end_byte);
|
||||
void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
|
||||
uint32_t cache_line);
|
||||
int cleaning_policy_alru_set_cleaning_param(struct ocf_cache *cache,
|
||||
uint32_t param_id, uint32_t param_value);
|
||||
int cleaning_policy_alru_get_cleaning_param(struct ocf_cache *cache,
|
||||
uint32_t param_id, uint32_t *param_value);
|
||||
int cleaning_alru_perform_cleaning(struct ocf_cache *cache, uint32_t io_queue);
|
||||
|
||||
#endif
|
||||
|
32
src/cleaning/alru_structs.h
Normal file
32
src/cleaning/alru_structs.h
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
#ifndef __CLEANING_ALRU_STRUCTS_H__
|
||||
#define __CLEANING_ALRU_STRUCTS_H__
|
||||
|
||||
#include "ocf/ocf.h"
|
||||
#include "ocf_env.h"
|
||||
|
||||
/* Per-cache-line ALRU metadata (persisted with cache metadata).
 * Links hold cache-line indices; collision_table_entries is the NULL
 * sentinel. Per the add-at-head logic in alru.c, lru_next points
 * towards the tail (older entries) and lru_prev towards the head. */
struct alru_cleaning_policy_meta {
	/* Lru pointers 2*4=8 bytes */
	uint32_t timestamp;
	uint32_t lru_prev;
	uint32_t lru_next;
} __attribute__((packed));

/* ALRU tunables, stored in the per-policy config blob. */
struct alru_cleaning_policy_config {
	uint32_t thread_wakeup_time; /* in seconds */
	uint32_t stale_buffer_time; /* in seconds */
	uint32_t flush_max_buffers; /* in lines */
	uint32_t activity_threshold; /* in milliseconds */
};

/* Per-partition ALRU runtime state. */
struct alru_cleaning_policy {
	env_atomic size;	/* number of lines currently on the list */
	uint32_t lru_head;	/* most recently hot line (sentinel if empty) */
	uint32_t lru_tail;	/* oldest line - cleaned first */
};
|
||||
|
||||
|
||||
#endif
|
137
src/cleaning/cleaning.c
Normal file
137
src/cleaning/cleaning.c
Normal file
@@ -0,0 +1,137 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
|
||||
#include "cleaning.h"
|
||||
#include "alru.h"
|
||||
#include "acp.h"
|
||||
#include "../ocf_priv.h"
|
||||
#include "../ocf_cache_priv.h"
|
||||
#include "../ocf_ctx_priv.h"
|
||||
#include "../mngt/ocf_mngt_common.h"
|
||||
#include "../metadata/metadata.h"
|
||||
|
||||
#define SLEEP_TIME_MS (1000)
|
||||
|
||||
/* Dispatch table for all cleaning policies, indexed by ocf_cleaning_t.
 * Every hook is optional; NOP implements none (no-op cleaning). */
struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
	[ocf_cleaning_nop] = {
		.name = "nop",
	},
	[ocf_cleaning_alru] = {
		.setup = cleaning_policy_alru_setup,
		.init_cache_block = cleaning_policy_alru_init_cache_block,
		.purge_cache_block = cleaning_policy_alru_purge_cache_block,
		.purge_range = cleaning_policy_alru_purge_range,
		.set_hot_cache_line = cleaning_policy_alru_set_hot_cache_line,
		.initialize = cleaning_policy_alru_initialize,
		.set_cleaning_param = cleaning_policy_alru_set_cleaning_param,
		.get_cleaning_param = cleaning_policy_alru_get_cleaning_param,
		.perform_cleaning = cleaning_alru_perform_cleaning,
		.name = "alru",
	},
	[ocf_cleaning_acp] = {
		.setup = cleaning_policy_acp_setup,
		.init_cache_block = cleaning_policy_acp_init_cache_block,
		.purge_cache_block = cleaning_policy_acp_purge_block,
		.purge_range = cleaning_policy_acp_purge_range,
		.set_hot_cache_line = cleaning_policy_acp_set_hot_cache_line,
		.initialize = cleaning_policy_acp_initialize,
		.deinitialize = cleaning_policy_acp_deinitialize,
		.set_cleaning_param = cleaning_policy_acp_set_cleaning_param,
		.get_cleaning_param = cleaning_policy_acp_get_cleaning_param,
		.add_core = cleaning_policy_acp_add_core,
		.remove_core = cleaning_policy_acp_remove_core,
		.perform_cleaning = cleaning_policy_acp_perform_cleaning,
		.name = "acp",
	},
};
|
||||
|
||||
/* Start the background cleaner via the context's cleaner-init hook.
 * Returns the hook's status (0 on success). */
int ocf_start_cleaner(struct ocf_cache *cache)
{
	return ctx_cleaner_init(cache->owner, &cache->cleaner);
}
|
||||
|
||||
/* Stop the background cleaner via the context's cleaner-stop hook. */
void ocf_stop_cleaner(struct ocf_cache *cache)
{
	ctx_cleaner_stop(cache->owner, &cache->cleaner);
}
|
||||
|
||||
/* Attach adapter-private context to the cleaner handle. */
void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv)
{
	OCF_CHECK_NULL(c);
	c->priv = priv;
}
||||
|
||||
/* Retrieve the adapter-private context set by ocf_cleaner_set_priv(). */
void *ocf_cleaner_get_priv(ocf_cleaner_t c)
{
	OCF_CHECK_NULL(c);
	return c->priv;
}
|
||||
|
||||
/* Map a cleaner handle back to its owning cache; the cleaner is an
 * embedded member of struct ocf_cache. */
ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c)
{
	OCF_CHECK_NULL(c);
	return container_of(c, struct ocf_cache, cleaner);
}
|
||||
|
||||
static int _ocf_cleaner_run_check_dirty_inactive(struct ocf_cache *cache)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state))
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < OCF_CORE_MAX; ++i) {
|
||||
if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap))
|
||||
continue;
|
||||
|
||||
if (cache->core_obj[i].opened && env_atomic_read(&(cache->
|
||||
core_runtime_meta[i].dirty_clines))) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
 * One iteration of the background cleaner. Dispatches to the active
 * policy's perform_cleaning hook under the cache write lock.
 * Returns the requested delay (ms) before the next iteration.
 */
uint32_t ocf_cleaner_run(ocf_cleaner_t c, uint32_t io_queue)
{
	struct ocf_cache *cache;
	ocf_cleaning_t clean_type;
	int sleep = SLEEP_TIME_MS;

	cache = ocf_cleaner_get_cache(c);

	/* Do not involve cleaning when cache is not running
	 * (error, etc.).
	 */
	if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
			ocf_mngt_is_cache_locked(cache)) {
		return SLEEP_TIME_MS;
	}

	/* Sleep in case there is management operation in progress. */
	if (env_rwsem_down_write_trylock(&cache->lock) == 0)
		return SLEEP_TIME_MS;

	/* Incomplete cache with only inactive dirty cores - nothing to do. */
	if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
		env_rwsem_up_write(&cache->lock);
		return SLEEP_TIME_MS;
	}

	clean_type = cache->conf_meta->cleaning_policy_type;

	ENV_BUG_ON(clean_type >= ocf_cleaning_max);

	/* Call cleaning. */
	if (cleaning_policy_ops[clean_type].perform_cleaning) {
		sleep = cleaning_policy_ops[clean_type].
				perform_cleaning(cache, io_queue);
	}

	env_rwsem_up_write(&cache->lock);

	return sleep;
}
|
||||
|
75
src/cleaning/cleaning.h
Normal file
75
src/cleaning/cleaning.h
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
|
||||
#ifndef __LAYER_CLEANING_POLICY_H__
|
||||
#define __LAYER_CLEANING_POLICY_H__
|
||||
|
||||
#include "alru_structs.h"
|
||||
#include "nop_structs.h"
|
||||
#include "acp_structs.h"
|
||||
|
||||
#define CLEANING_POLICY_CONFIG_BYTES 256
|
||||
#define CLEANING_POLICY_TYPE_MAX 4
|
||||
|
||||
struct ocf_request;
|
||||
|
||||
/* Per-policy configuration stored in cache conf metadata. 'data' is a
 * raw blob each policy casts to its own config struct (see ALRU).
 * NOTE(review): 'acp' is a named member outside the blob, unlike ALRU
 * which lives inside 'data' - confirm this asymmetry is intended. */
struct cleaning_policy_config {
	uint8_t data[CLEANING_POLICY_CONFIG_BYTES];
	struct acp_cleaning_policy_config acp;
};

/* Per-partition runtime state; one union arm per policy. */
struct cleaning_policy {
	union {
		struct nop_cleaning_policy nop;
		struct alru_cleaning_policy alru;
	} policy;
};

/* Cleaning policy metadata per cache line */
struct cleaning_policy_meta {
	union {
		struct nop_cleaning_policy_meta nop;
		struct alru_cleaning_policy_meta alru;
		struct acp_cleaning_policy_meta acp;
	} meta;
};
|
||||
|
||||
/* Callback interface a cleaning policy implements; every hook is
 * optional (callers check for NULL before dispatching). */
struct cleaning_policy_ops {
	/* One-time default-configuration setup. */
	void (*setup)(struct ocf_cache *cache);
	/* Initialize runtime state; rebuild from metadata if requested. */
	int (*initialize)(struct ocf_cache *cache, int init_metadata);
	void (*deinitialize)(struct ocf_cache *cache);
	int (*add_core)(struct ocf_cache *cache, ocf_core_id_t core_id);
	void (*remove_core)(struct ocf_cache *cache, ocf_core_id_t core_id);
	/* Reset per-line policy metadata to the detached state. */
	void (*init_cache_block)(struct ocf_cache *cache, uint32_t cache_line);
	/* Stop tracking one line. */
	void (*purge_cache_block)(struct ocf_cache *cache,
			uint32_t cache_line);
	/* Stop tracking all lines of a core within a byte range. */
	int (*purge_range)(struct ocf_cache *cache, int core_id,
			uint64_t start_byte, uint64_t end_byte);
	/* Mark a line as recently written (dirty). */
	void (*set_hot_cache_line)(struct ocf_cache *cache,
			uint32_t cache_line);
	int (*set_cleaning_param)(struct ocf_cache *cache,
			uint32_t param_id, uint32_t param_value);
	int (*get_cleaning_param)(struct ocf_cache *cache,
			uint32_t param_id, uint32_t *param_value);
	/**
	 * @brief Performs cleaning.
	 * @return requested time (in ms) of next call
	 */
	int (*perform_cleaning)(struct ocf_cache *cache,
			uint32_t io_queue);
	const char *name;
};
|
||||
|
||||
extern struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max];
|
||||
|
||||
struct ocf_cleaner {
|
||||
void *priv;
|
||||
};
|
||||
|
||||
int ocf_start_cleaner(struct ocf_cache *cache);
|
||||
|
||||
void ocf_stop_cleaner(struct ocf_cache *cache);
|
||||
|
||||
#endif
|
19
src/cleaning/cleaning_priv.h
Normal file
19
src/cleaning/cleaning_priv.h
Normal file
@@ -0,0 +1,19 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
|
||||
/*
 * Log a rejected out-of-range cleaning parameter.
 * Fix: min/max are uint32_t - the format used "%d" (signed), which is
 * a specifier/argument mismatch; "%u" is the matching conversion.
 */
static inline void cleaning_policy_param_error(ocf_cache_t cache,
		const char *param_name, uint32_t min, uint32_t max)
{
	ocf_cache_log(cache, log_err, "Refusing setting flush "
			"parameters because parameter %s is not within range "
			"of <%u-%u>\n", param_name, min, max);
}
|
||||
|
||||
/* Validate VAL against [MIN, MAX]; on violation, log the offending
 * parameter and make the *enclosing function* return -OCF_ERR_INVAL.
 * NOTE(review): relies on a GNU statement expression and an embedded
 * 'return' - callers must be functions returning int. */
#define OCF_CLEANING_CHECK_PARAM(CACHE, VAL, MIN, MAX, NAME) ({ \
	if (VAL < MIN || VAL > MAX) { \
		cleaning_policy_param_error(CACHE, NAME, MIN, MAX); \
		return -OCF_ERR_INVAL; \
	} \
})
|
15
src/cleaning/nop_structs.h
Normal file
15
src/cleaning/nop_structs.h
Normal file
@@ -0,0 +1,15 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
#ifndef __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
|
||||
|
||||
#define __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
|
||||
|
||||
/* NOP policy keeps no per-line metadata.
 * NOTE(review): empty structs are a GNU extension, not standard C. */
struct nop_cleaning_policy_meta {
} __attribute__((packed));

/* NOP policy keeps no per-partition runtime state either. */
struct nop_cleaning_policy {
};
|
||||
|
||||
#endif
|
Reference in New Issue
Block a user