Merge pull request #579 from mmichal10/fix-passive-update

Maintaining runtime metadata structures in failover standby
Robert Baldyga 2021-11-19 16:53:54 +01:00 committed by GitHub
commit 8669a296cc
23 changed files with 1017 additions and 171 deletions

View File

@ -41,6 +41,7 @@ struct cleaning_policy_meta {
struct ocf_cleaner {
struct ocf_refcnt refcnt __attribute__((aligned(64)));
ocf_cleaning_t policy;
void *cleaning_policy_context;
ocf_queue_t io_queue;
ocf_cleaner_end_t end;

View File

@ -91,7 +91,7 @@ static inline void ocf_cleaning_deinitialize(ocf_cache_t cache)
{
ocf_cleaning_t policy;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
@ -110,7 +110,7 @@ static inline int ocf_cleaning_add_core(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return -OCF_ERR_NO_LOCK;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
@ -133,7 +133,7 @@ static inline void ocf_cleaning_remove_core(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
@ -154,7 +154,7 @@ static inline void ocf_cleaning_init_cache_block(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
if (unlikely(!cleaning_policy_ops[policy].init_cache_block))
@ -174,7 +174,7 @@ static inline void ocf_cleaning_purge_cache_block(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
if (unlikely(!cleaning_policy_ops[policy].purge_cache_block))
@ -194,7 +194,7 @@ static inline void ocf_cleaning_purge_range(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
if (unlikely(!cleaning_policy_ops[policy].purge_range))
@ -215,7 +215,7 @@ static inline void ocf_cleaning_set_hot_cache_line(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
if (unlikely(!cleaning_policy_ops[policy].set_hot_cache_line))
@ -259,7 +259,7 @@ static inline void ocf_cleaning_perform_cleaning(ocf_cache_t cache,
if (unlikely(!ocf_refcnt_inc(&cache->cleaner.refcnt)))
return;
policy = cache->conf_meta->cleaning_policy_type;
policy = cache->cleaner.policy;
ENV_BUG_ON(policy >= ocf_cleaning_max);
if (unlikely(!cleaning_policy_ops[policy].perform_cleaning))

View File

@ -0,0 +1,231 @@
/*
* Copyright(c) 2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf_concurrency.h"
#include "../metadata/metadata_internal.h"
#include "../metadata/metadata_io.h"
#include "../ocf_priv.h"
#include "../ocf_request.h"
#include "../utils/utils_alock.h"
#include "../utils/utils_cache_line.h"
struct pio_ctx {
uint32_t segments_number;
struct {
enum ocf_metadata_segment_id id;
uint64_t first_entry;
uint64_t begin;
uint64_t end;
} segments[5];
};
#define OUT_OF_RANGE -1
#define get_pio_ctx(__alock) ((void*)(__alock) + ocf_alock_obj_size())
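/* The pio_ctx lives directly past the base alock object; see the
env_vzalloc(base_size + sizeof(struct pio_ctx)) call in
ocf_pio_concurrency_init() below. */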
static inline bool page_belongs_to_section(struct pio_ctx *pio_ctx,
enum ocf_metadata_segment_id segment_id, uint32_t page)
{
return page >= pio_ctx->segments[segment_id].begin &&
page < pio_ctx->segments[segment_id].end;
}
static inline ocf_cache_line_t page_to_entry(struct pio_ctx *pio_ctx,
enum ocf_metadata_segment_id segment_id, uint32_t page)
{
uint32_t id_within_section;
id_within_section = page - pio_ctx->segments[segment_id].begin;
return pio_ctx->segments[segment_id].first_entry + id_within_section;
}
static ocf_cache_line_t ocf_pio_lock_get_entry(struct ocf_alock *alock,
struct ocf_request *req, uint32_t id)
{
uint32_t page;
enum ocf_metadata_segment_id segment_id;
struct pio_ctx *pio_ctx = get_pio_ctx(alock);
page = req->core_line_first + id;
for (segment_id = 0; segment_id < pio_ctx->segments_number; segment_id++) {
if (page_belongs_to_section(pio_ctx, segment_id, page))
return page_to_entry(pio_ctx, segment_id, page);
}
return OUT_OF_RANGE;
}
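/* Fast path: try-lock each overlapping metadata page without waiting. On the
first contended page, roll back all locks acquired so far and report
OCF_LOCK_NOT_ACQUIRED so the alock framework falls back to the slow path. */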
static int ocf_pio_lock_fast(struct ocf_alock *alock,
struct ocf_request *req, int rw)
{
ocf_cache_line_t entry;
int ret = OCF_LOCK_ACQUIRED;
int32_t i;
ENV_BUG_ON(rw != OCF_WRITE);
for (i = 0; i < req->core_line_count; i++) {
entry = ocf_pio_lock_get_entry(alock, req, i);
if (entry == OUT_OF_RANGE)
continue;
ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
if (ocf_alock_trylock_entry_wr(alock, entry)) {
ocf_alock_mark_index_locked(alock, req, i, true);
} else {
ret = OCF_LOCK_NOT_ACQUIRED;
break;
}
}
if (ret != OCF_LOCK_NOT_ACQUIRED)
return ret;
/* Request is not locked, discard acquired locks */
for (; i >= 0; i--) {
entry = ocf_pio_lock_get_entry(alock, req, i);
if (entry == OUT_OF_RANGE)
continue;
if (ocf_alock_is_index_locked(alock, req, i)) {
ocf_alock_unlock_one_wr(alock, entry);
ocf_alock_mark_index_locked(alock, req, i, false);
}
}
return ret;
}
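/* Slow path: acquire each page lock or enqueue the request on the entry's
waitlist; the cmpl callback fires once the last pending lock is granted. */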
static int ocf_pio_lock_slow(struct ocf_alock *alock,
struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl)
{
int32_t i;
ocf_cache_line_t entry;
int ret = OCF_LOCK_ACQUIRED;
ENV_BUG_ON(rw != OCF_WRITE);
for (i = 0; i < req->core_line_count; i++) {
entry = ocf_pio_lock_get_entry(alock, req, i);
if (entry == OUT_OF_RANGE)
continue;
ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
ENV_BUG();
/* lock not acquired and not added to wait list */
ret = -OCF_ERR_NO_MEM;
goto err;
}
}
return ret;
err:
for (; i >= 0; i--) {
entry = ocf_pio_lock_get_entry(alock, req, i);
if (entry == OUT_OF_RANGE)
continue;
ocf_alock_waitlist_remove_entry(alock, req, i, entry, OCF_WRITE);
}
return ret;
}
static struct ocf_alock_lock_cbs ocf_pio_conc_cbs = {
.lock_entries_fast = ocf_pio_lock_fast,
.lock_entries_slow = ocf_pio_lock_slow
};
int ocf_pio_async_lock(struct ocf_alock *alock, struct ocf_request *req,
ocf_req_async_lock_cb cmpl)
{
return ocf_alock_lock_wr(alock, req, cmpl);
}
void ocf_pio_async_unlock(struct ocf_alock *alock, struct ocf_request *req)
{
ocf_cache_line_t entry;
int i;
for (i = 0; i < req->core_line_count; i++) {
if (!ocf_alock_is_index_locked(alock, req, i))
continue;
entry = ocf_pio_lock_get_entry(alock, req, i);
if (entry == OUT_OF_RANGE)
continue;
ocf_alock_unlock_one_wr(alock, entry);
ocf_alock_mark_index_locked(alock, req, i, false);
}
req->alock_status = 0;
}
#define ALLOCATOR_NAME_FMT "ocf_%s_pio_conc"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
int ocf_pio_concurrency_init(struct ocf_alock **self, ocf_cache_t cache)
{
struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
struct ocf_alock *alock;
struct pio_ctx *pio_ctx;
size_t base_size = ocf_alock_obj_size();
char name[ALLOCATOR_NAME_MAX];
int ret;
uint32_t pages_to_alloc = 0;
enum ocf_metadata_segment_id update_segments[] = {
metadata_segment_sb_config,
metadata_segment_part_config,
metadata_segment_core_config,
metadata_segment_core_uuid,
metadata_segment_collision,
};
int i;
ENV_BUILD_BUG_ON(
ARRAY_SIZE(update_segments) > ARRAY_SIZE(pio_ctx->segments));
ret = snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
ocf_cache_get_name(cache));
if (ret < 0)
return ret;
if (ret >= ALLOCATOR_NAME_MAX)
return -OCF_ERR_NO_MEM;
alock = env_vzalloc(base_size + sizeof(struct pio_ctx));
if (!alock)
return -OCF_ERR_NO_MEM;
pio_ctx = get_pio_ctx(alock);
pio_ctx->segments_number = ARRAY_SIZE(update_segments);
for (i = 0; i < pio_ctx->segments_number; i++) {
struct ocf_metadata_raw *raw = &(ctrl->raw_desc[update_segments[i]]);
pio_ctx->segments[i].first_entry = pages_to_alloc;
pio_ctx->segments[i].id = update_segments[i];
pio_ctx->segments[i].begin = raw->ssd_pages_offset;
pio_ctx->segments[i].end = raw->ssd_pages_offset + raw->ssd_pages;
pages_to_alloc += raw->ssd_pages;
}
ret = ocf_alock_init_inplace(alock, pages_to_alloc, name, &ocf_pio_conc_cbs, cache);
if (ret) {
env_vfree(alock);
return ret;
}
*self = alock;
return 0;
}
void ocf_pio_concurrency_deinit(struct ocf_alock **self)
{
ocf_alock_deinit(self);
}

View File

@ -0,0 +1,20 @@
/*
* Copyright(c) 2021-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __OCF_PIO_CONCURRENCY_H_
#define __OCF_PIO_CONCURRENCY_H_
#include "../utils/utils_alock.h"
int ocf_pio_async_lock(struct ocf_alock *alock, struct ocf_request *req,
ocf_req_async_lock_cb cmpl);
void ocf_pio_async_unlock(struct ocf_alock *alock, struct ocf_request *req);
int ocf_pio_concurrency_init(struct ocf_alock **self, ocf_cache_t cache);
void ocf_pio_concurrency_deinit(struct ocf_alock **self);
#endif
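
A minimal usage sketch of this API, mirroring the lifecycle implemented by
ocf_metadata_passive_update() later in this commit; `on_pages_locked` is a
hypothetical callback name and the request setup is abbreviated:

static int sketch_passive_update(ocf_cache_t cache, struct ocf_request *req)
{
	/* Try to lock the metadata pages covered by req */
	int lock = ocf_pio_async_lock(cache->standby.concurrency, req,
			on_pages_locked);
	if (lock < 0)
		return lock; /* locking error */
	if (lock == OCF_LOCK_ACQUIRED)
		on_pages_locked(req); /* fast path: locked synchronously */
	/* Otherwise on_pages_locked() fires once all page locks are granted.
	   After the update completes:
	   ocf_pio_async_unlock(cache->standby.concurrency, req); */
	return 0;
}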

View File

@ -1566,6 +1566,31 @@ bool ocf_metadata_clear_valid_if_clean(struct ocf_cache *cache,
}
}
void ocf_metadata_clear_dirty_if_invalid(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop)
{
switch (cache->metadata.line_size) {
case ocf_cache_line_size_4:
return _ocf_metadata_clear_dirty_if_invalid_u8(cache,
line, start, stop);
case ocf_cache_line_size_8:
return _ocf_metadata_clear_dirty_if_invalid_u16(cache,
line, start, stop);
case ocf_cache_line_size_16:
return _ocf_metadata_clear_dirty_if_invalid_u32(cache,
line, start, stop);
case ocf_cache_line_size_32:
return _ocf_metadata_clear_dirty_if_invalid_u64(cache,
line, start, stop);
case ocf_cache_line_size_64:
return _ocf_metadata_clear_dirty_if_invalid_u128(cache,
line, start, stop);
case ocf_cache_line_size_none:
default:
ENV_BUG();
}
}
int ocf_metadata_init(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size)
{
@ -2019,51 +2044,3 @@ void ocf_metadata_probe_cores(ocf_ctx_t ctx, ocf_volume_t volume,
ocf_metadata_query_cores(ctx, volume, uuids, uuids_count,
ocf_metadata_probe_cores_end, context);
}
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io)
{
struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
ctx_data_t *data = ocf_io_get_data(io);
uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(io->bytes);
enum ocf_metadata_segment_id update_segments[] = {
metadata_segment_sb_config,
metadata_segment_part_config,
metadata_segment_core_config,
metadata_segment_core_uuid,
metadata_segment_collision,
};
int i;
if (io->dir == OCF_READ)
return 0;
if (io->addr % PAGE_SIZE || io->bytes % PAGE_SIZE) {
ocf_cache_log(cache, log_crit,
"Metadata update not aligned to page size!\n");
return -OCF_ERR_INVAL;
}
if (io_start_page >= ctrl->count_pages)
return 0;
for (i = 0; i < ARRAY_SIZE(update_segments); i++) {
enum ocf_metadata_segment_id seg = update_segments[i];
struct ocf_metadata_raw *raw = &(ctrl->raw_desc[seg]);
uint64_t raw_start_page = raw->ssd_pages_offset;
uint64_t raw_end_page = raw_start_page + raw->ssd_pages;
uint64_t overlap_start = OCF_MAX(io_start_page, raw_start_page);
uint64_t overlap_end = OCF_MIN(io_end_page, raw_end_page);
uint64_t overlap_start_data = overlap_start - io_start_page;
if (overlap_start < overlap_end) {
ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
PAGES_TO_BYTES(overlap_start_data));
ocf_metadata_raw_update(cache, raw, data,
overlap_start - raw_start_page,
overlap_end - overlap_start);
}
}
return 0;
}

View File

@ -18,6 +18,7 @@
#include "metadata_collision.h"
#include "metadata_core.h"
#include "metadata_misc.h"
#include "metadata_passive_update.h"
#define INVALID 0
#define VALID 1
@ -226,6 +227,4 @@ static inline ocf_cache_line_t ocf_metadata_collision_table_entries(
return cache->device->collision_table_entries;
}
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io);
#endif /* METADATA_H_ */

View File

@ -245,6 +245,24 @@ static bool _ocf_metadata_clear_valid_if_clean_##type(struct ocf_cache *cache, \
return false; \
} \
} \
\
static void _ocf_metadata_clear_dirty_if_invalid_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_ctrl *ctrl = \
(struct ocf_metadata_ctrl *) cache->metadata.priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line); \
\
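/* Keep dirty set only where the sector is also valid within \
   [start, stop]; bits outside the mask stay unchanged. */ \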
map[line].dirty &= (mask & map[line].valid) | (~mask); \
} \
#define ocf_metadata_bit_funcs(type) \
ocf_metadata_bit_struct(type); \

View File

@ -0,0 +1,388 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_passive_update.h"
#include "metadata_collision.h"
#include "metadata_segment_id.h"
#include "metadata_internal.h"
#include "metadata_io.h"
#include "metadata_raw.h"
#include "metadata_segment.h"
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_def_priv.h"
#include "../ocf_priv.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_pipeline.h"
#include "../concurrency/ocf_pio_concurrency.h"
#include "../engine/engine_common.h"
#define MAX_PASSIVE_IO_SIZE (32*MiB)
static inline void _reset_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
/* The cacheline used to be dirty but it is not anymore, so it needs to be
moved to a clean lru list */
ocf_lru_clean_cline(cache, &cache->user_parts[PARTITION_DEFAULT].part,
cline);
ocf_lru_rm_cline(cache, cline);
ocf_metadata_set_partition_id(cache, cline, PARTITION_FREELIST);
}
static inline void remove_from_freelist(ocf_cache_t cache,
ocf_cache_line_t cline)
{
ocf_part_id_t lru_list;
struct ocf_lru_list *list;
lru_list = (cline % OCF_NUM_LRU_LISTS);
list = ocf_lru_get_list(&cache->free, lru_list, true);
OCF_METADATA_LRU_WR_LOCK(cline);
ocf_lru_remove_locked(cache, list, cline);
OCF_METADATA_LRU_WR_UNLOCK(cline);
}
static inline void remove_from_default(ocf_cache_t cache,
ocf_cache_line_t cline)
{
ocf_part_id_t part_id = PARTITION_DEFAULT;
ocf_part_id_t lru_list;
struct ocf_lru_list *list;
lru_list = (cline % OCF_NUM_LRU_LISTS);
list = ocf_lru_get_list(&cache->user_parts[part_id].part, lru_list, false);
OCF_METADATA_LRU_WR_LOCK(cline);
ocf_lru_remove_locked(cache, list, cline);
OCF_METADATA_LRU_WR_UNLOCK(cline);
env_atomic_dec(&cache->user_parts[part_id].part.runtime->curr_size);
}
static void handle_previously_invalid(ocf_cache_t cache,
ocf_cache_line_t cline)
{
ocf_core_id_t core_id;
uint64_t core_line;
uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
/* Pio lock provides exclusive access to the collision page, thus neither
the mapping nor the status bits can be changed by a concurrent thread */
ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
if (metadata_test_dirty(cache, cline) && core_id < OCF_CORE_MAX) {
ENV_BUG_ON(!metadata_test_valid_any(cache, cline));
/* Moving cline from the freelist to the default partition */
remove_from_freelist(cache, cline);
ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
core_line);
OCF_METADATA_LRU_WR_LOCK(cline);
ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
OCF_METADATA_LRU_WR_UNLOCK(cline);
ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
core_line);
ocf_cleaning_init_cache_block(cache, cline);
ocf_cleaning_set_hot_cache_line(cache, cline);
} else {
/* Cline stays on the freelist */
/* To prevent random values in the metadata, fill it with the defaults */
metadata_init_status_bits(cache, cline);
ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX);
}
}
static void handle_previously_valid(ocf_cache_t cache,
ocf_cache_line_t cline)
{
ocf_core_id_t core_id;
uint64_t core_line;
uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
/* Pio lock provides exclusive access to the collision page, thus neither
the mapping nor the status bits can be changed by a concurrent thread */
ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
if (metadata_test_dirty(cache, cline) && core_id < OCF_CORE_MAX) {
/* Cline stays on the default partition */
ENV_BUG_ON(!metadata_test_valid_any(cache, cline));
remove_from_default(cache, cline);
ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
core_line);
OCF_METADATA_LRU_WR_LOCK(cline);
ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
OCF_METADATA_LRU_WR_UNLOCK(cline);
ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
core_line);
ocf_cleaning_set_hot_cache_line(cache, cline);
} else {
/* Moving cline from the default partition to the freelist */
ocf_cleaning_purge_cache_block(cache, cline);
_reset_cline(cache, cline);
}
}
static inline void update_list_segment(ocf_cache_t cache,
ocf_cache_line_t start, ocf_cache_line_t count)
{
ocf_cache_line_t cline, end;
for (cline = start, end = start + count; cline < end; cline++) {
ocf_part_id_t part_id;
metadata_clear_dirty_if_invalid(cache, cline);
metadata_clear_valid_if_clean(cache, cline);
part_id = ocf_metadata_get_partition_id(cache, cline);
switch (part_id) {
case PARTITION_FREELIST:
handle_previously_invalid(cache, cline);
break;
case PARTITION_DEFAULT:
handle_previously_valid(cache, cline);
break;
default:
ocf_cache_log(cache, log_crit, "Passive update: invalid "
"part id for cacheline %u: %hu\n", cline, part_id);
ENV_BUG();
break;
}
}
}
static void _dec_core_stats(ocf_core_t core)
{
ocf_part_id_t part = PARTITION_DEFAULT;
env_atomic *core_occupancy_counter = &core->runtime_meta->cached_clines;
env_atomic *part_occupancy_counter =
&core->runtime_meta->part_counters[part].cached_clines;
env_atomic *core_dirty_counter = &core->runtime_meta->dirty_clines;
env_atomic *part_dirty_counter =
&core->runtime_meta->part_counters[part].dirty_clines;
ENV_BUG_ON(env_atomic_dec_return(core_occupancy_counter) < 0);
ENV_BUG_ON(env_atomic_dec_return(part_occupancy_counter) < 0);
ENV_BUG_ON(env_atomic_dec_return(core_dirty_counter) < 0);
ENV_BUG_ON(env_atomic_dec_return(part_dirty_counter) < 0);
}
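/* Remove the old mapping (and its per-core counters) for every cacheline
whose collision entry is about to be overwritten; lines still mapped after
the update are re-accounted in update_list_segment(). */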
static void cleanup_old_mapping(ocf_cache_t cache, ocf_cache_line_t start,
ocf_cache_line_t count)
{
ocf_cache_line_t cline, end;
uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
for (cline = start, end = start + count; cline < end; cline++) {
ocf_core_id_t core_id;
uint64_t core_line;
ocf_core_t core;
ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
ENV_BUG_ON(core_id > OCF_CORE_ID_INVALID);
core = ocf_cache_get_core(cache, core_id);
if (!core)
continue;
_dec_core_stats(core);
ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
core_line);
ocf_metadata_remove_from_collision(cache, cline, PARTITION_DEFAULT);
ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
core_line);
}
}
static int passive_io_resume(struct ocf_request *req)
{
ocf_cache_t cache = req->cache;
struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
struct ocf_io *io = (struct ocf_io*) req->data;
ctx_data_t *data = ocf_io_get_data(io);
uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
uint64_t io_pages_count = BYTES_TO_PAGES(io->bytes);
uint64_t io_end_page = io_start_page + io_pages_count - 1;
ocf_end_io_t io_cmpl = req->master_io_req;
ocf_cache_line_t cache_entries = ocf_metadata_collision_table_entries(cache);
enum ocf_metadata_segment_id update_segments[] = {
metadata_segment_sb_config,
metadata_segment_part_config,
metadata_segment_core_config,
metadata_segment_core_uuid,
metadata_segment_collision,
};
int i;
for (i = 0; i < ARRAY_SIZE(update_segments); i++) {
ocf_cache_line_t cache_line_count, cache_line_range_start;
enum ocf_metadata_segment_id seg = update_segments[i];
struct ocf_metadata_raw *raw = &(ctrl->raw_desc[seg]);
uint64_t raw_start_page = raw->ssd_pages_offset;
uint64_t raw_end_page = raw_start_page + raw->ssd_pages - 1;
uint64_t overlap_start = OCF_MAX(io_start_page, raw_start_page);
uint64_t overlap_end = OCF_MIN(io_end_page, raw_end_page);
uint64_t overlap_start_data = overlap_start - io_start_page;
uint64_t overlap_page;
uint64_t overlap_count;
if (overlap_start > overlap_end)
continue;
overlap_page = overlap_start - raw_start_page;
overlap_count = overlap_end - overlap_start + 1;
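/* Worked example (hypothetical numbers): io pages [10..17], segment pages
   [16..31] -> overlap [16..17], so overlap_page = 0, overlap_count = 2 and
   overlap_start_data = 6 pages into the IO buffer. */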
if (seg == metadata_segment_collision) {
/* The range of cachelines with potentially updated collision
section */
cache_line_range_start = overlap_page * raw->entries_in_page;
cache_line_count = raw->entries_in_page * overlap_count;
/* The last page of collision section may contain fewer entries than
entries_in_page */
cache_line_count = OCF_MIN(cache_entries - cache_line_range_start,
cache_line_count);
/* The collision is not updated yet but the range of affected
cachelines is already known. Remove the old mapping-related info
from the metadata */
cleanup_old_mapping(cache, cache_line_range_start,
cache_line_count);
}
ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
PAGES_TO_BYTES(overlap_start_data));
ocf_metadata_raw_update(cache, raw, data, overlap_page, overlap_count);
if (seg != metadata_segment_collision)
continue;
update_list_segment(cache, cache_line_range_start,
cache_line_count);
}
ocf_pio_async_unlock(req->cache->standby.concurrency, req);
io_cmpl(io, 0);
env_allocator_del(cache->standby.allocator, req);
return 0;
}
static struct ocf_io_if passive_io_restart_if = {
.read = passive_io_resume,
.write = passive_io_resume,
};
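/* Page locks may be granted from another request's unlock path, so bounce
the request back to its own queue and run passive_io_resume() from there. */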
static void passive_io_page_lock_acquired(struct ocf_request *req)
{
ocf_engine_push_req_front(req, true);
}
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
ocf_end_io_t io_cmpl)
{
struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
struct ocf_request *req;
uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(io->bytes);
int lock = 0;
if (io->dir == OCF_READ) {
io_cmpl(io, 0);
return 0;
}
if (io_start_page >= ctrl->count_pages) {
io_cmpl(io, 0);
return 0;
}
if (io->addr % PAGE_SIZE || io->bytes % PAGE_SIZE) {
ocf_cache_log(cache, log_warn,
"Metadata update not aligned to page size!\n");
io_cmpl(io, -OCF_ERR_INVAL);
return -OCF_ERR_INVAL;
}
if (io->bytes > MAX_PASSIVE_IO_SIZE) {
//FIXME handle greater IOs
ocf_cache_log(cache, log_warn,
"IO size exceedes max supported size!\n");
io_cmpl(io, -OCF_ERR_INVAL);
return -OCF_ERR_INVAL;
}
req = (struct ocf_request*)env_allocator_new(cache->standby.allocator);
if (!req) {
io_cmpl(io, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
req->io_queue = io->io_queue;
req->info.internal = true;
req->io_if = &passive_io_restart_if;
req->rw = OCF_WRITE;
req->data = io;
req->master_io_req = io_cmpl;
req->cache = cache;
env_atomic_set(&req->lock_remaining, 0);
req->core_line_first = io_start_page;
req->core_line_count = io_end_page - io_start_page;
req->alock_status = (uint8_t*)&req->map;
lock = ocf_pio_async_lock(req->cache->standby.concurrency,
req, passive_io_page_lock_acquired);
if (lock < 0) {
env_allocator_del(cache->standby.allocator, req);
io_cmpl(io, lock);
return lock;
}
if (lock == OCF_LOCK_ACQUIRED)
passive_io_resume(req);
return 0;
}
int ocf_metadata_passive_io_ctx_init(ocf_cache_t cache)
{
char *name = "ocf_cache_pio";
size_t element_size, header_size, size;
header_size = sizeof(struct ocf_request);
/* Only one bit per page is required. Since `alock_status` has `uint8_t*`
type, one entry can carry status for 8 pages. */
element_size = OCF_DIV_ROUND_UP(BYTES_TO_PAGES(MAX_PASSIVE_IO_SIZE), 8);
size = header_size + element_size;
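/* Assuming 4 KiB metadata pages: 32 MiB / 4 KiB = 8192 pages, so
   8192 / 8 = 1024 bytes of lock-status bits per request. */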
cache->standby.allocator = env_allocator_create(size, name, true);
if (cache->standby.allocator == NULL)
return -1;
return 0;
}
void ocf_metadata_passive_io_ctx_deinit(ocf_cache_t cache)
{
env_allocator_destroy(cache->standby.allocator);
}

View File

@ -0,0 +1,16 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __OCF_METADATA_PASSIVE_IO_H__
#define __OCF_METADATA_PASSIVE_IO_H__
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
ocf_end_io_t io_cmpl);
int ocf_metadata_passive_io_ctx_init(ocf_cache_t cache);
void ocf_metadata_passive_io_ctx_deinit(ocf_cache_t cache);
#endif

View File

@ -191,7 +191,6 @@ static void *_raw_ram_access(ocf_cache_t cache,
static int _raw_ram_drain_page(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ctx_data_t *data, uint32_t page)
{
uint32_t size = raw->entry_size * raw->entries_in_page;
ocf_cache_line_t line;
@ -222,6 +221,7 @@ static int _raw_ram_update(ocf_cache_t cache,
_raw_ram_drain_page(cache, raw, data, page + i);
return 0;
}
struct _raw_ram_load_all_context {

View File

@ -40,6 +40,8 @@ bool ocf_metadata_test_and_clear_valid(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all);
bool ocf_metadata_clear_valid_if_clean(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
void ocf_metadata_clear_dirty_if_invalid(struct ocf_cache *cache,
ocf_cache_line_t line, uint8_t start, uint8_t stop);
static inline void metadata_init_status_bits(struct ocf_cache *cache,
ocf_cache_line_t line)
@ -228,10 +230,10 @@ static inline void metadata_clear_valid(struct ocf_cache *cache,
ocf_metadata_clear_valid(cache, line, 0, ocf_line_end_sector(cache));
}
static inline void metadata_clear_valid_if_clean(struct ocf_cache *cache,
static inline bool metadata_clear_valid_if_clean(struct ocf_cache *cache,
ocf_cache_line_t line)
{
ocf_metadata_clear_valid_if_clean(cache, line, 0,
return ocf_metadata_clear_valid_if_clean(cache, line, 0,
ocf_line_end_sector(cache));
}
@ -249,6 +251,13 @@ static inline bool metadata_test_and_set_valid(struct ocf_cache *cache,
ocf_line_end_sector(cache), true);
}
static inline void metadata_clear_dirty_if_invalid(struct ocf_cache *cache,
ocf_cache_line_t line)
{
ocf_metadata_clear_dirty_if_invalid(cache, line, 0,
ocf_line_end_sector(cache));
}
/*******************************************************************************
* Valid - Sector Implementation
******************************************************************************/

View File

@ -25,6 +25,7 @@
#include "../ocf_ctx_priv.h"
#include "../cleaning/cleaning.h"
#include "../promotion/ops.h"
#include "../concurrency/ocf_pio_concurrency.h"
#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)
@ -144,6 +145,10 @@ struct ocf_cache_attach_context {
*/
bool concurrency_inited : 1;
bool pio_mpool : 1;
bool pio_concurrency : 1;
} flags;
struct {
@ -210,17 +215,23 @@ static void __init_parts_attached(ocf_cache_t cache)
ocf_lru_init(cache, &cache->free);
}
static void __populate_free(ocf_cache_t cache)
static void __populate_free_unsafe(ocf_cache_t cache)
{
uint64_t free_clines = ocf_metadata_collision_table_entries(cache);
ocf_lru_populate(cache, free_clines, false);
}
static void __populate_free_safe(ocf_cache_t cache)
{
uint64_t free_clines = ocf_metadata_collision_table_entries(cache) -
ocf_get_cache_occupancy(cache);
ocf_lru_populate(cache, free_clines);
ocf_lru_populate(cache, free_clines, true);
}
static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
{
ocf_cleaning_t cleaning_policy = ocf_cleaning_default;
int i;
OCF_ASSERT_PLUGGED(cache);
@ -230,9 +241,7 @@ static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
for (i = 0; i < ocf_cleaning_max; i++)
ocf_cleaning_setup(cache, i);
cache->conf_meta->cleaning_policy_type = ocf_cleaning_default;
return ocf_cleaning_initialize(cache, cleaning_policy, 1);
return ocf_cleaning_initialize(cache, cache->cleaner.policy, 1);
}
static void __deinit_cleaning_policy(ocf_cache_t cache)
@ -307,7 +316,7 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache)
ocf_metadata_init_hash_table(cache);
ocf_metadata_init_collision(cache);
__init_parts_attached(cache);
__populate_free(cache);
__populate_free_safe(cache);
result = __init_cleaning_policy(cache);
if (result) {
@ -321,9 +330,11 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache)
return 0;
}
static void init_attached_data_structures_recovery(ocf_cache_t cache)
static void init_attached_data_structures_recovery(ocf_cache_t cache,
bool init_collision)
{
ocf_metadata_init_hash_table(cache);
if (init_collision)
ocf_metadata_init_collision(cache);
__init_parts_attached(cache);
__reset_stats(cache);
@ -427,8 +438,8 @@ static void _ocf_mngt_load_add_cores(ocf_pipeline_t pipeline,
} else {
core->opened = true;
}
}
}
env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
core->added = true;
cache->conf_meta->core_count++;
@ -481,111 +492,94 @@ err:
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_START_CACHE_FAIL);
}
static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t cache_line)
{
ocf_core_t core = ocf_cache_get_core(cache, core_id);
ocf_part_id_t part_id;
ocf_cache_line_t hash_index;
struct ocf_part_runtime *part;
part_id = PARTITION_DEFAULT;
part = cache->user_parts[part_id].part.runtime;
ocf_metadata_set_partition_id(cache, cache_line, part_id);
env_atomic_inc(&part->curr_size);
hash_index = ocf_metadata_hash_func(cache, core_line, core_id);
ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
cache_line);
ocf_lru_init_cline(cache, cache_line);
ocf_lru_add(cache, cache_line);
env_atomic_inc(&core->runtime_meta->cached_clines);
env_atomic_inc(&core->runtime_meta->
part_counters[part_id].cached_clines);
if (metadata_test_dirty(cache, cache_line)) {
env_atomic_inc(&core->runtime_meta->dirty_clines);
env_atomic_inc(&core->runtime_meta->
part_counters[part_id].dirty_clines);
if (!env_atomic64_read(&core->runtime_meta->dirty_since))
env_atomic64_cmpxchg(&core->runtime_meta->dirty_since, 0,
env_ticks_to_secs(env_get_tick_count()));
}
}
static void _recovery_reset_cline_metadata(struct ocf_cache *cache,
ocf_cache_line_t cline)
{
ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX);
metadata_clear_valid(cache, cline);
metadata_init_status_bits(cache, cline);
ocf_cleaning_init_cache_block(cache, cline);
}
static void _ocf_mngt_recovery_rebuild_metadata(ocf_cache_t cache)
static void _ocf_mngt_rebuild_metadata(ocf_cache_t cache, bool initialized)
{
ocf_cache_line_t cline;
ocf_core_id_t core_id;
uint64_t core_line;
unsigned char step = 0;
bool dirty_only = !ocf_volume_is_atomic(ocf_cache_get_volume(cache));
const uint64_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
ocf_metadata_start_exclusive_access(&cache->metadata.lock);
for (cline = 0; cline < collision_table_entries; cline++) {
ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
if (core_id != OCF_CORE_MAX &&
(!dirty_only || metadata_test_dirty(cache,
cline))) {
/* Rebuild metadata for mapped cache line */
_recovery_rebuild_cline_metadata(cache, core_id,
core_line, cline);
if (dirty_only)
metadata_clear_valid_if_clean(cache, cline);
} else {
/* Reset metadata for not mapped or clean cache line */
_recovery_reset_cline_metadata(cache, cline);
}
bool any_valid = true;
OCF_COND_RESCHED(step, 128);
ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
if (!initialized)
metadata_clear_dirty_if_invalid(cache, cline);
any_valid = metadata_clear_valid_if_clean(cache, cline);
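/* any_valid tells whether the line still has valid sectors after the
   clean ones were invalidated. */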
if (!any_valid || core_id >= OCF_CORE_MAX) {
/* Reset metadata for not mapped or clean cache line */
_recovery_reset_cline_metadata(cache, cline);
continue;
}
/* Rebuild metadata for mapped cache line */
ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
}
ocf_metadata_end_exclusive_access(&cache->metadata.lock);
}
static void _ocf_mngt_recovery_rebuild_metadata(ocf_cache_t cache)
{
_ocf_mngt_rebuild_metadata(cache, false);
}
static void _ocf_mngt_bind_rebuild_metadata(ocf_cache_t cache)
{
_ocf_mngt_rebuild_metadata(cache, true);
}
static inline ocf_error_t _ocf_init_cleaning_policy(ocf_cache_t cache,
ocf_cleaning_t cleaning_policy,
enum ocf_metadata_shutdown_status shutdown_status)
{
ocf_error_t result;
if (shutdown_status == ocf_metadata_clean_shutdown)
result = ocf_cleaning_initialize(cache, cleaning_policy, 0);
else
result = ocf_cleaning_initialize(cache, cleaning_policy, 1);
if (result)
ocf_cache_log(cache, log_err, "Cannot initialize cleaning policy\n");
return result;
}
static void _ocf_mngt_load_post_metadata_load(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_cleaning_t cleaning_policy;
ocf_error_t result;
if (context->metadata.shutdown_status != ocf_metadata_clean_shutdown) {
_ocf_mngt_recovery_rebuild_metadata(cache);
__populate_free(cache);
__populate_free_safe(cache);
}
cleaning_policy = cache->conf_meta->cleaning_policy_type;
result = _ocf_init_cleaning_policy(cache, cache->cleaner.policy,
context->metadata.shutdown_status);
if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
result = ocf_cleaning_initialize(cache, cleaning_policy, 0);
else
result = ocf_cleaning_initialize(cache, cleaning_policy, 1);
if (result) {
ocf_cache_log(cache, log_err,
"Cannot initialize cleaning policy\n");
if (result)
OCF_PL_FINISH_RET(pipeline, result);
}
ocf_pipeline_next(pipeline);
}
@ -624,7 +618,7 @@ static void _ocf_mngt_load_init_instance_recovery(
{
ocf_cache_t cache = context->cache;
init_attached_data_structures_recovery(cache);
init_attached_data_structures_recovery(cache, true);
ocf_cache_log(cache, log_info, "Initiating recovery sequence...\n");
@ -1224,6 +1218,7 @@ static void _ocf_mngt_cache_init(ocf_cache_t cache,
cache->conf_meta->cache_mode = params->metadata.cache_mode;
cache->conf_meta->metadata_layout = params->metadata.layout;
cache->conf_meta->promotion_policy_type = params->metadata.promotion_policy;
__set_cleaning_policy(cache, ocf_cleaning_default);
INIT_LIST_HEAD(&cache->io_queues);
@ -1412,6 +1407,7 @@ static void _ocf_mngt_load_superblock_complete(void *priv, int error)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_cleaning_t loaded_clean_policy = cache->conf_meta->cleaning_policy_type;
if (cache->conf_meta->cachelines !=
ocf_metadata_get_cachelines_count(cache)) {
@ -1421,6 +1417,15 @@ static void _ocf_mngt_load_superblock_complete(void *priv, int error)
-OCF_ERR_START_CACHE_FAIL);
}
if (loaded_clean_policy >= ocf_cleaning_max) {
ocf_cache_log(cache, log_err,
"ERROR: Invalid cleaning policy!\n");
OCF_PL_FINISH_RET(context->pipeline,
-OCF_ERR_START_CACHE_FAIL);
}
__set_cleaning_policy(cache, loaded_clean_policy);
if (error) {
ocf_cache_log(cache, log_err,
"ERROR: Cannot load cache state\n");
@ -1646,6 +1651,12 @@ static void _ocf_mngt_attach_handle_error(
cache->device = NULL;
}
if (context->flags.pio_concurrency)
ocf_pio_concurrency_deinit(&cache->standby.concurrency);
if (context->flags.pio_mpool)
ocf_metadata_passive_io_ctx_deinit(cache);
ocf_pipeline_destroy(cache->stop_pipeline);
}
@ -2025,6 +2036,22 @@ static void _ocf_mngt_load_unsafe_complete(void *priv, int error)
ocf_pipeline_next(context->pipeline);
}
static void _ocf_mngt_standby_init_properties(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
OCF_ASSERT_PLUGGED(cache);
context->metadata.shutdown_status = ocf_metadata_dirty_shutdown;
context->metadata.dirty_flushed = DIRTY_FLUSHED;
context->metadata.line_size = context->cfg.cache_line_size ?:
cache->metadata.line_size;
ocf_pipeline_next(pipeline);
}
static void _ocf_mngt_load_metadata_unsafe(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
@ -2036,15 +2063,62 @@ static void _ocf_mngt_load_metadata_unsafe(ocf_pipeline_t pipeline,
_ocf_mngt_load_unsafe_complete, context);
}
static void _ocf_mngt_bind_prepare_mempool(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
int result;
result = ocf_metadata_passive_io_ctx_init(cache);
if (!result)
context->flags.pio_mpool = true;
OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result);
}
static void _ocf_mngt_bind_init_attached_structures(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
int result;
init_attached_data_structures_recovery(cache);
init_attached_data_structures_recovery(cache, false);
ocf_pipeline_next(context->pipeline);
result = ocf_pio_concurrency_init(&cache->standby.concurrency, cache);
if (!result)
context->flags.pio_concurrency = true;
OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result);
}
static void _ocf_mngt_bind_recovery_unsafe(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
_ocf_mngt_bind_rebuild_metadata(cache);
__populate_free_unsafe(cache);
ocf_pipeline_next(pipeline);
}
static void _ocf_mngt_bind_init_cleaning(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_attach_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_error_t result;
result = _ocf_init_cleaning_policy(cache, cache->cleaner.policy,
context->metadata.shutdown_status);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
ocf_pipeline_next(pipeline);
}
static void _ocf_mngt_bind_post_init(ocf_pipeline_t pipeline,
@ -2066,12 +2140,16 @@ struct ocf_pipeline_properties _ocf_mngt_cache_standby_pipeline_properties = {
OCF_PL_STEP(_ocf_mngt_init_attached_nonpersistent),
OCF_PL_STEP(_ocf_mngt_attach_cache_device),
OCF_PL_STEP(_ocf_mngt_init_cache_front_volume),
OCF_PL_STEP(_ocf_mngt_init_properties),
OCF_PL_STEP(_ocf_mngt_standby_init_properties),
OCF_PL_STEP(_ocf_mngt_attach_check_ram),
OCF_PL_STEP(_ocf_mngt_test_volume),
OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata),
OCF_PL_STEP(_ocf_mngt_load_metadata_unsafe),
OCF_PL_STEP(_ocf_mngt_bind_prepare_mempool),
OCF_PL_STEP(_ocf_mngt_bind_init_attached_structures),
OCF_PL_STEP(_ocf_mngt_bind_recovery_unsafe),
OCF_PL_STEP(_ocf_mngt_init_cleaner),
OCF_PL_STEP(_ocf_mngt_bind_init_cleaning),
OCF_PL_STEP(_ocf_mngt_bind_post_init),
OCF_PL_STEP_TERMINATOR(),
},
@ -2183,11 +2261,15 @@ static void _ocf_mngt_activate_check_superblock_complete(void *priv, int error)
OCF_PL_FINISH_RET(context->pipeline, result);
if (cache->conf_meta->metadata_layout != cache->metadata.layout) {
ocf_cache_log(cache, log_err, "Failed to activate standby instance: "
"invaild metadata layout\n");
OCF_PL_FINISH_RET(context->pipeline,
-OCF_ERR_METADATA_LAYOUT_MISMATCH);
}
if (cache->conf_meta->line_size != cache->metadata.line_size) {
ocf_cache_log(cache, log_err, "Failed to activate standby instance: "
"invaild cache line size\n");
OCF_PL_FINISH_RET(context->pipeline,
-OCF_ERR_CACHE_LINE_SIZE_MISMATCH);
}
@ -2219,7 +2301,7 @@ static void _ocf_mngt_activate_compare_superblock_end(
OCF_PL_FINISH_RET(context->pipeline, result);
if (diff) {
ocf_cache_log(cache, log_err, "Superblock missmatch!\n");
ocf_cache_log(cache, log_err, "Superblock mismatch!\n");
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_INVAL);
}
@ -2330,10 +2412,8 @@ struct ocf_pipeline_properties _ocf_mngt_cache_activate_pipeline_properties = {
OCF_PL_STEP(_ocf_mngt_activate_compare_superblock),
OCF_PL_STEP(_ocf_mngt_activate_init_properties),
OCF_PL_STEP(_ocf_mngt_test_volume),
OCF_PL_STEP(_ocf_mngt_init_cleaner),
OCF_PL_STEP(_ocf_mngt_init_promotion),
OCF_PL_STEP(_ocf_mngt_load_add_cores),
OCF_PL_STEP(_ocf_mngt_load_post_metadata_load),
OCF_PL_STEP(_ocf_mngt_attach_shutdown_status),
OCF_PL_STEP(_ocf_mngt_attach_post_init),
OCF_PL_STEP_TERMINATOR(),
@ -2406,12 +2486,24 @@ static void _ocf_mngt_cache_load(ocf_cache_t cache,
OCF_PL_NEXT_RET(pipeline);
}
static void ocf_mngt_stop_standby_stop_cleaner(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_mngt_cache_stop_context *context = priv;
ocf_cache_t cache = context->cache;
ocf_stop_cleaner(cache);
ocf_pipeline_next(pipeline);
}
struct ocf_pipeline_properties
ocf_mngt_cache_stop_standby_pipeline_properties = {
.priv_size = sizeof(struct ocf_mngt_cache_stop_context),
.finish = ocf_mngt_cache_stop_finish,
.steps = {
OCF_PL_STEP(ocf_mngt_cache_stop_wait_metadata_io),
OCF_PL_STEP(ocf_mngt_stop_standby_stop_cleaner),
OCF_PL_STEP(ocf_mngt_cache_close_cache_volume),
OCF_PL_STEP(ocf_mngt_cache_deinit_metadata),
OCF_PL_STEP(ocf_mngt_cache_deinit_cache_volume),
@ -2761,7 +2853,7 @@ static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx)
static void _ocf_mngt_cache_load_log(ocf_cache_t cache)
{
ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache);
ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;
ocf_cleaning_t cleaning_type = cache->cleaner.policy;
ocf_promotion_t promotion_type = cache->conf_meta->promotion_policy_type;
ocf_cache_log(cache, log_info, "Successfully loaded\n");
@ -2875,6 +2967,9 @@ static void _ocf_mngt_cache_activate_complete(ocf_cache_t cache, void *priv1,
_ocf_mngt_cache_set_active(cache);
ocf_cache_log(cache, log_info, "Successfully activated\n");
ocf_pio_concurrency_deinit(&cache->standby.concurrency);
ocf_metadata_passive_io_ctx_deinit(cache);
OCF_CMPL_RET(cache, priv2, 0);
}

View File

@ -151,6 +151,13 @@ void ocf_mngt_cache_put(ocf_cache_t cache)
}
}
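/* Keep the persisted superblock field and the runtime copy in sync; the
runtime copy (cache->cleaner.policy) is the one read on hot paths, which
lets a standby instance track the policy without a populated superblock. */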
void __set_cleaning_policy(ocf_cache_t cache,
ocf_cleaning_t new_cleaning_policy)
{
cache->conf_meta->cleaning_policy_type = new_cleaning_policy;
cache->cleaner.policy = new_cleaning_policy;
}
int ocf_mngt_cache_get_by_name(ocf_ctx_t ctx, const char *name, size_t name_len,
ocf_cache_t *cache)
{

View File

@ -30,4 +30,7 @@ void ocf_mngt_cache_lock_deinit(ocf_cache_t cache);
bool ocf_mngt_cache_is_locked(ocf_cache_t cache);
void __set_cleaning_policy(ocf_cache_t cache,
ocf_cleaning_t new_cleaning_policy);
#endif /* __OCF_MNGT_COMMON_H__ */

View File

@ -508,6 +508,7 @@ static void ocf_mngt_cache_add_core_finish(ocf_pipeline_t pipeline,
ocf_core_t core = context->core;
if (error) {
_ocf_mngt_cache_add_core_handle_error(context);
if (error == -OCF_ERR_CORE_NOT_AVAIL) {

View File

@ -968,7 +968,7 @@ static void _ocf_mngt_init_clean_policy(ocf_pipeline_t pipeline, void *priv,
ocf_cleaning_get_name(new_policy));
}
cache->conf_meta->cleaning_policy_type = new_policy;
__set_cleaning_policy(cache, new_policy);
ocf_refcnt_unfreeze(&cache->cleaner.refcnt);
ocf_metadata_end_exclusive_access(&cache->metadata.lock);
@ -1014,7 +1014,7 @@ void ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache,
if (ocf_cache_is_standby(cache))
OCF_CMPL_RET(priv, -OCF_ERR_CACHE_STANDBY);
old_policy = cache->conf_meta->cleaning_policy_type;
old_policy = cache->cleaner.policy;
if (new_policy == old_policy) {
ocf_cache_log(cache, log_info, "Cleaning policy %s is already "
@ -1047,7 +1047,7 @@ int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type)
if (ocf_cache_is_standby(cache))
return -OCF_ERR_CACHE_STANDBY;
*type = cache->conf_meta->cleaning_policy_type;
*type = cache->cleaner.policy;
return 0;
}

View File

@ -5,6 +5,7 @@
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_cache_line.h"
#include "ocf_request.h"
@ -196,7 +197,7 @@ int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
info->fallback_pt.error_counter =
env_atomic_read(&cache->fallback_pt_error_counter);
info->cleaning_policy = cache->conf_meta->cleaning_policy_type;
info->cleaning_policy = cache->cleaner.policy;
info->promotion_policy = cache->conf_meta->promotion_policy_type;
info->cache_line_size = ocf_line_size(cache);
@ -260,6 +261,7 @@ struct ocf_cache_volume_io_priv {
struct ocf_io *io;
struct ctx_data_t *data;
env_atomic remaining;
env_atomic error;
};
struct ocf_cache_volume {
@ -273,7 +275,8 @@ static inline ocf_cache_t ocf_volume_to_cache(ocf_volume_t volume)
return cache_volume->cache;
}
static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
static void ocf_cache_volume_io_complete_generic(struct ocf_io *vol_io,
int error)
{
struct ocf_cache_volume_io_priv *priv;
struct ocf_io *io = vol_io->priv1;
@ -289,6 +292,33 @@ static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
ocf_refcnt_dec(&cache->refcnt.metadata);
}
static void ocf_cache_io_complete(struct ocf_io *io, int error)
{
struct ocf_cache_volume_io_priv *priv;
ocf_cache_t cache;
cache = ocf_volume_to_cache(ocf_io_get_volume(io));
priv = ocf_io_get_priv(io);
env_atomic_cmpxchg(&priv->error, 0, error);
if (env_atomic_dec_return(&priv->remaining))
return;
ocf_refcnt_dec(&cache->refcnt.metadata);
ocf_io_end(io, env_atomic_read(&priv->error));
}
static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
{
struct ocf_io *io = vol_io->priv1;
ocf_io_put(vol_io);
ocf_cache_io_complete(io, error);
}
static int ocf_cache_volume_prepare_vol_io(struct ocf_io *io,
struct ocf_io **vol_io)
{
@ -313,14 +343,11 @@ static int ocf_cache_volume_prepare_vol_io(struct ocf_io *io,
return result;
}
ocf_io_set_cmpl(tmp_io, io, NULL, ocf_cache_volume_io_complete);
*vol_io = tmp_io;
return 0;
}
static void ocf_cache_volume_submit_io(struct ocf_io *io)
{
struct ocf_cache_volume_io_priv *priv;
@ -336,7 +363,8 @@ static void ocf_cache_volume_submit_io(struct ocf_io *io)
return;
}
env_atomic_set(&priv->remaining, 2);
env_atomic_set(&priv->remaining, 3);
env_atomic_set(&priv->error, 0);
result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
if (result) {
@ -344,20 +372,16 @@ static void ocf_cache_volume_submit_io(struct ocf_io *io)
return;
}
ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete);
ocf_volume_submit_io(vol_io);
result = ocf_metadata_passive_update(cache, io);
result = ocf_metadata_passive_update(cache, io, ocf_cache_io_complete);
if (result) {
ocf_cache_log(cache, log_crit,
"Metadata update error (error=%d)!\n", result);
}
if (env_atomic_dec_return(&priv->remaining))
return;
ocf_io_put(vol_io);
ocf_io_end(io, 0);
ocf_refcnt_dec(&cache->refcnt.metadata);
ocf_cache_io_complete(io, 0);
}
@ -383,6 +407,7 @@ static void ocf_cache_volume_submit_flush(struct ocf_io *io)
ocf_io_end(io, result);
return;
}
ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
ocf_volume_submit_flush(vol_io);
}
@ -410,6 +435,7 @@ static void ocf_cache_volume_submit_discard(struct ocf_io *io)
ocf_io_end(io, result);
return;
}
ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
ocf_volume_submit_discard(vol_io);
}

View File

@ -85,6 +85,11 @@ struct ocf_cache {
struct ocf_refcnt metadata __attribute__((aligned(64)));
} refcnt;
struct {
env_allocator *allocator;
struct ocf_alock *concurrency;
} standby;
struct ocf_core core[OCF_CORE_MAX];
ocf_pipeline_t stop_pipeline;

View File

@ -42,7 +42,7 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
info->min_size = cache->user_parts[part_id].config->min_size;
info->max_size = cache->user_parts[part_id].config->max_size;
info->cleaning_policy_type = cache->conf_meta->cleaning_policy_type;
info->cleaning_policy_type = cache->cleaner.policy;
info->cache_mode = cache->user_parts[part_id].config->cache_mode;

View File

@ -194,7 +194,7 @@ static void remove_lru_list_nobalance(ocf_cache_t cache,
node->hot = false;
}
static void remove_lru_list(ocf_cache_t cache, struct ocf_lru_list *list,
void ocf_lru_remove_locked(ocf_cache_t cache, struct ocf_lru_list *list,
ocf_cache_line_t cline)
{
remove_lru_list_nobalance(cache, list, cline);
@ -221,7 +221,7 @@ void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
node->next = end_marker;
}
static struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
uint32_t lru_idx, bool clean)
{
if (part->id == PARTITION_FREELIST)
@ -257,7 +257,7 @@ void ocf_lru_add(ocf_cache_t cache, ocf_cache_line_t cline)
static inline void ocf_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_lru_list *src_list, struct ocf_lru_list *dst_list)
{
remove_lru_list(cache, src_list, cline);
ocf_lru_remove_locked(cache, src_list, cline);
add_lru_head(cache, dst_list, cline);
}
@ -846,7 +846,7 @@ void ocf_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
dirty_list = ocf_lru_get_list(part, lru_list, false);
OCF_METADATA_LRU_WR_LOCK(cline);
remove_lru_list(cache, dirty_list, cline);
ocf_lru_remove_locked(cache, dirty_list, cline);
add_lru_head(cache, clean_list, cline);
OCF_METADATA_LRU_WR_UNLOCK(cline);
}
@ -862,7 +862,7 @@ void ocf_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
dirty_list = ocf_lru_get_list(part, lru_list, false);
OCF_METADATA_LRU_WR_LOCK(cline);
remove_lru_list(cache, clean_list, cline);
ocf_lru_remove_locked(cache, clean_list, cline);
add_lru_head(cache, dirty_list, cline);
OCF_METADATA_LRU_WR_UNLOCK(cline);
}
@ -891,7 +891,8 @@ static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
}
/* put invalid cachelines on freelist partition lru list */
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines,
bool safe)
{
ocf_cache_line_t phys, cline;
ocf_cache_line_t collision_table_entries =
@ -905,7 +906,10 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
for (i = 0; i < num_free_clines; i++) {
/* find first invalid cacheline */
phys = next_phys_invalid(cache, phys);
if (safe)
ENV_BUG_ON(phys == collision_table_entries);
else if (phys == collision_table_entries)
break;
cline = ocf_metadata_map_phy2lg(cache, phys);
++phys;
@ -921,9 +925,9 @@ void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines)
/* we should have reached the last invalid cache line */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys != collision_table_entries);
ENV_BUG_ON(safe && phys != collision_table_entries);
env_atomic_set(&cache->free.runtime->curr_size, num_free_clines);
env_atomic_set(&cache->free.runtime->curr_size, i);
}
static bool _is_cache_line_acting(struct ocf_cache *cache,

View File

@ -31,6 +31,11 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_upart, struct ocf_part *dst_upart);
uint32_t ocf_lru_num_free(ocf_cache_t cache);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines,
bool safe);
struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
uint32_t lru_idx, bool clean);
void ocf_lru_remove_locked(ocf_cache_t cache, struct ocf_lru_list *list,
ocf_cache_line_t cline);
#endif

View File

@ -167,3 +167,40 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
ocf_cleaning_set_hot_cache_line(cache, line);
}
void ocf_cline_rebuild_metadata(ocf_cache_t cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t cache_line)
{
ocf_core_t core = ocf_cache_get_core(cache, core_id);
ocf_part_id_t part_id;
ocf_cache_line_t hash_index;
struct ocf_part_runtime *part;
part_id = PARTITION_DEFAULT;
part = cache->user_parts[part_id].part.runtime;
ocf_metadata_set_partition_id(cache, cache_line, part_id);
env_atomic_inc(&part->curr_size);
hash_index = ocf_metadata_hash_func(cache, core_line, core_id);
ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
cache_line);
ocf_lru_init_cline(cache, cache_line);
ocf_lru_add(cache, cache_line);
env_atomic_inc(&core->runtime_meta->cached_clines);
env_atomic_inc(&core->runtime_meta->
part_counters[part_id].cached_clines);
if (metadata_test_dirty(cache, cache_line)) {
env_atomic_inc(&core->runtime_meta->dirty_clines);
env_atomic_inc(&core->runtime_meta->
part_counters[part_id].dirty_clines);
if (!env_atomic64_read(&core->runtime_meta->dirty_since))
env_atomic64_cmpxchg(&core->runtime_meta->dirty_since, 0,
env_ticks_to_secs(env_get_tick_count()));
}
}

View File

@ -123,6 +123,10 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);
void ocf_cline_rebuild_metadata(ocf_cache_t cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t cache_line);
/**
* @brief Remove cache line from cleaning policy
*