diff --git a/src/metadata/metadata.c b/src/metadata/metadata.c
index 9660cb6..5037021 100644
--- a/src/metadata/metadata.c
+++ b/src/metadata/metadata.c
@@ -2044,51 +2044,3 @@ void ocf_metadata_probe_cores(ocf_ctx_t ctx, ocf_volume_t volume,
 	ocf_metadata_query_cores(ctx, volume, uuids, uuids_count,
 			ocf_metadata_probe_cores_end, context);
 }
-
-int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io)
-{
-	struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
-	ctx_data_t *data = ocf_io_get_data(io);
-	uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
-	uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(io->bytes);
-	enum ocf_metadata_segment_id update_segments[] = {
-		metadata_segment_sb_config,
-		metadata_segment_part_config,
-		metadata_segment_core_config,
-		metadata_segment_core_uuid,
-		metadata_segment_collision,
-	};
-	int i;
-
-	if (io->dir == OCF_READ)
-		return 0;
-
-	if (io->addr % PAGE_SIZE || io->bytes % PAGE_SIZE) {
-		ocf_cache_log(cache, log_crit,
-				"Metadata update not aligned to page size!\n");
-		return -OCF_ERR_INVAL;
-	}
-
-	if (io_start_page >= ctrl->count_pages)
-		return 0;
-
-	for (i = 0; i < ARRAY_SIZE(update_segments); i++) {
-		enum ocf_metadata_segment_id seg = update_segments[i];
-		struct ocf_metadata_raw *raw = &(ctrl->raw_desc[seg]);
-		uint64_t raw_start_page = raw->ssd_pages_offset;
-		uint64_t raw_end_page = raw_start_page + raw->ssd_pages;
-		uint64_t overlap_start = OCF_MAX(io_start_page, raw_start_page);
-		uint64_t overlap_end = OCF_MIN(io_end_page, raw_end_page);
-		uint64_t overlap_start_data = overlap_start - io_start_page;
-
-		if (overlap_start < overlap_end) {
-			ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
-					PAGES_TO_BYTES(overlap_start_data));
-			ocf_metadata_raw_update(cache, raw, data,
-					overlap_start - raw_start_page,
-					overlap_end - overlap_start);
-		}
-	}
-
-	return 0;
-}
diff --git a/src/metadata/metadata.h b/src/metadata/metadata.h
index f6db6ea..29e3968 100644
--- a/src/metadata/metadata.h
+++ b/src/metadata/metadata.h
@@ -18,6 +18,7 @@
 #include "metadata_collision.h"
 #include "metadata_core.h"
 #include "metadata_misc.h"
+#include "metadata_passive_update.h"
 
 #define INVALID 0
 #define VALID 1
@@ -226,6 +227,4 @@ static inline ocf_cache_line_t ocf_metadata_collision_table_entries(
 	return cache->device->collision_table_entries;
 }
 
-int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io);
-
 #endif /* METADATA_H_ */
diff --git a/src/metadata/metadata_passive_update.c b/src/metadata/metadata_passive_update.c
new file mode 100644
index 0000000..ed6551f
--- /dev/null
+++ b/src/metadata/metadata_passive_update.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright(c) 2012-2021 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "ocf/ocf.h"
+
+#include "metadata.h"
+#include "metadata_passive_update.h"
+#include "metadata_collision.h"
+#include "metadata_segment_id.h"
+#include "metadata_internal.h"
+#include "metadata_io.h"
+#include "metadata_raw.h"
+#include "metadata_segment.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../ocf_def_priv.h"
+#include "../ocf_priv.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_pipeline.h"
+#include "../concurrency/ocf_pio_concurrency.h"
+#include "../engine/engine_common.h"
+
+#define MAX_PASSIVE_IO_SIZE (32*MiB)
+
+
+static inline void _reset_cline(ocf_cache_t cache, ocf_cache_line_t cline)
+{
+	/* The cacheline used to be dirty, but it is not anymore, so it needs
+	   to be moved to a clean LRU list */
+	ocf_lru_clean_cline(cache, &cache->user_parts[PARTITION_DEFAULT].part,
+			cline);
+
+	ocf_lru_rm_cline(cache, cline);
+	ocf_metadata_set_partition_id(cache, cline, PARTITION_FREELIST);
+}
+
+static inline void remove_from_freelist(ocf_cache_t cache,
+		ocf_cache_line_t cline)
+{
+	ocf_part_id_t lru_list;
+	struct ocf_lru_list *list;
+
+	lru_list = (cline % OCF_NUM_LRU_LISTS);
+	list = ocf_lru_get_list(&cache->free, lru_list, true);
+
+	OCF_METADATA_LRU_WR_LOCK(cline);
+	ocf_lru_remove_locked(cache, list, cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
+}
+
+static inline void remove_from_default(ocf_cache_t cache,
+		ocf_cache_line_t cline)
+{
+	ocf_part_id_t part_id = PARTITION_DEFAULT;
+	ocf_part_id_t lru_list;
+	struct ocf_lru_list *list;
+
+	lru_list = (cline % OCF_NUM_LRU_LISTS);
+	list = ocf_lru_get_list(&cache->user_parts[part_id].part, lru_list, false);
+
+	OCF_METADATA_LRU_WR_LOCK(cline);
+	ocf_lru_remove_locked(cache, list, cline);
+	OCF_METADATA_LRU_WR_UNLOCK(cline);
+
+	env_atomic_dec(&cache->user_parts[part_id].part.runtime->curr_size);
+
+}
+
+static void handle_previously_invalid(ocf_cache_t cache,
+		ocf_cache_line_t cline)
+{
+	ocf_core_id_t core_id;
+	uint64_t core_line;
+	uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
+
+	/* The pio lock provides exclusive access to the collision page, so
+	   neither the mapping nor the status bits can be changed concurrently */
+
+	ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
+
+	if (metadata_test_dirty(cache, cline) && core_id < OCF_CORE_MAX) {
+		ENV_BUG_ON(!metadata_test_valid_any(cache, cline));
+		/* Moving cline from the freelist to the default partition */
+		remove_from_freelist(cache, cline);
+
+		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
+				core_line);
+		OCF_METADATA_LRU_WR_LOCK(cline);
+		ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
+		OCF_METADATA_LRU_WR_UNLOCK(cline);
+		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
+				core_line);
+		ocf_cleaning_init_cache_block(cache, cline);
+		ocf_cleaning_set_hot_cache_line(cache, cline);
+
+	} else {
+		/* Cline stays on the freelist */
+
+		/* To prevent random values in the metadata, fill it with the defaults */
+		metadata_init_status_bits(cache, cline);
+		ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX);
+	}
+}
+
+static void handle_previously_valid(ocf_cache_t cache,
+		ocf_cache_line_t cline)
+{
+	ocf_core_id_t core_id;
+	uint64_t core_line;
+	uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
+
+	/* The pio lock provides exclusive access to the collision page, so
+	   neither the mapping nor the status bits can be changed concurrently */
+
+	ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
+
+	if (metadata_test_dirty(cache, cline) && core_id < OCF_CORE_MAX) {
+		/* Cline stays on the default partition */
+		ENV_BUG_ON(!metadata_test_valid_any(cache, cline));
+
+		remove_from_default(cache, cline);
+
+		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
+				core_line);
+		OCF_METADATA_LRU_WR_LOCK(cline);
+		ocf_cline_rebuild_metadata(cache, core_id, core_line, cline);
+		OCF_METADATA_LRU_WR_UNLOCK(cline);
+		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
+				core_line);
+		ocf_cleaning_set_hot_cache_line(cache, cline);
+
+	} else {
+		/* Moving cline from the default partition to the freelist */
+		ocf_cleaning_purge_cache_block(cache, cline);
+		_reset_cline(cache, cline);
+	}
+}
+
+static inline void update_list_segment(ocf_cache_t cache,
+		ocf_cache_line_t start, ocf_cache_line_t count)
+{
+	ocf_cache_line_t cline, end;
+
+	for (cline = start, end = start + count; cline < end; cline++) {
+		ocf_part_id_t part_id;
+
+		metadata_clear_dirty_if_invalid(cache, cline);
+		metadata_clear_valid_if_clean(cache, cline);
+
+		part_id = ocf_metadata_get_partition_id(cache, cline);
+		switch (part_id) {
+		case PARTITION_FREELIST:
+			handle_previously_invalid(cache, cline);
+			break;
+		case PARTITION_DEFAULT:
+			handle_previously_valid(cache, cline);
+			break;
+		default:
+			ocf_cache_log(cache, log_crit, "Passive update: invalid "
+					"part id for cacheline %u: %hu\n", cline, part_id);
+			ENV_BUG();
+			break;
+		}
+	}
+}
+
+static void _dec_core_stats(ocf_core_t core)
+{
+	ocf_part_id_t part = PARTITION_DEFAULT;
+
+	env_atomic *core_occupancy_counter = &core->runtime_meta->cached_clines;
+	env_atomic *part_occupancy_counter =
+		&core->runtime_meta->part_counters[part].cached_clines;
+
+	env_atomic *core_dirty_counter = &core->runtime_meta->dirty_clines;
+	env_atomic *part_dirty_counter =
+		&core->runtime_meta->part_counters[part].dirty_clines;
+
+	ENV_BUG_ON(env_atomic_dec_return(core_occupancy_counter) < 0);
+	ENV_BUG_ON(env_atomic_dec_return(part_occupancy_counter) < 0);
+
+	ENV_BUG_ON(env_atomic_dec_return(core_dirty_counter) < 0);
+	ENV_BUG_ON(env_atomic_dec_return(part_dirty_counter) < 0);
+}
+
+static void cleanup_old_mapping(ocf_cache_t cache, ocf_cache_line_t start,
+		ocf_cache_line_t count)
+{
+	ocf_cache_line_t cline, end;
+	uint32_t lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
+
+	for (cline = start, end = start + count; cline < end; cline++) {
+		ocf_core_id_t core_id;
+		uint64_t core_line;
+		ocf_core_t core;
+
+		ocf_metadata_get_core_info(cache, cline, &core_id, &core_line);
+
+		ENV_BUG_ON(core_id > OCF_CORE_ID_INVALID);
+
+		core = ocf_cache_get_core(cache, core_id);
+		if (!core)
+			continue;
+
+		_dec_core_stats(core);
+
+		ocf_hb_cline_prot_lock_wr(&cache->metadata.lock, lock_idx, core_id,
+				core_line);
+		ocf_metadata_remove_from_collision(cache, cline, PARTITION_DEFAULT);
+		ocf_hb_cline_prot_unlock_wr(&cache->metadata.lock, lock_idx, core_id,
+				core_line);
+	}
+}
+
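Note on the range arithmetic: the removed synchronous `ocf_metadata_passive_update()` above treated page ranges as half-open (`overlap_start < overlap_end`), while `passive_io_resume()` below switches to inclusive end pages (`raw_end_page = raw_start_page + raw->ssd_pages - 1`, count `overlap_end - overlap_start + 1`). A minimal standalone sketch of that clamping step, with made-up page numbers that are illustrative only, not a real OCF layout:

```c
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Clamp an IO page range to a metadata segment's page range (both ends
 * inclusive, as in passive_io_resume below). Returns the number of
 * overlapping pages, or 0 when the ranges are disjoint. */
static uint64_t overlap_pages(uint64_t io_start, uint64_t io_end,
		uint64_t seg_start, uint64_t seg_end,
		uint64_t *first_page_in_seg, uint64_t *first_page_in_io)
{
	uint64_t start = MAX(io_start, seg_start);
	uint64_t end = MIN(io_end, seg_end);

	if (start > end)
		return 0;

	*first_page_in_seg = start - seg_start; /* page offset within the segment */
	*first_page_in_io = start - io_start;   /* where to seek in the IO data */
	return end - start + 1;
}

int main(void)
{
	/* Made-up layout: a 16-page write covering pages 30..45 hits a
	 * segment spanning pages 32..95. */
	uint64_t seg_off, io_off;
	uint64_t count = overlap_pages(30, 45, 32, 95, &seg_off, &io_off);

	/* Pages 32..45 overlap: 14 pages, starting at segment page 0 and
	 * IO data page 2. */
	printf("count=%" PRIu64 " seg_off=%" PRIu64 " io_off=%" PRIu64 "\n",
			count, seg_off, io_off);
	return 0;
}
```

Either convention works as long as the emptiness test and the count stay consistent; the inclusive form is what the collision-range math in the loop below builds on.
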
+static int passive_io_resume(struct ocf_request *req)
+{
+	ocf_cache_t cache = req->cache;
+	struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
+	struct ocf_io *io = (struct ocf_io *)req->data;
+	ctx_data_t *data = ocf_io_get_data(io);
+	uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
+	uint64_t io_pages_count = BYTES_TO_PAGES(io->bytes);
+	uint64_t io_end_page = io_start_page + io_pages_count - 1;
+	ocf_end_io_t io_cmpl = req->master_io_req;
+	ocf_cache_line_t cache_entries = ocf_metadata_collision_table_entries(cache);
+	enum ocf_metadata_segment_id update_segments[] = {
+		metadata_segment_sb_config,
+		metadata_segment_part_config,
+		metadata_segment_core_config,
+		metadata_segment_core_uuid,
+		metadata_segment_collision,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(update_segments); i++) {
+		ocf_cache_line_t cache_line_count, cache_line_range_start;
+		enum ocf_metadata_segment_id seg = update_segments[i];
+		struct ocf_metadata_raw *raw = &(ctrl->raw_desc[seg]);
+		uint64_t raw_start_page = raw->ssd_pages_offset;
+		uint64_t raw_end_page = raw_start_page + raw->ssd_pages - 1;
+		uint64_t overlap_start = OCF_MAX(io_start_page, raw_start_page);
+		uint64_t overlap_end = OCF_MIN(io_end_page, raw_end_page);
+		uint64_t overlap_start_data = overlap_start - io_start_page;
+		uint64_t overlap_page;
+		uint64_t overlap_count;
+
+		if (overlap_start > overlap_end)
+			continue;
+
+		overlap_page = overlap_start - raw_start_page;
+		overlap_count = overlap_end - overlap_start + 1;
+
+		if (seg == metadata_segment_collision) {
+			/* The range of cachelines with a potentially updated collision
+			   section */
+			cache_line_range_start = overlap_page * raw->entries_in_page;
+			cache_line_count = raw->entries_in_page * overlap_count;
+
+			/* The last page of the collision section may contain fewer
+			   entries than entries_in_page */
+			cache_line_count = OCF_MIN(cache_entries - cache_line_range_start,
+					cache_line_count);
+
+			/* The collision section is not updated yet, but the range of
+			   affected cachelines is already known. Remove the old
+			   mapping-related info from the metadata */
+			cleanup_old_mapping(cache, cache_line_range_start,
+					cache_line_count);
+		}
+
+		ctx_data_seek(cache->owner, data, ctx_data_seek_begin,
+				PAGES_TO_BYTES(overlap_start_data));
+		ocf_metadata_raw_update(cache, raw, data, overlap_page, overlap_count);
+
+		if (seg != metadata_segment_collision)
+			continue;
+
+		update_list_segment(cache, cache_line_range_start,
+				cache_line_count);
+	}
+
+	ocf_pio_async_unlock(req->cache->standby.concurrency, req);
+	io_cmpl(io, 0);
+	env_allocator_del(cache->standby.allocator, req);
+	return 0;
+}
+
+static struct ocf_io_if passive_io_restart_if = {
+	.read = passive_io_resume,
+	.write = passive_io_resume,
+};
+
+static void passive_io_page_lock_acquired(struct ocf_request *req)
+{
+	ocf_engine_push_req_front(req, true);
+}
+
+int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
+		ocf_end_io_t io_cmpl)
+{
+	struct ocf_metadata_ctrl *ctrl = cache->metadata.priv;
+	struct ocf_request *req;
+	uint64_t io_start_page = BYTES_TO_PAGES(io->addr);
+	uint64_t io_end_page = io_start_page + BYTES_TO_PAGES(io->bytes);
+	int lock = 0;
+
+	if (io->dir == OCF_READ) {
+		io_cmpl(io, 0);
+		return 0;
+	}
+
+	if (io_start_page >= ctrl->count_pages) {
+		io_cmpl(io, 0);
+		return 0;
+	}
+
+	if (io->addr % PAGE_SIZE || io->bytes % PAGE_SIZE) {
+		ocf_cache_log(cache, log_warn,
+				"Metadata update not aligned to page size!\n");
+		io_cmpl(io, -OCF_ERR_INVAL);
+		return -OCF_ERR_INVAL;
+	}
+
+	if (io->bytes > MAX_PASSIVE_IO_SIZE) {
+		/* FIXME: handle larger IOs */
+		ocf_cache_log(cache, log_warn,
+				"IO size exceeds max supported size!\n");
+		io_cmpl(io, -OCF_ERR_INVAL);
+		return -OCF_ERR_INVAL;
+	}
+
+	req = (struct ocf_request *)env_allocator_new(cache->standby.allocator);
+	if (!req) {
+		io_cmpl(io, -OCF_ERR_NO_MEM);
+		return -OCF_ERR_NO_MEM;
+	}
+
+	req->io_queue = io->io_queue;
+	req->info.internal = true;
+	req->io_if = &passive_io_restart_if;
+	req->rw = OCF_WRITE;
+	req->data = io;
+	req->master_io_req = io_cmpl;
+	req->cache = cache;
+	env_atomic_set(&req->lock_remaining, 0);
+
+	req->core_line_first = io_start_page;
+	req->core_line_count = io_end_page - io_start_page;
+	req->alock_status = (uint8_t *)&req->map;
+
+	lock = ocf_pio_async_lock(req->cache->standby.concurrency,
+			req, passive_io_page_lock_acquired);
+	if (lock < 0) {
+		env_allocator_del(cache->standby.allocator, req);
+		io_cmpl(io, lock);
+		return lock;
+	}
+
+	if (lock == OCF_LOCK_ACQUIRED)
+		passive_io_resume(req);
+
+	return 0;
+}
+
+int ocf_metadata_passive_io_ctx_init(ocf_cache_t cache)
+{
+	char *name = "ocf_cache_pio";
+	size_t element_size, header_size, size;
+
+	header_size = sizeof(struct ocf_request);
+	/* Only one bit per page is required. Since `alock_status` has
+	   `uint8_t*` type, one entry can carry the status for 8 pages. */
+	element_size = OCF_DIV_ROUND_UP(BYTES_TO_PAGES(MAX_PASSIVE_IO_SIZE), 8);
+	size = header_size + element_size;
+
+	cache->standby.allocator = env_allocator_create(size, name, true);
+	if (cache->standby.allocator == NULL)
+		return -1;
+
+	return 0;
+}
+
+void ocf_metadata_passive_io_ctx_deinit(ocf_cache_t cache)
+{
+	env_allocator_destroy(cache->standby.allocator);
+}
diff --git a/src/metadata/metadata_passive_update.h b/src/metadata/metadata_passive_update.h
new file mode 100644
index 0000000..18657a9
--- /dev/null
+++ b/src/metadata/metadata_passive_update.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright(c) 2012-2021 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __OCF_METADATA_PASSIVE_IO_H__
+#define __OCF_METADATA_PASSIVE_IO_H__
+
+int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,
+		ocf_end_io_t io_cmpl);
+
+int ocf_metadata_passive_io_ctx_init(ocf_cache_t cache);
+
+void ocf_metadata_passive_io_ctx_deinit(ocf_cache_t cache);
+
+#endif
diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c
index 5933101..99a532b 100644
--- a/src/metadata/metadata_raw.c
+++ b/src/metadata/metadata_raw.c
@@ -191,7 +191,6 @@ static void *_raw_ram_access(ocf_cache_t cache,
 
 static int _raw_ram_drain_page(ocf_cache_t cache, struct ocf_metadata_raw *raw,
 		ctx_data_t *data, uint32_t page)
 {
-	uint32_t size = raw->entry_size * raw->entries_in_page;
 	ocf_cache_line_t line;
 
diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c
index 8581e67..88c742f 100644
--- a/src/mngt/ocf_mngt_cache.c
+++ b/src/mngt/ocf_mngt_cache.c
@@ -25,6 +25,7 @@
 #include "../ocf_ctx_priv.h"
 #include "../cleaning/cleaning.h"
 #include "../promotion/ops.h"
+#include "../concurrency/ocf_pio_concurrency.h"
 
 #define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)
 
@@ -144,6 +145,10 @@ struct ocf_cache_attach_context {
 		 */
 		bool concurrency_inited : 1;
+
+		bool pio_mpool : 1;
+
+		bool pio_concurrency : 1;
 	} flags;
 
 	struct {
@@ -433,8 +438,8 @@ static void _ocf_mngt_load_add_cores(ocf_pipeline_t pipeline,
 		} else {
 			core->opened = true;
 		}
-	}
 
+	}
 	env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
 	core->added = true;
 	cache->conf_meta->core_count++;
@@ -1646,6 +1651,12 @@ static void _ocf_mngt_attach_handle_error(
 		cache->device = NULL;
 	}
 
+	if (context->flags.pio_concurrency)
+		ocf_pio_concurrency_deinit(&cache->standby.concurrency);
+
+	if (context->flags.pio_mpool)
+		ocf_metadata_passive_io_ctx_deinit(cache);
+
 	ocf_pipeline_destroy(cache->stop_pipeline);
 }
 
@@ -2052,15 +2063,34 @@ static void _ocf_mngt_load_metadata_unsafe(ocf_pipeline_t pipeline,
 			_ocf_mngt_load_unsafe_complete, context);
 }
 
+static void _ocf_mngt_bind_prepare_mempool(ocf_pipeline_t pipeline,
+		void *priv, ocf_pipeline_arg_t arg)
+{
+	struct ocf_cache_attach_context *context = priv;
+	ocf_cache_t cache = context->cache;
+	int result;
+
+	result = ocf_metadata_passive_io_ctx_init(cache);
+	if (!result)
+		context->flags.pio_mpool = true;
+
+	OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result);
+}
+
 static void _ocf_mngt_bind_init_attached_structures(ocf_pipeline_t pipeline,
 		void *priv, ocf_pipeline_arg_t arg)
 {
 	struct ocf_cache_attach_context *context = priv;
 	ocf_cache_t cache = context->cache;
+	int result;
 
 	init_attached_data_structures_recovery(cache, false);
 
-	ocf_pipeline_next(context->pipeline);
+	result = ocf_pio_concurrency_init(&cache->standby.concurrency, cache);
+	if (!result)
+		context->flags.pio_concurrency = true;
+
+	OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result);
 }
 
 static void _ocf_mngt_bind_recovery_unsafe(ocf_pipeline_t pipeline,
@@ -2115,6 +2145,7 @@ struct ocf_pipeline_properties _ocf_mngt_cache_standby_pipeline_properties = {
 		OCF_PL_STEP(_ocf_mngt_test_volume),
 		OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata),
 		OCF_PL_STEP(_ocf_mngt_load_metadata_unsafe),
+		OCF_PL_STEP(_ocf_mngt_bind_prepare_mempool),
 		OCF_PL_STEP(_ocf_mngt_bind_init_attached_structures),
 		OCF_PL_STEP(_ocf_mngt_bind_recovery_unsafe),
 		OCF_PL_STEP(_ocf_mngt_init_cleaner),
@@ -2936,6 +2967,9 @@ static void _ocf_mngt_cache_activate_complete(ocf_cache_t cache, void *priv1,
 	_ocf_mngt_cache_set_active(cache);
 	ocf_cache_log(cache, log_info, "Successfully activated\n");
 
+	ocf_pio_concurrency_deinit(&cache->standby.concurrency);
+	ocf_metadata_passive_io_ctx_deinit(cache);
+
 	OCF_CMPL_RET(cache, priv2, 0);
 }
 
diff --git a/src/mngt/ocf_mngt_core.c b/src/mngt/ocf_mngt_core.c
index b535068..a261069 100644
--- a/src/mngt/ocf_mngt_core.c
+++ b/src/mngt/ocf_mngt_core.c
@@ -508,6 +508,7 @@ static void ocf_mngt_cache_add_core_finish(ocf_pipeline_t pipeline,
 	ocf_core_t core = context->core;
 
 	if (error) {
+		_ocf_mngt_cache_add_core_handle_error(context);
 
 		if (error == -OCF_ERR_CORE_NOT_AVAIL) {
diff --git a/src/ocf_cache.c b/src/ocf_cache.c
index 2522f61..1a5d203 100644
--- a/src/ocf_cache.c
+++ b/src/ocf_cache.c
@@ -260,6 +260,7 @@ struct ocf_cache_volume_io_priv {
 	struct ocf_io *io;
 	struct ctx_data_t *data;
 	env_atomic remaining;
+	env_atomic error;
 };
 
 struct ocf_cache_volume {
@@ -273,7 +274,8 @@ static inline ocf_cache_t ocf_volume_to_cache(ocf_volume_t volume)
 	return cache_volume->cache;
 }
 
-static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
+static void ocf_cache_volume_io_complete_generic(struct ocf_io *vol_io,
+		int error)
 {
 	struct ocf_cache_volume_io_priv *priv;
 	struct ocf_io *io = vol_io->priv1;
@@ -289,6 +291,33 @@ static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
 	ocf_refcnt_dec(&cache->refcnt.metadata);
 }
 
+static void ocf_cache_io_complete(struct ocf_io *io, int error)
+{
+	struct ocf_cache_volume_io_priv *priv;
+	ocf_cache_t cache;
+
+	cache = ocf_volume_to_cache(ocf_io_get_volume(io));
+
+	priv = ocf_io_get_priv(io);
+
+	env_atomic_cmpxchg(&priv->error, 0, error);
+
+	if (env_atomic_dec_return(&priv->remaining))
+		return;
+
+	ocf_refcnt_dec(&cache->refcnt.metadata);
+	ocf_io_end(io, env_atomic_read(&priv->error));
+}
+
+static void ocf_cache_volume_io_complete(struct ocf_io *vol_io, int error)
+{
+	struct ocf_io *io = vol_io->priv1;
+
+	ocf_io_put(vol_io);
+
+	ocf_cache_io_complete(io, error);
+}
+
 static int ocf_cache_volume_prepare_vol_io(struct ocf_io *io,
 		struct ocf_io **vol_io)
 {
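The two completion helpers added above implement a fan-in pattern: `remaining` starts at 3 in `ocf_cache_volume_submit_io()` below (one reference for the forwarded volume IO, one for the metadata update, one held by the submit path itself), and `env_atomic_cmpxchg(&priv->error, 0, error)` keeps the first non-zero status. A rough standalone model of the same pattern, using C11 atomics instead of OCF's `env_atomic` wrappers (names are illustrative, not OCF API):

```c
#include <stdatomic.h>
#include <stdio.h>

/* Completion context shared by the volume IO, the metadata update and the
 * submit path itself -- mirrors ocf_cache_volume_io_priv above. */
struct io_ctx {
	atomic_int remaining; /* pending completions */
	atomic_int error;     /* first reported error wins */
};

static void io_complete(struct io_ctx *ctx, int error)
{
	int expected = 0;

	/* Record the first non-zero error, like
	 * env_atomic_cmpxchg(&priv->error, 0, error). */
	atomic_compare_exchange_strong(&ctx->error, &expected, error);

	/* Only the last completer ends the master IO. */
	if (atomic_fetch_sub(&ctx->remaining, 1) != 1)
		return;

	printf("io ended with status %d\n", atomic_load(&ctx->error));
}

int main(void)
{
	struct io_ctx ctx = { .remaining = 3, .error = 0 };

	io_complete(&ctx, 0);   /* volume IO done */
	io_complete(&ctx, -22); /* metadata update failed */
	io_complete(&ctx, 0);   /* submit path drops its own reference */
	return 0;
}
```

Holding the third reference until `submit_io` returns guarantees the master IO cannot complete while the submit path is still running, even if both asynchronous completions fire immediately.
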
@@ -313,14 +342,11 @@ static int ocf_cache_volume_prepare_vol_io(struct ocf_io *io,
 		return result;
 	}
 
-	ocf_io_set_cmpl(tmp_io, io, NULL, ocf_cache_volume_io_complete);
-
 	*vol_io = tmp_io;
 
 	return 0;
 }
 
-
 static void ocf_cache_volume_submit_io(struct ocf_io *io)
 {
 	struct ocf_cache_volume_io_priv *priv;
@@ -336,7 +362,8 @@ static void ocf_cache_volume_submit_io(struct ocf_io *io)
 		return;
 	}
 
-	env_atomic_set(&priv->remaining, 2);
+	env_atomic_set(&priv->remaining, 3);
+	env_atomic_set(&priv->error, 0);
 
 	result = ocf_cache_volume_prepare_vol_io(io, &vol_io);
 	if (result) {
@@ -344,20 +371,16 @@ static void ocf_cache_volume_submit_io(struct ocf_io *io)
 		return;
 	}
 
+	ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete);
 	ocf_volume_submit_io(vol_io);
 
-	result = ocf_metadata_passive_update(cache, io);
+	result = ocf_metadata_passive_update(cache, io, ocf_cache_io_complete);
 	if (result) {
 		ocf_cache_log(cache, log_crit,
 				"Metadata update error (error=%d)!\n", result);
 	}
 
-	if (env_atomic_dec_return(&priv->remaining))
-		return;
-
-	ocf_io_put(vol_io);
-	ocf_io_end(io, 0);
-	ocf_refcnt_dec(&cache->refcnt.metadata);
+	ocf_cache_io_complete(io, 0);
 }
 
@@ -383,6 +406,7 @@ static void ocf_cache_volume_submit_flush(struct ocf_io *io)
 		ocf_io_end(io, result);
 		return;
 	}
+	ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
 
 	ocf_volume_submit_flush(vol_io);
 }
@@ -410,6 +434,7 @@ static void ocf_cache_volume_submit_discard(struct ocf_io *io)
 		ocf_io_end(io, result);
 		return;
 	}
+	ocf_io_set_cmpl(vol_io, io, NULL, ocf_cache_volume_io_complete_generic);
 
 	ocf_volume_submit_discard(vol_io);
 }
diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h
index 6771f0e..fdb6e42 100644
--- a/src/ocf_cache_priv.h
+++ b/src/ocf_cache_priv.h
@@ -85,6 +85,11 @@ struct ocf_cache {
 		struct ocf_refcnt metadata __attribute__((aligned(64)));
 	} refcnt;
 
+	struct {
+		env_allocator *allocator;
+		struct ocf_alock *concurrency;
+	} standby;
+
 	struct ocf_core core[OCF_CORE_MAX];
 
 	ocf_pipeline_t stop_pipeline;
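
For reference, the element sizing in `ocf_metadata_passive_io_ctx_init()` works out as follows, assuming OCF's 4 KiB metadata page: the 32 MiB IO cap covers 8192 pages, and at one lock-status bit per page the bitmap adds 1024 bytes to each preallocated request. A standalone check of that arithmetic (the header struct below is a stand-in; the real size comes from OCF's internal `struct ocf_request`):

```c
#include <stdio.h>
#include <stddef.h>

#define KiB (1ULL << 10)
#define MiB (1ULL << 20)
#define PAGE_SIZE (4 * KiB)            /* assumed OCF metadata page size */
#define MAX_PASSIVE_IO_SIZE (32 * MiB) /* cap enforced in ocf_metadata_passive_update */

#define BYTES_TO_PAGES(x) ((x) / PAGE_SIZE)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Stand-in for struct ocf_request, just to make the sum concrete. */
struct request_header { char opaque[256]; };

int main(void)
{
	/* One lock-status bit per page; alock_status is a uint8_t*, so each
	 * byte covers 8 pages. */
	size_t pages = BYTES_TO_PAGES(MAX_PASSIVE_IO_SIZE); /* 8192 */
	size_t bitmap = DIV_ROUND_UP(pages, 8);             /* 1024 bytes */
	size_t element = sizeof(struct request_header) + bitmap;

	printf("pages=%zu bitmap=%zuB element=%zuB\n", pages, bitmap, element);
	return 0;
}
```

Sizing every pool element for the worst-case IO keeps the hot path allocation-free: `env_allocator_new()` hands out a fixed-size request whose trailing bitmap is always large enough, which is also why IOs above `MAX_PASSIVE_IO_SIZE` are rejected up front.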