diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c index 7cf964f..2672562 100644 --- a/src/cleaning/acp.c +++ b/src/cleaning/acp.c @@ -651,8 +651,6 @@ static void _acp_flush(struct acp_context *acp) .cmpl_context = acp, .cmpl_fn = _acp_flush_end, .lock_cacheline = false, - .lock_metadata = true, - .do_sort = false, .cmpl_queue = true, .io_queue = cache->cleaner.io_queue, }; diff --git a/src/cleaning/alru.c b/src/cleaning/alru.c index e4e1d51..6bc6f4e 100644 --- a/src/cleaning/alru.c +++ b/src/cleaning/alru.c @@ -911,8 +911,9 @@ static void alru_clean(struct alru_context *ctx) to_clean = get_data_to_flush(ctx); if (to_clean > 0) { fctx->flush_perfomed = true; - ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, to_clean, - &fctx->attribs); + ocf_cleaner_sort_flush_data(fctx->flush_data, to_clean); + ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, + to_clean, &fctx->attribs); ocf_metadata_end_exclusive_access(&cache->metadata.lock); return; } @@ -939,8 +940,6 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl) fctx->attribs.cmpl_context = fctx; fctx->attribs.cmpl_fn = alru_clean_complete; fctx->attribs.lock_cacheline = true; - fctx->attribs.lock_metadata = false; - fctx->attribs.do_sort = true; fctx->attribs.io_queue = cache->cleaner.io_queue; fctx->attribs.cmpl_queue = true; diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index b54c865..24643f6 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2024 Huawei Technologies Co., Ltd. 
* SPDX-License-Identifier: BSD-3-Clause */ @@ -555,7 +556,6 @@ void ocf_engine_clean(struct ocf_request *req) /* Initialize attributes for cleaner */ struct ocf_cleaner_attribs attribs = { .lock_cacheline = false, - .lock_metadata = false, .cmpl_context = req, .cmpl_fn = _ocf_engine_clean_end, diff --git a/src/metadata/metadata_partition.h b/src/metadata/metadata_partition.h index e05001e..e8ec033 100644 --- a/src/metadata/metadata_partition.h +++ b/src/metadata/metadata_partition.h @@ -1,12 +1,12 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2024 Huawei Technologies Co., Ltd. * SPDX-License-Identifier: BSD-3-Clause */ #ifndef __METADATA_PARTITION_H__ #define __METADATA_PARTITION_H__ -#include "metadata_partition_structs.h" #include "../ocf_cache_priv.h" #define PARTITION_DEFAULT 0 diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 83ca25f..c518d61 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -9,10 +9,10 @@ #include "ocf_mngt_core_priv.h" #include "../ocf_priv.h" #include "../ocf_core_priv.h" +#include "../ocf_part.h" #include "../ocf_queue_priv.h" #include "../metadata/metadata.h" #include "../metadata/metadata_io.h" -#include "../metadata/metadata_partition_structs.h" #include "../engine/cache_engine.h" #include "../utils/utils_user_part.h" #include "../utils/utils_cache_line.h" diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index e9d225e..2cfc565 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -444,7 +444,6 @@ static void _ocf_mngt_flush_container( fc->req = req; fc->attribs.lock_cacheline = true; - fc->attribs.lock_metadata = false; fc->attribs.cmpl_context = fc; fc->attribs.cmpl_fn = _ocf_mngt_flush_portion_end; fc->attribs.io_queue = cache->mngt_queue; diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h index 807483a..1deb3eb 100644 --- a/src/ocf_cache_priv.h +++ b/src/ocf_cache_priv.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel 
Corporation + * Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -10,8 +11,8 @@ #include "ocf_env.h" #include "ocf_volume_priv.h" #include "ocf_core_priv.h" +#include "ocf_part.h" #include "metadata/metadata_structs.h" -#include "metadata/metadata_partition_structs.h" #include "utils/utils_list.h" #include "utils/utils_pipeline.h" #include "utils/utils_refcnt.h" diff --git a/src/ocf_lru.c b/src/ocf_lru.c index d617b98..16eaa74 100644 --- a/src/ocf_lru.c +++ b/src/ocf_lru.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2023-2024 Huawei Technologies Co., Ltd. * SPDX-License-Identifier: BSD-3-Clause */ @@ -570,50 +571,33 @@ static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter) static void ocf_lru_clean_end(void *private_data, int error) { struct ocf_part_cleaning_ctx *ctx = private_data; + struct flush_data *entries = ctx->entries; unsigned i; for (i = 0; i < OCF_EVICTION_CLEAN_SIZE; i++) { - if (ctx->cline[i] != end_marker) - ocf_cache_line_unlock_rd(ctx->cache->device->concurrency - .cache_line, ctx->cline[i]); + if (entries[i].cache_line == end_marker) + break; + ocf_cache_line_unlock_rd( + ctx->cache->device->concurrency.cache_line, + entries[i].cache_line); } ocf_refcnt_dec(&ctx->counter); } -static int ocf_lru_clean_get(ocf_cache_t cache, void *getter_context, - uint32_t idx, ocf_cache_line_t *line) -{ - struct ocf_part_cleaning_ctx *ctx = getter_context; - - if (ctx->cline[idx] == end_marker) - return -1; - - *line = ctx->cline[idx]; - - return 0; -} - void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part, ocf_queue_t io_queue, uint32_t count) { struct ocf_part_cleaning_ctx *ctx = &user_part->cleaning; struct ocf_cleaner_attribs attribs = { .lock_cacheline = false, - .lock_metadata = true, - .do_sort = true, .cmpl_context = ctx, .cmpl_fn = ocf_lru_clean_end, - .getter = ocf_lru_clean_get, - .getter_context = ctx, - - .count = min(count, 
OCF_EVICTION_CLEAN_SIZE), - .io_queue = io_queue }; - ocf_cache_line_t *cline = ctx->cline; + struct flush_data *entries = ctx->entries; struct ocf_lru_iter iter; unsigned lru_idx; int cnt; @@ -643,21 +627,26 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part, OCF_METADATA_LRU_WR_LOCK_ALL(); lru_iter_cleaning_init(&iter, cache, &user_part->part, lru_idx); - i = 0; - while (i < OCF_EVICTION_CLEAN_SIZE) { - cline[i] = lru_iter_cleaning_next(&iter); - if (cline[i] == end_marker) + count = min(count, OCF_EVICTION_CLEAN_SIZE); + for (i = 0; i < count; i++) { + entries[i].cache_line = lru_iter_cleaning_next(&iter); + if (entries[i].cache_line == end_marker) break; - i++; + ocf_metadata_get_core_info(cache, entries[i].cache_line, + &entries[i].core_id, &entries[i].core_line); } - while (i < OCF_EVICTION_CLEAN_SIZE) - cline[i++] = end_marker; OCF_METADATA_LRU_WR_UNLOCK_ALL(); ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx); - ocf_cleaner_fire(cache, &attribs); + if (i == 0) { + ocf_refcnt_dec(&ctx->counter); + return; + } + + ocf_cleaner_sort_flush_data(entries, i); + ocf_cleaner_do_flush_data_async(cache, entries, i, &attribs); } static void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline, diff --git a/src/metadata/metadata_partition_structs.h b/src/ocf_part.h similarity index 90% rename from src/metadata/metadata_partition_structs.h rename to src/ocf_part.h index 2b68528..8c32fd5 100644 --- a/src/metadata/metadata_partition_structs.h +++ b/src/ocf_part.h @@ -1,14 +1,16 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2023-2024 Huawei Technologies Co., Ltd. 
* SPDX-License-Identifier: BSD-3-Clause */ #ifndef __METADATA_PARTITION_STRUCTS_H__ #define __METADATA_PARTITION_STRUCTS_H__ -#include "../utils/utils_list.h" -#include "../cleaning/cleaning.h" -#include "../ocf_space.h" +#include "utils/utils_list.h" +#include "utils/utils_cleaner.h" +#include "cleaning/cleaning.h" +#include "ocf_space.h" #define OCF_NUM_PARTITIONS OCF_USER_IO_CLASS_MAX + 2 @@ -69,7 +71,7 @@ struct ocf_lru_iter struct ocf_part_cleaning_ctx { ocf_cache_t cache; struct ocf_refcnt counter; - ocf_cache_line_t cline[OCF_EVICTION_CLEAN_SIZE]; + struct flush_data entries[OCF_EVICTION_CLEAN_SIZE]; }; /* common partition data for both user-deined partitions as diff --git a/src/ocf_request.h b/src/ocf_request.h index 5e05139..d03a478 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -195,9 +195,6 @@ struct ocf_request { uint8_t part_evict : 1; /* !< Some cachelines from request's partition must be evicted */ - uint8_t complete_queue : 1; - /* !< Request needs to be completed from the queue context */ - uint8_t lock_idx : OCF_METADATA_GLOBAL_LOCK_IDX_BITS; /* !< Selected global metadata read lock */ diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index 4860f5b..7346e2c 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -73,77 +73,85 @@ enum { ocf_cleaner_req_type_slave = 2 }; -static struct ocf_request *_ocf_cleaner_alloc_master_req( - struct ocf_cache *cache, uint32_t count, - const struct ocf_cleaner_attribs *attribs) +static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count, + bool low_mem) { - struct ocf_request *req = _ocf_cleaner_alloc_req(cache, count, attribs); + if (low_mem || count <= 4096) + return count < 128 ? 
count : 128; - if (req) { - /* Set type of cleaning request */ - req->master_io_req_type = ocf_cleaner_req_type_master; + return 1024; +} - /* In master, save completion context and function */ - req->priv = attribs->cmpl_context; - req->master_io_req = attribs->cmpl_fn; - req->complete_queue = attribs->cmpl_queue; +static struct ocf_request *_ocf_cleaner_alloc_master_req( + struct ocf_cache *cache, uint32_t count, + const struct ocf_cleaner_attribs *attribs) +{ + struct ocf_request *req; - /* The count of all requests */ - env_atomic_set(&req->master_remaining, 1); - - OCF_DEBUG_PARAM(cache, "New master request, count = %u", - count); + req = _ocf_cleaner_alloc_req(cache, count, attribs); + if (unlikely(!req)) { + /* Some memory allocation error, try re-allocate request */ + count = _ocf_cleaner_get_req_max_count(count, true); + req = _ocf_cleaner_alloc_req(cache, count, attribs); } + + if (unlikely(!req)) + return NULL; + + /* Set type of cleaning request */ + req->master_io_req_type = ocf_cleaner_req_type_master; + + /* In master, save completion context and function */ + req->priv = attribs->cmpl_context; + req->master_io_req = attribs->cmpl_fn; + + /* The count of all requests */ + env_atomic_set(&req->master_remaining, 1); + + /* Keep master alive till all sub-requests complete */ + ocf_req_get(req); + + OCF_DEBUG_PARAM(cache, "New master request, count = %u", count); + return req; } static struct ocf_request *_ocf_cleaner_alloc_slave_req( - struct ocf_request *master, - uint32_t count, const struct ocf_cleaner_attribs *attribs) + struct ocf_request *master, uint32_t count, + const struct ocf_cleaner_attribs *attribs) { - struct ocf_request *req = _ocf_cleaner_alloc_req( - master->cache, count, attribs); + struct ocf_request *req; - if (req) { - /* Set type of cleaning request */ - req->master_io_req_type = ocf_cleaner_req_type_slave; + req = _ocf_cleaner_alloc_req(master->cache, count, attribs); + if (unlikely(!req)) { + /* Some memory allocation error, try 
re-allocate request */ + count = _ocf_cleaner_get_req_max_count(count, true); + req = _ocf_cleaner_alloc_req(master->cache, count, attribs); + } - /* Slave refers to master request, get its reference counter */ - ocf_req_get(master); + if (unlikely(!req)) + return NULL; - /* Slave request contains reference to master */ - req->master_io_req = master; + /* Set type of cleaning request */ + req->master_io_req_type = ocf_cleaner_req_type_slave; - /* One more additional slave request, increase global counter - * of requests count - */ - env_atomic_inc(&master->master_remaining); + /* Slave request contains reference to master */ + req->master_io_req = master; - OCF_DEBUG_PARAM(req->cache, + /* One more additional slave request, increase global counter + * of requests count + */ + env_atomic_inc(&master->master_remaining); + + OCF_DEBUG_PARAM(req->cache, "New slave request, count = %u,all requests count = %d", count, env_atomic_read(&master->master_remaining)); - } + return req; } static void _ocf_cleaner_dealloc_req(struct ocf_request *req) { - if (ocf_cleaner_req_type_slave == req->master_io_req_type) { - /* Slave contains reference to the master request, - * release reference counter - */ - struct ocf_request *master = req->master_io_req; - - OCF_DEBUG_MSG(req->cache, "Put master request by slave"); - ocf_req_put(master); - - OCF_DEBUG_MSG(req->cache, "Free slave request"); - } else if (ocf_cleaner_req_type_master == req->master_io_req_type) { - OCF_DEBUG_MSG(req->cache, "Free master request"); - } else { - ENV_BUG(); - } - ctx_data_secure_erase(req->cache->owner, req->data); ctx_data_munlock(req->cache->owner, req->data); ctx_data_free(req->cache->owner, req->data); @@ -169,17 +177,6 @@ static void _ocf_cleaner_set_error(struct ocf_request *req) master->error = -OCF_ERR_IO; } -static int _ocf_cleaner_complete(struct ocf_request *master) -{ - ocf_req_end_t cmpl; - - cmpl = master->master_io_req; - cmpl(master->priv, master->error); - ocf_req_put(master); - - return 
0; -} - static void _ocf_cleaner_complete_req(struct ocf_request *req) { struct ocf_request *master = NULL; @@ -206,15 +203,12 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req) OCF_DEBUG_MSG(req->cache, "All cleaning request completed"); - if (master->complete_queue) { - ocf_req_get(master); - ocf_engine_push_req_front_cb(master, - _ocf_cleaner_complete, true); - } else { - /* Only master contains completion function and priv */ - cmpl = master->master_io_req; - cmpl(master->priv, master->error); - } + /* Only master contains completion function and priv */ + cmpl = master->master_io_req; + cmpl(master->priv, master->error); + + /* For additional get on master allocation */ + ocf_req_put(master); } static void _ocf_cleaner_on_resume(struct ocf_request *req) @@ -339,7 +333,7 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req) /* Update metadata */ for (i = 0; i < req->core_line_count; i++, iter++) { - if (iter->status == LOOKUP_MISS) + if (!iter->flush) continue; if (iter->invalid) { @@ -384,7 +378,7 @@ static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map, if (error) { /* Flush error, set error for all cache line of this core */ for (i = 0; i < req->core_line_count; i++, iter++) { - if (iter->status == LOOKUP_MISS) + if (!iter->flush) continue; if (iter->core_id == map->core_id) @@ -434,7 +428,7 @@ static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req) continue; } - if (iter->status == LOOKUP_MISS) + if (!iter->flush) continue; if (core_id == iter->core_id) @@ -603,7 +597,7 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req) continue; } - if (iter->status == LOOKUP_MISS) + if (!iter->flush) continue; ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, @@ -677,7 +671,7 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req) core = ocf_cache_get_core(cache, iter->core_id); if (!core) continue; - if (iter->status == LOOKUP_MISS) + if (!iter->flush) continue; OCF_DEBUG_PARAM(req->cache, "Cache 
read, line = %u", @@ -722,11 +716,42 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req) return 0; } -static int _ocf_cleaner_fire(struct ocf_request *req) +static int _ocf_cleaner_check_map(struct ocf_request *req) +{ + ocf_core_id_t core_id; + uint64_t core_line; + int i; + + for (i = 0; i < req->core_line_count; ++i) { + ocf_metadata_get_core_info(req->cache, req->map[i].coll_idx, + &core_id, &core_line); + + if (core_id != req->map[i].core_id) + continue; + + if (core_line != req->map[i].core_line) + continue; + + if (!metadata_test_dirty(req->cache, req->map[i].coll_idx)) + continue; + + req->map[i].flush = true; + } + + _ocf_cleaner_fire_cache(req); + + return 0; +} + +static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t count) { int result; - req->engine_handler = _ocf_cleaner_fire_cache; + /* Set counts of cache IOs */ + env_atomic_set(&req->req_remaining, count); + + req->engine_handler = _ocf_cleaner_check_map; + req->core_line_count = count; /* Handle cache lines locks */ result = _ocf_cleaner_cache_line_lock(req); @@ -734,7 +759,7 @@ static int _ocf_cleaner_fire(struct ocf_request *req) if (result >= 0) { if (result == OCF_LOCK_ACQUIRED) { OCF_DEBUG_MSG(req->cache, "Lock acquired"); - _ocf_cleaner_fire_cache(req); + _ocf_cleaner_check_map(req); } else { OCF_DEBUG_MSG(req->cache, "NO Lock"); } @@ -746,67 +771,6 @@ static int _ocf_cleaner_fire(struct ocf_request *req) return result; } -/* Helper function for 'sort' */ -static int _ocf_cleaner_cmp_private(const void *a, const void *b) -{ - struct ocf_map_info *_a = (struct ocf_map_info *)a; - struct ocf_map_info *_b = (struct ocf_map_info *)b; - - static uint32_t step = 0; - - OCF_COND_RESCHED_DEFAULT(step); - - if (_a->core_id == _b->core_id) - return (_a->core_line > _b->core_line) ? 1 : -1; - - return (_a->core_id > _b->core_id) ? 
1 : -1; -} - -/** - * Prepare cleaning request to be fired - * - * @param req cleaning request - * @param i_out number of already filled map requests (remaining to be filled - * with missed - */ -static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out, - bool do_sort) -{ - uint32_t i; - /* Set counts of cache IOs */ - env_atomic_set(&req->req_remaining, i_out); - - /* fill tail of a request with fake MISSes so that it won't - * be cleaned - */ - for (; i_out < req->core_line_count; ++i_out) { - req->map[i_out].core_id = OCF_CORE_MAX; - req->map[i_out].core_line = ULLONG_MAX; - req->map[i_out].status = LOOKUP_MISS; - req->map[i_out].hash = i_out; - } - - if (do_sort) { - /* Sort by core id and core line */ - env_sort(req->map, req->core_line_count, sizeof(req->map[0]), - _ocf_cleaner_cmp_private, NULL); - for (i = 0; i < req->core_line_count; i++) - req->map[i].hash = i; - } - - /* issue actual request */ - return _ocf_cleaner_fire(req); -} - -static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count, - bool low_mem) -{ - if (low_mem || count <= 4096) - return count < 128 ? 
count : 128; - - return 1024; -} - static void _ocf_cleaner_fire_error(struct ocf_request *master, struct ocf_request *req, int err) { @@ -841,51 +805,30 @@ void ocf_cleaner_fire(struct ocf_cache *cache, int err; ocf_core_id_t core_id; uint64_t core_sector; - bool skip; /* Allocate master request */ master = _ocf_cleaner_alloc_master_req(cache, max, attribs); - - if (!master) { - /* Some memory allocation error, try re-allocate request */ - max = _ocf_cleaner_get_req_max_count(count, true); - master = _ocf_cleaner_alloc_master_req(cache, max, attribs); - } - - if (!master) { + if (unlikely(!master)) { attribs->cmpl_fn(attribs->cmpl_context, -OCF_ERR_NO_MEM); return; } req = master; - /* prevent cleaning completion race */ - ocf_req_get(master); env_atomic_inc(&master->master_remaining); - for (i = 0; i < count; i++) { /* when request hasn't yet been allocated or is just issued */ - if (!req) { + if (unlikely(!req)) { if (max > count - i) { /* less than max left */ max = count - i; } req = _ocf_cleaner_alloc_slave_req(master, max, attribs); - } - - if (!req) { - /* Some memory allocation error, - * try re-allocate request - */ - max = _ocf_cleaner_get_req_max_count(max, true); - req = _ocf_cleaner_alloc_slave_req(master, max, attribs); - } - - /* when request allocation failed stop processing */ - if (!req) { - master->error = -OCF_ERR_NO_MEM; - break; + if (unlikely(!req)) { + master->error = -OCF_ERR_NO_MEM; + break; + } } if (attribs->getter(cache, attribs->getter_context, @@ -898,40 +841,6 @@ void ocf_cleaner_fire(struct ocf_cache *cache, ocf_metadata_get_core_info(cache, cache_line, &core_id, &core_sector); - if (attribs->lock_metadata) { - ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, - req->lock_idx, core_id, core_sector); - } - - skip = false; - - /* when line already cleaned - rare condition under heavy - * I/O workload. 
- */ - if (!metadata_test_dirty(cache, cache_line)) { - OCF_DEBUG_MSG(cache, "Not dirty"); - skip = true; - } - - if (!skip && !metadata_test_valid_any(cache, cache_line)) { - OCF_DEBUG_MSG(cache, "No any valid"); - - /* - * Extremely disturbing cache line state - * Cache line (sector) cannot be dirty and not valid - */ - ENV_BUG(); - skip = true; - } - - if (attribs->lock_metadata) { - ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock, - req->lock_idx, core_id, core_sector); - } - - if (skip) - continue; - if (unlikely(!cache->core[core_id].opened)) { OCF_DEBUG_MSG(cache, "Core object inactive"); continue; @@ -945,7 +854,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache, i_out++; if (max == i_out) { - err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort); + err = _ocf_cleaner_do_fire(req, i_out); if (err) { _ocf_cleaner_fire_error(master, req, err); req = NULL; @@ -957,16 +866,17 @@ void ocf_cleaner_fire(struct ocf_cache *cache, } - if (req) { - err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort); + if (req && i_out) { + err = _ocf_cleaner_do_fire(req, i_out); if (err) _ocf_cleaner_fire_error(master, req, err); req = NULL; } - /* prevent cleaning completion race */ _ocf_cleaner_complete_req(master); - ocf_req_put(master); + + if (req && !i_out) + _ocf_cleaner_dealloc_req(req); } static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache, @@ -1023,9 +933,10 @@ static void _ocf_cleaner_swap(void *a, void *b, int size) *_b = t; } -void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num) +void ocf_cleaner_sort_flush_data(struct flush_data *flush_data, uint32_t count) { - env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap); + env_sort(flush_data, count, sizeof(*flush_data), + _ocf_cleaner_cmp, _ocf_cleaner_swap); } void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl, @@ -1034,9 +945,8 @@ void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl, int i; for (i = 0; i < num; i++) { - 
env_sort(fctbl[i].flush_data, fctbl[i].count, - sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp, - _ocf_cleaner_swap); + ocf_cleaner_sort_flush_data(fctbl[i].flush_data, + fctbl[i].count); } } diff --git a/src/utils/utils_cleaner.h b/src/utils/utils_cleaner.h index f92d57c..7c90fd5 100644 --- a/src/utils/utils_cleaner.h +++ b/src/utils/utils_cleaner.h @@ -28,9 +28,7 @@ typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache, */ struct ocf_cleaner_attribs { uint8_t lock_cacheline : 1; /*!< Cleaner to lock cachelines on its own */ - uint8_t lock_metadata : 1; /*!< Cleaner to lock metadata on its own */ - uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */ uint8_t cmpl_queue : 1; /*!< Completion needs to be called from the queue context */ @@ -122,15 +120,15 @@ int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache, /** * @brief Sort flush data by core sector * - * @param tbl Flush data to sort - * @param num Number of entries in tbl + * @param flush_data Flush data to sort + * @param count Number of entries in flush_data */ -void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num); +void ocf_cleaner_sort_flush_data(struct flush_data *flush_data, uint32_t count); /** * @brief Sort flush data in all flush containters * - * @param tbl Flush containers to sort + * @param fctbl Flush containers to sort * @param num Number of entries in fctbl */ void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,