From 5684b53d9b1874f71c709ddee8fb0cfc96eea799 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 23 Sep 2019 11:08:27 -0400 Subject: [PATCH 1/7] Adding collision table locks Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_cache_line_concurrency.c | 6 +- src/concurrency/ocf_metadata_concurrency.c | 74 +++++++++++++++-- src/concurrency/ocf_metadata_concurrency.h | 11 ++- src/metadata/metadata_collision.h | 12 +++ src/metadata/metadata_hash.c | 86 +++++++++++++++----- src/metadata/metadata_raw.c | 14 ++++ src/metadata/metadata_raw.h | 14 ++++ src/metadata/metadata_raw_dynamic.c | 10 +++ src/metadata/metadata_raw_dynamic.h | 5 ++ src/metadata/metadata_structs.h | 8 ++ src/mngt/ocf_mngt_cache.c | 3 +- 11 files changed, 210 insertions(+), 33 deletions(-) diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c index 5b50ee6..c0cb54c 100644 --- a/src/concurrency/ocf_cache_line_concurrency.c +++ b/src/concurrency/ocf_cache_line_concurrency.c @@ -71,6 +71,8 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache) int error = 0; struct ocf_cache_line_concurrency *c; char name[ALLOCATOR_NAME_MAX]; + ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries( + cache); ENV_BUG_ON(cache->device->concurrency.cache_line); @@ -85,8 +87,8 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache) cache->device->concurrency.cache_line = c; OCF_REALLOC_INIT(&c->access, &c->access_limit); - OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), - cache->device->collision_table_entries, &c->access_limit); + OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), line_entries, + &c->access_limit); if (!c->access) { error = __LINE__; diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c index 2d0a551..058271b 100644 --- a/src/concurrency/ocf_metadata_concurrency.c +++ b/src/concurrency/ocf_metadata_concurrency.c @@ -22,19 +22,44 @@ void 
ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock) int ocf_metadata_concurrency_attached_init( struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache, - uint64_t hash_table_entries) + uint64_t hash_table_entries, uint32_t colision_table_pages) { uint64_t i; + int err = 0; - metadata_lock->cache = cache; - metadata_lock->num_hash_entries = hash_table_entries; metadata_lock->hash = env_vzalloc(sizeof(env_rwsem) * hash_table_entries); - if (!metadata_lock->hash) + metadata_lock->collision_pages = env_vzalloc(sizeof(env_rwsem) * + colision_table_pages); + if (!metadata_lock->hash || + !metadata_lock->collision_pages) { + env_vfree(metadata_lock->hash); + env_vfree(metadata_lock->collision_pages); + metadata_lock->hash = NULL; + metadata_lock->collision_pages = NULL; return -OCF_ERR_NO_MEM; + } for (i = 0; i < hash_table_entries; i++) env_rwsem_init(&metadata_lock->hash[i]); + for (i = 0; i < colision_table_pages; i++) { + err = env_rwsem_init(&metadata_lock->collision_pages[i]); + if (err) + break; + } + + if (err) { + while (i--) + env_rwsem_destroy(&metadata_lock->collision_pages[i]); + env_vfree(metadata_lock->collision_pages); + metadata_lock->collision_pages = NULL; + ocf_metadata_concurrency_attached_deinit(metadata_lock); + return err; + } + + metadata_lock->cache = cache; + metadata_lock->num_hash_entries = hash_table_entries; + metadata_lock->num_collision_pages = colision_table_pages; return 0; } @@ -44,10 +69,21 @@ void ocf_metadata_concurrency_attached_deinit( { uint64_t i; - for (i = 0; i < metadata_lock->num_hash_entries; i++) - env_rwsem_destroy(&metadata_lock->hash[i]); + if (metadata_lock->hash) { + for (i = 0; i < metadata_lock->num_hash_entries; i++) + env_rwsem_destroy(&metadata_lock->hash[i]); + env_vfree(metadata_lock->hash); + metadata_lock->hash = NULL; + metadata_lock->num_hash_entries = 0; + } - env_vfree(metadata_lock->hash); + if (metadata_lock->collision_pages) { + for (i = 0; i < 
metadata_lock->num_collision_pages; i++) + env_rwsem_destroy(&metadata_lock->collision_pages[i]); + env_vfree(metadata_lock->collision_pages); + metadata_lock->collision_pages = NULL; + metadata_lock->num_collision_pages = 0; + } } void ocf_metadata_start_exclusive_access( @@ -266,3 +302,27 @@ void ocf_req_hash_unlock_wr(struct ocf_request *req) } ocf_metadata_end_shared_access(&req->cache->metadata.lock); } + +void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page) +{ + env_rwsem_down_read(&metadata_lock->collision_pages[page]); +} + +void ocf_collision_end_shared_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page) +{ + env_rwsem_up_read(&metadata_lock->collision_pages[page]); +} + +void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page) +{ + env_rwsem_down_write(&metadata_lock->collision_pages[page]); +} + +void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page) +{ + env_rwsem_up_write(&metadata_lock->collision_pages[page]); +} diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h index 0875237..ffef123 100644 --- a/src/concurrency/ocf_metadata_concurrency.h +++ b/src/concurrency/ocf_metadata_concurrency.h @@ -16,7 +16,7 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock); int ocf_metadata_concurrency_attached_init( struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache, - uint64_t hash_table_entries); + uint64_t hash_table_entries, uint32_t colision_table_pages); void ocf_metadata_concurrency_attached_deinit( struct ocf_metadata_lock *metadata_lock); @@ -111,4 +111,13 @@ void ocf_req_hash_lock_wr(struct ocf_request *req); void ocf_req_hash_unlock_wr(struct ocf_request *req); void ocf_req_hash_lock_upgrade(struct ocf_request *req); +/* collision table page lock interface */ +void ocf_collision_start_shared_access(struct ocf_metadata_lock 
*metadata_lock, + uint32_t page); +void ocf_collision_end_shared_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page); +void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page); +void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock, + uint32_t page); #endif diff --git a/src/metadata/metadata_collision.h b/src/metadata/metadata_collision.h index 05e6664..33459b9 100644 --- a/src/metadata/metadata_collision.h +++ b/src/metadata/metadata_collision.h @@ -105,4 +105,16 @@ void ocf_metadata_add_to_collision(struct ocf_cache *cache, void ocf_metadata_remove_from_collision(struct ocf_cache *cache, ocf_cache_line_t line, ocf_part_id_t part_id); +static inline void ocf_metadata_start_collision_shared_access( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + cache->metadata.iface.start_collision_shared_access(cache, line); +} + +static inline void ocf_metadata_end_collision_shared_access( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + cache->metadata.iface.end_collision_shared_access(cache, line); +} + #endif /* METADATA_COLLISION_H_ */ diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c index 8bb59f3..b1c1737 100644 --- a/src/metadata/metadata_hash.c +++ b/src/metadata/metadata_hash.c @@ -414,6 +414,8 @@ static void ocf_metadata_hash_deinit_variable_size(struct ocf_cache *cache) OCF_DEBUG_TRACE(cache); + ocf_metadata_concurrency_attached_deinit(&cache->metadata.lock); + /* * De initialize RAW types */ @@ -982,29 +984,30 @@ finalize: * Hash De-Init also contains RAW deinitialization */ ocf_metadata_hash_deinit_variable_size(cache); - } else { - cache->device->runtime_meta = METADATA_MEM_POOL(ctrl, - metadata_segment_sb_runtime); - - cache->device->collision_table_entries = ctrl->cachelines; - - cache->device->hash_table_entries = - ctrl->raw_desc[metadata_segment_hash].entries; - - cache->device->metadata_offset = ctrl->count_pages * PAGE_SIZE; - - 
cache->conf_meta->cachelines = ctrl->cachelines; - cache->conf_meta->line_size = cache_line_size; - - ocf_metadata_hash_raw_info(cache, ctrl); - - ocf_cache_log(cache, log_info, "Cache line size: %llu kiB\n", - settings->size / KiB); - - ocf_cache_log(cache, log_info, "Metadata capacity: %llu MiB\n", - (uint64_t)ocf_metadata_size_of(cache) / MiB); + return result; } + cache->device->runtime_meta = METADATA_MEM_POOL(ctrl, + metadata_segment_sb_runtime); + + cache->device->collision_table_entries = ctrl->cachelines; + + cache->device->hash_table_entries = + ctrl->raw_desc[metadata_segment_hash].entries; + + cache->device->metadata_offset = ctrl->count_pages * PAGE_SIZE; + + cache->conf_meta->cachelines = ctrl->cachelines; + cache->conf_meta->line_size = cache_line_size; + + ocf_metadata_hash_raw_info(cache, ctrl); + + ocf_cache_log(cache, log_info, "Cache line size: %llu kiB\n", + settings->size / KiB); + + ocf_cache_log(cache, log_info, "Metadata capacity: %llu MiB\n", + (uint64_t)ocf_metadata_size_of(cache) / MiB); + /* * Self test of metadata */ @@ -1029,7 +1032,18 @@ finalize: "OCF metadata self-test ERROR\n"); } - return result; + result = ocf_metadata_concurrency_attached_init(&cache->metadata.lock, + cache, ctrl->raw_desc[metadata_segment_hash].entries, + (uint32_t)ctrl->raw_desc[metadata_segment_collision]. 
+ ssd_pages); + if (result) { + ocf_cache_log(cache, log_err, "Failed to initialize attached " + "metadata concurrency\n"); + ocf_metadata_hash_deinit_variable_size(cache); + return result; + } + + return 0; } static inline void _ocf_init_collision_entry(struct ocf_cache *cache, @@ -2533,6 +2547,30 @@ static void ocf_metadata_hash_get_collision_info( } } +void ocf_metadata_hash_start_collision_shared_access(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + struct ocf_metadata_raw *raw = + &ctrl->raw_desc[metadata_segment_collision]; + uint32_t page = ocf_metadata_raw_page(raw, line); + + ocf_collision_start_shared_access(&cache->metadata.lock, page); +} + +void ocf_metadata_hash_end_collision_shared_access(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + struct ocf_metadata_raw *raw = + &ctrl->raw_desc[metadata_segment_collision]; + uint32_t page = ocf_metadata_raw_page(raw, line); + + ocf_collision_start_shared_access(&cache->metadata.lock, page); +} + /******************************************************************************* * Partition ******************************************************************************/ @@ -2682,6 +2720,10 @@ static const struct ocf_metadata_iface metadata_hash_iface = { .set_collision_info = ocf_metadata_hash_set_collision_info, .set_collision_next = ocf_metadata_hash_set_collision_next, .set_collision_prev = ocf_metadata_hash_set_collision_prev, + .start_collision_shared_access = + ocf_metadata_hash_start_collision_shared_access, + .end_collision_shared_access = + ocf_metadata_hash_end_collision_shared_access, /* * Partition Info diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c index 7c0e3ca..633e64b 100644 --- a/src/metadata/metadata_raw.c +++ b/src/metadata/metadata_raw.c @@ -149,6 
+149,16 @@ static uint32_t _raw_ram_checksum(ocf_cache_t cache, return crc; } +/* + * RAM Implementation - Entry page number + */ +uint32_t _raw_ram_page(struct ocf_metadata_raw *raw, uint32_t entry) +{ + ENV_BUG_ON(entry >= raw->entries); + + return _RAW_RAM_PAGE(raw, entry); +} + /* * RAM Implementation - Get entry */ @@ -541,6 +551,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = { .size_of = _raw_ram_size_of, .size_on_ssd = _raw_ram_size_on_ssd, .checksum = _raw_ram_checksum, + .page = _raw_ram_page, .get = _raw_ram_get, .set = _raw_ram_set, .access = _raw_ram_access, @@ -555,6 +566,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = { .size_of = raw_dynamic_size_of, .size_on_ssd = raw_dynamic_size_on_ssd, .checksum = raw_dynamic_checksum, + .page = raw_dynamic_page, .get = raw_dynamic_get, .set = raw_dynamic_set, .access = raw_dynamic_access, @@ -569,6 +581,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = { .size_of = _raw_ram_size_of, .size_on_ssd = raw_volatile_size_on_ssd, .checksum = raw_volatile_checksum, + .page = _raw_ram_page, .get = _raw_ram_get, .set = _raw_ram_set, .access = _raw_ram_access, @@ -583,6 +596,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = { .size_of = _raw_ram_size_of, .size_on_ssd = _raw_ram_size_on_ssd, .checksum = _raw_ram_checksum, + .page = _raw_ram_page, .get = _raw_ram_get, .set = _raw_ram_set, .access = _raw_ram_access, diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h index 2da5137..66b10ad 100644 --- a/src/metadata/metadata_raw.h +++ b/src/metadata/metadata_raw.h @@ -102,6 +102,7 @@ struct raw_iface { uint32_t (*checksum)(ocf_cache_t cache, struct ocf_metadata_raw *raw); + uint32_t (*page)(struct ocf_metadata_raw *raw, uint32_t entry); int (*get)(ocf_cache_t cache, struct ocf_metadata_raw *raw, uint32_t entry, void *data); @@ -183,6 +184,19 @@ static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache, return 
raw->iface->checksum(cache, raw); } +/** + * @brief Calculate entry page index + * + * @param raw - RAW descriptor + * @param entry - Entry number + * @return Page index + */ +static inline uint32_t ocf_metadata_raw_page(struct ocf_metadata_raw* raw, + uint32_t entry) +{ + return raw->iface->page(raw, entry); +} + /** * @brief Get specified element of metadata * diff --git a/src/metadata/metadata_raw_dynamic.c b/src/metadata/metadata_raw_dynamic.c index e45ade1..9ef666b 100644 --- a/src/metadata/metadata_raw_dynamic.c +++ b/src/metadata/metadata_raw_dynamic.c @@ -219,6 +219,16 @@ uint32_t raw_dynamic_checksum(ocf_cache_t cache, return crc; } +/* + * RAM DYNAMIC Implementation - Entry page number + */ +uint32_t raw_dynamic_page(struct ocf_metadata_raw *raw, uint32_t entry) +{ + ENV_BUG_ON(entry >= raw->entries); + + return _RAW_DYNAMIC_PAGE(raw, entry); +} + /* * RAM DYNAMIC Implementation - Get */ diff --git a/src/metadata/metadata_raw_dynamic.h b/src/metadata/metadata_raw_dynamic.h index 94c0657..b8f541b 100644 --- a/src/metadata/metadata_raw_dynamic.h +++ b/src/metadata/metadata_raw_dynamic.h @@ -40,6 +40,11 @@ uint32_t raw_dynamic_size_on_ssd(struct ocf_metadata_raw *raw); uint32_t raw_dynamic_checksum(ocf_cache_t cache, struct ocf_metadata_raw *raw); +/* + * RAM DYNAMIC Implementation - Entry page number + */ +uint32_t raw_dynamic_page(struct ocf_metadata_raw *raw, uint32_t entry); + /* * RAW DYNAMIC - Get specified entry */ diff --git a/src/metadata/metadata_structs.h b/src/metadata/metadata_structs.h index 22b3703..9d6e681 100644 --- a/src/metadata/metadata_structs.h +++ b/src/metadata/metadata_structs.h @@ -360,6 +360,12 @@ struct ocf_metadata_iface { void (*set_collision_prev)(struct ocf_cache *cache, ocf_cache_line_t line, ocf_cache_line_t prev); + void (*start_collision_shared_access)(struct ocf_cache *cache, + ocf_cache_line_t line); + + void (*end_collision_shared_access)(struct ocf_cache *cache, + ocf_cache_line_t line); + void 
(*get_partition_info)(struct ocf_cache *cache, ocf_cache_line_t line, ocf_part_id_t *part_id, ocf_cache_line_t *next_line, @@ -434,7 +440,9 @@ struct ocf_metadata_lock env_rwlock status; /*!< Fast lock for status bits */ env_spinlock eviction; /*!< Fast lock for eviction policy */ env_rwsem *hash; /*!< Hash bucket locks */ + env_rwsem *collision_pages; /*!< Collision table page locks */ uint32_t num_hash_entries; /*!< Hash bucket count */ + uint32_t num_collision_pages; /*!< Collision table page count */ ocf_cache_t cache; /*!< Parent cache object */ }; diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 84e2084..9e508b6 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -980,7 +980,8 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline, context->flags.attached_metadata_inited = true; if (ocf_metadata_concurrency_attached_init(&cache->metadata.lock, - cache, cache->device->hash_table_entries)) { + cache, cache->device->hash_table_entries, + ocf_metadata_get_num_collision_pages(cache))) { ocf_cache_log(cache, log_err, "Failed to initialize attached " "metadata concurrency\n"); OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL); From be3b402162bb6d4289f931f5a678a287b8ec523a Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 23 Sep 2019 11:08:27 -0400 Subject: [PATCH 2/7] Synchronization of collision table Adding synchronization around metadata collision segment pages. This part of metadata is modified when cacheline is mapped/unmapped and when dirty status changes. Synchronization on page level is required on top of cacheline and hash bucket locks to assure metadata flush always reads consistent state when copying entire collision table memory page. 
Signed-off-by: Adam Rutkowski --- src/engine/engine_common.c | 10 ++++++++ src/eviction/lru.c | 4 ++++ src/metadata/metadata.c | 4 ++-- src/metadata/metadata_hash.c | 2 +- src/metadata/metadata_io.c | 4 ++-- src/metadata/metadata_misc.c | 4 ++++ src/mngt/ocf_mngt_cache.c | 10 -------- src/utils/utils_cache_line.h | 44 ++++++++++++++++++++++++++++++++++++ 8 files changed, 67 insertions(+), 15 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index 4cdab86..55e6624 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -260,8 +260,10 @@ static void ocf_engine_map_cache_line(struct ocf_request *req, ocf_metadata_add_to_partition(cache, part_id, *cache_line); /* Add the block to the corresponding collision list */ + ocf_metadata_start_collision_shared_access(cache, *cache_line); ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index, *cache_line); + ocf_metadata_end_collision_shared_access(cache, *cache_line); ocf_eviction_init_cache_line(cache, *cache_line, part_id); @@ -295,9 +297,17 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache, case LOOKUP_MAPPED: OCF_DEBUG_RQ(req, "Canceling cache line %u", entry->coll_idx); + + ocf_metadata_start_collision_shared_access(cache, + entry->coll_idx); + set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache), entry->coll_idx); + + ocf_metadata_end_collision_shared_access(cache, + entry->coll_idx); + break; default: diff --git a/src/eviction/lru.c b/src/eviction/lru.c index 2bf9122..ebbeaba 100644 --- a/src/eviction/lru.c +++ b/src/eviction/lru.c @@ -432,9 +432,13 @@ uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue, evp_lru_zero_line(cache, io_queue, curr_cline); } else { + ocf_metadata_start_collision_shared_access(cache, + curr_cline); set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache), curr_cline); + ocf_metadata_end_collision_shared_access(cache, + curr_cline); /* Goto next item. 
*/ i++; diff --git a/src/metadata/metadata.c b/src/metadata/metadata.c index ddcd247..ba74cbb 100644 --- a/src/metadata/metadata.c +++ b/src/metadata/metadata.c @@ -113,9 +113,9 @@ ocf_cache_line_t ocf_metadata_get_cachelines_count(ocf_cache_t cache) void ocf_metadata_flush_all(ocf_cache_t cache, ocf_metadata_end_t cmpl, void *priv) { - ocf_metadata_start_exclusive_access(&cache->metadata.lock); + ocf_metadata_start_shared_access(&cache->metadata.lock); cache->metadata.iface.flush_all(cache, cmpl, priv); - ocf_metadata_end_exclusive_access(&cache->metadata.lock); + ocf_metadata_end_shared_access(&cache->metadata.lock); } void ocf_metadata_load_all(ocf_cache_t cache, diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c index b1c1737..a33e944 100644 --- a/src/metadata/metadata_hash.c +++ b/src/metadata/metadata_hash.c @@ -2568,7 +2568,7 @@ void ocf_metadata_hash_end_collision_shared_access(struct ocf_cache *cache, &ctrl->raw_desc[metadata_segment_collision]; uint32_t page = ocf_metadata_raw_page(raw, line); - ocf_collision_start_shared_access(&cache->metadata.lock, page); + ocf_collision_end_shared_access(&cache->metadata.lock, page); } /******************************************************************************* diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index 859c5e2..a00c674 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -226,9 +226,9 @@ static int ocf_restart_meta_io(struct ocf_request *req) int ret; /* Fill with the latest metadata. 
*/ - /* TODO: synchronize with concurrent metadata io and hash bucket locks - */ + ocf_metadata_start_shared_access(&cache->metadata.lock); metadata_io_req_fill(meta_io_req); + ocf_metadata_end_shared_access(&cache->metadata.lock); io = ocf_new_cache_io(cache, req->io_queue, PAGES_TO_BYTES(meta_io_req->page), diff --git a/src/metadata/metadata_misc.c b/src/metadata/metadata_misc.c index ff17e77..b51a147 100644 --- a/src/metadata/metadata_misc.c +++ b/src/metadata/metadata_misc.c @@ -107,6 +107,8 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache, static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache, uint32_t cache_line) { + ocf_metadata_start_collision_shared_access(cache, cache_line); + set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache), cache_line); @@ -114,6 +116,8 @@ static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache, * This is especially for removing inactive core */ metadata_clear_dirty(cache, cache_line); + + ocf_metadata_end_collision_shared_access(cache, cache_line); } /* caller must hold metadata lock diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 9e508b6..d48a310 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -976,17 +976,8 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline, cache->conf_meta->metadata_layout)) { OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL); } - context->flags.attached_metadata_inited = true; - if (ocf_metadata_concurrency_attached_init(&cache->metadata.lock, - cache, cache->device->hash_table_entries, - ocf_metadata_get_num_collision_pages(cache))) { - ocf_cache_log(cache, log_err, "Failed to initialize attached " - "metadata concurrency\n"); - OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL); - } - cache->freelist = ocf_freelist_init(cache); if (!cache->freelist) OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL); @@ -1729,7 +1720,6 @@ static void 
_ocf_mngt_cache_unplug_complete(void *priv, int error) ocf_volume_close(&cache->device->volume); - ocf_metadata_concurrency_attached_deinit(&cache->metadata.lock); ocf_metadata_deinit_variable_size(cache); ocf_concurrency_deinit(cache); ocf_freelist_deinit(cache->freelist); diff --git a/src/utils/utils_cache_line.h b/src/utils/utils_cache_line.h index e9329f5..d91de44 100644 --- a/src/utils/utils_cache_line.h +++ b/src/utils/utils_cache_line.h @@ -72,6 +72,10 @@ static inline uint64_t ocf_lines_2_bytes(struct ocf_cache *cache, /** * @brief Set cache line invalid * + * @note Collision page must be locked by the caller (either exclusive access + * to collision table page OR write lock on metadata hash bucket combined with + * shared access to the collision page) + * * @param cache Cache instance * @param start_bit Start bit of cache line for which state will be set * @param end_bit End bit of cache line for which state will be set @@ -85,6 +89,10 @@ void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit, /** * @brief Set cache line invalid without flush * + * @note Collision page must be locked by the caller (either exclusive access + * to collision table page OR write lock on metadata hash bucket combined with + * shared access to the collision page) + * * @param cache Cache instance * @param start_bit Start bit of cache line for which state will be set * @param end_bit End bit of cache line for which state will be set @@ -96,6 +104,10 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit, /** * @brief Set cache line valid * + * @note Collision page must be locked by the caller (either exclusive access + * to collision table page OR write lock on metadata hash bucket combined with + * shared access to the collision page) + * * @param cache Cache instance * @param start_bit Start bit of cache line for which state will be set * @param end_bit End bit of cache line for which state will be set @@ -108,6 +120,10 @@ void 
set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit, /** * @brief Set cache line clean * + * @note Collision page must be locked by the caller (either exclusive access + * to collision table page OR write lock on metadata hash bucket combined with + * shared access to the collision page) + * * @param cache Cache instance * @param start_bit Start bit of cache line for which state will be set * @param end_bit End bit of cache line for which state will be set @@ -120,6 +136,10 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit, /** * @brief Set cache line dirty * + * @note Collision page must be locked by the caller (either exclusive access + * to collision table page OR write lock on metadata hash bucket combined with + * shared access to the collision page) + * * @param cache Cache instance * @param start_bit Start bit of cache line for which state will be set * @param end_bit End bit of cache line for which state will be set @@ -163,6 +183,10 @@ static inline void ocf_purge_eviction_policy(struct ocf_cache *cache, /** * @brief Set cache line clean and invalid and remove form lists * + * @note Collision page must be locked by the caller (either exclusive access + * to collision table page OR write lock on metadata hash bucket combined with + * shared access to the collision page) + * * @param cache Cache instance * @param start Start bit of range in cache line to purge * @param end End bit of range in cache line to purge @@ -224,8 +248,12 @@ static inline void ocf_purge_map_info(struct ocf_request *req) ocf_line_sectors(cache); } + ocf_metadata_start_collision_shared_access(cache, map[map_idx]. + coll_idx); _ocf_purge_cache_line_sec(cache, start_bit, end_bit, req, map_idx); + ocf_metadata_end_collision_shared_access(cache, map[map_idx]. 
+ coll_idx); } } @@ -273,7 +301,11 @@ static inline void ocf_set_valid_map_info(struct ocf_request *req) start_bit = ocf_map_line_start_sector(req, map_idx); end_bit = ocf_map_line_end_sector(req, map_idx); + ocf_metadata_start_collision_shared_access(cache, map[map_idx]. + coll_idx); set_cache_line_valid(cache, start_bit, end_bit, req, map_idx); + ocf_metadata_end_collision_shared_access(cache, map[map_idx]. + coll_idx); } } @@ -284,6 +316,7 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req) uint8_t end_bit; struct ocf_cache *cache = req->cache; uint32_t count = req->core_line_count; + struct ocf_map_info *map = req->map; /* Set valid bits for sectors on the basis of map info * @@ -295,7 +328,12 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req) for (map_idx = 0; map_idx < count; map_idx++) { start_bit = ocf_map_line_start_sector(req, map_idx); end_bit = ocf_map_line_end_sector(req, map_idx); + + ocf_metadata_start_collision_shared_access(cache, map[map_idx]. + coll_idx); set_cache_line_dirty(cache, start_bit, end_bit, req, map_idx); + ocf_metadata_end_collision_shared_access(cache, map[map_idx]. + coll_idx); } } @@ -306,6 +344,7 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req) uint8_t end_bit; struct ocf_cache *cache = req->cache; uint32_t count = req->core_line_count; + struct ocf_map_info *map = req->map; /* Set valid bits for sectors on the basis of map info * @@ -317,7 +356,12 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req) for (map_idx = 0; map_idx < count; map_idx++) { start_bit = ocf_map_line_start_sector(req, map_idx); end_bit = ocf_map_line_end_sector(req, map_idx); + + ocf_metadata_start_collision_shared_access(cache, map[map_idx]. + coll_idx); set_cache_line_clean(cache, start_bit, end_bit, req, map_idx); + ocf_metadata_end_collision_shared_access(cache, map[map_idx]. 
+ coll_idx); } } From 41d3542952aa6e8c01f99c72155852a6355ed6f2 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Wed, 25 Sep 2019 00:01:57 -0400 Subject: [PATCH 3/7] Lock collision page in metadata flush Signed-off-by: Adam Rutkowski --- src/metadata/metadata_hash.c | 33 +++++++++++++++++++++++++++-- src/metadata/metadata_raw.c | 21 ++++++++++++++++-- src/metadata/metadata_raw.h | 17 +++++++++++++++ src/metadata/metadata_raw_dynamic.c | 9 ++++++++ src/metadata/metadata_raw_dynamic.h | 2 ++ 5 files changed, 78 insertions(+), 4 deletions(-) diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c index a33e944..ae483c6 100644 --- a/src/metadata/metadata_hash.c +++ b/src/metadata/metadata_hash.c @@ -542,7 +542,8 @@ int ocf_metadata_hash_init(struct ocf_cache *cache, metadata->iface_priv = ctrl; for (i = 0; i < metadata_segment_fixed_size_max; i++) { - result |= ocf_metadata_raw_init(cache, &(ctrl->raw_desc[i])); + result |= ocf_metadata_raw_init(cache, NULL, NULL, + &(ctrl->raw_desc[i])); if (result) break; } @@ -880,6 +881,23 @@ exit: ocf_metadata_query_cores_end(context, err); } +static void ocf_metadata_hash_flush_lock_collision_page(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, uint32_t page) + +{ + ocf_collision_start_exclusive_access(&cache->metadata.lock, + page); +} + +static void ocf_metadata_hash_flush_unlock_collision_page( + struct ocf_cache *cache, struct ocf_metadata_raw *raw, + uint32_t page) + +{ + ocf_collision_end_exclusive_access(&cache->metadata.lock, + page); +} + /* * Initialize hash metadata interface */ @@ -893,6 +911,7 @@ static int ocf_metadata_hash_init_variable_size(struct ocf_cache *cache, struct ocf_metadata_hash_ctrl *ctrl = NULL; struct ocf_cache_line_settings *settings = (struct ocf_cache_line_settings *)&cache->metadata.settings; + ocf_flush_page_synch_t lock_page, unlock_page; OCF_DEBUG_TRACE(cache); @@ -949,7 +968,17 @@ static int ocf_metadata_hash_init_variable_size(struct ocf_cache *cache, */ for (i = 
metadata_segment_variable_size_start; i < metadata_segment_max; i++) { - result |= ocf_metadata_raw_init(cache, &(ctrl->raw_desc[i])); + if (i == metadata_segment_collision) { + lock_page = + ocf_metadata_hash_flush_lock_collision_page; + unlock_page = + ocf_metadata_hash_flush_unlock_collision_page; + } else { + lock_page = unlock_page = NULL; + } + + result |= ocf_metadata_raw_init(cache, lock_page, unlock_page, + &(ctrl->raw_desc[i])); if (result) goto finalize; diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c index 633e64b..5a57b27 100644 --- a/src/metadata/metadata_raw.c +++ b/src/metadata/metadata_raw.c @@ -90,7 +90,9 @@ static int _raw_ram_deinit(ocf_cache_t cache, * RAM Implementation - Initialize */ static int _raw_ram_init(ocf_cache_t cache, - struct ocf_metadata_raw *raw) + ocf_flush_page_synch_t lock_page_pfn, + ocf_flush_page_synch_t unlock_page_pfn, + struct ocf_metadata_raw *raw) { size_t mem_pool_size; @@ -105,6 +107,9 @@ static int _raw_ram_init(ocf_cache_t cache, return -OCF_ERR_NO_MEM; ENV_BUG_ON(env_memset(raw->mem_pool, mem_pool_size, 0)); + raw->lock_page = lock_page_pfn; + raw->unlock_page = unlock_page_pfn; + return 0; } @@ -286,7 +291,12 @@ static int _raw_ram_flush_all_fill(ocf_cache_t cache, OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page); + if (raw->lock_page) + raw->lock_page(cache, raw, raw_page); ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size); + if (raw->unlock_page) + raw->unlock_page(cache, raw, raw_page); + ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size); return 0; @@ -398,7 +408,12 @@ static int _raw_ram_flush_do_asynch_fill(ocf_cache_t cache, OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page); + if (raw->lock_page) + raw->lock_page(cache, raw, raw_page); ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size); + if (raw->unlock_page) + raw->unlock_page(cache, raw, raw_page); + ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size); 
return 0; @@ -612,13 +627,15 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = { ******************************************************************************/ int ocf_metadata_raw_init(ocf_cache_t cache, + ocf_flush_page_synch_t lock_page_pfn, + ocf_flush_page_synch_t unlock_page_pfn, struct ocf_metadata_raw *raw) { ENV_BUG_ON(raw->raw_type < metadata_raw_type_min); ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max); raw->iface = &(IRAW[raw->raw_type]); - return raw->iface->init(cache, raw); + return raw->iface->init(cache, lock_page_pfn, unlock_page_pfn, raw); } int ocf_metadata_raw_deinit(ocf_cache_t cache, diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h index 66b10ad..c9d46fb 100644 --- a/src/metadata/metadata_raw.h +++ b/src/metadata/metadata_raw.h @@ -42,6 +42,14 @@ enum ocf_metadata_raw_type { metadata_raw_type_min = metadata_raw_type_ram /*!< MAX */ }; +struct ocf_metadata_raw; + +/** + * @brief Container page lock/unlock callback + */ +typedef void (*ocf_flush_page_synch_t)(ocf_cache_t cache, + struct ocf_metadata_raw *raw, uint32_t page); + /** * @brief RAW instance descriptor */ @@ -75,6 +83,9 @@ struct ocf_metadata_raw { size_t mem_pool_limit; /*! 
Current memory pool size (limit) */ void *priv; /*!< Private data - context */ + + ocf_flush_page_synch_t lock_page; /*!< Page lock callback */ + ocf_flush_page_synch_t unlock_page; /*!< Page unlock callback */ }; /** @@ -82,6 +93,8 @@ struct ocf_metadata_raw { */ struct raw_iface { int (*init)(ocf_cache_t cache, + ocf_flush_page_synch_t lock_page_pfn, + ocf_flush_page_synch_t unlock_page_pfn, struct ocf_metadata_raw *raw); int (*deinit)(ocf_cache_t cache, @@ -131,10 +144,14 @@ struct raw_iface { * @brief Initialize RAW instance * * @param cache - Cache instance + * @param lock_page_pfn - Optional page lock callback + * @param unlock_page_pfn - Optional page unlock callback * @param raw - RAW descriptor * @return 0 - Operation success, otherwise error */ int ocf_metadata_raw_init(ocf_cache_t cache, + ocf_flush_page_synch_t lock_page_pfn, + ocf_flush_page_synch_t unlock_page_pfn, struct ocf_metadata_raw *raw); /** diff --git a/src/metadata/metadata_raw_dynamic.c b/src/metadata/metadata_raw_dynamic.c index 9ef666b..d558b6c 100644 --- a/src/metadata/metadata_raw_dynamic.c +++ b/src/metadata/metadata_raw_dynamic.c @@ -141,6 +141,8 @@ int raw_dynamic_deinit(ocf_cache_t cache, * RAM DYNAMIC Implementation - Initialize */ int raw_dynamic_init(ocf_cache_t cache, + ocf_flush_page_synch_t lock_page_pfn, + ocf_flush_page_synch_t unlock_page_pfn, struct ocf_metadata_raw *raw) { struct _raw_ctrl *ctrl; @@ -164,6 +166,9 @@ int raw_dynamic_init(ocf_cache_t cache, raw->priv = ctrl; + raw->lock_page = lock_page_pfn; + raw->unlock_page = unlock_page_pfn; + return 0; } @@ -504,8 +509,12 @@ static int raw_dynamic_flush_all_fill(ocf_cache_t cache, if (ctrl->pages[raw_page]) { OCF_DEBUG_PARAM(cache, "Page = %u", raw_page); + if (raw->lock_page) + raw->lock_page(cache, raw, raw_page); ctx_data_wr_check(cache->owner, data, ctrl->pages[raw_page], PAGE_SIZE); + if (raw->unlock_page) + raw->unlock_page(cache, raw, raw_page); } else { OCF_DEBUG_PARAM(cache, "Zero fill, Page = %u", raw_page); /*
Page was not allocated before set only zeros */ diff --git a/src/metadata/metadata_raw_dynamic.h b/src/metadata/metadata_raw_dynamic.h index b8f541b..facddf9 100644 --- a/src/metadata/metadata_raw_dynamic.h +++ b/src/metadata/metadata_raw_dynamic.h @@ -15,6 +15,8 @@ * RAW DYNAMIC - Initialize */ int raw_dynamic_init(ocf_cache_t cache, + ocf_flush_page_synch_t lock_page_pfn, + ocf_flush_page_synch_t unlock_page_pfn, struct ocf_metadata_raw *raw); /* From 5e2847432223b6dc5bdb25a1f18a5217afdd8127 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 23 Sep 2019 11:08:27 -0400 Subject: [PATCH 4/7] Adding partition locks Adding locks to assure partition list consistency. Partition lists are typically modified under cacheline or hash bucket lock. This is not sufficient synchronization, as adding/removing a cacheline from a partition list affects neighbouring cachelines' state as well. Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_metadata_concurrency.c | 12 ++++++++++++ src/concurrency/ocf_metadata_concurrency.h | 14 ++++++++++++++ src/metadata/metadata_partition.c | 8 ++++++++ src/metadata/metadata_structs.h | 1 + 4 files changed, 35 insertions(+) diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c index 058271b..59814b3 100644 --- a/src/concurrency/ocf_metadata_concurrency.c +++ b/src/concurrency/ocf_metadata_concurrency.c @@ -8,13 +8,25 @@ void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock) { + unsigned i; + env_spinlock_init(&metadata_lock->eviction); env_rwlock_init(&metadata_lock->status); env_rwsem_init(&metadata_lock->global); + + for (i = 0; i < OCF_IO_CLASS_MAX; i++) { + env_spinlock_init(&metadata_lock->partition[i]); + } } void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock) { + unsigned i; + + for (i = 0; i < OCF_IO_CLASS_MAX; i++) { + env_spinlock_destroy(&metadata_lock->partition[i]); + } + env_spinlock_destroy(&metadata_lock->eviction);
env_rwlock_destroy(&metadata_lock->status); env_rwsem_destroy(&metadata_lock->global); diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h index ffef123..1445602 100644 --- a/src/concurrency/ocf_metadata_concurrency.h +++ b/src/concurrency/ocf_metadata_concurrency.h @@ -33,6 +33,20 @@ static inline void ocf_metadata_eviction_unlock( env_spinlock_unlock(&metadata_lock->eviction); } +static inline void ocf_metadata_partition_lock( + struct ocf_metadata_lock *metadata_lock, + ocf_part_id_t part_id) +{ + env_spinlock_lock(&metadata_lock->partition[part_id]); +} + +static inline void ocf_metadata_partition_unlock( + struct ocf_metadata_lock *metadata_lock, + ocf_part_id_t part_id) +{ + env_spinlock_unlock(&metadata_lock->partition[part_id]); +} + #define OCF_METADATA_EVICTION_LOCK() \ ocf_metadata_eviction_lock(&cache->metadata.lock) diff --git a/src/metadata/metadata_partition.c b/src/metadata/metadata_partition.c index b802101..437f400 100644 --- a/src/metadata/metadata_partition.c +++ b/src/metadata/metadata_partition.c @@ -26,6 +26,8 @@ void ocf_metadata_add_to_partition(struct ocf_cache *cache, ENV_BUG_ON(!(line < line_entries)); + ocf_metadata_partition_lock(&cache->metadata.lock, part_id); + /* First node to be added/ */ if (!part->runtime->curr_size) { @@ -55,6 +57,8 @@ void ocf_metadata_add_to_partition(struct ocf_cache *cache, } part->runtime->curr_size++; + + ocf_metadata_partition_unlock(&cache->metadata.lock, part_id); } /* Deletes the node with the given collision_index from the Partition list */ @@ -68,6 +72,8 @@ void ocf_metadata_remove_from_partition(struct ocf_cache *cache, ENV_BUG_ON(!(line < line_entries)); + ocf_metadata_partition_lock(&cache->metadata.lock, part_id); + /* Get Partition info */ ocf_metadata_get_partition_info(cache, line, NULL, &next_line, &prev_line); @@ -131,4 +137,6 @@ void ocf_metadata_remove_from_partition(struct ocf_cache *cache, } part->runtime->curr_size--; + + 
ocf_metadata_partition_unlock(&cache->metadata.lock, part_id); } diff --git a/src/metadata/metadata_structs.h b/src/metadata/metadata_structs.h index 9d6e681..71eb02a 100644 --- a/src/metadata/metadata_structs.h +++ b/src/metadata/metadata_structs.h @@ -441,6 +441,7 @@ struct ocf_metadata_lock env_spinlock eviction; /*!< Fast lock for eviction policy */ env_rwsem *hash; /*!< Hash bucket locks */ env_rwsem *collision_pages; /*!< Collision table page locks */ + env_spinlock partition[OCF_IO_CLASS_MAX]; /* partition lock */ uint32_t num_hash_entries; /*!< Hash bucket count */ uint32_t num_collision_pages; /*!< Collision table page count */ ocf_cache_t cache; /*!< Parent cache object */ From 937b010ef612b50a92735a1b8eeb11b7d1866f41 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 23 Sep 2019 11:08:27 -0400 Subject: [PATCH 5/7] Synchronize access to cleaner shared structures Cleaning policy callbacks are typically called with hash buckets or cache lines locked. However cleaning policies maintain structures which are shared across multiple cache lines (e.g. ALRU list). Additional synchronization is required for these structures to maintain integrity. ACP already implements hash bucket locks. This patch adds an ALRU list mutex.
Signed-off-by: Adam Rutkowski --- src/cleaning/acp.c | 4 ++ src/cleaning/alru.c | 105 ++++++++++++++++++++++---------------------- 2 files changed, 56 insertions(+), 53 deletions(-) diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c index 1a1d1d9..a294bd0 100644 --- a/src/cleaning/acp.c +++ b/src/cleaning/acp.c @@ -636,6 +636,8 @@ void cleaning_policy_acp_purge_block(struct ocf_cache *cache, struct acp_cleaning_policy_meta *acp_meta; struct acp_chunk_info *chunk; + ACP_LOCK_CHUNKS_WR(); + acp_meta = _acp_meta_get(cache, cache_line, &policy_meta); chunk = _acp_get_chunk(cache, cache_line); @@ -646,6 +648,8 @@ void cleaning_policy_acp_purge_block(struct ocf_cache *cache, } _acp_update_bucket(acp, chunk); + + ACP_UNLOCK_CHUNKS_WR(); } int cleaning_policy_acp_purge_range(struct ocf_cache *cache, diff --git a/src/cleaning/alru.c b/src/cleaning/alru.c index e3e08f4..3d6cb63 100644 --- a/src/cleaning/alru.c +++ b/src/cleaning/alru.c @@ -43,12 +43,6 @@ #define OCF_DEBUG_PARAM(cache, format, ...) 
#endif -struct flush_merge_struct { - ocf_cache_line_t cache_line; - ocf_core_id_t core_id; - uint64_t core_sector; -}; - struct alru_flush_ctx { struct ocf_cleaner_attribs attribs; bool flush_perfomed; @@ -59,6 +53,12 @@ struct alru_flush_ctx { size_t flush_data_limit; }; +struct alru_context { + struct alru_flush_ctx flush_ctx; + env_spinlock list_lock[OCF_IO_CLASS_MAX]; +}; + + /* -- Start of ALRU functions -- */ @@ -319,20 +319,29 @@ void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache, void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache, uint32_t cache_line) { + struct alru_context *alru = cache->cleaner.cleaning_policy_context; ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cache_line); + env_spinlock_lock(&alru->list_lock[part_id]); remove_alru_list(cache, part_id, cache_line); + env_spinlock_unlock(&alru->list_lock[part_id]); } static void __cleaning_policy_alru_purge_cache_block_any( struct ocf_cache *cache, uint32_t cache_line) { + struct alru_context *alru = cache->cleaner.cleaning_policy_context; + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cache_line); + env_spinlock_lock(&alru->list_lock[part_id]); + if (is_on_alru_list(cache, part_id, cache_line)) remove_alru_list(cache, part_id, cache_line); + + env_spinlock_unlock(&alru->list_lock[part_id]); } int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id, @@ -357,6 +366,7 @@ int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id, void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache, uint32_t cache_line) { + struct alru_context *alru = cache->cleaner.cleaning_policy_context; ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cache_line); struct ocf_user_part *part = &cache->user_parts[part_id]; @@ -368,6 +378,8 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache, ENV_WARN_ON(!metadata_test_dirty(cache, cache_line)); ENV_WARN_ON(!metadata_test_valid_any(cache, 
cache_line)); + env_spinlock_lock(&alru->list_lock[part_id]); + ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); next_lru_node = policy.meta.alru.lru_next; prev_lru_node = policy.meta.alru.lru_prev; @@ -381,6 +393,8 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache, remove_alru_list(cache, part_id, cache_line); add_alru_head(cache, part_id, cache_line); + + env_spinlock_unlock(&alru->list_lock[part_id]); } static void _alru_rebuild(struct ocf_cache *cache) @@ -452,15 +466,19 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata) { struct ocf_user_part *part; ocf_part_id_t part_id; - struct alru_flush_ctx *fctx; + struct alru_context *alru; + unsigned i; - fctx = env_vzalloc(sizeof(*fctx)); - if (!fctx) { - ocf_cache_log(cache, log_err, "alru ctx allocation error\n"); + alru = env_vzalloc(sizeof(*alru)); + if (!alru) { + ocf_cache_log(cache, log_err, "alru context allocation error\n"); return -OCF_ERR_NO_MEM; } - cache->cleaner.cleaning_policy_context = fctx; + for (i = 0; i < OCF_IO_CLASS_MAX; i++) + env_spinlock_init(&alru->list_lock[i]); + + cache->cleaner.cleaning_policy_context = alru; for_each_part(cache, part, part_id) { cleaning_policy_alru_initialize_part(cache, @@ -477,6 +495,12 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata) void cleaning_policy_alru_deinitialize(struct ocf_cache *cache) { + struct alru_context *alru = cache->cleaner.cleaning_policy_context; + unsigned i; + + for (i = 0; i < OCF_IO_CLASS_MAX; i++) + env_spinlock_destroy(&alru->list_lock[i]); + env_vfree(cache->cleaner.cleaning_policy_context); cache->cleaner.cleaning_policy_context = NULL; } @@ -587,36 +611,6 @@ static int check_for_io_activity(struct ocf_cache *cache, return 0; } -static int cmp_ocf_user_parts(const void *p1, const void *p2) { - const struct ocf_user_part *t1 = *(const struct ocf_user_part**)p1; - const struct ocf_user_part *t2 = *(const struct ocf_user_part**)p2; - - if 
(t1->config->priority > t2->config->priority) - return 1; - else if (t1->config->priority < t2->config->priority) - return -1; - - return 0; -} - -static void swp_ocf_user_part(void *part1, void *part2, int size) { - void *tmp = *(void **)part1; - - *(void **)part1 = *(void **) part2; - *(void **)part2 = tmp; -} - -static void get_parts_sorted(struct ocf_user_part **parts, - struct ocf_cache *cache) { - int i; - - for (i = 0; i < OCF_IO_CLASS_MAX; i++) - parts[i] = &cache->user_parts[i]; - - env_sort(parts, OCF_IO_CLASS_MAX, sizeof(struct ocf_user_part*), - cmp_ocf_user_parts, swp_ocf_user_part); -} - static bool clean_later(ocf_cache_t cache, uint32_t *delta) { struct alru_cleaning_policy_config *config; @@ -706,24 +700,24 @@ static bool block_is_busy(struct ocf_cache *cache, return false; } -static int get_data_to_flush(struct alru_flush_ctx *fctx) +static int get_data_to_flush(struct alru_context *alru) { + struct alru_flush_ctx *fctx = &alru->flush_ctx; ocf_cache_t cache = fctx->cache; struct alru_cleaning_policy_config *config; struct cleaning_policy_meta policy; ocf_cache_line_t cache_line; - struct ocf_user_part *parts[OCF_IO_CLASS_MAX]; + struct ocf_user_part *part; uint32_t last_access; int to_flush = 0; int part_id = OCF_IO_CLASS_ID_MAX; config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; - get_parts_sorted(parts, cache); + for_each_part(cache, part, part_id) { + env_spinlock_lock(&alru->list_lock[part_id]); - while (part_id >= OCF_IO_CLASS_ID_MIN) { - cache_line = - parts[part_id]->runtime->cleaning.policy.alru.lru_tail; + cache_line = part->runtime->cleaning.policy.alru.lru_tail; last_access = compute_timestamp(config); @@ -732,8 +726,10 @@ static int get_data_to_flush(struct alru_flush_ctx *fctx) policy.meta.alru.timestamp < last_access); while (more_blocks_to_flush(cache, cache_line, last_access)) { - if (to_flush >= fctx->clines_no) + if (to_flush >= fctx->clines_no) { + env_spinlock_unlock(&alru->list_lock[part_id]); goto end; + } 
if (!block_is_busy(cache, cache_line)) { get_block_to_flush(&fctx->flush_data[to_flush], cache_line, @@ -744,7 +740,8 @@ static int get_data_to_flush(struct alru_flush_ctx *fctx) ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); cache_line = policy.meta.alru.lru_prev; } - part_id--; + + env_spinlock_unlock(&alru->list_lock[part_id]); } end: @@ -769,8 +766,9 @@ static void alru_clean_complete(void *priv, int err) fctx->cmpl(&fctx->cache->cleaner, interval); } -static void alru_clean(struct alru_flush_ctx *fctx) +static void alru_clean(struct alru_context *alru) { + struct alru_flush_ctx *fctx = &alru->flush_ctx; ocf_cache_t cache = fctx->cache; int to_clean; @@ -792,7 +790,7 @@ static void alru_clean(struct alru_flush_ctx *fctx) goto end; } - to_clean = get_data_to_flush(fctx); + to_clean = get_data_to_flush(alru); if (to_clean > 0) { fctx->flush_perfomed = true; ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, to_clean, @@ -812,7 +810,8 @@ end: void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl) { - struct alru_flush_ctx *fctx = cache->cleaner.cleaning_policy_context; + struct alru_context *alru = cache->cleaner.cleaning_policy_context; + struct alru_flush_ctx *fctx = &alru->flush_ctx; struct alru_cleaning_policy_config *config; config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; @@ -830,5 +829,5 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl) fctx->cmpl = cmpl; fctx->flush_perfomed = false; - alru_clean(fctx); + alru_clean(alru); } From 6de280283af473c5e4ffec5698c88db30d0d3333 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Tue, 24 Sep 2019 16:05:48 -0400 Subject: [PATCH 6/7] Fix hash_table_entries param type in ocf_metadata_concurrency_attached_init Number of hash buckets is 32 bit integer. 
Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_metadata_concurrency.c | 6 +++--- src/concurrency/ocf_metadata_concurrency.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c index 59814b3..ecea7a5 100644 --- a/src/concurrency/ocf_metadata_concurrency.c +++ b/src/concurrency/ocf_metadata_concurrency.c @@ -34,9 +34,9 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock) int ocf_metadata_concurrency_attached_init( struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache, - uint64_t hash_table_entries, uint32_t colision_table_pages) + uint32_t hash_table_entries, uint32_t colision_table_pages) { - uint64_t i; + uint32_t i; int err = 0; metadata_lock->hash = env_vzalloc(sizeof(env_rwsem) * @@ -79,7 +79,7 @@ int ocf_metadata_concurrency_attached_init( void ocf_metadata_concurrency_attached_deinit( struct ocf_metadata_lock *metadata_lock) { - uint64_t i; + uint32_t i; if (metadata_lock->hash) { for (i = 0; i < metadata_lock->num_hash_entries; i++) diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h index 1445602..e7d3a23 100644 --- a/src/concurrency/ocf_metadata_concurrency.h +++ b/src/concurrency/ocf_metadata_concurrency.h @@ -16,7 +16,7 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock); int ocf_metadata_concurrency_attached_init( struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache, - uint64_t hash_table_entries, uint32_t colision_table_pages); + uint32_t hash_table_entries, uint32_t colision_table_pages); void ocf_metadata_concurrency_attached_deinit( struct ocf_metadata_lock *metadata_lock); From a934b43aec8d3e9ed8748fc82db51033e4977db5 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Wed, 25 Sep 2019 13:58:14 -0400 Subject: [PATCH 7/7] Add missing error handling in hash bucket locks initialization Signed-off-by: Adam Rutkowski --- 
src/concurrency/ocf_metadata_concurrency.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c index ecea7a5..df996b4 100644 --- a/src/concurrency/ocf_metadata_concurrency.c +++ b/src/concurrency/ocf_metadata_concurrency.c @@ -52,14 +52,26 @@ int ocf_metadata_concurrency_attached_init( return -OCF_ERR_NO_MEM; } - for (i = 0; i < hash_table_entries; i++) - env_rwsem_init(&metadata_lock->hash[i]); + for (i = 0; i < hash_table_entries; i++) { + err = env_rwsem_init(&metadata_lock->hash[i]); + if (err) + break; + } + if (err) { + while (i--) + env_rwsem_destroy(&metadata_lock->hash[i]); + env_vfree(metadata_lock->hash); + metadata_lock->hash = NULL; + ocf_metadata_concurrency_attached_deinit(metadata_lock); + return err; + } + + for (i = 0; i < colision_table_pages; i++) { err = env_rwsem_init(&metadata_lock->collision_pages[i]); if (err) break; } - if (err) { while (i--) env_rwsem_destroy(&metadata_lock->collision_pages[i]);