From f34cacf150183207157673a626bfe36479e179d5 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Tue, 10 Sep 2019 15:23:49 -0400 Subject: [PATCH 01/10] Move resume callback to async lock function params (refactoring) This is a step towards common async lock interface in OCF. Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_cache_line_concurrency.c | 110 +++++++++---------- src/concurrency/ocf_cache_line_concurrency.h | 9 +- src/engine/cache_engine.h | 2 - src/engine/engine_discard.c | 7 +- src/engine/engine_fast.c | 6 +- src/engine/engine_pt.c | 3 +- src/engine/engine_rd.c | 11 +- src/engine/engine_wb.c | 5 +- src/engine/engine_wi.c | 3 +- src/engine/engine_wo.c | 3 +- src/engine/engine_wt.c | 5 +- src/engine/engine_zero.c | 3 +- src/utils/utils_cleaner.c | 15 ++- 13 files changed, 84 insertions(+), 98 deletions(-) diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c index 9b79b98..bee26b7 100644 --- a/src/concurrency/ocf_cache_line_concurrency.c +++ b/src/concurrency/ocf_cache_line_concurrency.c @@ -35,14 +35,11 @@ #define _WAITERS_LIST_ITEM(cache_line) ((cache_line) % _WAITERS_LIST_ENTRIES) -typedef void (*__on_lock)(void *ctx, uint32_t ctx_id, ocf_cache_line_t line, - int rw); - struct __waiter { ocf_cache_line_t line; void *ctx; uint32_t ctx_id; - __on_lock on_lock; + ocf_req_async_lock_cb cb; struct list_head item; int rw; }; @@ -353,11 +350,37 @@ static inline bool __try_lock_rd2rd(struct ocf_cache_line_concurrency *c, return true; } +/* + * + */ +static void _req_on_lock(void *ctx, ocf_req_async_lock_cb cb, + uint32_t ctx_id, ocf_cache_line_t line, int rw) +{ + struct ocf_request *req = ctx; + struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. 
+ cache_line; + + if (rw == OCF_READ) + req->map[ctx_id].rd_locked = true; + else if (rw == OCF_WRITE) + req->map[ctx_id].wr_locked = true; + else + ENV_BUG(); + + if (env_atomic_dec_return(&req->lock_remaining) == 0) { + /* All cache line locked, resume request */ + OCF_DEBUG_RQ(req, "Resume"); + ENV_BUG_ON(!cb); + env_atomic_dec(&c->waiting); + cb(req); + } +} + /* * */ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, - const ocf_cache_line_t line, __on_lock on_lock, + const ocf_cache_line_t line, ocf_req_async_lock_cb cb, void *ctx, uint32_t ctx_id) { struct __waiter *waiter; @@ -367,8 +390,8 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, if (__try_lock_wr(c, line)) { /* No activity before look get */ - if (on_lock) - on_lock(ctx, ctx_id, line, OCF_WRITE); + if (cb) + _req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE); return true; } @@ -382,7 +405,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, locked = true; } else { waiter = NULL; - if (on_lock != NULL) { + if (cb != NULL) { /* Need to create waiters and add it into list */ waiter = env_allocator_new(c->allocator); } @@ -391,7 +414,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, waiter->line = line; waiter->ctx = ctx; waiter->ctx_id = ctx_id; - waiter->on_lock = on_lock; + waiter->cb = cb; waiter->rw = OCF_WRITE; INIT_LIST_HEAD(&waiter->item); @@ -403,8 +426,8 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, __unlock_waiters_list(c, line, flags); - if (locked && on_lock) - on_lock(ctx, ctx_id, line, OCF_WRITE); + if (locked && cb) + _req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE); return locked || waiting; } @@ -414,7 +437,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, * In case cache line is locked, attempt to add caller on wait list. 
*/ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c, - const ocf_cache_line_t line, __on_lock on_lock, + const ocf_cache_line_t line, ocf_req_async_lock_cb cb, void *ctx, uint32_t ctx_id) { struct __waiter *waiter; @@ -424,8 +447,8 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c, if (__try_lock_rd_idle(c, line)) { /* No activity before look get, it is first reader */ - if (on_lock) - on_lock(ctx, ctx_id, line, OCF_READ); + if (cb) + _req_on_lock(ctx, cb, ctx_id, line, OCF_READ); return true; } @@ -444,7 +467,7 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c, if (!locked) { waiter = NULL; - if (on_lock) { + if (cb) { /* Need to create waiters and add it into list */ waiter = env_allocator_new(c->allocator); } @@ -453,7 +476,7 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c, waiter->line = line; waiter->ctx = ctx; waiter->ctx_id = ctx_id; - waiter->on_lock = on_lock; + waiter->cb = cb; waiter->rw = OCF_READ; INIT_LIST_HEAD(&waiter->item); @@ -465,8 +488,8 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c, __unlock_waiters_list(c, line, flags); - if (locked && on_lock) - on_lock(ctx, ctx_id, line, OCF_READ); + if (locked && cb) + _req_on_lock(ctx, cb, ctx_id, line, OCF_READ); return locked || waiting; } @@ -520,8 +543,8 @@ static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurren exchanged = false; list_del(iter); - waiter->on_lock(waiter->ctx, waiter->ctx_id, line, - waiter->rw); + _req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id, + line, waiter->rw); env_allocator_del(c->allocator, waiter); } else { @@ -601,7 +624,7 @@ static inline void __unlock_cache_line_wr_common(struct ocf_cache_line_concurren exchanged = false; list_del(iter); - waiter->on_lock(waiter->ctx, waiter->ctx_id, line, + _req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id, line, waiter->rw); 
env_allocator_del(c->allocator, waiter); @@ -668,31 +691,6 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr __unlock_waiters_list(c, line, flags); } -/* - * - */ -static void _req_on_lock(void *ctx, uint32_t ctx_id, - ocf_cache_line_t line, int rw) -{ - struct ocf_request *req = ctx; - struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line; - - if (rw == OCF_READ) - req->map[ctx_id].rd_locked = true; - else if (rw == OCF_WRITE) - req->map[ctx_id].wr_locked = true; - else - ENV_BUG(); - - if (env_atomic_dec_return(&req->lock_remaining) == 0) { - /* All cache line locked, resume request */ - OCF_DEBUG_RQ(req, "Resume"); - ENV_BUG_ON(!req->io_if->resume); - env_atomic_dec(&c->waiting); - req->io_if->resume(req); - } -} - /* Try to read-lock request without adding waiters. Function should be called * under read lock, multiple threads may attempt to acquire the lock * concurrently. */ @@ -750,13 +748,12 @@ static int _ocf_req_trylock_rd(struct ocf_request *req) * Read-lock request cache lines. Must be called under cacheline concurrency * write lock. */ -static int _ocf_req_lock_rd(struct ocf_request *req) +static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb) { int32_t i; struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. 
cache_line; ocf_cache_line_t line; - __on_lock on_lock = _req_on_lock; int ret = OCF_LOCK_NOT_ACQUIRED; ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); @@ -778,7 +775,7 @@ static int _ocf_req_lock_rd(struct ocf_request *req) ENV_BUG_ON(req->map[i].rd_locked); ENV_BUG_ON(req->map[i].wr_locked); - if (!__lock_cache_line_rd(c, line, on_lock, req, i)) { + if (!__lock_cache_line_rd(c, line, cb, req, i)) { /* lock not acquired and not added to wait list */ ret = -OCF_ERR_NO_MEM; goto err; @@ -804,7 +801,7 @@ err: } -int ocf_req_trylock_rd(struct ocf_request *req) +int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb) { struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line; @@ -816,7 +813,7 @@ int ocf_req_trylock_rd(struct ocf_request *req) if (lock != OCF_LOCK_ACQUIRED) { env_rwlock_write_lock(&c->lock); - lock = _ocf_req_lock_rd(req); + lock = _ocf_req_lock_rd(req, cb); env_rwlock_write_unlock(&c->lock); } @@ -878,17 +875,16 @@ static int _ocf_req_trylock_wr(struct ocf_request *req) * Write-lock request cache lines. Must be called under cacheline concurrency * write lock. */ -static int _ocf_req_lock_wr(struct ocf_request *req) +static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb) { int32_t i; struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. 
cache_line; ocf_cache_line_t line; - __on_lock on_lock = _req_on_lock; int ret = OCF_LOCK_NOT_ACQUIRED; ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); - ENV_BUG_ON(!req->io_if->resume); + ENV_BUG_ON(!cb); env_atomic_inc(&c->waiting); env_atomic_set(&req->lock_remaining, req->core_line_count); @@ -907,7 +903,7 @@ static int _ocf_req_lock_wr(struct ocf_request *req) ENV_BUG_ON(req->map[i].rd_locked); ENV_BUG_ON(req->map[i].wr_locked); - if (!__lock_cache_line_wr(c, line, on_lock, req, i)) { + if (!__lock_cache_line_wr(c, line, cb, req, i)) { /* lock not acquired and not added to wait list */ ret = -OCF_ERR_NO_MEM; goto err; @@ -932,7 +928,7 @@ err: return ret; } -int ocf_req_trylock_wr(struct ocf_request *req) +int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb) { struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line; @@ -944,7 +940,7 @@ int ocf_req_trylock_wr(struct ocf_request *req) if (lock != OCF_LOCK_ACQUIRED) { env_rwlock_write_lock(&c->lock); - lock = _ocf_req_lock_wr(req); + lock = _ocf_req_lock_wr(req, cb); env_rwlock_write_unlock(&c->lock); } diff --git a/src/concurrency/ocf_cache_line_concurrency.h b/src/concurrency/ocf_cache_line_concurrency.h index 5a1fa60..e6ab7c8 100644 --- a/src/concurrency/ocf_cache_line_concurrency.h +++ b/src/concurrency/ocf_cache_line_concurrency.h @@ -50,19 +50,23 @@ uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache); */ size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache); +/* async request cacheline lock acquisition callback */ +typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req); + /** * @brief Lock OCF request for WRITE access (Lock all cache lines in map info) * * @note io_if->resume callback has to be set * * @param req - OCF request + * @param cb - async lock acquisition callback * * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed * * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not 
acquired, request was * added into waiting list. When lock will be acquired io_if->resume be called */ -int ocf_req_trylock_wr(struct ocf_request *req); +int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb); /** * @brief Lock OCF request for READ access (Lock all cache lines in map info) @@ -70,13 +74,14 @@ int ocf_req_trylock_wr(struct ocf_request *req); * @note io_if->resume callback has to be set * * @param req - OCF request + * @param cb - async lock acquisition callback * * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed * * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was * added into waiting list. When lock will be acquired io_if->resume be called */ -int ocf_req_trylock_rd(struct ocf_request *req); +int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb); /** * @brief Unlock OCF request from WRITE access diff --git a/src/engine/cache_engine.h b/src/engine/cache_engine.h index 17c36f7..cd2e699 100644 --- a/src/engine/cache_engine.h +++ b/src/engine/cache_engine.h @@ -37,8 +37,6 @@ struct ocf_io_if { int (*write)(struct ocf_request *req); - void (*resume)(struct ocf_request *req); - const char *name; }; diff --git a/src/engine/engine_discard.c b/src/engine/engine_discard.c index c9146c2..e460f4f 100644 --- a/src/engine/engine_discard.c +++ b/src/engine/engine_discard.c @@ -22,30 +22,25 @@ static int _ocf_discard_step_do(struct ocf_request *req); static int _ocf_discard_step(struct ocf_request *req); static int _ocf_discard_flush_cache(struct ocf_request *req); static int _ocf_discard_core(struct ocf_request *req); -static void _ocf_discard_on_resume(struct ocf_request *req); static const struct ocf_io_if _io_if_discard_step = { .read = _ocf_discard_step, .write = _ocf_discard_step, - .resume = _ocf_discard_on_resume, }; static const struct ocf_io_if _io_if_discard_step_resume = { .read = _ocf_discard_step_do, .write = _ocf_discard_step_do, - .resume = 
_ocf_discard_on_resume, }; static const struct ocf_io_if _io_if_discard_flush_cache = { .read = _ocf_discard_flush_cache, .write = _ocf_discard_flush_cache, - .resume = _ocf_discard_on_resume, }; static const struct ocf_io_if _io_if_discard_core = { .read = _ocf_discard_core, .write = _ocf_discard_core, - .resume = _ocf_discard_on_resume, }; static void _ocf_discard_complete_req(struct ocf_request *req, int error) @@ -239,7 +234,7 @@ static int _ocf_discard_step(struct ocf_request *req) if (ocf_engine_mapped_count(req)) { /* Some cache line are mapped, lock request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume); } else { lock = OCF_LOCK_ACQUIRED; } diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c index d2a8153..78bf350 100644 --- a/src/engine/engine_fast.c +++ b/src/engine/engine_fast.c @@ -104,7 +104,6 @@ static int _ocf_read_fast_do(struct ocf_request *req) static const struct ocf_io_if _io_if_read_fast_resume = { .read = _ocf_read_fast_do, .write = _ocf_read_fast_do, - .resume = ocf_engine_on_resume, }; int ocf_read_fast(struct ocf_request *req) @@ -129,7 +128,7 @@ int ocf_read_fast(struct ocf_request *req) hit = ocf_engine_is_hit(req); if (hit) { ocf_io_start(&req->ioi.io); - lock = ocf_req_trylock_rd(req); + lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_RD(); @@ -174,7 +173,6 @@ int ocf_read_fast(struct ocf_request *req) static const struct ocf_io_if _io_if_write_fast_resume = { .read = ocf_write_wb_do, .write = ocf_write_wb_do, - .resume = ocf_engine_on_resume, }; int ocf_write_fast(struct ocf_request *req) @@ -199,7 +197,7 @@ int ocf_write_fast(struct ocf_request *req) mapped = ocf_engine_is_mapped(req); if (mapped) { ocf_io_start(&req->ioi.io); - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_RD(); diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c index 
bf18431..0a910a8 100644 --- a/src/engine/engine_pt.c +++ b/src/engine/engine_pt.c @@ -99,7 +99,6 @@ int ocf_read_pt_do(struct ocf_request *req) static const struct ocf_io_if _io_if_pt_resume = { .read = ocf_read_pt_do, .write = ocf_read_pt_do, - .resume = ocf_engine_on_resume, }; int ocf_read_pt(struct ocf_request *req) @@ -130,7 +129,7 @@ int ocf_read_pt(struct ocf_request *req) /* There are mapped cache line, * lock request for READ access */ - lock = ocf_req_trylock_rd(req); + lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } else { /* No mapped cache lines, no need to get lock */ lock = OCF_LOCK_ACQUIRED; diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 2ca2d48..a26ef10 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -208,7 +208,6 @@ static int _ocf_read_generic_do(struct ocf_request *req) static const struct ocf_io_if _io_if_read_generic_resume = { .read = _ocf_read_generic_do, .write = _ocf_read_generic_do, - .resume = ocf_engine_on_resume, }; int ocf_read_generic(struct ocf_request *req) @@ -243,13 +242,13 @@ int ocf_read_generic(struct ocf_request *req) /* Request is fully mapped, no need to call eviction */ if (ocf_engine_is_hit(req)) { /* There is a hit, lock request for READ access */ - lock = ocf_req_trylock_rd(req); + lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } else { /* All cache line mapped, but some sectors are not valid * and cache insert will be performed - lock for * WRITE is required */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } } @@ -274,12 +273,14 @@ int ocf_read_generic(struct ocf_request *req) /* After mapping turns out there is hit, * so lock OCF request for read access */ - lock = ocf_req_trylock_rd(req); + lock = ocf_req_async_lock_rd(req, + ocf_engine_on_resume); } else { /* Miss, new cache lines were mapped, * need to lock OCF request for write access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, + 
ocf_engine_on_resume); } } OCF_METADATA_UNLOCK_WR(); diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c index f97793d..1a9938b 100644 --- a/src/engine/engine_wb.c +++ b/src/engine/engine_wb.c @@ -21,7 +21,6 @@ static const struct ocf_io_if _io_if_wb_resume = { .read = ocf_write_wb_do, .write = ocf_write_wb_do, - .resume = ocf_engine_on_resume, }; static void _ocf_write_wb_update_bits(struct ocf_request *req) @@ -190,7 +189,7 @@ int ocf_write_wb(struct ocf_request *req) mapped = ocf_engine_is_mapped(req); if (mapped) { /* All cache line are mapped, lock request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ @@ -206,7 +205,7 @@ int ocf_write_wb(struct ocf_request *req) if (!req->info.mapping_error) { /* Lock request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c index b0d204c..482b156 100644 --- a/src/engine/engine_wi.c +++ b/src/engine/engine_wi.c @@ -130,7 +130,6 @@ static void _ocf_write_wi_on_resume(struct ocf_request *req) static const struct ocf_io_if _io_if_wi_resume = { .read = _ocf_write_wi_do, .write = _ocf_write_wi_do, - .resume = _ocf_write_wi_on_resume, }; int ocf_write_wi(struct ocf_request *req) @@ -155,7 +154,7 @@ int ocf_write_wi(struct ocf_request *req) if (ocf_engine_mapped_count(req)) { /* Some cache line are mapped, lock request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume); } else { lock = OCF_LOCK_ACQUIRED; } diff --git a/src/engine/engine_wo.c b/src/engine/engine_wo.c index 022d70b..5687280 100644 --- a/src/engine/engine_wo.c +++ b/src/engine/engine_wo.c @@ -197,7 +197,6 @@ int ocf_read_wo_do(struct ocf_request *req) static const struct 
ocf_io_if _io_if_wo_resume = { .read = ocf_read_wo_do, .write = ocf_read_wo_do, - .resume = ocf_engine_on_resume, }; int ocf_read_wo(struct ocf_request *req) @@ -224,7 +223,7 @@ int ocf_read_wo(struct ocf_request *req) /* There are mapped cache lines, * lock request for READ access */ - lock = ocf_req_trylock_rd(req); + lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c index ac5fc41..9db3ec3 100644 --- a/src/engine/engine_wt.c +++ b/src/engine/engine_wt.c @@ -158,7 +158,6 @@ static int _ocf_write_wt_do(struct ocf_request *req) static const struct ocf_io_if _io_if_wt_resume = { .read = _ocf_write_wt_do, .write = _ocf_write_wt_do, - .resume = ocf_engine_on_resume, }; int ocf_write_wt(struct ocf_request *req) @@ -183,7 +182,7 @@ int ocf_write_wt(struct ocf_request *req) mapped = ocf_engine_is_mapped(req); if (mapped) { /* All cache line are mapped, lock request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ @@ -199,7 +198,7 @@ int ocf_write_wt(struct ocf_request *req) if (!req->info.mapping_error) { /* Lock request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ diff --git a/src/engine/engine_zero.c b/src/engine/engine_zero.c index 7caa52f..d5cb240 100644 --- a/src/engine/engine_zero.c +++ b/src/engine/engine_zero.c @@ -129,7 +129,6 @@ static int _ocf_zero_do(struct ocf_request *req) static const struct ocf_io_if _io_if_ocf_zero_do = { .read = _ocf_zero_do, .write = _ocf_zero_do, - .resume = ocf_engine_on_resume, }; /** @@ -151,7 +150,7 @@ void ocf_engine_zero_line(struct ocf_request *req) req->io_if = &_io_if_ocf_zero_do; /* Some cache line are mapped, lock 
request for WRITE access */ - lock = ocf_req_trylock_wr(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); if (lock >= 0) { ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED); diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index 1cc90fd..6f9aa23 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -197,6 +197,12 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req) cmpl(master->priv, master->error); } +static void _ocf_cleaner_on_resume(struct ocf_request *req) +{ + OCF_DEBUG_TRACE(req->cache); + ocf_engine_push_req_front(req, true); +} + /* * cleaner - Cache line lock, function lock cache lines depends on attributes */ @@ -207,7 +213,7 @@ static int _ocf_cleaner_cache_line_lock(struct ocf_request *req) OCF_DEBUG_TRACE(req->cache); - return ocf_req_trylock_rd(req); + return ocf_req_async_lock_rd(req, _ocf_cleaner_on_resume); } /* @@ -697,16 +703,9 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req) return 0; } -static void _ocf_cleaner_on_resume(struct ocf_request *req) -{ - OCF_DEBUG_TRACE(req->cache); - ocf_engine_push_req_front(req, true); -} - static const struct ocf_io_if _io_if_fire_cache = { .read = _ocf_cleaner_fire_cache, .write = _ocf_cleaner_fire_cache, - .resume = _ocf_cleaner_on_resume, }; static int _ocf_cleaner_fire(struct ocf_request *req) From 42f65c3fbb78ad7c2bba1506a81140b36bc2b56b Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Thu, 1 Aug 2019 16:58:56 -0400 Subject: [PATCH 02/10] Change ocf_metadata_(un)lock -> OCF_METADATA_(UN)LOCK Signed-off-by: Adam Rutkowski --- src/mngt/ocf_mngt_flush.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index 4ddb7a8..237e80e 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -385,9 +385,9 @@ static int _ofc_flush_container_step(struct ocf_request *req) struct flush_container *fc = req->priv; ocf_cache_t cache = 
fc->cache; - ocf_metadata_lock(cache, OCF_METADATA_WR); + OCF_METADATA_LOCK_WR(); _ocf_mngt_flush_portion(fc); - ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); return 0; } @@ -501,7 +501,7 @@ static void _ocf_mngt_flush_core( return; } - ocf_metadata_lock(cache, OCF_METADATA_WR); + OCF_METADATA_LOCK_WR(); ret = _ocf_mngt_get_sectors(cache, core_id, &fc->flush_data, &fc->count); @@ -509,7 +509,7 @@ static void _ocf_mngt_flush_core( ocf_core_log(core, log_err, "Flushing operation aborted, " "no memory\n"); env_vfree(fc); - ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); complete(context, -OCF_ERR_NO_MEM); return; } @@ -519,7 +519,7 @@ static void _ocf_mngt_flush_core( _ocf_mngt_flush_containers(context, fc, 1, complete); - ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); } static void _ocf_mngt_flush_all_cores( @@ -538,21 +538,21 @@ static void _ocf_mngt_flush_all_cores( env_atomic_set(&cache->flush_in_progress, 1); - ocf_metadata_lock(cache, OCF_METADATA_WR); + OCF_METADATA_LOCK_WR(); /* Get all 'dirty' sectors for all cores */ ret = _ocf_mngt_get_flush_containers(cache, &fctbl, &fcnum); if (ret) { ocf_cache_log(cache, log_err, "Flushing operation aborted, " "no memory\n"); - ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); complete(context, ret); return; } _ocf_mngt_flush_containers(context, fctbl, fcnum, complete); - ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); } static void _ocf_mngt_flush_all_cores_complete( @@ -907,7 +907,7 @@ int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type) return 0; } - ocf_metadata_lock(cache, OCF_METADATA_WR); + OCF_METADATA_LOCK_WR(); if (cleaning_policy_ops[old_type].deinitialize) cleaning_policy_ops[old_type].deinitialize(cache); @@ -925,7 +925,7 @@ int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type) cache->conf_meta->cleaning_policy_type = type; - 
ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); ocf_cache_log(cache, log_info, "Changing cleaning policy from " "%s to %s\n", cleaning_policy_ops[old_type].name, @@ -957,12 +957,12 @@ int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type, if (!cleaning_policy_ops[type].set_cleaning_param) return -OCF_ERR_INVAL; - ocf_metadata_lock(cache, OCF_METADATA_WR); + OCF_METADATA_LOCK_WR(); ret = cleaning_policy_ops[type].set_cleaning_param(cache, param_id, param_value); - ocf_metadata_unlock(cache, OCF_METADATA_WR); + OCF_METADATA_UNLOCK_WR(); return ret; } From d91012f4b47581e18cd4b0fd3661234254c86ad6 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Thu, 1 Aug 2019 16:57:14 -0400 Subject: [PATCH 03/10] Introduce hash bucket locks There is one RW lock per hash bucket. Write lock is required to map cacheline, read lock is sufficient for traversing. Hash bucket locks are always acquired under global metadata read lock. This assures mutual exclusion with eviction and management paths, where global metadata write lock is held. 
Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_metadata_concurrency.c | 216 ++++++++++++++++++++- src/concurrency/ocf_metadata_concurrency.h | 128 ++++++------ src/metadata/metadata.c | 4 +- src/metadata/metadata_structs.h | 15 +- src/mngt/ocf_mngt_cache.c | 4 +- src/ocf_request.c | 11 ++ src/ocf_request.h | 7 + 7 files changed, 298 insertions(+), 87 deletions(-) diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c index 5304780..7952ac2 100644 --- a/src/concurrency/ocf_metadata_concurrency.c +++ b/src/concurrency/ocf_metadata_concurrency.c @@ -5,21 +5,219 @@ #include "ocf_metadata_concurrency.h" -void ocf_metadata_concurrency_init(struct ocf_cache *cache) +void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock) { - env_spinlock_init(&cache->metadata.lock.eviction); - env_rwlock_init(&cache->metadata.lock.status); - env_rwsem_init(&cache->metadata.lock.collision); + env_spinlock_init(&metadata_lock->eviction); + env_rwlock_init(&metadata_lock->status); + env_rwsem_init(&metadata_lock->global); } -void ocf_metadata_concurrency_deinit(struct ocf_cache *cache) +void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock) { - env_spinlock_destroy(&cache->metadata.lock.eviction); - env_rwlock_destroy(&cache->metadata.lock.status); - env_rwsem_destroy(&cache->metadata.lock.collision); + env_spinlock_destroy(&metadata_lock->eviction); + env_rwlock_destroy(&metadata_lock->status); + env_rwsem_destroy(&metadata_lock->global); } -int ocf_metadata_concurrency_attached_init(struct ocf_cache *cache) +int ocf_metadata_concurrency_attached_init( + struct ocf_metadata_lock *metadata_lock, + uint64_t hash_table_entries) { + uint64_t i; + + metadata_lock->num_hash_entries = hash_table_entries; + metadata_lock->hash = env_vzalloc(sizeof(env_rwsem) * + hash_table_entries); + if (!metadata_lock->hash) + return -OCF_ERR_NO_MEM; + + for (i = 0; i < hash_table_entries; i++) + 
env_rwsem_init(&metadata_lock->hash[i]); + return 0; } + +void ocf_metadata_concurrency_attached_deinit( + struct ocf_metadata_lock *metadata_lock) +{ + uint64_t i; + + for (i = 0; i < metadata_lock->num_hash_entries; i++) + env_rwsem_destroy(&metadata_lock->hash[i]); + + env_vfree(metadata_lock->hash); +} + +void ocf_metadata_start_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ + env_rwsem_down_write(&metadata_lock->global); +} + +int ocf_metadata_try_start_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ + return env_rwsem_down_write_trylock(&metadata_lock->global); +} + +void ocf_metadata_end_exclusive_access( + struct ocf_metadata_lock *metadata_lock) +{ + env_rwsem_up_write(&metadata_lock->global); +} + +void ocf_metadata_start_shared_access( + struct ocf_metadata_lock *metadata_lock) +{ + env_rwsem_down_read(&metadata_lock->global); +} + +int ocf_metadata_try_start_shared_access( + struct ocf_metadata_lock *metadata_lock) +{ + return env_rwsem_down_read_trylock(&metadata_lock->global); +} + +void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock) +{ + env_rwsem_up_read(&metadata_lock->global); +} + +void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock, + ocf_cache_line_t hash, int rw) +{ + ENV_BUG_ON(hash >= metadata_lock->num_hash_entries); + + if (rw == OCF_METADATA_WR) + env_rwsem_down_write(&metadata_lock->hash[hash]); + else if (rw == OCF_METADATA_RD) + env_rwsem_down_read(&metadata_lock->hash[hash]); + else + ENV_BUG(); +} + +void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock, + ocf_cache_line_t hash, int rw) +{ + ENV_BUG_ON(hash >= metadata_lock->num_hash_entries); + + if (rw == OCF_METADATA_WR) + env_rwsem_up_write(&metadata_lock->hash[hash]); + else if (rw == OCF_METADATA_RD) + env_rwsem_up_read(&metadata_lock->hash[hash]); + else + ENV_BUG(); +} + +int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock, + ocf_cache_line_t hash, int rw) +{ + int 
result = -1; + + ENV_BUG_ON(hash >= metadata_lock->num_hash_entries); + + if (rw == OCF_METADATA_WR) { + result = env_rwsem_down_write_trylock( + &metadata_lock->hash[hash]); + } else if (rw == OCF_METADATA_RD) { + result = env_rwsem_down_read_trylock( + &metadata_lock->hash[hash]); + } else { + ENV_BUG(); + } + + if (!result) + return -1; + + return 0; +} + +#define _NUM_HASH_ENTRIES req->cache->metadata.lock.num_hash_entries + +/* + * Iterate over hash buckets for all core lines in the request in ascending hash + * bucket value order. Each hash bucket is visited only once. + * + * @i is used as iteration counter, starting from 0 + * @hash stores hash values for each iteration + * @start is internal helper variable. It set to the index of first occurence + * of hash with minimal value within the request. + * + * Example hash iteration order for _NUM_HASH_ENTRIES == 5: + * Request hashes Iteration order start + * [2, 3, 4] [2, 3, 4] 0 + * [2, 3, 4, 0] [0, 2, 3, 4] 3 + * [2, 3, 4, 0, 1, 2, 3, 4, 0, 1] [0, 1, 2, 3, 4] 3 + * [4, 0] [0, 4] 1 + * [0, 1, 2, 3, 4, 0, 1] [0, 1, 2, 3, 4] 0 + * + */ +#define for_each_req_hash_asc(req, i, hash, start) \ + for (i = 0, start = (req->map[0].hash + req->core_line_count <= \ + _NUM_HASH_ENTRIES) ? 
0 : (_NUM_HASH_ENTRIES - req->map[0].hash)\ + % _NUM_HASH_ENTRIES, hash = req->map[start].hash; \ + i < OCF_MIN(req->core_line_count, _NUM_HASH_ENTRIES); \ + i++, hash = req->map[(start + i) % req->core_line_count].hash) + +void ocf_req_hash_lock_rd(struct ocf_request *req) +{ + unsigned i, start; + ocf_cache_line_t hash; + + ocf_metadata_start_shared_access(&req->cache->metadata.lock); + for_each_req_hash_asc(req, i, hash, start) { + ocf_metadata_hash_lock(&req->cache->metadata.lock, hash, + OCF_METADATA_RD); + } +} + +void ocf_req_hash_unlock_rd(struct ocf_request *req) +{ + unsigned i, start; + ocf_cache_line_t hash; + + for_each_req_hash_asc(req, i, hash, start) { + ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash, + OCF_METADATA_RD); + } + ocf_metadata_end_shared_access(&req->cache->metadata.lock); +} + +void ocf_req_hash_lock_wr(struct ocf_request *req) +{ + unsigned i, start; + ocf_cache_line_t hash; + + ocf_metadata_start_shared_access(&req->cache->metadata.lock); + for_each_req_hash_asc(req, i, hash, start) { + ocf_metadata_hash_lock(&req->cache->metadata.lock, hash, + OCF_METADATA_WR); + } +} + +void ocf_req_hash_lock_upgrade(struct ocf_request *req) +{ + unsigned i, start; + ocf_cache_line_t hash; + + for_each_req_hash_asc(req, i, hash, start) { + ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash, + OCF_METADATA_RD); + } + for_each_req_hash_asc(req, i, hash, start) { + ocf_metadata_hash_lock(&req->cache->metadata.lock, hash, + OCF_METADATA_WR); + } +} + +void ocf_req_hash_unlock_wr(struct ocf_request *req) +{ + unsigned i, start; + ocf_cache_line_t hash; + + for_each_req_hash_asc(req, i, hash, start) { + ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash, + OCF_METADATA_WR); + } + ocf_metadata_end_shared_access(&req->cache->metadata.lock); +} diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h index 85b0c10..ccd996b 100644 --- a/src/concurrency/ocf_metadata_concurrency.h +++ 
b/src/concurrency/ocf_metadata_concurrency.h @@ -10,125 +10,113 @@ #define OCF_METADATA_RD 0 #define OCF_METADATA_WR 1 -void ocf_metadata_concurrency_init(struct ocf_cache *cache); +void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock); -void ocf_metadata_concurrency_deinit(struct ocf_cache *cache); +void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock); -int ocf_metadata_concurrency_attached_init(struct ocf_cache *cache); +int ocf_metadata_concurrency_attached_init( + struct ocf_metadata_lock *metadata_lock, + uint64_t hash_table_entries); -static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache) +void ocf_metadata_concurrency_attached_deinit( + struct ocf_metadata_lock *metadata_lock); + +static inline void ocf_metadata_eviction_lock( + struct ocf_metadata_lock *metadata_lock) { - env_spinlock_lock(&cache->metadata.lock.eviction); + env_spinlock_lock(&metadata_lock->eviction); } -static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache) +static inline void ocf_metadata_eviction_unlock( + struct ocf_metadata_lock *metadata_lock) { - env_spinlock_unlock(&cache->metadata.lock.eviction); + env_spinlock_unlock(&metadata_lock->eviction); } #define OCF_METADATA_EVICTION_LOCK() \ - ocf_metadata_eviction_lock(cache) + ocf_metadata_eviction_lock(&cache->metadata.lock) #define OCF_METADATA_EVICTION_UNLOCK() \ - ocf_metadata_eviction_unlock(cache) + ocf_metadata_eviction_unlock(&cache->metadata.lock) -static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw) -{ - if (rw == OCF_METADATA_WR) - env_rwsem_down_write(&cache->metadata.lock.collision); - else if (rw == OCF_METADATA_RD) - env_rwsem_down_read(&cache->metadata.lock.collision); - else - ENV_BUG(); -} +void ocf_metadata_start_exclusive_access( + struct ocf_metadata_lock *metadata_lock); +int ocf_metadata_try_start_exclusive_access( + struct ocf_metadata_lock *metadata_lock); -static inline void ocf_metadata_unlock(struct ocf_cache 
*cache, int rw) -{ - if (rw == OCF_METADATA_WR) - env_rwsem_up_write(&cache->metadata.lock.collision); - else if (rw == OCF_METADATA_RD) - env_rwsem_up_read(&cache->metadata.lock.collision); - else - ENV_BUG(); -} +void ocf_metadata_end_exclusive_access( + struct ocf_metadata_lock *metadata_lock); -static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw) -{ - int result = 0; +int ocf_metadata_try_start_shared_access( + struct ocf_metadata_lock *metadata_lock); - if (rw == OCF_METADATA_WR) { - result = env_rwsem_down_write_trylock( - &cache->metadata.lock.collision); - } else if (rw == OCF_METADATA_RD) { - result = env_rwsem_down_read_trylock( - &cache->metadata.lock.collision); - } else { - ENV_BUG(); - } +void ocf_metadata_start_shared_access( + struct ocf_metadata_lock *metadata_lock); - if (result) - return -1; - - return 0; -} +void ocf_metadata_end_shared_access( + struct ocf_metadata_lock *metadata_lock); static inline void ocf_metadata_status_bits_lock( - struct ocf_cache *cache, int rw) + struct ocf_metadata_lock *metadata_lock, int rw) { if (rw == OCF_METADATA_WR) - env_rwlock_write_lock(&cache->metadata.lock.status); + env_rwlock_write_lock(&metadata_lock->status); else if (rw == OCF_METADATA_RD) - env_rwlock_read_lock(&cache->metadata.lock.status); + env_rwlock_read_lock(&metadata_lock->status); else ENV_BUG(); } static inline void ocf_metadata_status_bits_unlock( - struct ocf_cache *cache, int rw) + struct ocf_metadata_lock *metadata_lock, int rw) { if (rw == OCF_METADATA_WR) - env_rwlock_write_unlock(&cache->metadata.lock.status); + env_rwlock_write_unlock(&metadata_lock->status); else if (rw == OCF_METADATA_RD) - env_rwlock_read_unlock(&cache->metadata.lock.status); + env_rwlock_read_unlock(&metadata_lock->status); else ENV_BUG(); } -#define OCF_METADATA_LOCK_RD() \ - ocf_metadata_lock(cache, OCF_METADATA_RD) +#define OCF_METADATA_LOCK_RD() ocf_metadata_start_shared_access( \ + &cache->metadata.lock) -#define OCF_METADATA_UNLOCK_RD() \ 
- ocf_metadata_unlock(cache, OCF_METADATA_RD) +#define OCF_METADATA_UNLOCK_RD() ocf_metadata_end_shared_access( \ + &cache->metadata.lock) -#define OCF_METADATA_LOCK_RD_TRY() \ - ocf_metadata_try_lock(cache, OCF_METADATA_RD) +#define OCF_METADATA_LOCK_RD_TRY() ocf_metadata_try_start_shared_access( \ + &cache->metadata.lock) -#define OCF_METADATA_LOCK_WR() \ - ocf_metadata_lock(cache, OCF_METADATA_WR) +#define OCF_METADATA_LOCK_WR() ocf_metadata_start_exclusive_access( \ + &cache->metadata.lock) #define OCF_METADATA_LOCK_WR_TRY() \ - ocf_metadata_try_lock(cache, OCF_METADATA_WR) + ocf_metadata_try_start_exclusive_access(&cache->metadata.lock) -#define OCF_METADATA_UNLOCK_WR() \ - ocf_metadata_unlock(cache, OCF_METADATA_WR) +#define OCF_METADATA_UNLOCK_WR() ocf_metadata_end_exclusive_access( \ + &cache->metadata.lock) #define OCF_METADATA_BITS_LOCK_RD() \ - ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD) + ocf_metadata_status_bits_lock(&cache->metadata.lock, \ + OCF_METADATA_RD) #define OCF_METADATA_BITS_UNLOCK_RD() \ - ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD) + ocf_metadata_status_bits_unlock(&cache->metadata.lock, \ + OCF_METADATA_RD) #define OCF_METADATA_BITS_LOCK_WR() \ - ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR) + ocf_metadata_status_bits_lock(&cache->metadata.lock, \ + OCF_METADATA_WR) #define OCF_METADATA_BITS_UNLOCK_WR() \ - ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR) + ocf_metadata_status_bits_unlock(&cache->metadata.lock, \ + OCF_METADATA_WR) -#define OCF_METADATA_FLUSH_LOCK() \ - ocf_metadata_flush_lock(cache) - -#define OCF_METADATA_FLUSH_UNLOCK() \ - ocf_metadata_flush_unlock(cache) +void ocf_req_hash_lock_rd(struct ocf_request *req); +void ocf_req_hash_unlock_rd(struct ocf_request *req); +void ocf_req_hash_lock_wr(struct ocf_request *req); +void ocf_req_hash_unlock_wr(struct ocf_request *req); +void ocf_req_hash_lock_upgrade(struct ocf_request *req); #endif diff --git a/src/metadata/metadata.c 
b/src/metadata/metadata.c index 1b31132..5e5d02f 100644 --- a/src/metadata/metadata.c +++ b/src/metadata/metadata.c @@ -39,7 +39,7 @@ int ocf_metadata_init(struct ocf_cache *cache, return ret; } - ocf_metadata_concurrency_init(cache); + ocf_metadata_concurrency_init(&cache->metadata.lock); return 0; } @@ -73,7 +73,7 @@ void ocf_metadata_deinit(struct ocf_cache *cache) cache->metadata.iface.deinit(cache); } - ocf_metadata_concurrency_deinit(cache); + ocf_metadata_concurrency_deinit(&cache->metadata.lock); ocf_metadata_io_deinit(cache); } diff --git a/src/metadata/metadata_structs.h b/src/metadata/metadata_structs.h index e9b3ac5..ccea1a0 100644 --- a/src/metadata/metadata_structs.h +++ b/src/metadata/metadata_structs.h @@ -428,6 +428,15 @@ struct ocf_cache_line_settings { uint64_t sector_end; }; +struct ocf_metadata_lock +{ + env_rwsem global; /*!< global metadata lock (GML) */ + env_rwlock status; /*!< Fast lock for status bits */ + env_spinlock eviction; /*!< Fast lock for eviction policy */ + env_rwsem *hash; /*!< Hash bucket locks */ + uint32_t num_hash_entries; /*!< Hash bucket count */ +}; + /** * @brief Metadata control structure */ @@ -444,11 +453,7 @@ struct ocf_metadata { bool is_volatile; /*!< true if metadata used in volatile mode (RAM only) */ - struct { - env_rwsem collision; /*!< lock for collision table */ - env_rwlock status; /*!< Fast lock for status bits */ - env_spinlock eviction; /*!< Fast lock for eviction policy */ - } lock; + struct ocf_metadata_lock lock; }; #endif /* __METADATA_STRUCTS_H__ */ diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 4a496da..847efef 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -986,7 +986,8 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline, context->flags.attached_metadata_inited = true; - if (ocf_metadata_concurrency_attached_init(cache)) { + if (ocf_metadata_concurrency_attached_init(&cache->metadata.lock, + 
cache->device->hash_table_entries)) { ocf_cache_log(cache, log_err, "Failed to initialize attached " "metadata concurrency\n"); OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL); @@ -1734,6 +1735,7 @@ static void _ocf_mngt_cache_unplug_complete(void *priv, int error) ocf_volume_close(&cache->device->volume); + ocf_metadata_concurrency_attached_deinit(&cache->metadata.lock); ocf_metadata_deinit_variable_size(cache); ocf_concurrency_deinit(cache); ocf_freelist_deinit(cache->freelist); diff --git a/src/ocf_request.c b/src/ocf_request.c index 57cc838..8584b71 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -311,3 +311,14 @@ void ocf_req_clear_map(struct ocf_request *req) ENV_BUG_ON(env_memset(req->map, sizeof(req->map[0]) * req->core_line_count, 0)); } + +void ocf_req_hash(struct ocf_request *req) +{ + int i; + + for (i = 0; i < req->core_line_count; i++) { + req->map[i].hash = ocf_metadata_hash_func(req->cache, + req->core_line_first + i, + ocf_core_get_id(req->core)); + } +} diff --git a/src/ocf_request.h b/src/ocf_request.h index 1fa924b..fcb2a16 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -319,6 +319,13 @@ void ocf_req_clear_info(struct ocf_request *req); */ void ocf_req_clear_map(struct ocf_request *req); +/** + * @brief Calculate hashes for all core lines within the request + * + * @param req - OCF request + */ +void ocf_req_hash(struct ocf_request *req); + /** * @brief Clear OCF request * From b39bcf86d4a445a5b65e8157c212da98b4131423 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Tue, 30 Jul 2019 16:03:16 -0400 Subject: [PATCH 04/10] Separate engine map/evict (refactoring) This temporarily increases amount of boiler-plate code, but this is going to be mitigated in the following commits. 
Signed-off-by: Adam Rutkowski --- src/engine/engine_common.c | 23 ++++++++++++++--------- src/engine/engine_common.h | 19 ++++++++++++++----- src/engine/engine_rd.c | 15 ++++++++++----- src/engine/engine_wb.c | 13 ++++++++++--- src/engine/engine_wt.c | 13 ++++++++++--- 5 files changed, 58 insertions(+), 25 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index c57aefe..88a8d52 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -16,7 +16,6 @@ #include "../utils/utils_cleaner.h" #include "../metadata/metadata.h" #include "../eviction/eviction.h" -#include "../promotion/promotion.h" void ocf_engine_error(struct ocf_request *req, bool stop_cache, const char *msg) @@ -315,18 +314,15 @@ void ocf_engine_map(struct ocf_request *req) int status = LOOKUP_MAPPED; ocf_core_id_t core_id = ocf_core_get_id(req->core); - if (!ocf_promotion_req_should_promote(cache->promotion_policy, req)) { + if (!ocf_engine_unmapped_count(req)) + return; + + if (ocf_engine_unmapped_count(req) > + ocf_freelist_num_free(cache->freelist)) { req->info.mapping_error = 1; return; } - if (ocf_engine_unmapped_count(req)) - status = space_managment_evict_do(cache, req, - ocf_engine_unmapped_count(req)); - - if (req->info.mapping_error) - return; - ocf_req_clear_info(req); req->info.seq_req = true; @@ -397,6 +393,15 @@ static void _ocf_engine_clean_end(void *private_data, int error) } } +int ocf_engine_evict(struct ocf_request *req) +{ + if (!ocf_engine_unmapped_count(req)) + return 0; + + return space_managment_evict_do(req->cache, req, + ocf_engine_unmapped_count(req)); +} + static int _ocf_engine_clean_getter(struct ocf_cache *cache, void *getter_context, uint32_t item, ocf_cache_line_t *line) { diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h index b581da5..1a4d689 100644 --- a/src/engine/engine_common.h +++ b/src/engine/engine_common.h @@ -162,19 +162,28 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache, uint64_t 
core_line); /** - * @brief Traverse request in order to lookup cache lines If there are misses - * need to call eviction. This process is called 'mapping'. - * - * @note This function CALL EVICTION + * @brief Traverse request in order to lookup cache lines. If there are misses, + * attempt to map free cache lines. * * @param req OCF request */ void ocf_engine_map(struct ocf_request *req); +/** + * @brief Evict cachelines to populate freelist. + * + * @param req OCF request + * + * @returns eviction status + * @retval LOOKUP_MAPPED successfully evicted required number of cachelines + * @retval LOOKUP_MISS eviction failure + */ +int ocf_engine_evict(struct ocf_request *req); + /** * @brief Traverse OCF request (lookup cache) * - * @note This function DO NOT CALL EVICTION. Only lookup in metadata is + * @note This function does not evict cachelines. Only lookup in metadata is * performed. Main purpose of this function is to check if there is a HIT. * * @param req OCF request diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index a26ef10..8e593c7 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -215,6 +215,7 @@ int ocf_read_generic(struct ocf_request *req) bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; struct ocf_cache *cache = req->cache; + bool promote = true; ocf_io_start(&req->ioi.io); @@ -252,21 +253,25 @@ int ocf_read_generic(struct ocf_request *req) } } + if (!mapped) { + promote = ocf_promotion_req_should_promote( + cache->promotion_policy, req); + } + OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -------------------------------------------*/ - if (!mapped) { - + if (!mapped && promote) { /*- Metadata WR access ---------------------------------------*/ - OCF_METADATA_LOCK_WR(); /* Now there is exclusive access for metadata. May traverse once * again. If there are misses need to call eviction. This * process is called 'mapping'. 
*/ - ocf_engine_map(req); + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); if (!req->info.mapping_error) { if (ocf_engine_is_hit(req)) { @@ -288,7 +293,7 @@ int ocf_read_generic(struct ocf_request *req) /*- END Metadata WR access -----------------------------------*/ } - if (!req->info.mapping_error) { + if (promote && !req->info.mapping_error) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* Lock was not acquired, need to wait for resume */ diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c index 1a9938b..1cf052a 100644 --- a/src/engine/engine_wb.c +++ b/src/engine/engine_wb.c @@ -170,6 +170,7 @@ int ocf_write_wb(struct ocf_request *req) bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; struct ocf_cache *cache = req->cache; + bool promote = true; ocf_io_start(&req->ioi.io); @@ -192,16 +193,22 @@ int ocf_write_wb(struct ocf_request *req) lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } + if (!mapped) { + promote = ocf_promotion_req_should_promote( + cache->promotion_policy, req); + } + OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ - if (!mapped) { + if (!mapped && promote) { OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/ /* Now there is exclusive access for metadata. May traverse once * again. If there are misses need to call eviction. This * process is called 'mapping'. 
*/ - ocf_engine_map(req); + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); if (!req->info.mapping_error) { /* Lock request for WRITE access */ @@ -211,7 +218,7 @@ int ocf_write_wb(struct ocf_request *req) OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ } - if (!req->info.mapping_error) { + if (promote && !req->info.mapping_error) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* WR lock was not acquired, need to wait for resume */ diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c index 9db3ec3..db2a81c 100644 --- a/src/engine/engine_wt.c +++ b/src/engine/engine_wt.c @@ -165,6 +165,7 @@ int ocf_write_wt(struct ocf_request *req) bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; struct ocf_cache *cache = req->cache; + bool promote = true; ocf_io_start(&req->ioi.io); @@ -185,16 +186,22 @@ int ocf_write_wt(struct ocf_request *req) lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } + if (!mapped) { + promote = ocf_promotion_req_should_promote( + cache->promotion_policy, req); + } + OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ - if (!mapped) { + if (!mapped && promote) { OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/ /* Now there is exclusive access for metadata. May traverse once * again. If there are misses need to call eviction. This * process is called 'mapping'. 
*/ - ocf_engine_map(req); + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); if (!req->info.mapping_error) { /* Lock request for WRITE access */ @@ -204,7 +211,7 @@ int ocf_write_wt(struct ocf_request *req) OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ } - if (!req->info.mapping_error) { + if (promote && !req->info.mapping_error) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* WR lock was not acquired, need to wait for resume */ From 3a70d68d389a5e0933310f9c3f7e354de6e68a95 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Wed, 31 Jul 2019 16:31:28 -0400 Subject: [PATCH 05/10] Switch from global metadata locks to hash-bucket locks in engines Signed-off-by: Adam Rutkowski --- src/engine/engine_common.c | 5 ++-- src/engine/engine_discard.c | 15 +++++----- src/engine/engine_fast.c | 18 ++++++------ src/engine/engine_inv.c | 4 +-- src/engine/engine_pt.c | 16 +++++------ src/engine/engine_rd.c | 57 ++++++++++++++++++------------------- src/engine/engine_wa.c | 8 +++--- src/engine/engine_wb.c | 52 ++++++++++++++++----------------- src/engine/engine_wi.c | 10 +++---- src/engine/engine_wo.c | 6 ++-- src/engine/engine_wt.c | 52 ++++++++++++++++----------------- src/engine/engine_zero.c | 10 ++++--- 12 files changed, 124 insertions(+), 129 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index 88a8d52..a8d5fc1 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -538,15 +538,14 @@ void inc_fallback_pt_error_counter(ocf_cache_t cache) static int _ocf_engine_refresh(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; int result; - OCF_METADATA_LOCK_RD(); /* Check under metadata RD lock */ + ocf_req_hash_lock_rd(req); result = ocf_engine_check(req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); if (result == 0) { diff --git a/src/engine/engine_discard.c b/src/engine/engine_discard.c index e460f4f..8c10c7d 100644 --- a/src/engine/engine_discard.c +++ 
b/src/engine/engine_discard.c @@ -170,7 +170,7 @@ int _ocf_discard_step_do(struct ocf_request *req) if (ocf_engine_mapped_count(req)) { /* There are mapped cache line, need to remove them */ - OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/ + ocf_req_hash_lock_wr(req); /* Remove mapped cache lines from metadata */ ocf_purge_map_info(req); @@ -181,16 +181,16 @@ int _ocf_discard_step_do(struct ocf_request *req) _ocf_discard_step_complete); } - OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + ocf_req_hash_unlock_wr(req); } - OCF_METADATA_LOCK_RD(); + ocf_req_hash_lock_rd(req); /* Even if no cachelines are mapped they could be tracked in promotion * policy. RD lock suffices. */ ocf_promotion_req_purge(req->cache->promotion_policy, req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); OCF_DEBUG_RQ(req, "Discard"); _ocf_discard_step_complete(req, 0); @@ -224,11 +224,12 @@ static int _ocf_discard_step(struct ocf_request *req) req->core_line_count = req->core_line_last - req->core_line_first + 1; req->io_if = &_io_if_discard_step_resume; - OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ - ENV_BUG_ON(env_memset(req->map, sizeof(*req->map) * req->core_line_count, 0)); + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); + /* Travers to check if request is mapped fully */ ocf_engine_traverse(req); @@ -239,7 +240,7 @@ static int _ocf_discard_step(struct ocf_request *req) lock = OCF_LOCK_ACQUIRED; } - OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + ocf_req_hash_unlock_rd(req); if (lock >= 0) { if (OCF_LOCK_ACQUIRED == lock) { diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c index 78bf350..8cfe96c 100644 --- a/src/engine/engine_fast.c +++ b/src/engine/engine_fast.c @@ -58,8 +58,6 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error) static int _ocf_read_fast_do(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - if 
(ocf_engine_is_miss(req)) { /* It seams that after resume, now request is MISS, do PT */ OCF_DEBUG_RQ(req, "Switching to read PT"); @@ -74,14 +72,14 @@ static int _ocf_read_fast_do(struct ocf_request *req) if (req->info.re_part) { OCF_DEBUG_RQ(req, "Re-Part"); - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* Probably some cache lines are assigned into wrong * partition. Need to move it to new one */ ocf_part_move(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } /* Submit IO */ @@ -110,7 +108,6 @@ int ocf_read_fast(struct ocf_request *req) { bool hit; int lock = OCF_LOCK_NOT_ACQUIRED; - struct ocf_cache *cache = req->cache; /* Get OCF request - increase reference counter */ ocf_req_get(req); @@ -120,7 +117,8 @@ int ocf_read_fast(struct ocf_request *req) /*- Metadata RD access -----------------------------------------------*/ - OCF_METADATA_LOCK_RD(); + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /* Traverse request to cache if there is hit */ ocf_engine_traverse(req); @@ -131,7 +129,7 @@ int ocf_read_fast(struct ocf_request *req) lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); if (hit) { OCF_DEBUG_RQ(req, "Fast path success"); @@ -179,7 +177,6 @@ int ocf_write_fast(struct ocf_request *req) { bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; - struct ocf_cache *cache = req->cache; /* Get OCF request - increase reference counter */ ocf_req_get(req); @@ -189,7 +186,8 @@ int ocf_write_fast(struct ocf_request *req) /*- Metadata RD access -----------------------------------------------*/ - OCF_METADATA_LOCK_RD(); + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /* Traverse request to cache if there is hit */ ocf_engine_traverse(req); @@ -200,7 +198,7 @@ int ocf_write_fast(struct ocf_request *req) lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); if (mapped) { if (lock >= 0) { diff --git 
a/src/engine/engine_inv.c b/src/engine/engine_inv.c index 347d3cf..4824fb0 100644 --- a/src/engine/engine_inv.c +++ b/src/engine/engine_inv.c @@ -43,9 +43,9 @@ static int _ocf_invalidate_do(struct ocf_request *req) ENV_BUG_ON(env_atomic_read(&req->req_remaining)); - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); ocf_purge_map_info(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); env_atomic_inc(&req->req_remaining); diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c index 0a910a8..799f7b0 100644 --- a/src/engine/engine_pt.c +++ b/src/engine/engine_pt.c @@ -52,16 +52,14 @@ static inline void _ocf_read_pt_submit(struct ocf_request *req) int ocf_read_pt_do(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - /* Get OCF request - increase reference counter */ ocf_req_get(req); if (req->info.dirty_any) { - OCF_METADATA_LOCK_RD(); + ocf_req_hash_lock_rd(req); /* Need to clean, start it */ ocf_engine_clean(req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); /* Do not processing, because first we need to clean request */ ocf_req_put(req); @@ -72,14 +70,14 @@ int ocf_read_pt_do(struct ocf_request *req) if (req->info.re_part) { OCF_DEBUG_RQ(req, "Re-Part"); - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* Probably some cache lines are assigned into wrong * partition. 
Need to move it to new one */ ocf_part_move(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } /* Submit read IO to the core */ @@ -105,7 +103,6 @@ int ocf_read_pt(struct ocf_request *req) { bool use_cache = false; int lock = OCF_LOCK_NOT_ACQUIRED; - struct ocf_cache *cache = req->cache; OCF_DEBUG_TRACE(req->cache); @@ -117,7 +114,8 @@ int ocf_read_pt(struct ocf_request *req) /* Set resume io_if */ req->io_if = &_io_if_pt_resume; - OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/ + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /* Traverse request to check if there are mapped cache lines */ ocf_engine_traverse(req); @@ -136,7 +134,7 @@ int ocf_read_pt(struct ocf_request *req) } } - OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ + ocf_req_hash_unlock_rd(req); if (use_cache) { /* diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 8e593c7..cf1c550 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -137,8 +137,6 @@ err_alloc: static int _ocf_read_generic_do(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - if (ocf_engine_is_miss(req) && req->map->rd_locked) { /* Miss can be handled only on write locks. 
* Need to switch to PT @@ -153,12 +151,12 @@ static int _ocf_read_generic_do(struct ocf_request *req) if (ocf_engine_is_miss(req)) { if (req->info.dirty_any) { - OCF_METADATA_LOCK_RD(); + ocf_req_hash_lock_rd(req); /* Request is dirty need to clean request */ ocf_engine_clean(req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); /* We need to clean request before processing, return */ ocf_req_put(req); @@ -166,25 +164,25 @@ static int _ocf_read_generic_do(struct ocf_request *req) return 0; } - OCF_METADATA_LOCK_RD(); + ocf_req_hash_lock_rd(req); /* Set valid status bits map */ ocf_set_valid_map_info(req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); } if (req->info.re_part) { OCF_DEBUG_RQ(req, "Re-Part"); - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* Probably some cache lines are assigned into wrong * partition. Need to move it to new one */ ocf_part_move(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } OCF_DEBUG_RQ(req, "Submit"); @@ -216,6 +214,7 @@ int ocf_read_generic(struct ocf_request *req) int lock = OCF_LOCK_NOT_ACQUIRED; struct ocf_cache *cache = req->cache; bool promote = true; + struct ocf_metadata_lock *metadata_lock = &cache->metadata.lock; ocf_io_start(&req->ioi.io); @@ -231,10 +230,11 @@ int ocf_read_generic(struct ocf_request *req) /* Set resume call backs */ req->io_if = &_io_if_read_generic_resume; + /* calculate hashes for hash-bucket locking */ + ocf_req_hash(req); + /*- Metadata RD access -----------------------------------------------*/ - - OCF_METADATA_LOCK_RD(); - + ocf_req_hash_lock_rd(req); /* Traverse request to cache if there is hit */ ocf_engine_traverse(req); @@ -251,27 +251,27 @@ int ocf_read_generic(struct ocf_request *req) */ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } - } - - if (!mapped) { + } else { promote = ocf_promotion_req_should_promote( cache->promotion_policy, req); } - OCF_METADATA_UNLOCK_RD(); + if (mapped || !promote) { + ocf_req_hash_unlock_rd(req); 
+ } else { + /*- Metadata RD access ---------------------------------------*/ + ocf_req_hash_lock_upgrade(req); + ocf_engine_map(req); + ocf_req_hash_unlock_wr(req); - /*- END Metadata RD access -------------------------------------------*/ - - if (!mapped && promote) { - /*- Metadata WR access ---------------------------------------*/ - OCF_METADATA_LOCK_WR(); - - /* Now there is exclusive access for metadata. May traverse once - * again. If there are misses need to call eviction. This - * process is called 'mapping'. - */ - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); + if (req->info.mapping_error) { + /* Still not mapped - evict cachelines under global + * metadata write lock */ + ocf_metadata_start_exclusive_access(metadata_lock); + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); + ocf_metadata_end_exclusive_access(metadata_lock); + } if (!req->info.mapping_error) { if (ocf_engine_is_hit(req)) { @@ -288,9 +288,6 @@ int ocf_read_generic(struct ocf_request *req) ocf_engine_on_resume); } } - OCF_METADATA_UNLOCK_WR(); - - /*- END Metadata WR access -----------------------------------*/ } if (promote && !req->info.mapping_error) { diff --git a/src/engine/engine_wa.c b/src/engine/engine_wa.c index 3c854c2..f5face0 100644 --- a/src/engine/engine_wa.c +++ b/src/engine/engine_wa.c @@ -38,19 +38,19 @@ static void _ocf_read_wa_complete(struct ocf_request *req, int error) int ocf_write_wa(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - ocf_io_start(&req->ioi.io); /* Get OCF request - increase reference counter */ ocf_req_get(req); - OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/ + ocf_req_hash(req); + + ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/ /* Traverse request to check if there are mapped cache lines */ ocf_engine_traverse(req); - OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ + ocf_req_hash_unlock_rd(req); /*- END Metadata 
RD access -----------------*/ if (ocf_engine_is_hit(req)) { ocf_req_clear(req); diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c index 1cf052a..4546b18 100644 --- a/src/engine/engine_wb.c +++ b/src/engine/engine_wb.c @@ -25,23 +25,21 @@ static const struct ocf_io_if _io_if_wb_resume = { static void _ocf_write_wb_update_bits(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - if (ocf_engine_is_miss(req)) { - OCF_METADATA_LOCK_RD(); + ocf_req_hash_lock_rd(req); /* Update valid status bits */ ocf_set_valid_map_info(req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); } if (!ocf_engine_is_dirty_all(req)) { - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* set dirty bits, and mark if metadata flushing is required */ ocf_set_dirty_map_info(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } } @@ -127,14 +125,14 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req) if (req->info.re_part) { OCF_DEBUG_RQ(req, "Re-Part"); - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* Probably some cache lines are assigned into wrong * partition. 
Need to move it to new one */ ocf_part_move(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } OCF_DEBUG_RQ(req, "Submit Data"); @@ -169,8 +167,8 @@ int ocf_write_wb(struct ocf_request *req) { bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; - struct ocf_cache *cache = req->cache; bool promote = true; + struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; ocf_io_start(&req->ioi.io); @@ -182,7 +180,8 @@ int ocf_write_wb(struct ocf_request *req) /* TODO: Handle fits into dirty */ - OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ /* Travers to check if request is mapped fully */ ocf_engine_traverse(req); @@ -191,31 +190,32 @@ int ocf_write_wb(struct ocf_request *req) if (mapped) { /* All cache line are mapped, lock request for WRITE access */ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } - - if (!mapped) { + } else { promote = ocf_promotion_req_should_promote( - cache->promotion_policy, req); + req->cache->promotion_policy, req); } - OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + if (mapped || !promote) { + ocf_req_hash_unlock_rd(req); + } else { + /*- Metadata RD access ---------------------------------------*/ + ocf_req_hash_lock_upgrade(req); + ocf_engine_map(req); + ocf_req_hash_unlock_wr(req); - if (!mapped && promote) { - OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/ - - /* Now there is exclusive access for metadata. May traverse once - * again. If there are misses need to call eviction. This - * process is called 'mapping'. 
- */ - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); + if (req->info.mapping_error) { + /* Still not mapped - evict cachelines under global + * metadata write lock */ + ocf_metadata_start_exclusive_access(metadata_lock); + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); + ocf_metadata_end_exclusive_access(metadata_lock); + } if (!req->info.mapping_error) { /* Lock request for WRITE access */ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } - - OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ } if (promote && !req->info.mapping_error) { diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c index 482b156..ff947e4 100644 --- a/src/engine/engine_wi.c +++ b/src/engine/engine_wi.c @@ -52,12 +52,12 @@ static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req) if (ocf_engine_mapped_count(req)) { /* There are mapped cache line, need to remove them */ - OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/ + ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/ /* Remove mapped cache lines from metadata */ ocf_purge_map_info(req); - OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/ if (req->info.flush_metadata) { /* Request was dirty and need to flush metadata */ @@ -135,7 +135,6 @@ static const struct ocf_io_if _io_if_wi_resume = { int ocf_write_wi(struct ocf_request *req) { int lock = OCF_LOCK_NOT_ACQUIRED; - struct ocf_cache *cache = req->cache; OCF_DEBUG_TRACE(req->cache); @@ -147,7 +146,8 @@ int ocf_write_wi(struct ocf_request *req) /* Set resume io_if */ req->io_if = &_io_if_wi_resume; - OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ /* Travers to check if request is mapped fully */ ocf_engine_traverse(req); @@ -159,7 +159,7 @@ int ocf_write_wi(struct 
ocf_request *req) lock = OCF_LOCK_ACQUIRED; } - OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/ if (lock >= 0) { if (lock == OCF_LOCK_ACQUIRED) { diff --git a/src/engine/engine_wo.c b/src/engine/engine_wo.c index 5687280..193198d 100644 --- a/src/engine/engine_wo.c +++ b/src/engine/engine_wo.c @@ -201,7 +201,6 @@ static const struct ocf_io_if _io_if_wo_resume = { int ocf_read_wo(struct ocf_request *req) { - ocf_cache_t cache = req->cache; int lock = OCF_LOCK_ACQUIRED; OCF_DEBUG_TRACE(req->cache); @@ -214,7 +213,8 @@ int ocf_read_wo(struct ocf_request *req) /* Set resume call backs */ req->io_if = &_io_if_wo_resume; - OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/ + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/ /* Traverse request to check if there are mapped cache lines */ ocf_engine_traverse(req); @@ -226,7 +226,7 @@ int ocf_read_wo(struct ocf_request *req) lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); } - OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ + ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/ if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c index db2a81c..7273f95 100644 --- a/src/engine/engine_wt.c +++ b/src/engine/engine_wt.c @@ -97,19 +97,17 @@ static inline void _ocf_write_wt_submit(struct ocf_request *req) static void _ocf_write_wt_update_bits(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - if (ocf_engine_is_miss(req)) { - OCF_METADATA_LOCK_RD(); + ocf_req_hash_lock_rd(req); /* Update valid status bits */ ocf_set_valid_map_info(req); - OCF_METADATA_UNLOCK_RD(); + ocf_req_hash_unlock_rd(req); } if (req->info.dirty_any) { - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* Writes goes to SDD and HDD, need to update status 
bits from * dirty to clean @@ -117,20 +115,20 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req) ocf_set_clean_map_info(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } if (req->info.re_part) { OCF_DEBUG_RQ(req, "Re-Part"); - OCF_METADATA_LOCK_WR(); + ocf_req_hash_lock_wr(req); /* Probably some cache lines are assigned into wrong * partition. Need to move it to new one */ ocf_part_move(req); - OCF_METADATA_UNLOCK_WR(); + ocf_req_hash_unlock_wr(req); } } @@ -164,8 +162,8 @@ int ocf_write_wt(struct ocf_request *req) { bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; - struct ocf_cache *cache = req->cache; bool promote = true; + struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; ocf_io_start(&req->ioi.io); @@ -175,7 +173,8 @@ int ocf_write_wt(struct ocf_request *req) /* Set resume io_if */ req->io_if = &_io_if_wt_resume; - OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ /* Travers to check if request is mapped fully */ ocf_engine_traverse(req); @@ -184,31 +183,32 @@ int ocf_write_wt(struct ocf_request *req) if (mapped) { /* All cache line are mapped, lock request for WRITE access */ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } - - if (!mapped) { + } else { promote = ocf_promotion_req_should_promote( - cache->promotion_policy, req); + req->cache->promotion_policy, req); } - OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + if (mapped || !promote) { + ocf_req_hash_unlock_rd(req); + } else { + /*- Metadata RD access ---------------------------------------*/ + ocf_req_hash_lock_upgrade(req); + ocf_engine_map(req); + ocf_req_hash_unlock_wr(req); - if (!mapped && promote) { - OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/ - - /* Now there is exclusive access for metadata. May traverse once - * again. If there are misses need to call eviction. 
This - * process is called 'mapping'. - */ - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); + if (req->info.mapping_error) { + /* Still not mapped - evict cachelines under global + * metadata write lock */ + ocf_metadata_start_exclusive_access(metadata_lock); + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); + ocf_metadata_end_exclusive_access(metadata_lock); + } if (!req->info.mapping_error) { /* Lock request for WRITE access */ lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); } - - OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ } if (promote && !req->info.mapping_error) { diff --git a/src/engine/engine_zero.c b/src/engine/engine_zero.c index d5cb240..23f665c 100644 --- a/src/engine/engine_zero.c +++ b/src/engine/engine_zero.c @@ -18,19 +18,17 @@ static int ocf_zero_purge(struct ocf_request *req) { - struct ocf_cache *cache = req->cache; - if (req->error) { ocf_engine_error(req, true, "Failed to discard data on cache"); } else { /* There are mapped cache line, need to remove them */ - OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/ + ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/ /* Remove mapped cache lines from metadata */ ocf_purge_map_info(req); - OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/ } ocf_req_unlock_wr(req); @@ -142,6 +140,10 @@ void ocf_engine_zero_line(struct ocf_request *req) ENV_BUG_ON(req->core_line_count != 1); + /* No hash bucket locking here - ocf_engine_zero_line caller must hold + * metadata global write lock, so we have exclusive access to all hash + * buckets here. 
*/ + /* Traverse to check if request is mapped */ ocf_engine_traverse(req); From 2333d837fb598b809db193ef3b750f46108e2bcf Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Thu, 1 Aug 2019 16:53:20 -0400 Subject: [PATCH 06/10] Add single hash bucket lock interface Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_metadata_concurrency.c | 47 +++++++++++++++++++++- src/concurrency/ocf_metadata_concurrency.h | 12 +++++- src/metadata/metadata_structs.h | 1 + src/mngt/ocf_mngt_cache.c | 2 +- 4 files changed, 59 insertions(+), 3 deletions(-) diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c index 7952ac2..2d0a551 100644 --- a/src/concurrency/ocf_metadata_concurrency.c +++ b/src/concurrency/ocf_metadata_concurrency.c @@ -4,6 +4,7 @@ */ #include "ocf_metadata_concurrency.h" +#include "../metadata/metadata_misc.h" void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock) { @@ -20,11 +21,12 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock) } int ocf_metadata_concurrency_attached_init( - struct ocf_metadata_lock *metadata_lock, + struct ocf_metadata_lock *metadata_lock, ocf_cache_t cache, uint64_t hash_table_entries) { uint64_t i; + metadata_lock->cache = cache; metadata_lock->num_hash_entries = hash_table_entries; metadata_lock->hash = env_vzalloc(sizeof(env_rwsem) * hash_table_entries); @@ -132,6 +134,49 @@ int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock, return 0; } +/* NOTE: attempt to acquire hash lock for multiple core lines may end up + * in deadlock. 
In order to hash lock multiple core lines safely, use + * ocf_req_hash_lock_* functions */ +void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line) +{ + ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache, + core_line, core_id); + + ocf_metadata_start_shared_access(metadata_lock); + ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_RD); +} + +void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line) +{ + ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache, + core_line, core_id); + + ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_RD); + ocf_metadata_end_shared_access(metadata_lock); +} + +void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line) +{ + ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache, + core_line, core_id); + + ocf_metadata_start_shared_access(metadata_lock); + ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_WR); +} + +void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line) +{ + ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache, + core_line, core_id); + + ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_WR); + ocf_metadata_end_shared_access(metadata_lock); +} + #define _NUM_HASH_ENTRIES req->cache->metadata.lock.num_hash_entries /* diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h index ccd996b..c085c97 100644 --- a/src/concurrency/ocf_metadata_concurrency.h +++ b/src/concurrency/ocf_metadata_concurrency.h @@ -15,7 +15,7 @@ void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock); void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock); int ocf_metadata_concurrency_attached_init( - struct ocf_metadata_lock *metadata_lock, + struct 
ocf_metadata_lock *metadata_lock, ocf_cache_t cache, uint64_t hash_table_entries); void ocf_metadata_concurrency_attached_deinit( @@ -113,6 +113,16 @@ static inline void ocf_metadata_status_bits_unlock( ocf_metadata_status_bits_unlock(&cache->metadata.lock, \ OCF_METADATA_WR) +void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line); +void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line); +void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line); +void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock, + uint32_t core_id, uint64_t core_line); + +/* lock entire request in deadlock-free manner */ void ocf_req_hash_lock_rd(struct ocf_request *req); void ocf_req_hash_unlock_rd(struct ocf_request *req); void ocf_req_hash_lock_wr(struct ocf_request *req); diff --git a/src/metadata/metadata_structs.h b/src/metadata/metadata_structs.h index ccea1a0..22b3703 100644 --- a/src/metadata/metadata_structs.h +++ b/src/metadata/metadata_structs.h @@ -435,6 +435,7 @@ struct ocf_metadata_lock env_spinlock eviction; /*!< Fast lock for eviction policy */ env_rwsem *hash; /*!< Hash bucket locks */ uint32_t num_hash_entries; /*!< Hash bucket count */ + ocf_cache_t cache; /*!< Parent cache object */ }; /** diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 847efef..22b8558 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -987,7 +987,7 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline, context->flags.attached_metadata_inited = true; if (ocf_metadata_concurrency_attached_init(&cache->metadata.lock, - cache->device->hash_table_entries)) { + cache, cache->device->hash_table_entries)) { ocf_cache_log(cache, log_err, "Failed to initialize attached " "metadata concurrency\n"); OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL); 
From d2bd807e49d776fb115ccc9fe724e85bd449f098 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Thu, 1 Aug 2019 16:54:26 -0400 Subject: [PATCH 07/10] Remove calls to OCF_METADATA_(UN)LOCK_WR(RD) Signed-off-by: Adam Rutkowski --- src/cleaning/acp.c | 4 ++-- src/cleaning/alru.c | 6 ++--- src/concurrency/ocf_metadata_concurrency.h | 18 -------------- src/metadata/metadata.c | 8 +++---- src/metadata/metadata_hash.c | 4 ++-- src/metadata/metadata_io.c | 4 ++-- src/mngt/ocf_mngt_cache.c | 23 +++++++----------- src/mngt/ocf_mngt_common.c | 17 ++++++------- src/mngt/ocf_mngt_flush.c | 28 +++++++++++----------- src/mngt/ocf_mngt_io_class.c | 4 ++-- src/utils/utils_cleaner.c | 4 ++-- 11 files changed, 48 insertions(+), 72 deletions(-) diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c index 533f4e6..1a1d1d9 100644 --- a/src/cleaning/acp.c +++ b/src/cleaning/acp.c @@ -392,7 +392,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache, struct ocf_map_info info; bool locked = false; - OCF_METADATA_LOCK_RD(); + ocf_metadata_hash_lock_rd(&cache->metadata.lock, core_id, core_line); ocf_engine_lookup_map_entry(cache, &info, core_id, core_line); @@ -403,7 +403,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache, locked = true; } - OCF_METADATA_UNLOCK_RD(); + ocf_metadata_hash_unlock_rd(&cache->metadata.lock, core_id, core_line); return locked ? 
info.coll_idx : cache->device->collision_table_entries; } diff --git a/src/cleaning/alru.c b/src/cleaning/alru.c index 91c6781..e3e08f4 100644 --- a/src/cleaning/alru.c +++ b/src/cleaning/alru.c @@ -779,7 +779,7 @@ static void alru_clean(struct alru_flush_ctx *fctx) return; } - if (OCF_METADATA_LOCK_WR_TRY()) { + if (ocf_metadata_try_start_exclusive_access(&cache->metadata.lock)) { alru_clean_complete(fctx, 0); return; } @@ -797,7 +797,7 @@ static void alru_clean(struct alru_flush_ctx *fctx) fctx->flush_perfomed = true; ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, to_clean, &fctx->attribs); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); return; } @@ -806,7 +806,7 @@ static void alru_clean(struct alru_flush_ctx *fctx) env_ticks_to_secs(env_get_tick_count()); end: - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); alru_clean_complete(fctx, 0); } diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h index c085c97..0875237 100644 --- a/src/concurrency/ocf_metadata_concurrency.h +++ b/src/concurrency/ocf_metadata_concurrency.h @@ -79,24 +79,6 @@ static inline void ocf_metadata_status_bits_unlock( ENV_BUG(); } -#define OCF_METADATA_LOCK_RD() ocf_metadata_start_shared_access( \ - &cache->metadata.lock) - -#define OCF_METADATA_UNLOCK_RD() ocf_metadata_end_shared_access( \ - &cache->metadata.lock) - -#define OCF_METADATA_LOCK_RD_TRY() ocf_metadata_try_start_shared_access( \ - &cache->metadata.lock) - -#define OCF_METADATA_LOCK_WR() ocf_metadata_start_exclusive_access( \ - &cache->metadata.lock) - -#define OCF_METADATA_LOCK_WR_TRY() \ - ocf_metadata_try_start_exclusive_access(&cache->metadata.lock) - -#define OCF_METADATA_UNLOCK_WR() ocf_metadata_end_exclusive_access( \ - &cache->metadata.lock) - #define OCF_METADATA_BITS_LOCK_RD() \ ocf_metadata_status_bits_lock(&cache->metadata.lock, \ OCF_METADATA_RD) diff --git a/src/metadata/metadata.c 
b/src/metadata/metadata.c index 5e5d02f..ddcd247 100644 --- a/src/metadata/metadata.c +++ b/src/metadata/metadata.c @@ -113,17 +113,17 @@ ocf_cache_line_t ocf_metadata_get_cachelines_count(ocf_cache_t cache) void ocf_metadata_flush_all(ocf_cache_t cache, ocf_metadata_end_t cmpl, void *priv) { - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); cache->metadata.iface.flush_all(cache, cmpl, priv); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } void ocf_metadata_load_all(ocf_cache_t cache, ocf_metadata_end_t cmpl, void *priv) { - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); cache->metadata.iface.load_all(cache, cmpl, priv); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } void ocf_metadata_load_recovery(ocf_cache_t cache, diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c index 628df6d..8bb59f3 100644 --- a/src/metadata/metadata_hash.c +++ b/src/metadata/metadata_hash.c @@ -1903,7 +1903,7 @@ static void _recovery_rebuild_metadata(ocf_pipeline_t pipeline, const uint64_t collision_table_entries = ocf_metadata_collision_table_entries(cache); - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); for (cline = 0; cline < collision_table_entries; cline++) { ocf_metadata_get_core_info(cache, cline, &core_id, &core_line); @@ -1923,7 +1923,7 @@ static void _recovery_rebuild_metadata(ocf_pipeline_t pipeline, OCF_COND_RESCHED(step, 128); } - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); ocf_pipeline_next(pipeline); } diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index 35183a3..859c5e2 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -226,9 +226,9 @@ static int ocf_restart_meta_io(struct ocf_request *req) int ret; /* Fill with the latest metadata. 
*/ - OCF_METADATA_LOCK_RD(); + /* TODO: synchronize with concurrent metadata io and hash bucket locks + */ metadata_io_req_fill(meta_io_req); - OCF_METADATA_UNLOCK_RD(); io = ocf_new_cache_io(cache, req->io_queue, PAGES_TO_BYTES(meta_io_req->page), diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 22b8558..84e2084 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -292,7 +292,6 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache, ocf_error_t result; /* Lock to ensure consistency */ - OCF_METADATA_LOCK_WR(); ocf_metadata_init_hash_table(cache); ocf_metadata_init_collision(cache); @@ -303,7 +302,6 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache, if (result) { ocf_cache_log(cache, log_err, "Cannot initialize cleaning policy\n"); - OCF_METADATA_UNLOCK_WR(); return result; } @@ -313,24 +311,19 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache, ocf_cache_log(cache, log_err, "Cannot initialize promotion policy\n"); __deinit_cleaning_policy(cache); - OCF_METADATA_UNLOCK_WR(); return result; } - OCF_METADATA_UNLOCK_WR(); - return 0; } static void init_attached_data_structures_recovery(ocf_cache_t cache) { - OCF_METADATA_LOCK_WR(); ocf_metadata_init_hash_table(cache); ocf_metadata_init_collision(cache); __init_partitions_attached(cache); __reset_stats(cache); __init_metadata_version(cache); - OCF_METADATA_UNLOCK_WR(); } /**************************************************************** @@ -2242,11 +2235,11 @@ int ocf_mngt_cache_promotion_set_policy(ocf_cache_t cache, ocf_promotion_t type) { int result; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); result = ocf_promotion_set_policy(cache->promotion_policy, type); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); return result; } @@ -2255,11 +2248,11 @@ ocf_promotion_t ocf_mngt_cache_promotion_get_policy(ocf_cache_t cache) { ocf_promotion_t 
result; - OCF_METADATA_LOCK_RD(); + ocf_metadata_start_shared_access(&cache->metadata.lock); result = cache->conf_meta->promotion_policy_type; - OCF_METADATA_UNLOCK_RD(); + ocf_metadata_end_shared_access(&cache->metadata.lock); return result; } @@ -2269,12 +2262,12 @@ int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache, uint8_t param_id, { int result; - OCF_METADATA_LOCK_RD(); + ocf_metadata_start_shared_access(&cache->metadata.lock); result = ocf_promotion_get_param(cache->promotion_policy, param_id, param_value); - OCF_METADATA_UNLOCK_RD(); + ocf_metadata_end_shared_access(&cache->metadata.lock); return result; } @@ -2284,12 +2277,12 @@ int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, uint8_t param_id, { int result; - OCF_METADATA_LOCK_RD(); + ocf_metadata_start_shared_access(&cache->metadata.lock); result = ocf_promotion_set_param(cache->promotion_policy, param_id, param_value); - OCF_METADATA_UNLOCK_RD(); + ocf_metadata_end_shared_access(&cache->metadata.lock); return result; } diff --git a/src/mngt/ocf_mngt_common.c b/src/mngt/ocf_mngt_common.c index bc21cd0..2ab6118 100644 --- a/src/mngt/ocf_mngt_common.c +++ b/src/mngt/ocf_mngt_common.c @@ -39,7 +39,7 @@ void cache_mngt_core_remove_from_cleaning_pol(ocf_core_t core) ocf_core_id_t core_id = ocf_core_get_id(core); ocf_cleaning_t clean_pol_type; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); clean_pol_type = cache->conf_meta->cleaning_policy_type; if (cache->core[core_id].opened) { @@ -49,7 +49,7 @@ void cache_mngt_core_remove_from_cleaning_pol(ocf_core_t core) } } - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } /* Deinitialize core metadata in attached metadata */ @@ -65,7 +65,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core) if (!core_size) core_size = ~0ULL; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); clean_pol_type = 
cache->conf_meta->cleaning_policy_type; while (retry) { @@ -82,13 +82,14 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core) } if (retry) { - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); env_msleep(100); - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access( + &cache->metadata.lock); } } - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } /* Mark core as removed in metadata */ @@ -96,7 +97,7 @@ void cache_mngt_core_remove_from_meta(ocf_core_t core) { ocf_cache_t cache = ocf_core_get_cache(core); - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); /* In metadata mark data this core was removed from cache */ core->conf_meta->valid = false; @@ -105,7 +106,7 @@ void cache_mngt_core_remove_from_meta(ocf_core_t core) ocf_mngt_core_clear_uuid_metadata(core); core->conf_meta->seq_no = OCF_SEQ_NO_INVALID; - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } /* Deinit in-memory structures related to this core */ diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c index 237e80e..3bfdee4 100644 --- a/src/mngt/ocf_mngt_flush.c +++ b/src/mngt/ocf_mngt_flush.c @@ -385,9 +385,9 @@ static int _ofc_flush_container_step(struct ocf_request *req) struct flush_container *fc = req->priv; ocf_cache_t cache = fc->cache; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); _ocf_mngt_flush_portion(fc); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); return 0; } @@ -501,7 +501,7 @@ static void _ocf_mngt_flush_core( return; } - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); ret = _ocf_mngt_get_sectors(cache, core_id, &fc->flush_data, &fc->count); @@ -509,7 +509,7 @@ static void _ocf_mngt_flush_core( ocf_core_log(core, log_err, "Flushing operation aborted, " "no memory\n"); env_vfree(fc); - 
OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); complete(context, -OCF_ERR_NO_MEM); return; } @@ -519,7 +519,7 @@ static void _ocf_mngt_flush_core( _ocf_mngt_flush_containers(context, fc, 1, complete); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } static void _ocf_mngt_flush_all_cores( @@ -538,21 +538,21 @@ static void _ocf_mngt_flush_all_cores( env_atomic_set(&cache->flush_in_progress, 1); - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); /* Get all 'dirty' sectors for all cores */ ret = _ocf_mngt_get_flush_containers(cache, &fctbl, &fcnum); if (ret) { ocf_cache_log(cache, log_err, "Flushing operation aborted, " "no memory\n"); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); complete(context, ret); return; } _ocf_mngt_flush_containers(context, fctbl, fcnum, complete); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); } static void _ocf_mngt_flush_all_cores_complete( @@ -774,10 +774,10 @@ static void _ocf_mngt_cache_invalidate(ocf_pipeline_t pipeline, void *priv, ocf_cache_t cache = context->cache; int result; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); result = ocf_metadata_sparse_range(cache, context->purge.core_id, 0, context->purge.end_byte); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result); } @@ -907,7 +907,7 @@ int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type) return 0; } - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); if (cleaning_policy_ops[old_type].deinitialize) cleaning_policy_ops[old_type].deinitialize(cache); @@ -925,7 +925,7 @@ int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type) cache->conf_meta->cleaning_policy_type = type; - 
OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); ocf_cache_log(cache, log_info, "Changing cleaning policy from " "%s to %s\n", cleaning_policy_ops[old_type].name, @@ -957,12 +957,12 @@ int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type, if (!cleaning_policy_ops[type].set_cleaning_param) return -OCF_ERR_INVAL; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); ret = cleaning_policy_ops[type].set_cleaning_param(cache, param_id, param_value); - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); return ret; } diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c index ae0f086..cb42f93 100644 --- a/src/mngt/ocf_mngt_io_class.c +++ b/src/mngt/ocf_mngt_io_class.c @@ -275,7 +275,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache, if (!old_config) return -OCF_ERR_NO_MEM; - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); result = env_memcpy(old_config, sizeof(cache->user_parts), cache->user_parts, sizeof(cache->user_parts)); @@ -300,7 +300,7 @@ out_edit: } out_cpy: - OCF_METADATA_UNLOCK_WR(); + ocf_metadata_end_exclusive_access(&cache->metadata.lock); env_free(old_config); return result; diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c index 6f9aa23..41b425e 100644 --- a/src/utils/utils_cleaner.c +++ b/src/utils/utils_cleaner.c @@ -320,7 +320,7 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req) OCF_DEBUG_TRACE(req->cache); - OCF_METADATA_LOCK_WR(); + ocf_metadata_start_exclusive_access(&cache->metadata.lock); /* Update metadata */ for (i = 0; i < req->core_line_count; i++, iter++) { if (iter->status == LOOKUP_MISS) @@ -345,7 +345,7 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req) } ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end); - OCF_METADATA_UNLOCK_WR(); + 
ocf_metadata_end_exclusive_access(&cache->metadata.lock); return 0; } From 5248093e1fd38aeb280f1f897a69c358f24f4344 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 12 Aug 2019 17:36:30 -0400 Subject: [PATCH 08/10] Move common mapping and locking logic to dedicated function Signed-off-by: Adam Rutkowski --- src/engine/engine_common.c | 73 ++++++++++++++++++++++++++++++++++- src/engine/engine_common.h | 34 ++++++++++++---- src/engine/engine_rd.c | 79 ++++++++------------------------------ src/engine/engine_wb.c | 54 +++++++------------------- src/engine/engine_wt.c | 54 +++++++------------------- 5 files changed, 140 insertions(+), 154 deletions(-) diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index a8d5fc1..92d49c0 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -16,6 +16,8 @@ #include "../utils/utils_cleaner.h" #include "../metadata/metadata.h" #include "../eviction/eviction.h" +#include "../promotion/promotion.h" +#include "../concurrency/ocf_concurrency.h" void ocf_engine_error(struct ocf_request *req, bool stop_cache, const char *msg) @@ -305,7 +307,7 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache, } } -void ocf_engine_map(struct ocf_request *req) +static void ocf_engine_map(struct ocf_request *req) { struct ocf_cache *cache = req->cache; uint32_t i; @@ -393,7 +395,7 @@ static void _ocf_engine_clean_end(void *private_data, int error) } } -int ocf_engine_evict(struct ocf_request *req) +static int ocf_engine_evict(struct ocf_request *req) { if (!ocf_engine_unmapped_count(req)) return 0; @@ -402,6 +404,73 @@ int ocf_engine_evict(struct ocf_request *req) ocf_engine_unmapped_count(req)); } +static int lock_clines(struct ocf_request *req, enum ocf_engine_lock_type lock, + ocf_req_async_lock_cb cb) +{ + switch (lock) { + case ocf_engine_lock_write: + return ocf_req_async_lock_wr(req, cb); + case ocf_engine_lock_read: + return ocf_req_async_lock_rd(req, cb); + default: + return 
OCF_LOCK_ACQUIRED; + } +} + +int ocf_engine_prepare_clines(struct ocf_request *req, + const struct ocf_engine_callbacks *engine_cbs) +{ + bool mapped; + bool promote = true; + int lock = -ENOENT; + enum ocf_engine_lock_type lock_type; + struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; + + ocf_req_hash(req); + ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ + + /* Travers to check if request is mapped fully */ + ocf_engine_traverse(req); + + mapped = ocf_engine_is_mapped(req); + if (mapped) { + lock_type = engine_cbs->get_lock_type(req); + lock = lock_clines(req, lock_type, engine_cbs->resume); + } else { + promote = ocf_promotion_req_should_promote( + req->cache->promotion_policy, req); + if (!promote) + req->info.mapping_error = 1; + } + + if (mapped || !promote) { + ocf_req_hash_unlock_rd(req); + } else { + /* need to attempt mapping / eviction */ + ocf_req_hash_lock_upgrade(req); /*- Metadata WR access, eviction -----*/ + ocf_engine_map(req); + ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/ + + if (req->info.mapping_error) { + ocf_metadata_start_exclusive_access(metadata_lock); + /* Now there is exclusive access for metadata. May + * traverse once again and evict cachelines if needed. 
+ */ + if (ocf_engine_evict(req) == LOOKUP_MAPPED) + ocf_engine_map(req); + ocf_metadata_end_exclusive_access(metadata_lock); + } + + if (!req->info.mapping_error) { + lock_type = engine_cbs->get_lock_type(req); + lock = lock_clines(req, lock_type, engine_cbs->resume); + } + } + + + return lock; +} + static int _ocf_engine_clean_getter(struct ocf_cache *cache, void *getter_context, uint32_t item, ocf_cache_line_t *line) { diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h index 1a4d689..ad99c67 100644 --- a/src/engine/engine_common.h +++ b/src/engine/engine_common.h @@ -162,15 +162,34 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache, uint64_t core_line); /** - * @brief Traverse request in order to lookup cache lines. If there are misses, - * attempt to map free cache lines. - * - * @param req OCF request + * @brief Request cacheline lock type */ -void ocf_engine_map(struct ocf_request *req); +enum ocf_engine_lock_type +{ + /** No lock */ + ocf_engine_lock_none = 0, + /** Write lock */ + ocf_engine_lock_write, + /** Read lock */ + ocf_engine_lock_read, +}; /** - * @brief Evict cachelines to populate freelist. 
+ * @brief Engine-specific callbacks for common request handling rountine + * + * TODO(arutk): expand this structure to fit all engines and all steps + */ +struct ocf_engine_callbacks +{ + /** Specify locking requirements after request is mapped */ + enum ocf_engine_lock_type (*get_lock_type)(struct ocf_request *req); + + /** Resume handling after acquiring asynchronous lock */ + ocf_req_async_lock_cb resume; +}; + +/** + * @brief Map and lock cachelines * * @param req OCF request * @@ -178,7 +197,8 @@ void ocf_engine_map(struct ocf_request *req); * @retval LOOKUP_MAPPED successfully evicted required number of cachelines * @retval LOOKUP_MISS eviction failure */ -int ocf_engine_evict(struct ocf_request *req); +int ocf_engine_prepare_clines(struct ocf_request *req, + const struct ocf_engine_callbacks *engine_cbs); /** * @brief Traverse OCF request (lookup cache) diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index cf1c550..74c000f 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -208,13 +208,24 @@ static const struct ocf_io_if _io_if_read_generic_resume = { .write = _ocf_read_generic_do, }; +static enum ocf_engine_lock_type ocf_rd_get_lock_type(struct ocf_request *req) +{ + if (ocf_engine_is_hit(req)) + return ocf_engine_lock_read; + else + return ocf_engine_lock_write; +} + +static const struct ocf_engine_callbacks _rd_engine_callbacks = +{ + .get_lock_type = ocf_rd_get_lock_type, + .resume = ocf_engine_on_resume, +}; + int ocf_read_generic(struct ocf_request *req) { - bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; struct ocf_cache *cache = req->cache; - bool promote = true; - struct ocf_metadata_lock *metadata_lock = &cache->metadata.lock; ocf_io_start(&req->ioi.io); @@ -230,67 +241,9 @@ int ocf_read_generic(struct ocf_request *req) /* Set resume call backs */ req->io_if = &_io_if_read_generic_resume; - /* calculate hashes for hash-bucket locking */ - ocf_req_hash(req); + lock = ocf_engine_prepare_clines(req, 
&_rd_engine_callbacks); - /*- Metadata RD access -----------------------------------------------*/ - ocf_req_hash_lock_rd(req); - /* Traverse request to cache if there is hit */ - ocf_engine_traverse(req); - - mapped = ocf_engine_is_mapped(req); - if (mapped) { - /* Request is fully mapped, no need to call eviction */ - if (ocf_engine_is_hit(req)) { - /* There is a hit, lock request for READ access */ - lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); - } else { - /* All cache line mapped, but some sectors are not valid - * and cache insert will be performed - lock for - * WRITE is required - */ - lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } - } else { - promote = ocf_promotion_req_should_promote( - cache->promotion_policy, req); - } - - if (mapped || !promote) { - ocf_req_hash_unlock_rd(req); - } else { - /*- Metadata RD access ---------------------------------------*/ - ocf_req_hash_lock_upgrade(req); - ocf_engine_map(req); - ocf_req_hash_unlock_wr(req); - - if (req->info.mapping_error) { - /* Still not mapped - evict cachelines under global - * metadata write lock */ - ocf_metadata_start_exclusive_access(metadata_lock); - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); - ocf_metadata_end_exclusive_access(metadata_lock); - } - - if (!req->info.mapping_error) { - if (ocf_engine_is_hit(req)) { - /* After mapping turns out there is hit, - * so lock OCF request for read access - */ - lock = ocf_req_async_lock_rd(req, - ocf_engine_on_resume); - } else { - /* Miss, new cache lines were mapped, - * need to lock OCF request for write access - */ - lock = ocf_req_async_lock_wr(req, - ocf_engine_on_resume); - } - } - } - - if (promote && !req->info.mapping_error) { + if (!req->info.mapping_error) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* Lock was not acquired, need to wait for resume */ diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c index 4546b18..7602f8e 100644 --- a/src/engine/engine_wb.c +++ 
b/src/engine/engine_wb.c @@ -163,12 +163,20 @@ int ocf_write_wb_do(struct ocf_request *req) return 0; } +static enum ocf_engine_lock_type ocf_wb_get_lock_type(struct ocf_request *req) +{ + return ocf_engine_lock_write; +} + +static const struct ocf_engine_callbacks _wb_engine_callbacks = +{ + .get_lock_type = ocf_wb_get_lock_type, + .resume = ocf_engine_on_resume, +}; + int ocf_write_wb(struct ocf_request *req) { - bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; - bool promote = true; - struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; ocf_io_start(&req->ioi.io); @@ -180,45 +188,9 @@ int ocf_write_wb(struct ocf_request *req) /* TODO: Handle fits into dirty */ - ocf_req_hash(req); - ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ + lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks); - /* Travers to check if request is mapped fully */ - ocf_engine_traverse(req); - - mapped = ocf_engine_is_mapped(req); - if (mapped) { - /* All cache line are mapped, lock request for WRITE access */ - lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } else { - promote = ocf_promotion_req_should_promote( - req->cache->promotion_policy, req); - } - - if (mapped || !promote) { - ocf_req_hash_unlock_rd(req); - } else { - /*- Metadata RD access ---------------------------------------*/ - ocf_req_hash_lock_upgrade(req); - ocf_engine_map(req); - ocf_req_hash_unlock_wr(req); - - if (req->info.mapping_error) { - /* Still not mapped - evict cachelines under global - * metadata write lock */ - ocf_metadata_start_exclusive_access(metadata_lock); - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); - ocf_metadata_end_exclusive_access(metadata_lock); - } - - if (!req->info.mapping_error) { - /* Lock request for WRITE access */ - lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } - } - - if (promote && !req->info.mapping_error) { + if (!req->info.mapping_error) { if (lock >= 0) { if (lock != 
OCF_LOCK_ACQUIRED) { /* WR lock was not acquired, need to wait for resume */ diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c index 7273f95..5adb64b 100644 --- a/src/engine/engine_wt.c +++ b/src/engine/engine_wt.c @@ -158,12 +158,20 @@ static const struct ocf_io_if _io_if_wt_resume = { .write = _ocf_write_wt_do, }; +static enum ocf_engine_lock_type ocf_wt_get_lock_type(struct ocf_request *req) +{ + return ocf_engine_lock_write; +} + +static const struct ocf_engine_callbacks _wt_engine_callbacks = +{ + .get_lock_type = ocf_wt_get_lock_type, + .resume = ocf_engine_on_resume, +}; + int ocf_write_wt(struct ocf_request *req) { - bool mapped; int lock = OCF_LOCK_NOT_ACQUIRED; - bool promote = true; - struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; ocf_io_start(&req->ioi.io); @@ -173,45 +181,9 @@ int ocf_write_wt(struct ocf_request *req) /* Set resume io_if */ req->io_if = &_io_if_wt_resume; - ocf_req_hash(req); - ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ + lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks); - /* Travers to check if request is mapped fully */ - ocf_engine_traverse(req); - - mapped = ocf_engine_is_mapped(req); - if (mapped) { - /* All cache line are mapped, lock request for WRITE access */ - lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } else { - promote = ocf_promotion_req_should_promote( - req->cache->promotion_policy, req); - } - - if (mapped || !promote) { - ocf_req_hash_unlock_rd(req); - } else { - /*- Metadata RD access ---------------------------------------*/ - ocf_req_hash_lock_upgrade(req); - ocf_engine_map(req); - ocf_req_hash_unlock_wr(req); - - if (req->info.mapping_error) { - /* Still not mapped - evict cachelines under global - * metadata write lock */ - ocf_metadata_start_exclusive_access(metadata_lock); - if (ocf_engine_evict(req) == LOOKUP_MAPPED) - ocf_engine_map(req); - ocf_metadata_end_exclusive_access(metadata_lock); - } - - if 
(!req->info.mapping_error) { - /* Lock request for WRITE access */ - lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); - } - } - - if (promote && !req->info.mapping_error) { + if (!req->info.mapping_error) { if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { /* WR lock was not acquired, need to wait for resume */ From 30f22d4f471af525671e19595023534f29e287e2 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Wed, 31 Jul 2019 15:13:32 -0400 Subject: [PATCH 09/10] Optimize cacheline locking in ocf_engine_prepare_clines Hash bucket read/write lock is sufficient to safely attempt cacheline trylock/lock. This change removes cacheline lock global RW semaprhore and moves cacheline trylock/lock under hash bucket read/write lock respectively. Signed-off-by: Adam Rutkowski --- src/concurrency/ocf_cache_line_concurrency.c | 60 ++----------- src/concurrency/ocf_cache_line_concurrency.h | 46 ++++++++-- src/engine/engine_common.c | 94 +++++++++++++++----- src/engine/engine_discard.c | 10 ++- src/engine/engine_fast.c | 13 ++- src/engine/engine_pt.c | 15 ++-- src/engine/engine_wi.c | 10 ++- src/engine/engine_wo.c | 10 ++- 8 files changed, 161 insertions(+), 97 deletions(-) diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c index bee26b7..5b50ee6 100644 --- a/src/concurrency/ocf_cache_line_concurrency.c +++ b/src/concurrency/ocf_cache_line_concurrency.c @@ -50,7 +50,6 @@ struct __waiters_list { }; struct ocf_cache_line_concurrency { - env_rwlock lock; env_atomic *access; env_atomic waiting; size_t access_limit; @@ -112,8 +111,6 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache) env_spinlock_init(&c->waiters_lsts[i].lock); } - env_rwlock_init(&c->lock); - return 0; ocf_cache_line_concurrency_init: @@ -141,8 +138,6 @@ void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache) concurrency = cache->device->concurrency.cache_line; - env_rwlock_destroy(&concurrency->lock); - for (i = 0; i < 
_WAITERS_LIST_ENTRIES; i++) env_spinlock_destroy(&concurrency->waiters_lsts[i].lock); @@ -694,7 +689,7 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr /* Try to read-lock request without adding waiters. Function should be called * under read lock, multiple threads may attempt to acquire the lock * concurrently. */ -static int _ocf_req_trylock_rd(struct ocf_request *req) +int ocf_req_trylock_rd(struct ocf_request *req) { int32_t i; struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. @@ -745,10 +740,10 @@ static int _ocf_req_trylock_rd(struct ocf_request *req) } /* - * Read-lock request cache lines. Must be called under cacheline concurrency - * write lock. + * Asynchronously read-lock request cache lines. Must be called under cacheline + * concurrency write lock. */ -static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb) +int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb) { int32_t i; struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. @@ -801,29 +796,10 @@ err: } -int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb) -{ - struct ocf_cache_line_concurrency *c = - req->cache->device->concurrency.cache_line; - int lock; - - env_rwlock_read_lock(&c->lock); - lock = _ocf_req_trylock_rd(req); - env_rwlock_read_unlock(&c->lock); - - if (lock != OCF_LOCK_ACQUIRED) { - env_rwlock_write_lock(&c->lock); - lock = _ocf_req_lock_rd(req, cb); - env_rwlock_write_unlock(&c->lock); - } - - return lock; -} - /* Try to write-lock request without adding waiters. Function should be called * under read lock, multiple threads may attempt to acquire the lock * concurrently. */ -static int _ocf_req_trylock_wr(struct ocf_request *req) +int ocf_req_trylock_wr(struct ocf_request *req) { int32_t i; struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. 
@@ -872,10 +848,10 @@ static int _ocf_req_trylock_wr(struct ocf_request *req) } /* - * Write-lock request cache lines. Must be called under cacheline concurrency - * write lock. + * Asynchronously write-lock request cache lines. Must be called under cacheline + * concurrency write lock. */ -static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb) +int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb) { int32_t i; struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. @@ -928,26 +904,6 @@ err: return ret; } -int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb) -{ - struct ocf_cache_line_concurrency *c = - req->cache->device->concurrency.cache_line; - int lock; - - env_rwlock_read_lock(&c->lock); - lock = _ocf_req_trylock_wr(req); - env_rwlock_read_unlock(&c->lock); - - if (lock != OCF_LOCK_ACQUIRED) { - env_rwlock_write_lock(&c->lock); - lock = _ocf_req_lock_wr(req, cb); - env_rwlock_write_unlock(&c->lock); - } - - return lock; -} - - /* * */ diff --git a/src/concurrency/ocf_cache_line_concurrency.h b/src/concurrency/ocf_cache_line_concurrency.h index e6ab7c8..6c84ec4 100644 --- a/src/concurrency/ocf_cache_line_concurrency.h +++ b/src/concurrency/ocf_cache_line_concurrency.h @@ -54,35 +54,63 @@ size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache); typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req); /** - * @brief Lock OCF request for WRITE access (Lock all cache lines in map info) - * - * @note io_if->resume callback has to be set + * @brief Lock OCF request for write access asynchronously. Attempts to lock all + * cache lines in map info. 
 * * @param req - OCF request * @param cb - async lock acquisition callback * + * @returns lock acquisition status or negative error code in case of internal + * error * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed - * * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was - added into waiting list. When lock will be acquired io_if->resume be called + added into waiting list. When lock is acquired, @cb will be + called. */ int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb); /** - * @brief Lock OCF request for READ access (Lock all cache lines in map info) + * @brief Try to lock OCF request for write access. Serves the same purpose as + * ocf_req_async_lock_wr, except that this function fails if lock is already + * held by someone else. * - * @note io_if->resume callback has to be set + * @param req - OCF request + * + * @returns lock acquisition status + * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed + * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired + */ +int ocf_req_trylock_wr(struct ocf_request *req); + +/** + * @brief Lock OCF request for read access asynchronously. Attempts to lock all + * cache lines in map info. * * @param req - OCF request * @param cb - async lock acquisition callback * + * @returns lock acquisition status or negative error code in case of internal + * error * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed - * * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was - added into waiting list. When lock will be acquired io_if->resume be called + added into waiting list. When lock is acquired, @cb will be + called. */ int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb); +/** + * @brief Try to lock OCF request for read access. 
Serves the same purpose as + * ocf_req_async_lock_rd, except that this function fails if lock is already + * held by someone else. + * + * @param req - OCF request + * + * @returns lock acquisition status + * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed + * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired + */ +int ocf_req_trylock_rd(struct ocf_request *req); + /** * @brief Unlock OCF request from WRITE access * diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c index 92d49c0..4cdab86 100644 --- a/src/engine/engine_common.c +++ b/src/engine/engine_common.c @@ -395,7 +395,7 @@ static void _ocf_engine_clean_end(void *private_data, int error) } } -static int ocf_engine_evict(struct ocf_request *req) +static int ocf_engine_evict(struct ocf_request *req) { if (!ocf_engine_unmapped_count(req)) return 0; @@ -417,6 +417,19 @@ static int lock_clines(struct ocf_request *req, enum ocf_engine_lock_type lock, } } +static int trylock_clines(struct ocf_request *req, + enum ocf_engine_lock_type lock) +{ + switch (lock) { + case ocf_engine_lock_write: + return ocf_req_trylock_wr(req); + case ocf_engine_lock_read: + return ocf_req_trylock_rd(req); + default: + return OCF_LOCK_ACQUIRED; + } +} + int ocf_engine_prepare_clines(struct ocf_request *req, const struct ocf_engine_callbacks *engine_cbs) { @@ -426,48 +439,87 @@ int ocf_engine_prepare_clines(struct ocf_request *req, enum ocf_engine_lock_type lock_type; struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock; + /* Calculate hashes for hash-bucket locking */ ocf_req_hash(req); - ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/ - /* Travers to check if request is mapped fully */ + /* Read-lock hash buckets associated with request target core & LBAs + * (core lines) to assure that cache mapping for these core lines does + * not change during traversal */ + ocf_req_hash_lock_rd(req); + + /* Traverse request to cache if there is 
hit */ ocf_engine_traverse(req); - mapped = ocf_engine_is_mapped(req); + if (mapped) { + /* We are holding hash buckets read lock, so we can attempt + * per-cacheline locking fast path, which would fail either if + * cachelines are already locked without putting request to a + * waiter list */ lock_type = engine_cbs->get_lock_type(req); - lock = lock_clines(req, lock_type, engine_cbs->resume); + lock = trylock_clines(req, lock_type); + + if (lock == OCF_LOCK_ACQUIRED) { + /* Cachelines are mapped and locked, we don't need the + * hash bucket lock any more */ + ocf_req_hash_unlock_rd(req); + } else { + /* Failed to acquire cachelines lock in fast path, + * acquire hash-buckets write lock and attempt the lock + * again, allowing slow path and async assignment of + * the lock. */ + ocf_req_hash_lock_upgrade(req); + lock = lock_clines(req, lock_type, engine_cbs->resume); + ocf_req_hash_unlock_wr(req); + } } else { + /* check if request should promote cachelines */ promote = ocf_promotion_req_should_promote( req->cache->promotion_policy, req); - if (!promote) + if (!promote) { req->info.mapping_error = 1; + ocf_req_hash_unlock_rd(req); + } } - if (mapped || !promote) { - ocf_req_hash_unlock_rd(req); - } else { - /* need to attempt mapping / eviction */ - ocf_req_hash_lock_upgrade(req); /*- Metadata WR access, eviction -----*/ + if (!mapped && promote) { + /* Need to map (potentially evict) cachelines. 
Mapping must be + * performed holding (at least) hash-bucket write lock */ + ocf_req_hash_lock_upgrade(req); + ocf_engine_map(req); - ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/ + if (!req->info.mapping_error) { + /* Lock cachelines, potentially putting the request on + * waiter list */ + lock_type = engine_cbs->get_lock_type(req); + lock = trylock_clines(req, lock_type); + if (lock != OCF_LOCK_ACQUIRED) { + lock = lock_clines(req, lock_type, + engine_cbs->resume); + } + } + + /* At this point the request is mapped or we need to evict, + * which is done under global metadata lock */ + ocf_req_hash_unlock_wr(req); if (req->info.mapping_error) { + /* Not mapped - evict cachelines */ ocf_metadata_start_exclusive_access(metadata_lock); - /* Now there is exclusive access for metadata. May - * traverse once again and evict cachelines if needed. - */ if (ocf_engine_evict(req) == LOOKUP_MAPPED) ocf_engine_map(req); + if (!req->info.mapping_error) { + lock_type = engine_cbs->get_lock_type(req); + lock = trylock_clines(req, lock_type); + if (lock != OCF_LOCK_ACQUIRED) { + lock = lock_clines(req, lock_type, + engine_cbs->resume); + } + } ocf_metadata_end_exclusive_access(metadata_lock); } - - if (!req->info.mapping_error) { - lock_type = engine_cbs->get_lock_type(req); - lock = lock_clines(req, lock_type, engine_cbs->resume); - } } - return lock; } diff --git a/src/engine/engine_discard.c b/src/engine/engine_discard.c index 8c10c7d..62807dd 100644 --- a/src/engine/engine_discard.c +++ b/src/engine/engine_discard.c @@ -235,12 +235,18 @@ static int _ocf_discard_step(struct ocf_request *req) if (ocf_engine_mapped_count(req)) { /* Some cache line are mapped, lock request for WRITE access */ - lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume); + lock = ocf_req_trylock_wr(req); } else { lock = OCF_LOCK_ACQUIRED; } - ocf_req_hash_unlock_rd(req); + if (lock != OCF_LOCK_ACQUIRED) { + ocf_req_hash_lock_upgrade(req); + lock = ocf_req_async_lock_wr(req, 
_ocf_discard_on_resume); + ocf_req_hash_unlock_wr(req); + } else { + ocf_req_hash_unlock_rd(req); + } if (lock >= 0) { if (OCF_LOCK_ACQUIRED == lock) { diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c index 8cfe96c..11c3b7f 100644 --- a/src/engine/engine_fast.c +++ b/src/engine/engine_fast.c @@ -195,11 +195,18 @@ int ocf_write_fast(struct ocf_request *req) mapped = ocf_engine_is_mapped(req); if (mapped) { ocf_io_start(&req->ioi.io); - lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); + lock = ocf_req_trylock_wr(req); + if (lock != OCF_LOCK_ACQUIRED) { + ocf_req_hash_lock_upgrade(req); + lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume); + ocf_req_hash_unlock_wr(req); + } else { + ocf_req_hash_unlock_rd(req); + } + } else { + ocf_req_hash_unlock_rd(req); } - ocf_req_hash_unlock_rd(req); - if (mapped) { if (lock >= 0) { OCF_DEBUG_RQ(req, "Fast path success"); diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c index 799f7b0..8aabeee 100644 --- a/src/engine/engine_pt.c +++ b/src/engine/engine_pt.c @@ -102,7 +102,7 @@ static const struct ocf_io_if _io_if_pt_resume = { int ocf_read_pt(struct ocf_request *req) { bool use_cache = false; - int lock = OCF_LOCK_NOT_ACQUIRED; + int lock = OCF_LOCK_ACQUIRED; OCF_DEBUG_TRACE(req->cache); @@ -127,14 +127,17 @@ int ocf_read_pt(struct ocf_request *req) /* There are mapped cache line, * lock request for READ access */ - lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); - } else { - /* No mapped cache lines, no need to get lock */ - lock = OCF_LOCK_ACQUIRED; + lock = ocf_req_trylock_rd(req); } } - ocf_req_hash_unlock_rd(req); + if (lock != OCF_LOCK_ACQUIRED) { + ocf_req_hash_lock_upgrade(req); + lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); + ocf_req_hash_unlock_wr(req); + } else { + ocf_req_hash_unlock_rd(req); + } if (use_cache) { /* diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c index ff947e4..58b7f83 100644 --- a/src/engine/engine_wi.c +++ 
b/src/engine/engine_wi.c @@ -154,12 +154,18 @@ int ocf_write_wi(struct ocf_request *req) if (ocf_engine_mapped_count(req)) { /* Some cache line are mapped, lock request for WRITE access */ - lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume); + lock = ocf_req_trylock_wr(req); } else { lock = OCF_LOCK_ACQUIRED; } - ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/ + if (lock != OCF_LOCK_ACQUIRED) { + ocf_req_hash_lock_upgrade(req); + lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume); + ocf_req_hash_unlock_wr(req); + } else { + ocf_req_hash_unlock_rd(req); + } if (lock >= 0) { if (lock == OCF_LOCK_ACQUIRED) { diff --git a/src/engine/engine_wo.c b/src/engine/engine_wo.c index 193198d..289d38b 100644 --- a/src/engine/engine_wo.c +++ b/src/engine/engine_wo.c @@ -223,10 +223,16 @@ int ocf_read_wo(struct ocf_request *req) /* There are mapped cache lines, * lock request for READ access */ - lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); + lock = ocf_req_trylock_rd(req); } - ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/ + if (lock != OCF_LOCK_ACQUIRED) { + ocf_req_hash_lock_upgrade(req); + lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume); + ocf_req_hash_unlock_wr(req); + } else { + ocf_req_hash_unlock_rd(req); + } if (lock >= 0) { if (lock != OCF_LOCK_ACQUIRED) { From 938795e0818a1aadef3175ea28ed72f97a2c7830 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Fri, 20 Sep 2019 00:33:17 -0400 Subject: [PATCH 10/10] Unit tests for hash bucket locks ordering Signed-off-by: Adam Rutkowski --- .../ocf_metadata_concurrency.c | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c diff --git a/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c b/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c new file mode 100644 index 
0000000..23096f3 --- /dev/null +++ b/tests/unit/tests/concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency.c @@ -0,0 +1,126 @@ +/* + * src/concurrency/ocf_metadata_concurrency.c + * ocf_req_hash_lock_rd + * + * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE + * ONE FUNCTION PER LINE + * + */ + +#undef static + +#undef inline + + +#include +#include +#include +#include +#include "print_desc.h" + +#include "ocf_metadata_concurrency.h" +#include "../metadata/metadata_misc.h" + +#include "concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency_generated_warps.c" + +void __wrap_ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock, + ocf_cache_line_t hash, int rw) +{ + check_expected(hash); + function_called(); +} + +#define MAP_SIZE 16 + +static struct ocf_request *alloc_req() +{ + struct ocf_request *req; + struct ocf_cache *cache = malloc(sizeof(*cache)); + + req = malloc(sizeof(*req) + MAP_SIZE * sizeof(req->map[0])); + req->map = req->__map; + req->cache = cache; + + return req; +} + +static void _test_lock_order(struct ocf_request* req, + unsigned hash[], unsigned hash_count, + unsigned expected_call[], unsigned expected_call_count) +{ + unsigned i; + + req->core_line_count = hash_count; + + for (i = 0; i < hash_count; i++) + req->map[i].hash = hash[i]; + + for (i = 0; i < expected_call_count; i++) { + expect_function_call(__wrap_ocf_metadata_hash_lock); + expect_value(__wrap_ocf_metadata_hash_lock, hash, expected_call[i]); + } + + ocf_req_hash_lock_rd(req); + +} + +static void ocf_req_hash_lock_rd_test01(void **state) +{ + struct ocf_request *req = alloc_req(); + struct { + struct { + unsigned val[MAP_SIZE]; + unsigned count; + } hash, expected_call; + } test_cases[] = { + { + .hash = {.val = {2}, .count = 1}, + .expected_call = {.val = {2}, .count = 1} + }, + { + .hash = {.val = {2, 3, 4}, .count = 3}, + .expected_call = {.val = {2, 3, 4}, .count = 3} + }, + { + .hash = {.val = {2, 3, 4, 0}, .count = 4}, + .expected_call = {.val = 
{0, 2, 3, 4}, .count = 4} + }, + { + .hash = {.val = {2, 3, 4, 0, 1, 2, 3, 4, 0, 1}, .count = 10}, + .expected_call = {.val = {0, 1, 2, 3, 4}, .count = 5} + }, + { + .hash = {.val = {4, 0}, .count = 2}, + .expected_call = {.val = {0, 4}, .count = 2} + }, + { + .hash = {.val = {0, 1, 2, 3, 4, 0, 1}, .count = 7}, + .expected_call = {.val = {0, 1, 2, 3, 4}, .count = 5} + }, + }; + const unsigned test_case_count = sizeof(test_cases) / sizeof(test_cases[0]); + unsigned i; + + req->cache->metadata.lock.num_hash_entries = 5; + + print_test_description("Verify hash locking order\n"); + + for (i = 0; i < test_case_count; i++) { + _test_lock_order(req, test_cases[i].hash.val, test_cases[i].hash.count, + test_cases[i].expected_call.val, test_cases[i].expected_call.count); + } + + free(req->cache); + free(req); +} + +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(ocf_req_hash_lock_rd_test01) + }; + + print_message("Unit test for ocf_req_hash_lock_rd\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +}