From 30f22d4f471af525671e19595023534f29e287e2 Mon Sep 17 00:00:00 2001
From: Adam Rutkowski
Date: Wed, 31 Jul 2019 15:13:32 -0400
Subject: [PATCH] Optimize cacheline locking in ocf_engine_prepare_clines

Hash bucket read/write lock is sufficient to safely attempt
cacheline trylock/lock. This change removes the global cacheline
lock RW semaphore and moves cacheline trylock/lock under the hash
bucket read/write lock respectively.

Signed-off-by: Adam Rutkowski
---
 src/concurrency/ocf_cache_line_concurrency.c | 60 ++-----------
 src/concurrency/ocf_cache_line_concurrency.h | 46 ++++++++--
 src/engine/engine_common.c                   | 94 +++++++++++++++-----
 src/engine/engine_discard.c                  | 10 ++-
 src/engine/engine_fast.c                     | 13 ++-
 src/engine/engine_pt.c                       | 15 ++--
 src/engine/engine_wi.c                       | 10 ++-
 src/engine/engine_wo.c                       | 10 ++-
 8 files changed, 161 insertions(+), 97 deletions(-)

diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c
index bee26b7..5b50ee6 100644
--- a/src/concurrency/ocf_cache_line_concurrency.c
+++ b/src/concurrency/ocf_cache_line_concurrency.c
@@ -50,7 +50,6 @@ struct __waiters_list {
 };
 
 struct ocf_cache_line_concurrency {
-	env_rwlock lock;
 	env_atomic *access;
 	env_atomic waiting;
 	size_t access_limit;
@@ -112,8 +111,6 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
 		env_spinlock_init(&c->waiters_lsts[i].lock);
 	}
 
-	env_rwlock_init(&c->lock);
-
 	return 0;
 
 ocf_cache_line_concurrency_init:
@@ -141,8 +138,6 @@ void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
 
 	concurrency = cache->device->concurrency.cache_line;
 
-	env_rwlock_destroy(&concurrency->lock);
-
 	for (i = 0; i < _WAITERS_LIST_ENTRIES; i++)
 		env_spinlock_destroy(&concurrency->waiters_lsts[i].lock);
 
@@ -694,7 +689,7 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr
 /* Try to read-lock request without adding waiters. Function should be called
  * under read lock, multiple threads may attempt to acquire the lock
  * concurrently. */
-static int _ocf_req_trylock_rd(struct ocf_request *req)
+int ocf_req_trylock_rd(struct ocf_request *req)
 {
 	int32_t i;
 	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
@@ -745,10 +740,10 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
 }
 
 /*
- * Read-lock request cache lines. Must be called under cacheline concurrency
- * write lock.
+ * Asynchronously read-lock request cache lines. Must be called under hash
+ * bucket write lock.
  */
-static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
+int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
 {
 	int32_t i;
 	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
@@ -801,29 +796,10 @@ err:
 
 }
 
-int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
-{
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
-	int lock;
-
-	env_rwlock_read_lock(&c->lock);
-	lock = _ocf_req_trylock_rd(req);
-	env_rwlock_read_unlock(&c->lock);
-
-	if (lock != OCF_LOCK_ACQUIRED) {
-		env_rwlock_write_lock(&c->lock);
-		lock = _ocf_req_lock_rd(req, cb);
-		env_rwlock_write_unlock(&c->lock);
-	}
-
-	return lock;
-}
-
 /* Try to write-lock request without adding waiters. Function should be called
  * under read lock, multiple threads may attempt to acquire the lock
  * concurrently. */
-static int _ocf_req_trylock_wr(struct ocf_request *req)
+int ocf_req_trylock_wr(struct ocf_request *req)
 {
 	int32_t i;
 	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
@@ -872,10 +848,10 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
 }
 
 /*
- * Write-lock request cache lines. Must be called under cacheline concurrency
- * write lock.
+ * Asynchronously write-lock request cache lines. Must be called under hash
+ * bucket write lock.
  */
-static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
+int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
 {
 	int32_t i;
 	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
@@ -928,26 +904,6 @@ err:
 	return ret;
 }
 
-int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
-{
-	struct ocf_cache_line_concurrency *c =
-			req->cache->device->concurrency.cache_line;
-	int lock;
-
-	env_rwlock_read_lock(&c->lock);
-	lock = _ocf_req_trylock_wr(req);
-	env_rwlock_read_unlock(&c->lock);
-
-	if (lock != OCF_LOCK_ACQUIRED) {
-		env_rwlock_write_lock(&c->lock);
-		lock = _ocf_req_lock_wr(req, cb);
-		env_rwlock_write_unlock(&c->lock);
-	}
-
-	return lock;
-}
-
-
 /*
  *
  */
diff --git a/src/concurrency/ocf_cache_line_concurrency.h b/src/concurrency/ocf_cache_line_concurrency.h
index e6ab7c8..6c84ec4 100644
--- a/src/concurrency/ocf_cache_line_concurrency.h
+++ b/src/concurrency/ocf_cache_line_concurrency.h
@@ -54,35 +54,63 @@ size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache);
 typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
 
 /**
- * @brief Lock OCF request for WRITE access (Lock all cache lines in map info)
- *
- * @note io_if->resume callback has to be set
+ * @brief Lock OCF request for write access asynchronously. Attempts to lock all
+ * cache lines in map info.
  *
  * @param req - OCF request
  * @param cb - async lock acquisition callback
  *
+ * @returns lock acquisition status or negative error code in case of internal
+ *		error
  * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
- *
  * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
- *		added into waiting list. When lock will be acquired io_if->resume be called
+ *		added into waiting list. When lock is acquired, @cb will be
+ *		called.
  */
 int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
 
 /**
- * @brief Lock OCF request for READ access (Lock all cache lines in map info)
+ * @brief Try to lock OCF request for write access. Serves the same purpose as
+ * ocf_req_async_lock_wr, except that this function fails if the lock is
+ * already held by someone else.
  *
- * @note io_if->resume callback has to be set
+ * @param req - OCF request
+ *
+ * @returns lock acquisition status
+ * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
+ * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired
+ */
+int ocf_req_trylock_wr(struct ocf_request *req);
+
+/**
+ * @brief Lock OCF request for read access asynchronously. Attempts to lock all
+ * cache lines in map info.
  *
  * @param req - OCF request
  * @param cb - async lock acquisition callback
  *
+ * @returns lock acquisition status or negative error code in case of internal
+ *		error
  * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
- *
  * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
- *		added into waiting list. When lock will be acquired io_if->resume be called
+ *		added into waiting list. When lock is acquired, @cb will be
+ *		called.
  */
 int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb);
 
+/**
+ * @brief Try to lock OCF request for read access. Serves the same purpose as
+ * ocf_req_async_lock_rd, except that this function fails if the lock is
+ * already held by someone else.
+ *
+ * @param req - OCF request
+ *
+ * @returns lock acquisition status
+ * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
+ * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired
+ */
+int ocf_req_trylock_rd(struct ocf_request *req);
+
 /**
  * @brief Unlock OCF request from WRITE access
  *
diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c
index 92d49c0..4cdab86 100644
--- a/src/engine/engine_common.c
+++ b/src/engine/engine_common.c
@@ -395,7 +395,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 	}
 }
 
-static int ocf_engine_evict(struct ocf_request *req) 
+static int ocf_engine_evict(struct ocf_request *req)
 {
 	if (!ocf_engine_unmapped_count(req))
 		return 0;
@@ -417,6 +417,19 @@ static int lock_clines(struct ocf_request *req, enum ocf_engine_lock_type lock,
 	}
 }
 
+static int trylock_clines(struct ocf_request *req,
+		enum ocf_engine_lock_type lock)
+{
+	switch (lock) {
+	case ocf_engine_lock_write:
+		return ocf_req_trylock_wr(req);
+	case ocf_engine_lock_read:
+		return ocf_req_trylock_rd(req);
+	default:
+		return OCF_LOCK_ACQUIRED;
+	}
+}
+
 int ocf_engine_prepare_clines(struct ocf_request *req,
 		const struct ocf_engine_callbacks *engine_cbs)
 {
@@ -426,48 +439,87 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
 	enum ocf_engine_lock_type lock_type;
 	struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
 
+	/* Calculate hashes for hash-bucket locking */
 	ocf_req_hash(req);
-	ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
 
-	/* Travers to check if request is mapped fully */
+	/* Read-lock hash buckets associated with request target core & LBAs
+	 * (core lines) to ensure that cache mapping for these core lines does
+	 * not change during traversal */
+	ocf_req_hash_lock_rd(req);
+
+	/* Traverse the request to check for a cache hit */
 	ocf_engine_traverse(req);
-
 	mapped = ocf_engine_is_mapped(req);
+
 	if (mapped) {
+		/* We are holding the hash bucket read lock, so we can attempt
+		 * the per-cacheline locking fast path, which fails without
+		 * putting the request on a waiter list if any cacheline is
+		 * already locked */
 		lock_type = engine_cbs->get_lock_type(req);
-		lock = lock_clines(req, lock_type, engine_cbs->resume);
+		lock = trylock_clines(req, lock_type);
+
+		if (lock == OCF_LOCK_ACQUIRED) {
+			/* Cachelines are mapped and locked, we don't need the
+			 * hash bucket lock any more */
+			ocf_req_hash_unlock_rd(req);
+		} else {
+			/* Failed to acquire the cacheline lock in the fast
+			 * path; upgrade to the hash bucket write lock and
+			 * attempt the lock again, allowing the slow path and
+			 * asynchronous assignment of the lock.
+			 */
+			ocf_req_hash_lock_upgrade(req);
+			lock = lock_clines(req, lock_type, engine_cbs->resume);
+			ocf_req_hash_unlock_wr(req);
+		}
 	} else {
+		/* Check if request should promote cachelines */
 		promote = ocf_promotion_req_should_promote(
 				req->cache->promotion_policy, req);
-		if (!promote)
+		if (!promote) {
 			req->info.mapping_error = 1;
+			ocf_req_hash_unlock_rd(req);
+		}
 	}
 
-	if (mapped || !promote) {
-		ocf_req_hash_unlock_rd(req);
-	} else {
-		/* need to attempt mapping / eviction */
-		ocf_req_hash_lock_upgrade(req); /*- Metadata WR access, eviction -----*/
+	if (!mapped && promote) {
+		/* Need to map (potentially evict) cachelines. Mapping must be
+		 * performed holding (at least) hash-bucket write lock */
+		ocf_req_hash_lock_upgrade(req);
+
 		ocf_engine_map(req);
-		ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
+		if (!req->info.mapping_error) {
+			/* Lock cachelines, potentially putting the request on
+			 * a waiter list */
+			lock_type = engine_cbs->get_lock_type(req);
+			lock = trylock_clines(req, lock_type);
+			if (lock != OCF_LOCK_ACQUIRED) {
+				lock = lock_clines(req, lock_type,
+						engine_cbs->resume);
+			}
+		}
+
+		/* At this point the request is mapped or we need to evict,
+		 * which is done under the global metadata lock */
+		ocf_req_hash_unlock_wr(req);
 
 		if (req->info.mapping_error) {
+			/* Not mapped - evict cachelines */
 			ocf_metadata_start_exclusive_access(metadata_lock);
-			/* Now there is exclusive access for metadata. May
-			 * traverse once again and evict cachelines if needed.
-			 */
 			if (ocf_engine_evict(req) == LOOKUP_MAPPED)
 				ocf_engine_map(req);
+			if (!req->info.mapping_error) {
+				lock_type = engine_cbs->get_lock_type(req);
+				lock = trylock_clines(req, lock_type);
+				if (lock != OCF_LOCK_ACQUIRED) {
+					lock = lock_clines(req, lock_type,
+							engine_cbs->resume);
+				}
+			}
 			ocf_metadata_end_exclusive_access(metadata_lock);
 		}
-
-		if (!req->info.mapping_error) {
-			lock_type = engine_cbs->get_lock_type(req);
-			lock = lock_clines(req, lock_type, engine_cbs->resume);
-		}
 	}
-
 	return lock;
 }
 
diff --git a/src/engine/engine_discard.c b/src/engine/engine_discard.c
index 8c10c7d..62807dd 100644
--- a/src/engine/engine_discard.c
+++ b/src/engine/engine_discard.c
@@ -235,12 +235,18 @@ static int _ocf_discard_step(struct ocf_request *req)
 
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
+		lock = ocf_req_trylock_wr(req);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
 	}
 
-	ocf_req_hash_unlock_rd(req);
+	if (lock != OCF_LOCK_ACQUIRED) {
+		ocf_req_hash_lock_upgrade(req);
+		lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
+		ocf_req_hash_unlock_wr(req);
+	} else {
+		ocf_req_hash_unlock_rd(req);
+	}
 
 	if (lock >= 0) {
 		if (OCF_LOCK_ACQUIRED == lock) {
diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c
index 8cfe96c..11c3b7f 100644
--- a/src/engine/engine_fast.c
+++ b/src/engine/engine_fast.c
@@ -195,11 +195,18 @@ int ocf_write_fast(struct ocf_request *req)
 	mapped = ocf_engine_is_mapped(req);
 	if (mapped) {
 		ocf_io_start(&req->ioi.io);
-		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
+		lock = ocf_req_trylock_wr(req);
+		if (lock != OCF_LOCK_ACQUIRED) {
+			ocf_req_hash_lock_upgrade(req);
+			lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
+			ocf_req_hash_unlock_wr(req);
+		} else {
+			ocf_req_hash_unlock_rd(req);
+		}
+	} else {
+		ocf_req_hash_unlock_rd(req);
 	}
 
-	ocf_req_hash_unlock_rd(req);
-
 	if (mapped) {
 		if (lock >= 0) {
 			OCF_DEBUG_RQ(req, "Fast path success");
diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c
index 799f7b0..8aabeee 100644
--- a/src/engine/engine_pt.c
+++ b/src/engine/engine_pt.c
@@ -102,7 +102,7 @@ static const struct ocf_io_if _io_if_pt_resume = {
 int ocf_read_pt(struct ocf_request *req)
 {
 	bool use_cache = false;
-	int lock = OCF_LOCK_NOT_ACQUIRED;
+	int lock = OCF_LOCK_ACQUIRED;
 
 	OCF_DEBUG_TRACE(req->cache);
 
@@ -127,14 +127,17 @@ int ocf_read_pt(struct ocf_request *req)
 			/* There are mapped cache line,
 			 * lock request for READ access
 			 */
-			lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
-		} else {
-			/* No mapped cache lines, no need to get lock */
-			lock = OCF_LOCK_ACQUIRED;
+			lock = ocf_req_trylock_rd(req);
 		}
 	}
 
-	ocf_req_hash_unlock_rd(req);
+	if (lock != OCF_LOCK_ACQUIRED) {
+		ocf_req_hash_lock_upgrade(req);
+		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+		ocf_req_hash_unlock_wr(req);
+	} else {
+		ocf_req_hash_unlock_rd(req);
+	}
 
 	if (use_cache) {
 		/*
diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c
index ff947e4..58b7f83 100644
--- a/src/engine/engine_wi.c
+++ b/src/engine/engine_wi.c
@@ -154,12 +154,18 @@ int ocf_write_wi(struct ocf_request *req)
 
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
+		lock = ocf_req_trylock_wr(req);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
 	}
 
-	ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/
+	if (lock != OCF_LOCK_ACQUIRED) {
+		ocf_req_hash_lock_upgrade(req);
+		lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
+		ocf_req_hash_unlock_wr(req);
+	} else {
+		ocf_req_hash_unlock_rd(req);
+	}
 
 	if (lock >= 0) {
 		if (lock == OCF_LOCK_ACQUIRED) {
diff --git a/src/engine/engine_wo.c b/src/engine/engine_wo.c
index 193198d..289d38b 100644
--- a/src/engine/engine_wo.c
+++ b/src/engine/engine_wo.c
@@ -223,10 +223,16 @@ int ocf_read_wo(struct ocf_request *req)
 		/* There are mapped cache lines,
 		 * lock request for READ access
 		 */
-		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+		lock = ocf_req_trylock_rd(req);
 	}
 
-	ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
+	if (lock != OCF_LOCK_ACQUIRED) {
+		ocf_req_hash_lock_upgrade(req);
+		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
+		ocf_req_hash_unlock_wr(req);
+	} else {
+		ocf_req_hash_unlock_rd(req);
+	}
 
 	if (lock >= 0) {
 		if (lock != OCF_LOCK_ACQUIRED) {
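
Note: every engine touched above now follows the same two-phase pattern: attempt a
cacheline trylock under the hash bucket read lock, and only on contention upgrade to
the hash bucket write lock and take the slow path, which may queue the request on
cacheline waiter lists and complete asynchronously via the resume callback. The code
below is a minimal sketch of that flow, assuming simplified stand-in primitives;
hash_bucket_read_lock, trylock_cachelines, lock_cachelines_async and friends are
hypothetical names for illustration, not the actual OCF API:

	/* Sketch of the fast-path/slow-path cacheline locking pattern
	 * introduced by this patch. All identifiers are illustrative
	 * stand-ins, not OCF API. */

	struct request;			/* opaque request handle */

	enum lock_status {
		LOCK_ACQUIRED,
		LOCK_NOT_ACQUIRED	/* request queued on a waiter list */
	};

	/* Hypothetical primitives standing in for ocf_req_hash_lock_rd(),
	 * ocf_req_hash_lock_upgrade(), ocf_req_trylock_rd/wr() and
	 * ocf_req_async_lock_rd/wr() in the real code. */
	void hash_bucket_read_lock(struct request *req);
	void hash_bucket_read_unlock(struct request *req);
	void hash_bucket_upgrade_to_write(struct request *req);
	void hash_bucket_write_unlock(struct request *req);
	enum lock_status trylock_cachelines(struct request *req);
	enum lock_status lock_cachelines_async(struct request *req,
			void (*resume)(struct request *req));

	enum lock_status acquire_cacheline_locks(struct request *req,
			void (*resume)(struct request *req))
	{
		enum lock_status lock;

		/* Fast path: the hash bucket read lock keeps the mapping
		 * stable, so a trylock on every cacheline is safe; it fails
		 * immediately instead of adding the request to a waiter
		 * list. */
		hash_bucket_read_lock(req);
		lock = trylock_cachelines(req);
		if (lock == LOCK_ACQUIRED) {
			hash_bucket_read_unlock(req);
			return lock;
		}

		/* Slow path: putting the request on cacheline waiter lists
		 * requires the hash bucket write lock; resume() fires
		 * asynchronously once the last cacheline lock is granted. */
		hash_bucket_upgrade_to_write(req);
		lock = lock_cachelines_async(req, resume);
		hash_bucket_write_unlock(req);

		return lock;
	}

The benefit is that the common case, uncontended locks on an already mapped request,
takes only shared hash bucket locks, so the removed global RW semaphore no longer
serializes unrelated requests.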