diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c index 3f3a27d..637eb88 100644 --- a/src/concurrency/ocf_cache_line_concurrency.c +++ b/src/concurrency/ocf_cache_line_concurrency.c @@ -25,24 +25,6 @@ static bool ocf_cl_lock_line_is_acting(struct ocf_alock *alock, return req->map[index].status != LOOKUP_MISS; } -static bool ocf_cl_lock_line_is_locked(struct ocf_alock *alock, - struct ocf_request *req, unsigned index, int rw) -{ - if (rw == OCF_WRITE) - return req->map[index].wr_locked; - else - return req->map[index].rd_locked; -} - -static void ocf_cl_lock_line_mark_locked(struct ocf_alock *alock, - struct ocf_request *req, unsigned index, int rw, bool locked) -{ - if (rw == OCF_WRITE) - req->map[index].wr_locked = locked; - else - req->map[index].rd_locked = locked; -} - static ocf_cache_line_t ocf_cl_lock_line_get_entry( struct ocf_alock *alock, struct ocf_request *req, unsigned index) @@ -50,12 +32,118 @@ static ocf_cache_line_t ocf_cl_lock_line_get_entry( return req->map[index].coll_idx; } +static int ocf_cl_lock_line_fast(struct ocf_alock *alock, + struct ocf_request *req, int rw) +{ + int32_t i; + ocf_cache_line_t entry; + int ret = OCF_LOCK_ACQUIRED; + + for (i = 0; i < req->core_line_count; i++) { + if (!ocf_cl_lock_line_needs_lock(alock, req, i)) { + /* nothing to lock */ + continue; + } + + entry = ocf_cl_lock_line_get_entry(alock, req, i); + ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i)); + + if (rw == OCF_WRITE) { + if (ocf_alock_trylock_entry_wr(alock, entry)) { + /* cache entry locked */ + ocf_alock_mark_index_locked(alock, req, i, true); + } else { + /* Not possible to lock all cachelines */ + ret = OCF_LOCK_NOT_ACQUIRED; + break; + } + } else { + if (ocf_alock_trylock_entry_rd_idle(alock, entry)) { + /* cache entry locked */ + ocf_alock_mark_index_locked(alock, req, i, true); + } else { + /* Not possible to lock all cachelines */ + ret = OCF_LOCK_NOT_ACQUIRED; + break; + } + } + } + + /* 
Check if request is locked */
+	if (ret == OCF_LOCK_NOT_ACQUIRED) {
+		/* Request is not locked, discard acquired locks */
+		for (; i >= 0; i--) {
+			if (!ocf_cl_lock_line_needs_lock(alock, req, i))
+				continue;
+
+			entry = ocf_cl_lock_line_get_entry(alock, req, i);
+
+			if (ocf_alock_is_index_locked(alock, req, i)) {
+
+				if (rw == OCF_WRITE) {
+					ocf_alock_unlock_one_wr(alock, entry);
+				} else {
+					ocf_alock_unlock_one_rd(alock, entry);
+				}
+				ocf_alock_mark_index_locked(alock, req, i, false);
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int ocf_cl_lock_line_slow(struct ocf_alock *alock,
+		struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl)
+{
+	int32_t i;
+	ocf_cache_line_t entry;
+	int ret = 0;
+
+	for (i = 0; i < req->core_line_count; i++) {
+
+		if (!ocf_cl_lock_line_needs_lock(alock, req, i)) {
+			/* nothing to lock */
+			env_atomic_dec(&req->lock_remaining);
+			continue;
+		}
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+		ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
+
+
+		if (rw == OCF_WRITE) {
+			if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
+				/* lock not acquired and not added to wait list */
+				ret = -OCF_ERR_NO_MEM;
+				goto err;
+			}
+		} else {
+			if (!ocf_alock_lock_one_rd(alock, entry, cmpl, req, i)) {
+				/* lock not acquired and not added to wait list */
+				ret = -OCF_ERR_NO_MEM;
+				goto err;
+			}
+		}
+	}
+
+	return ret;
+
+err:
+	for (; i >= 0; i--) {
+		if (!ocf_cl_lock_line_needs_lock(alock, req, i))
+			continue;
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+		ocf_alock_waitlist_remove_entry(alock, req, entry, i, rw);
+	}
+
+	return ret;
+}
+
 static struct ocf_alock_lock_cbs ocf_cline_conc_cbs = {
-	.line_needs_lock = ocf_cl_lock_line_needs_lock,
-	.line_is_acting = ocf_cl_lock_line_is_acting,
-	.line_is_locked = ocf_cl_lock_line_is_locked,
-	.line_mark_locked = ocf_cl_lock_line_mark_locked,
-	.line_get_entry = ocf_cl_lock_line_get_entry
+	.lock_entries_fast = ocf_cl_lock_line_fast,
+	.lock_entries_slow = ocf_cl_lock_line_slow
}; bool ocf_cache_line_try_lock_rd(struct ocf_alock *alock, @@ -95,17 +183,48 @@ int ocf_req_async_lock_wr(struct ocf_alock *alock, void ocf_req_unlock_rd(struct ocf_alock *alock, struct ocf_request *req) { - ocf_alock_unlock_rd(alock, req); + int32_t i; + ocf_cache_line_t entry; + + for (i = 0; i < req->core_line_count; i++) { + if (!ocf_cl_lock_line_is_acting(alock, req, i)) + continue; + + if (!ocf_alock_is_index_locked(alock, req, i)) + continue; + + entry = ocf_cl_lock_line_get_entry(alock, req, i); + + ocf_alock_unlock_one_rd(alock, entry); + ocf_alock_mark_index_locked(alock, req, i, false); + } } void ocf_req_unlock_wr(struct ocf_alock *alock, struct ocf_request *req) { - ocf_alock_unlock_wr(alock, req); + int32_t i; + ocf_cache_line_t entry; + + for (i = 0; i < req->core_line_count; i++) { + if (!ocf_cl_lock_line_is_acting(alock, req, i)) + continue; + + if (!ocf_alock_is_index_locked(alock, req, i)) + continue; + + entry = ocf_cl_lock_line_get_entry(alock, req, i); + + ocf_alock_unlock_one_wr(alock, entry); + ocf_alock_mark_index_locked(alock, req, i, false); + } } void ocf_req_unlock(struct ocf_alock *alock, struct ocf_request *req) { - ocf_alock_unlock(alock, req); + if (req->alock_rw == OCF_WRITE) + ocf_req_unlock_wr(alock, req); + else + ocf_req_unlock_rd(alock, req); } bool ocf_cache_line_are_waiters(struct ocf_alock *alock, diff --git a/src/concurrency/ocf_mio_concurrency.c b/src/concurrency/ocf_mio_concurrency.c index 07d6caa..80cbbcc 100644 --- a/src/concurrency/ocf_mio_concurrency.c +++ b/src/concurrency/ocf_mio_concurrency.c @@ -16,43 +16,7 @@ struct ocf_mio_alock unsigned num_pages; }; -static bool ocf_mio_lock_line_needs_lock(struct ocf_alock *alock, - struct ocf_request *req, unsigned index) -{ - return true; -} - -static bool ocf_mio_lock_line_is_acting(struct ocf_alock *alock, - struct ocf_request *req, unsigned index) -{ - return true; -} - -static bool ocf_mio_lock_line_is_locked(struct ocf_alock *alock, - struct ocf_request *req, 
unsigned index, int rw) -{ - struct metadata_io_request *m_req = (struct metadata_io_request *)req; - - if (rw == OCF_WRITE) - return env_bit_test(index, &m_req->map); - else - return false; -} - -static void ocf_mio_lock_line_mark_locked(struct ocf_alock *alock, - struct ocf_request *req, unsigned index, int rw, bool locked) -{ - struct metadata_io_request *m_req = (struct metadata_io_request *)req; - - if (rw == OCF_READ) - return; - if (locked) - env_bit_set(index, &m_req->map); - else - env_bit_clear(index, &m_req->map); -} - -static ocf_cache_line_t ocf_mio_lock_line_get_entry( +static ocf_cache_line_t ocf_mio_lock_get_entry( struct ocf_alock *alock, struct ocf_request *req, unsigned index) { @@ -66,12 +30,77 @@ static ocf_cache_line_t ocf_mio_lock_line_get_entry( return page - mio_alock->first_page; } +static int ocf_mio_lock_fast(struct ocf_alock *alock, + struct ocf_request *req, int rw) +{ + ocf_cache_line_t entry; + int ret = OCF_LOCK_ACQUIRED; + int32_t i; + ENV_BUG_ON(rw != OCF_WRITE); + + for (i = 0; i < req->core_line_count; i++) { + entry = ocf_mio_lock_get_entry(alock, req, i); + ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i)); + + if (ocf_alock_trylock_entry_wr(alock, entry)) { + /* cache entry locked */ + ocf_alock_mark_index_locked(alock, req, i, true); + } else { + /* Not possible to lock all cachelines */ + ret = OCF_LOCK_NOT_ACQUIRED; + break; + } + } + + /* Check if request is locked */ + if (ret == OCF_LOCK_NOT_ACQUIRED) { + /* Request is not locked, discard acquired locks */ + for (; i >= 0; i--) { + entry = ocf_mio_lock_get_entry(alock, req, i); + + if (ocf_alock_is_index_locked(alock, req, i)) { + ocf_alock_unlock_one_wr(alock, entry); + ocf_alock_mark_index_locked(alock, req, i, false); + } + } + } + + return ret; +} + +static int ocf_mio_lock_slow(struct ocf_alock *alock, + struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl) +{ + int32_t i; + ocf_cache_line_t entry; + int ret = 0; + ENV_BUG_ON(rw != OCF_WRITE); + + for (i 
= 0; i < req->core_line_count; i++) {
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+		ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
+
+		if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
+			/* lock not acquired and not added to wait list */
+			ret = -OCF_ERR_NO_MEM;
+			goto err;
+		}
+	}
+
+	return ret;
+
+err:
+	for (; i >= 0; i--) {
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+		ocf_alock_waitlist_remove_entry(alock, req, entry, i, OCF_WRITE);
+	}
+
+	return ret;
+}
+
 static struct ocf_alock_lock_cbs ocf_mio_conc_cbs = {
-	.line_needs_lock = ocf_mio_lock_line_needs_lock,
-	.line_is_acting = ocf_mio_lock_line_is_acting,
-	.line_is_locked = ocf_mio_lock_line_is_locked,
-	.line_mark_locked = ocf_mio_lock_line_mark_locked,
-	.line_get_entry = ocf_mio_lock_line_get_entry
+	.lock_entries_fast = ocf_mio_lock_fast,
+	.lock_entries_slow = ocf_mio_lock_slow
 };
 
 int ocf_mio_async_lock(struct ocf_alock *alock,
@@ -84,8 +113,21 @@ int ocf_mio_async_lock(struct ocf_alock *alock,
 void ocf_mio_async_unlock(struct ocf_alock *alock,
 		struct metadata_io_request *m_req)
 {
-	ocf_alock_unlock_wr(alock, &m_req->req);
-	m_req->map = 0;
+	ocf_cache_line_t entry;
+	struct ocf_request *req = &m_req->req;
+	int i;
+
+	for (i = 0; i < req->core_line_count; i++) {
+		if (!ocf_alock_is_index_locked(alock, req, i))
+			continue;
+
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+
+		ocf_alock_unlock_one_wr(alock, entry);
+		ocf_alock_mark_index_locked(alock, req, i, false);
+	}
+
+	m_req->alock_status = 0;
 }
 
@@ -107,15 +149,17 @@ int ocf_mio_concurrency_init(struct ocf_alock **self,
 	if (ret < 0)
 		return ret;
 	if (ret >= ALLOCATOR_NAME_MAX)
-		return -ENOSPC;
+		return -OCF_ERR_NO_MEM;
 
 	alock = env_vzalloc(base_size + sizeof(struct ocf_mio_alock));
 	if (!alock)
 		return -OCF_ERR_NO_MEM;
 
 	ret = ocf_alock_init_inplace(alock, num_pages, name,
 			&ocf_mio_conc_cbs, cache);
-	if (ret)
+	if (ret) {
+		env_free(alock);
 		return ret;
+	}
 
 	mio_alock = (void*)alock + base_size;
 	mio_alock->first_page =
first_page; diff --git a/src/concurrency/ocf_mio_concurrency.h b/src/concurrency/ocf_mio_concurrency.h index 06d846e..d3726c4 100644 --- a/src/concurrency/ocf_mio_concurrency.h +++ b/src/concurrency/ocf_mio_concurrency.h @@ -3,8 +3,8 @@ * SPDX-License-Identifier: BSD-3-Clause-Clear */ -#ifndef OCF_COLLISION_UPDATE_CONCURRENCY_H_ -#define OCF_COLLISION_UPDATE_CONCURRENCY_H_ +#ifndef OCF_MIO_CONCURRENCY_H_ +#define OCF_MIO_CONCURRENCY_H_ #include "../utils/utils_alock.h" diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 1abe4bb..178b4b6 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -139,7 +139,7 @@ err_alloc: static int _ocf_read_generic_do(struct ocf_request *req) { - if (ocf_engine_is_miss(req) && req->map->rd_locked) { + if (ocf_engine_is_miss(req) && req->alock_rw == OCF_READ) { /* Miss can be handled only on write locks. * Need to switch to PT */ diff --git a/src/eviction/lru.c b/src/eviction/lru.c index 4616ace..c6010d0 100644 --- a/src/eviction/lru.c +++ b/src/eviction/lru.c @@ -611,6 +611,7 @@ bool evp_lru_can_evict(ocf_cache_t cache) uint32_t evp_lru_req_clines(struct ocf_request *req, struct ocf_user_part *part, uint32_t cline_no) { + struct ocf_alock* alock; struct ocf_lru_iter iter; uint32_t i; ocf_cache_line_t cline; @@ -680,10 +681,10 @@ uint32_t evp_lru_req_clines(struct ocf_request *req, req->map[req_idx].status = LOOKUP_REMAPPED; ocf_engine_patch_req_info(cache, req, req_idx); - if (cl_write_lock) - req->map[req_idx].wr_locked = true; - else - req->map[req_idx].rd_locked = true; + alock = ocf_cache_line_concurrency(iter.cache); + + ocf_alock_mark_index_locked(alock, req, req_idx, true); + req->alock_rw = cl_write_lock ? 
OCF_WRITE : OCF_READ; ++req_idx; ++i; diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index b45b0bf..8cc7206 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -348,7 +348,7 @@ static uint32_t metadata_io_max_page(ocf_cache_t cache) uint32_t volume_max_io_pages = ocf_volume_get_max_io_size( &cache->device->volume) / PAGE_SIZE; struct metadata_io_request *m_req; - uint32_t request_map_capacity_pages = sizeof(m_req->map) * 8; + uint32_t request_map_capacity_pages = sizeof(m_req->alock_status) * 8; return OCF_MIN(volume_max_io_pages, request_map_capacity_pages); } @@ -458,6 +458,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, m_req->req.info.internal = true; m_req->req.rw = dir; m_req->req.map = LIST_POISON1; + m_req->req.alock_status = (uint8_t*)&m_req->alock_status; /* If req_count == io_count and count is not multiple of * max_count, for last we can allocate data smaller that diff --git a/src/metadata/metadata_io.h b/src/metadata/metadata_io.h index ba378bb..5a9c514 100644 --- a/src/metadata/metadata_io.h +++ b/src/metadata/metadata_io.h @@ -56,7 +56,7 @@ struct metadata_io_request { env_atomic finished; uint32_t page; uint32_t count; - uint64_t map; + uint64_t alock_status; }; /* diff --git a/src/ocf_request.c b/src/ocf_request.c index 687bd21..5b55245 100644 --- a/src/ocf_request.c +++ b/src/ocf_request.c @@ -43,10 +43,19 @@ static inline size_t ocf_req_sizeof_map(struct ocf_request *req) return size; } +static inline size_t ocf_req_sizeof_alock_status(struct ocf_request *req) +{ + uint32_t lines = req->core_line_count; + size_t size = (lines * sizeof(uint8_t)); + + ENV_BUG_ON(lines == 0); + return size; +} + int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx) { ocf_ctx->resources.req = env_mpool_create(sizeof(struct ocf_request), - sizeof(struct ocf_map_info), ENV_MEM_NORMAL, ocf_req_size_128, + sizeof(struct ocf_map_info) + sizeof(uint8_t), ENV_MEM_NORMAL, ocf_req_size_128, false, 
NULL, "ocf_req", true); if (ocf_ctx->resources.req == NULL) @@ -90,6 +99,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core, if (map_allocated) { req->map = req->__map; + req->alock_status = (uint8_t*)&req->__map[core_line_count]; req->alloc_core_line_count = core_line_count; } else { req->alloc_core_line_count = 1; @@ -131,12 +141,15 @@ int ocf_req_alloc_map(struct ocf_request *req) if (req->map) return 0; - req->map = env_zalloc(ocf_req_sizeof_map(req), ENV_MEM_NOIO); + req->map = env_zalloc(ocf_req_sizeof_map(req) + + ocf_req_sizeof_alock_status(req), ENV_MEM_NOIO); if (!req->map) { req->error = -OCF_ERR_NO_MEM; return -OCF_ERR_NO_MEM; } + req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(req)]; + return 0; } diff --git a/src/ocf_request.h b/src/ocf_request.h index b3db712..6188ece 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -61,12 +61,6 @@ struct ocf_map_info { uint16_t status : 8; /*!< Traverse or mapping status - HIT, MISS, etc... */ - uint16_t rd_locked : 1; - /*!< Indicates if cache line is locked for READ access */ - - uint16_t wr_locked : 1; - /*!< Indicates if cache line is locked for WRITE access */ - uint16_t invalid : 1; /*!< This bit indicates that mapping is invalid */ @@ -222,9 +216,15 @@ struct ocf_request { struct ocf_req_discard_info discard; + uint32_t alock_rw; + /*!< Read/Write mode for alock*/ + + uint8_t *alock_status; + /*!< Mapping for locked/unlocked alock entries */ + struct ocf_map_info *map; - struct ocf_map_info __map[]; + struct ocf_map_info __map[0]; }; typedef void (*ocf_req_end_t)(struct ocf_request *req, int error); diff --git a/src/utils/utils_alock.c b/src/utils/utils_alock.c index 5690e5e..1c5690b 100644 --- a/src/utils/utils_alock.c +++ b/src/utils/utils_alock.c @@ -3,13 +3,10 @@ * SPDX-License-Identifier: BSD-3-Clause-Clear */ -//#include "ocf_concurrency.h" #include "../ocf_cache_priv.h" #include "../ocf_priv.h" #include "../ocf_request.h" #include "utils_alock.h" -//#include 
"../utils/utils_cache_line.h" -//#include "../utils/utils_realloc.h" #define OCF_CACHE_CONCURRENCY_DEBUG 0 @@ -63,6 +60,21 @@ struct ocf_alock { }; +void ocf_alock_mark_index_locked(struct ocf_alock *alock, + struct ocf_request *req, unsigned index, bool locked) +{ + if (locked) + env_bit_set(index, req->alock_status); + else + env_bit_clear(index, req->alock_status); +} + +bool ocf_alock_is_index_locked(struct ocf_alock *alock, + struct ocf_request *req, unsigned index) +{ + return env_bit_test(index, (unsigned long*)req->alock_status); +} + size_t ocf_alock_obj_size(void) { return sizeof(struct ocf_alock); @@ -122,9 +134,8 @@ allocation_err: if (self->access) env_vfree(self->access); -rwsem_err: env_mutex_destroy(&self->lock); - +rwsem_err: ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, " "ERROR %d", error); @@ -249,13 +260,10 @@ bool ocf_alock_trylock_entry_wr(struct ocf_alock *alock, int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE, OCF_CACHE_LINE_ACCESS_WR); - if (prev == OCF_CACHE_LINE_ACCESS_IDLE) - return true; - else - return false; + return prev == OCF_CACHE_LINE_ACCESS_IDLE; } -static inline bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock, +bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock, ocf_cache_line_t entry) { env_atomic *access = &alock->access[entry]; @@ -287,8 +295,10 @@ static inline void ocf_alock_unlock_entry_rd(struct ocf_alock *alock, { env_atomic *access = &alock->access[entry]; - ENV_BUG_ON(env_atomic_read(access) == 0); - ENV_BUG_ON(env_atomic_read(access) == OCF_CACHE_LINE_ACCESS_WR); + int v = env_atomic_read(access); + + ENV_BUG_ON(v == 0); + ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR); env_atomic_dec(access); } @@ -296,8 +306,9 @@ static inline bool ocf_alock_trylock_entry_wr2wr(struct ocf_alock *alock, ocf_cache_line_t entry) { env_atomic *access = &alock->access[entry]; + int v = env_atomic_read(access); - ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR); + 
ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR); return true; } @@ -305,8 +316,9 @@ static inline bool ocf_alock_trylock_entry_wr2rd(struct ocf_alock *alock, ocf_cache_line_t entry) { env_atomic *access = &alock->access[entry]; + int v = env_atomic_read(access); - ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR); + ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR); env_atomic_set(access, OCF_CACHE_LINE_ACCESS_ONE_RD); return true; } @@ -352,7 +364,7 @@ static void ocf_alock_entry_locked(struct ocf_alock *alock, } } -static inline bool ocf_alock_lock_one_wr(struct ocf_alock *alock, +bool ocf_alock_lock_one_wr(struct ocf_alock *alock, const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl, void *req, uint32_t idx) { @@ -364,7 +376,7 @@ static inline bool ocf_alock_lock_one_wr(struct ocf_alock *alock, if (ocf_alock_trylock_entry_wr(alock, entry)) { /* lock was not owned by anyone */ - alock->cbs->line_mark_locked(alock, req, idx, OCF_WRITE, true); + ocf_alock_mark_index_locked(alock, req, idx, true); ocf_alock_entry_locked(alock, req, cmpl); return true; } @@ -397,7 +409,7 @@ unlock: ocf_alock_waitlist_unlock(alock, entry, flags); if (!waiting) { - alock->cbs->line_mark_locked(alock, req, idx, OCF_WRITE, true); + ocf_alock_mark_index_locked(alock, req, idx, true); ocf_alock_entry_locked(alock, req, cmpl); env_allocator_del(alock->allocator, waiter); } @@ -409,7 +421,7 @@ unlock: * Attempt to lock cache entry for read. * In case cache entry is locked, attempt to add caller on wait list. 
*/ -static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock, +bool ocf_alock_lock_one_rd(struct ocf_alock *alock, const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl, void *req, uint32_t idx) { @@ -421,7 +433,7 @@ static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock, if( ocf_alock_trylock_entry_rd_idle(alock, entry)) { /* lock was not owned by anyone */ - alock->cbs->line_mark_locked(alock, req, idx, OCF_READ, true); + ocf_alock_mark_index_locked(alock, req, idx, true); ocf_alock_entry_locked(alock, req, cmpl); return true; } @@ -433,7 +445,7 @@ static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock, /* Lock waiters list */ ocf_alock_waitlist_lock(alock, entry, flags); - if (!ocf_alock_waitlist_is_empty_locked(alock, entry)) { + if (ocf_alock_waitlist_is_empty_locked(alock, entry)) { /* No waiters at the moment */ /* Check if read lock can be obtained */ @@ -459,7 +471,7 @@ unlock: ocf_alock_waitlist_unlock(alock, entry, flags); if (!waiting) { - alock->cbs->line_mark_locked(alock, req, idx, OCF_READ, true); + ocf_alock_mark_index_locked(alock, req, idx, true); ocf_alock_entry_locked(alock, req, cmpl); env_allocator_del(alock->allocator, waiter); } @@ -477,7 +489,6 @@ static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock, { bool locked = false; bool exchanged = true; - uint32_t i = 0; uint32_t idx = _WAITERS_LIST_ITEM(entry); struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx]; @@ -515,14 +526,11 @@ static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock, ENV_BUG(); } - i++; - if (locked) { exchanged = false; list_del(iter); - alock->cbs->line_mark_locked(alock, waiter->req, waiter->idx, - waiter->rw, true); + ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true); ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl); env_allocator_del(alock->allocator, waiter); @@ -566,7 +574,6 @@ void ocf_alock_unlock_one_rd(struct ocf_alock *alock, static inline void 
ocf_alock_unlock_one_wr_common(struct ocf_alock *alock, const ocf_cache_line_t entry) { - uint32_t i = 0; bool locked = false; bool exchanged = true; @@ -606,14 +613,11 @@ static inline void ocf_alock_unlock_one_wr_common(struct ocf_alock *alock, ENV_BUG(); } - i++; - if (locked) { exchanged = false; list_del(iter); - alock->cbs->line_mark_locked(alock, waiter->req, waiter->idx, - waiter->rw, true); + ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true); ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl); env_allocator_del(alock->allocator, waiter); @@ -648,10 +652,9 @@ void ocf_alock_unlock_one_wr(struct ocf_alock *alock, * Request can be assigned with lock asynchronously at any point of time, * so need to check lock state under a common lock. */ -static inline void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock, - struct ocf_request *req, int i, int rw) +void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock, + struct ocf_request *req, ocf_cache_line_t entry, int i, int rw) { - ocf_cache_line_t entry = alock->cbs->line_get_entry(alock, req, i); uint32_t idx = _WAITERS_LIST_ITEM(entry); struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx]; struct list_head *iter, *next; @@ -660,18 +663,19 @@ static inline void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock, ocf_alock_waitlist_lock(alock, entry, flags); - if (alock->cbs->line_is_locked(alock, req, i, rw)) { + if (ocf_alock_is_index_locked(alock, req, i)) { if (rw == OCF_READ) ocf_alock_unlock_one_rd_common(alock, entry); else ocf_alock_unlock_one_wr_common(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, rw, false); + ocf_alock_mark_index_locked(alock, req, i, false); } else { list_for_each_safe(iter, next, &lst->head) { waiter = list_entry(iter, struct ocf_alock_waiter, item); if (waiter->req == req) { list_del(iter); env_allocator_del(alock->allocator, waiter); + break; } } } @@ -679,365 +683,77 @@ static inline void 
ocf_alock_waitlist_remove_entry(struct ocf_alock *alock, ocf_alock_waitlist_unlock(alock, entry, flags); } -/* Try to read-lock request without adding waiters. Function should be called - * under read lock, multiple threads may attempt to acquire the lock - * concurrently. - */ -static int ocf_alock_lock_rd_fast(struct ocf_alock *alock, - struct ocf_request *req) -{ - int32_t i; - ocf_cache_line_t entry; - int ret = OCF_LOCK_ACQUIRED; - - OCF_DEBUG_RQ(req, "Lock"); - - ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); - - for (i = 0; i < req->core_line_count; i++) { - if (!alock->cbs->line_needs_lock(alock, req, i)) { - /* nothing to lock */ - continue; - } - - entry = alock->cbs->line_get_entry(alock, req, i); - ENV_BUG_ON(entry >= alock->num_entries); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ)); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)); - - if( ocf_alock_trylock_entry_rd_idle(alock, entry)) { - /* cache entry locked */ - alock->cbs->line_mark_locked(alock, req, i, OCF_READ, true); - } else { - /* Not possible to lock all cachelines */ - ret = OCF_LOCK_NOT_ACQUIRED; - OCF_DEBUG_RQ(req, "NO Lock, cache entry = %u", entry); - break; - } - } - - /* Check if request is locked */ - if (ret == OCF_LOCK_NOT_ACQUIRED) { - /* Request is not locked, discard acquired locks */ - for (; i >= 0; i--) { - if (!alock->cbs->line_needs_lock(alock, req, i)) { - /* nothing to discard */ - continue; - } - - entry = alock->cbs->line_get_entry(alock, req, i); - - if (alock->cbs->line_is_locked(alock, req, i, OCF_READ)) { - ocf_alock_unlock_one_rd(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, OCF_READ, false); - } - } - } - - return ret; -} - -/* - * Read-lock request cache lines. Must be called under cacheline concurrency - * write lock. 
- */ -static int ocf_alock_lock_rd_slow(struct ocf_alock *alock, - struct ocf_request *req, ocf_req_async_lock_cb cmpl) -{ - int32_t i; - ocf_cache_line_t entry; - int ret = OCF_LOCK_NOT_ACQUIRED; - - ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); - - env_atomic_inc(&alock->waiting); - env_atomic_set(&req->lock_remaining, req->core_line_count); - env_atomic_inc(&req->lock_remaining); - - for (i = 0; i < req->core_line_count; i++) { - if (!alock->cbs->line_needs_lock(alock, req, i)) { - /* nothing to lock */ - env_atomic_dec(&req->lock_remaining); - continue; - } - - entry = alock->cbs->line_get_entry(alock, req, i); - ENV_BUG_ON(entry >= alock->num_entries); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ)); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)); - - if (!ocf_alock_lock_one_rd(alock, entry, cmpl, req, i)) { - /* lock not acquired and not added to wait list */ - ret = -OCF_ERR_NO_MEM; - goto err; - } - } - - if (env_atomic_dec_return(&req->lock_remaining) == 0) { - ret = OCF_LOCK_ACQUIRED; - env_atomic_dec(&alock->waiting); - } - - return ret; - -err: - for (; i >= 0; i--) { - if (!alock->cbs->line_needs_lock(alock, req, i)) - continue; - - ocf_alock_waitlist_remove_entry(alock, req, i ,OCF_READ); - } - env_atomic_set(&req->lock_remaining, 0); - env_atomic_dec(&alock->waiting); - - return ret; - -} - int ocf_alock_lock_rd(struct ocf_alock *alock, struct ocf_request *req, ocf_req_async_lock_cb cmpl) { - int lock; + int lock, status; - lock = ocf_alock_lock_rd_fast(alock, req); + ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); + req->alock_rw = OCF_READ; + + lock = alock->cbs->lock_entries_fast(alock, req, OCF_READ); if (lock != OCF_LOCK_ACQUIRED) { env_mutex_lock(&alock->lock); - lock = ocf_alock_lock_rd_slow(alock, req, cmpl); + + ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); + ENV_BUG_ON(!cmpl); + + env_atomic_inc(&alock->waiting); + env_atomic_set(&req->lock_remaining, req->core_line_count); + 
env_atomic_inc(&req->lock_remaining); + + status = alock->cbs->lock_entries_slow(alock, req, OCF_READ, cmpl); + if (!status) { + if (env_atomic_dec_return(&req->lock_remaining) == 0) { + lock = OCF_LOCK_ACQUIRED; + env_atomic_dec(&alock->waiting); + } + } else { + env_atomic_set(&req->lock_remaining, 0); + env_atomic_dec(&alock->waiting); + } env_mutex_unlock(&alock->lock); } return lock; } -/* Try to write-lock request without adding waiters. Function should be called - * under read lock, multiple threads may attempt to acquire the lock - * concurrently. */ -static int ocf_alock_lock_wr_fast(struct ocf_alock *alock, - struct ocf_request *req) -{ - int32_t i; - ocf_cache_line_t entry; - int ret = OCF_LOCK_ACQUIRED; - - ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); - - for (i = 0; i < req->core_line_count; i++) { - if (!alock->cbs->line_needs_lock(alock, req, i)) { - /* nothing to lock */ - continue; - } - - entry = alock->cbs->line_get_entry(alock, req, i); - ENV_BUG_ON(entry >= alock->num_entries); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ)); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)); - - if (ocf_alock_trylock_entry_wr(alock, entry)) { - /* cache entry locked */ - alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, true); - } else { - /* Not possible to lock all cachelines */ - ret = OCF_LOCK_NOT_ACQUIRED; - OCF_DEBUG_RQ(req, "NO Lock, cache entry = %u", entry); - break; - } - } - - /* Check if request is locked */ - if (ret == OCF_LOCK_NOT_ACQUIRED) { - /* Request is not locked, discard acquired locks */ - for (; i >= 0; i--) { - if (!alock->cbs->line_needs_lock(alock, req, i)) - continue; - - entry = alock->cbs->line_get_entry(alock, req, i); - - if (alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) { - ocf_alock_unlock_one_wr(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, false); - } - } - } - - return ret; -} - -/* - * Write-lock request cache lines. 
Must be called under cacheline concurrency - * write lock. - */ -static int ocf_alock_lock_wr_slow(struct ocf_alock *alock, - struct ocf_request *req, ocf_req_async_lock_cb cmpl) -{ - int32_t i; - ocf_cache_line_t entry; - int ret = OCF_LOCK_NOT_ACQUIRED; - - ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); - ENV_BUG_ON(!cmpl); - - env_atomic_inc(&alock->waiting); - env_atomic_set(&req->lock_remaining, req->core_line_count); - env_atomic_inc(&req->lock_remaining); - - for (i = 0; i < req->core_line_count; i++) { - - if (!alock->cbs->line_needs_lock(alock, req, i)) { - /* nothing to lock */ - env_atomic_dec(&req->lock_remaining); - continue; - } - - entry = alock->cbs->line_get_entry(alock, req, i); - ENV_BUG_ON(entry >= alock->num_entries); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ)); - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)); - - if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) { - /* lock not acquired and not added to wait list */ - ret = -OCF_ERR_NO_MEM; - goto err; - } - } - - if (env_atomic_dec_return(&req->lock_remaining) == 0) { - ret = OCF_LOCK_ACQUIRED; - env_atomic_dec(&alock->waiting); - } - - return ret; - -err: - for (; i >= 0; i--) { - if (!alock->cbs->line_needs_lock(alock, req, i)) - continue; - - ocf_alock_waitlist_remove_entry(alock, req, i, OCF_WRITE); - } - env_atomic_set(&req->lock_remaining, 0); - env_atomic_dec(&alock->waiting); - - return ret; -} - int ocf_alock_lock_wr(struct ocf_alock *alock, struct ocf_request *req, ocf_req_async_lock_cb cmpl) { - int lock; + int lock, status; - lock = ocf_alock_lock_wr_fast(alock, req); + ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); + req->alock_rw = OCF_WRITE; + lock = alock->cbs->lock_entries_fast(alock, req, OCF_WRITE); if (lock != OCF_LOCK_ACQUIRED) { env_mutex_lock(&alock->lock); - lock = ocf_alock_lock_wr_slow(alock, req, cmpl); + + ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); + ENV_BUG_ON(!cmpl); + + 
env_atomic_inc(&alock->waiting); + env_atomic_set(&req->lock_remaining, req->core_line_count); + env_atomic_inc(&req->lock_remaining); + + status = alock->cbs->lock_entries_slow(alock, req, OCF_WRITE, cmpl); + if (!status) { + if (env_atomic_dec_return(&req->lock_remaining) == 0) { + lock = OCF_LOCK_ACQUIRED; + env_atomic_dec(&alock->waiting); + } + } else { + env_atomic_set(&req->lock_remaining, 0); + env_atomic_dec(&alock->waiting); + } env_mutex_unlock(&alock->lock); } return lock; } -void ocf_alock_unlock_rd(struct ocf_alock *alock, - struct ocf_request *req) -{ - int32_t i; - ocf_cache_line_t entry; - - OCF_DEBUG_RQ(req, "Unlock"); - - for (i = 0; i < req->core_line_count; i++) { - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)); - - if (!alock->cbs->line_is_acting(alock, req, i)) - continue; - - if (!alock->cbs->line_is_locked(alock, req, i, OCF_READ)) - continue; - - entry = alock->cbs->line_get_entry(alock, req, i); - - ENV_BUG_ON(entry >= alock->num_entries); - - ocf_alock_unlock_one_rd(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, OCF_READ, false); - } -} - -void ocf_alock_unlock_wr(struct ocf_alock *alock, - struct ocf_request *req) -{ - int32_t i; - ocf_cache_line_t entry; - - OCF_DEBUG_RQ(req, "Unlock"); - - for (i = 0; i < req->core_line_count; i++) { - ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ)); - - if (!alock->cbs->line_is_acting(alock, req, i)) - continue; - - if (!alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) - continue; - - entry = alock->cbs->line_get_entry(alock, req, i); - - ENV_BUG_ON(entry >= alock->num_entries); - - ocf_alock_unlock_one_wr(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, false); - } -} - -void ocf_alock_unlock(struct ocf_alock *alock, - struct ocf_request *req) -{ - int32_t i; - ocf_cache_line_t entry; - - OCF_DEBUG_RQ(req, "Unlock"); - - for (i = 0; i < req->core_line_count; i++) { - if (!alock->cbs->line_is_acting(alock, req, i)) - 
continue; - - entry = alock->cbs->line_get_entry(alock, req, i); - ENV_BUG_ON(entry >= alock->num_entries); - - if (alock->cbs->line_is_locked(alock, req, i, OCF_READ) && - alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) { - ENV_BUG(); - } else if (alock->cbs->line_is_locked(alock, req, i, OCF_READ)) { - ocf_alock_unlock_one_rd(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, OCF_READ, false); - } else if (alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) { - ocf_alock_unlock_one_wr(alock, entry); - alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, false); - } - } -} - -void ocf_alock_unlock_one(struct ocf_alock *alock, - struct ocf_request *req, uint32_t idx) -{ - ocf_cache_line_t entry = alock->cbs->line_get_entry(alock, req, idx); - - ENV_BUG_ON(!alock->cbs->line_is_acting(alock, req, idx)); - - if (alock->cbs->line_is_locked(alock, req, idx, OCF_READ) && - alock->cbs->line_is_locked(alock, req, idx, OCF_WRITE)) { - ENV_BUG(); - } else if (alock->cbs->line_is_locked(alock, req, idx, OCF_READ)) { - ocf_alock_unlock_one_rd(alock, entry); - alock->cbs->line_mark_locked(alock, req, idx, OCF_READ, false); - } else if (alock->cbs->line_is_locked(alock, req, idx, OCF_WRITE)) { - ocf_alock_unlock_one_wr(alock, entry); - alock->cbs->line_mark_locked(alock, req, idx, OCF_WRITE, false); - } else { - ENV_BUG(); - } -} - bool ocf_cache_line_is_used(struct ocf_alock *alock, ocf_cache_line_t entry) { diff --git a/src/utils/utils_alock.h b/src/utils/utils_alock.h index 11f66b7..2970755 100644 --- a/src/utils/utils_alock.h +++ b/src/utils/utils_alock.h @@ -20,29 +20,16 @@ struct ocf_alock; /* async request cacheline lock acquisition callback */ typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req); -typedef bool (*ocf_cl_lock_line_needs_lock_cb)(struct ocf_alock *alock, - struct ocf_request *req, unsigned index); +typedef int (*ocf_cl_lock_fast)(struct ocf_alock *alock, + struct ocf_request *req, int rw); -typedef bool 
(*ocf_cl_lock_line_is_acting_cb)(struct ocf_alock *alock, - struct ocf_request *req, unsigned index); - -typedef bool (*ocf_cl_lock_line_is_locked_cb)(struct ocf_alock *alock, - struct ocf_request *req, unsigned index, int rw); - -typedef void (*ocf_cl_lock_line_mark_locked_cb)(struct ocf_alock *alock, - struct ocf_request *req, unsigned index, int rw, bool locked); - -typedef ocf_cache_line_t (*ocf_cl_lock_line_get_entry_cb)( - struct ocf_alock *alock, struct ocf_request *req, - unsigned index); +typedef int (*ocf_cl_lock_slow)(struct ocf_alock *alock, + struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl); struct ocf_alock_lock_cbs { - ocf_cl_lock_line_needs_lock_cb line_needs_lock; - ocf_cl_lock_line_is_acting_cb line_is_acting; - ocf_cl_lock_line_is_locked_cb line_is_locked; - ocf_cl_lock_line_mark_locked_cb line_mark_locked; - ocf_cl_lock_line_get_entry_cb line_get_entry; + ocf_cl_lock_fast lock_entries_fast; + ocf_cl_lock_slow lock_entries_slow; }; bool ocf_alock_trylock_one_rd(struct ocf_alock *alock, @@ -63,15 +50,6 @@ int ocf_alock_lock_rd(struct ocf_alock *alock, int ocf_alock_lock_wr(struct ocf_alock *alock, struct ocf_request *req, ocf_req_async_lock_cb cmpl); -void ocf_alock_unlock_rd(struct ocf_alock *alock, - struct ocf_request *req); - -void ocf_alock_unlock_wr(struct ocf_alock *alock, - struct ocf_request *req); - -void ocf_alock_unlock(struct ocf_alock *alock, - struct ocf_request *req); - bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock, ocf_cache_line_t entry); @@ -92,4 +70,24 @@ void ocf_alock_deinit(struct ocf_alock **self); size_t ocf_alock_size(unsigned num_entries); +bool ocf_alock_is_index_locked(struct ocf_alock *alock, + struct ocf_request *req, unsigned index); + +void ocf_alock_mark_index_locked(struct ocf_alock *alock, + struct ocf_request *req, unsigned index, bool locked); + +bool ocf_alock_lock_one_wr(struct ocf_alock *alock, + const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl, + void *req, uint32_t idx); + 
+bool ocf_alock_lock_one_rd(struct ocf_alock *alock, + const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl, + void *req, uint32_t idx); + +void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock, + struct ocf_request *req, ocf_cache_line_t entry, int i, int rw); + +bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock, + ocf_cache_line_t entry); + #endif diff --git a/tests/unit/tests/concurrency/ocf_cache_line_concurrency.c/ocf_cache_line_concurrency.c b/tests/unit/tests/concurrency/ocf_cache_line_concurrency.c/ocf_cache_line_concurrency.c index 24c54d8..c797bf3 100644 --- a/tests/unit/tests/concurrency/ocf_cache_line_concurrency.c/ocf_cache_line_concurrency.c +++ b/tests/unit/tests/concurrency/ocf_cache_line_concurrency.c/ocf_cache_line_concurrency.c @@ -160,6 +160,7 @@ pthread_mutex_t prog_mutex = PTHREAD_MUTEX_INITIALIZER; struct test_req { struct ocf_request r; struct ocf_map_info map[TEST_MAX_MAP_SIZE]; + uint8_t alock_map[TEST_MAX_MAP_SIZE]; pthread_cond_t completion; pthread_mutex_t completion_mutex; bool finished; @@ -249,6 +250,7 @@ void thread(void *_ctx) bool locked; ctx->treq.r.map = &ctx->treq.map; + ctx->treq.r.alock_status = &ctx->treq.alock_map; pthread_cond_init(&ctx->treq.completion, NULL); pthread_mutex_init(&ctx->treq.completion_mutex, NULL); @@ -399,12 +401,13 @@ static void cctest(unsigned num_threads, unsigned num_iterations, unsigned cline { if (!threads[i].finished) { - unsigned num_clines = threads[i].treq.r.core_line_count; + struct ocf_request *req = &threads[i].treq.r; + unsigned num_clines = req->core_line_count; struct ocf_map_info **clines = malloc(num_clines * sizeof(*clines)); for (j = 0; j < num_clines; j++) { - clines[j] = &threads[i].treq.r.map[j]; + clines[j] = &req->map[j]; } qsort(clines, num_clines, sizeof(*clines), cmp_map); @@ -412,8 +415,8 @@ static void cctest(unsigned num_threads, unsigned num_iterations, unsigned cline print_message("thread no %u\n", i); for (j = 0; j < num_clines; j++) { struct 
ocf_map_info *map = clines[j];
-			const char *status = map->rd_locked ? "R" :
-					map->wr_locked ? "W" : "X";
+			const char *status = env_bit_test(map - req->map, (unsigned long*)req->alock_status) ?
+				(req->alock_rw == OCF_WRITE ? "W" : "R") : "X";
 			print_message("[%u] %u %s\n", j, map->coll_idx, status);
 		}