Finish separating cacheline concurrency and alock

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski <adam.j.rutkowski@intel.com>
Date: 2021-04-02 20:20:50 -05:00
Committed-by: Kozlowski Mateusz
parent d22a3ad0e0
commit 2cb7270f63
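
This commit completes the split between the generic async entry lock (ocf_alock) and the cacheline concurrency built on top of it: init/deinit/size and the waitlist predicates become generic ocf_alock_* functions, while the cacheline-specific callbacks and the ocf_req_*/ocf_cache_line_* entry points move to the bottom of the file as thin wrappers. A minimal sketch of the resulting layering, condensed from the diff below:

	/* Generic layer: driven by per-user callbacks, knows nothing
	 * about cachelines. */
	int ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
			const char* name, ocf_cache_t cache);

	/* Cacheline layer: supplies ocf_cline_conc_cbs and delegates. */
	int ocf_req_async_lock_rd(struct ocf_alock *alock,
			struct ocf_request *req, ocf_req_async_lock_cb cmpl)
	{
		return ocf_alock_lock_rd(alock, &ocf_cline_conc_cbs, req, cmpl);
	}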


@@ -54,8 +54,7 @@ struct ocf_alock {
env_mutex lock;
env_atomic *access;
env_atomic waiting;
size_t access_limit;
ocf_cache_line_t num_clines;
ocf_cache_line_t num_entries;
env_allocator *allocator;
struct ocf_alock_waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES];
@@ -82,48 +81,6 @@ struct ocf_alock_lock_cbs
ocf_cl_lock_line_mark_locked_cb line_mark_locked;
};
static bool ocf_cl_lock_line_needs_lock(struct ocf_request *req,
unsigned index)
{
/* Remapped cachelines are assigned a cacheline lock individually
* during eviction
*/
return req->map[index].status != LOOKUP_MISS &&
req->map[index].status != LOOKUP_REMAPPED;
}
static bool ocf_cl_lock_line_is_acting(struct ocf_request *req,
unsigned index)
{
return req->map[index].status != LOOKUP_MISS;
}
static bool ocf_cl_lock_line_is_locked(struct ocf_request *req,
unsigned index, int rw)
{
if (rw == OCF_WRITE)
return req->map[index].wr_locked;
else
return req->map[index].rd_locked;
}
static void ocf_cl_lock_line_mark_locked(struct ocf_request *req,
unsigned index, int rw, bool locked)
{
if (rw == OCF_WRITE)
req->map[index].wr_locked = locked;
else
req->map[index].rd_locked = locked;
}
static struct ocf_alock_lock_cbs ocf_cline_conc_cbs = {
.line_needs_lock = ocf_cl_lock_line_needs_lock,
.line_is_acting = ocf_cl_lock_line_is_acting,
.line_is_locked = ocf_cl_lock_line_is_locked,
.line_mark_locked = ocf_cl_lock_line_mark_locked
};
/*
*
*/
@@ -131,13 +88,12 @@ static struct ocf_alock_lock_cbs ocf_cline_conc_cbs = {
#define ALLOCATOR_NAME_FMT "ocf_%s_concurrency"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
int ocf_cache_line_concurrency_init(struct ocf_alock **self,
unsigned num_clines, ocf_cache_t cache)
int ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
const char* name, ocf_cache_t cache)
{
uint32_t i;
int error = 0;
struct ocf_alock *alock;
char name[ALLOCATOR_NAME_MAX];
OCF_DEBUG_TRACE(cache);
@@ -148,7 +104,7 @@ int ocf_cache_line_concurrency_init(struct ocf_alock **self,
}
alock->cache = cache;
alock->num_clines = num_clines;
alock->num_entries = num_entries;
error = env_mutex_init(&alock->lock);
if (error) {
@@ -156,19 +112,13 @@ int ocf_cache_line_concurrency_init(struct ocf_alock **self,
goto rwsem_err;
}
alock->access = env_vzalloc(num_clines * sizeof(alock->access[0]));
alock->access = env_vzalloc(num_entries * sizeof(alock->access[0]));
if (!alock->access) {
error = __LINE__;
goto allocation_err;
}
if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
ocf_cache_get_name(cache)) < 0) {
error = __LINE__;
goto allocation_err;
}
alock->allocator = env_allocator_create(sizeof(struct ocf_alock_waiter), name, false);
if (!alock->allocator) {
error = __LINE__;
@@ -215,7 +165,7 @@ exit_err:
/*
*
*/
void ocf_cache_line_concurrency_deinit(struct ocf_alock **self)
void ocf_alock_deinit(struct ocf_alock **self)
{
struct ocf_alock *concurrency = *self;
int i;
@@ -231,8 +181,7 @@ void ocf_cache_line_concurrency_deinit(struct ocf_alock **self)
env_spinlock_destroy(&concurrency->waiters_lsts[i].lock);
if (concurrency->access)
OCF_REALLOC_DEINIT(&concurrency->access,
&concurrency->access_limit);
env_vfree(concurrency->access);
if (concurrency->allocator)
env_allocator_destroy(concurrency->allocator);
@@ -242,12 +191,12 @@ void ocf_cache_line_concurrency_deinit(struct ocf_alock **self)
*self = NULL;
}
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache)
size_t ocf_alock_size(unsigned num_entries)
{
size_t size;
size = sizeof(env_atomic);
size *= cache->device->collision_table_entries;
size *= num_entries;
size += sizeof(struct ocf_alock);
@@ -257,7 +206,7 @@ size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache)
/*
*
*/
static inline bool ocf_alock_waitlist_are_waiters(struct ocf_alock *alock,
static inline bool ocf_alock_waitlist_is_empty_locked(struct ocf_alock *alock,
ocf_cache_line_t line)
{
bool are = false;
@@ -268,7 +217,7 @@ static inline bool ocf_alock_waitlist_are_waiters(struct ocf_alock *alock,
/* If the list is empty, there are no waiters on this cache line */
if (list_empty(&lst->head))
return false;
return true;
list_for_each(iter, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
@@ -279,7 +228,7 @@ static inline bool ocf_alock_waitlist_are_waiters(struct ocf_alock *alock,
}
}
return are;
return !are;
}
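
The helper is renamed from ocf_alock_waitlist_are_waiters() to ocf_alock_waitlist_is_empty_locked(), which inverts its sense; the _locked suffix records that the caller must already hold the waitlist spinlock. Call sites flip accordingly, as in this before/after sketch consistent with the diff:

	/* before: proceed only if nobody is queued on this line */
	if (!ocf_alock_waitlist_are_waiters(alock, line)) {
		/* ... take the uncontended path ... */
	}

	/* after: the same condition, stated positively */
	if (ocf_alock_waitlist_is_empty_locked(alock, line)) {
		/* ... take the uncontended path ... */
	}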
/*
@@ -297,15 +246,15 @@ static inline void ocf_alock_waitlist_add(struct ocf_alock *alock,
#define ocf_alock_waitlist_lock(cncrrncy, line, flags) \
do { \
uint32_t idx = _WAITERS_LIST_ITEM(line); \
struct ocf_alock_waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \
uint32_t list = _WAITERS_LIST_ITEM(line); \
struct ocf_alock_waiters_list *lst = &cncrrncy->waiters_lsts[list]; \
env_spinlock_lock_irqsave(&lst->lock, flags); \
} while (0)
#define ocf_alock_waitlist_unlock(cncrrncy, line, flags) \
do { \
uint32_t idx = _WAITERS_LIST_ITEM(line); \
struct ocf_alock_waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \
uint32_t list = _WAITERS_LIST_ITEM(line); \
struct ocf_alock_waiters_list *lst = &cncrrncy->waiters_lsts[list]; \
env_spinlock_unlock_irqrestore(&lst->lock, flags); \
} while (0)
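
Within the macros, the local index variable is renamed from idx to list, presumably to avoid clashing with the idx name used for request map entries elsewhere in the file. Waiters are sharded across _WAITERS_LIST_ENTRIES spinlock-protected lists selected per line by _WAITERS_LIST_ITEM(); the selector itself is outside this hunk, but a plausible shape (an assumption, not part of this diff) is:

	/* Assumed shard selector, for illustration only: */
	#define _WAITERS_LIST_ITEM(line) ((line) % _WAITERS_LIST_ENTRIES)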
@@ -439,7 +388,7 @@ static inline bool ocf_alock_trylock_entry_rd2rd(struct ocf_alock *alock,
/*
*
*/
static void ocf_alock_on_lock(struct ocf_alock *alock,
static void ocf_alock_entry_locked(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
if (env_atomic_dec_return(&req->lock_remaining) == 0) {
@@ -468,7 +417,7 @@ static inline bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
if (ocf_alock_trylock_entry_wr(alock, line)) {
/* lock was not owned by anyone */
cbs->line_mark_locked(req, idx, OCF_WRITE, true);
ocf_alock_on_lock(alock, req, cmpl);
ocf_alock_entry_locked(alock, req, cmpl);
return true;
}
@@ -501,7 +450,7 @@ unlock:
if (!waiting) {
cbs->line_mark_locked(req, idx, OCF_WRITE, true);
ocf_alock_on_lock(alock, req, cmpl);
ocf_alock_entry_locked(alock, req, cmpl);
env_allocator_del(alock->allocator, waiter);
}
@@ -526,7 +475,7 @@ static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
if (ocf_alock_trylock_entry_rd_idle(alock, line)) {
/* lock was not owned by anyone */
cbs->line_mark_locked(req, idx, OCF_READ, true);
ocf_alock_on_lock(alock, req, cmpl);
ocf_alock_entry_locked(alock, req, cmpl);
return true;
}
@@ -537,7 +486,7 @@ static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
/* Lock waiters list */
ocf_alock_waitlist_lock(alock, line, flags);
if (!ocf_alock_waitlist_are_waiters(alock, line)) {
if (ocf_alock_waitlist_is_empty_locked(alock, line)) {
/* No waiters at the moment */
/* Check if read lock can be obtained */
@@ -564,7 +513,7 @@ unlock:
if (!waiting) {
cbs->line_mark_locked(req, idx, OCF_READ, true);
ocf_alock_on_lock(alock, req, cmpl);
ocf_alock_entry_locked(alock, req, cmpl);
env_allocator_del(alock->allocator, waiter);
}
@@ -623,7 +572,7 @@ static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock,
cbs->line_mark_locked(waiter->req, waiter->idx,
waiter->rw, true);
ocf_alock_on_lock(alock, waiter->req, waiter->cmpl);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
env_allocator_del(alock->allocator, waiter);
} else {
@@ -639,6 +588,12 @@ static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock,
}
}
bool ocf_alock_trylock_one_rd(struct ocf_alock *alock,
ocf_cache_line_t line)
{
return ocf_alock_trylock_entry_rd_idle(alock, line);
}
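
ocf_alock_trylock_one_rd() is new public surface: it exposes the idle-read trylock so the cacheline layer no longer has to reach into alock internals. The legacy wrapper near the end of this diff then reduces to a pure delegation chain:

	/* Resulting call chain (illustrative): */
	bool locked = ocf_cache_line_try_lock_rd(alock, line);
	/* -> ocf_alock_trylock_one_rd(alock, line)
	 *    -> ocf_alock_trylock_entry_rd_idle(alock, line) */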
/*
*
*/
@@ -707,7 +662,7 @@ static inline void ocf_alock_unlock_one_wr_common(struct ocf_alock *alock,
cbs->line_mark_locked(waiter->req, waiter->idx,
waiter->rw, true);
ocf_alock_on_lock(alock, waiter->req, waiter->cmpl);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
env_allocator_del(alock->allocator, waiter);
} else {
@@ -798,7 +753,7 @@ static int ocf_alock_lock_rd_fast(struct ocf_alock *alock,
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_READ));
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_WRITE));
@@ -860,7 +815,7 @@ static int ocf_alock_lock_rd_slow(struct ocf_alock *alock,
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_READ));
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_WRITE));
@@ -909,15 +864,6 @@ static int ocf_alock_lock_rd(struct ocf_alock *alock,
return lock;
}
int ocf_req_async_lock_rd(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
return ocf_alock_lock_rd(alock, cbs, req, cmpl);
}
/* Try to write-lock request without adding waiters. Function should be called
* under read lock; multiple threads may attempt to acquire the lock
* concurrently. */
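
The fast path only trylocks and never queues a waiter, so any number of threads may race through it; only the slow path, which allocates and enqueues waiters, needs to be serialized. A sketch of the combined strategy, simplified from ocf_alock_lock_wr() below (serializing the slow path with the struct's env_mutex is an assumption here):

	int lock = ocf_alock_lock_wr_fast(alock, cbs, req);
	if (lock != OCF_LOCK_ACQUIRED) {
		/* contention: retry on the slow path, adding waiters */
		env_mutex_lock(&alock->lock);
		lock = ocf_alock_lock_wr_slow(alock, cbs, req, cmpl);
		env_mutex_unlock(&alock->lock);
	}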
@@ -938,7 +884,7 @@ static int ocf_alock_lock_wr_fast(struct ocf_alock *alock,
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_READ));
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_WRITE));
@@ -1000,7 +946,7 @@ static int ocf_alock_lock_wr_slow(struct ocf_alock *alock,
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_READ));
ENV_BUG_ON(cbs->line_is_locked(req, i, OCF_WRITE));
@@ -1048,15 +994,6 @@ int ocf_alock_lock_wr(struct ocf_alock *alock,
return lock;
}
int ocf_req_async_lock_wr(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
return ocf_alock_lock_wr(alock, cbs, req, cmpl);
}
/*
*
*/
@@ -1080,21 +1017,13 @@ void ocf_alock_unlock_rd(struct ocf_alock *alock,
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
ocf_alock_unlock_one_rd(alock, cbs, line);
cbs->line_mark_locked(req, i, OCF_READ, false);
}
}
void ocf_req_unlock_rd(struct ocf_alock *alock, struct ocf_request *req)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock_rd(alock, cbs, req);
}
/*
*
*/
@@ -1118,21 +1047,13 @@ void ocf_alock_unlock_wr(struct ocf_alock *alock,
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
ocf_alock_unlock_one_wr(alock, cbs, line);
cbs->line_mark_locked(req, i, OCF_WRITE, false);
}
}
void ocf_req_unlock_wr(struct ocf_alock *alock, struct ocf_request *req)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock_wr(alock, cbs, req);
}
/*
*
*/
@@ -1150,7 +1071,7 @@ void ocf_alock_unlock(struct ocf_alock *alock,
continue;
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
if (cbs->line_is_locked(req, i, OCF_READ) &&
cbs->line_is_locked(req, i, OCF_WRITE)) {
@@ -1165,14 +1086,6 @@ void ocf_alock_unlock(struct ocf_alock *alock,
}
}
void ocf_req_unlock(struct ocf_alock *alock, struct ocf_request *req)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock(alock, cbs, req);
}
/*
*
*/
@@ -1196,22 +1109,13 @@ void ocf_alock_unlock_one(struct ocf_alock *alock,
}
}
void ocf_req_unlock_entry(struct ocf_alock *alock,
struct ocf_request *req, uint32_t entry)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock_one(alock, cbs, req, entry);
}
/*
*
*/
bool ocf_cache_line_is_used(struct ocf_alock *alock,
ocf_cache_line_t line)
{
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
if (env_atomic_read(&(alock->access[line])))
return true;
@@ -1225,37 +1129,34 @@ bool ocf_cache_line_is_used(struct ocf_alock *alock,
/*
*
*/
bool ocf_cache_line_are_waiters(struct ocf_alock *alock,
ocf_cache_line_t line)
bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock, unsigned line)
{
bool are;
bool empty;
unsigned long flags = 0;
ENV_BUG_ON(line >= alock->num_clines);
ENV_BUG_ON(line >= alock->num_entries);
/* Lock waiters list */
ocf_alock_waitlist_lock(alock, line, flags);
are = ocf_alock_waitlist_are_waiters(alock, line);
empty = ocf_alock_waitlist_is_empty_locked(alock, line);
ocf_alock_waitlist_unlock(alock, line, flags);
return are;
return empty;
}
/* NOTE: it is the caller's responsibility to ensure that no one acquires
* a lock in the background */
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
ocf_cache_line_t line)
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
unsigned idx)
{
struct ocf_alock *alock =
ocf_cache_line_concurrency(cache);
env_atomic *access = &alock->access[line];
env_atomic *access = &alock->access[idx];
int val = env_atomic_read(access);
ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE);
if (ocf_cache_line_are_waiters(alock, line))
if (!ocf_alock_waitlist_is_empty(alock, idx))
return false;
return val == OCF_CACHE_LINE_ACCESS_ONE_RD ||
@@ -1265,20 +1166,59 @@ bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
/*
*
*/
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_alock *alock)
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);
}
static bool ocf_cl_lock_line_needs_lock(struct ocf_request *req,
unsigned index)
{
/* Remapped cachelines are assigned a cacheline lock individually
* during eviction
*/
return req->map[index].status != LOOKUP_MISS &&
req->map[index].status != LOOKUP_REMAPPED;
}
static bool ocf_cl_lock_line_is_acting(struct ocf_request *req,
unsigned index)
{
return req->map[index].status != LOOKUP_MISS;
}
static bool ocf_cl_lock_line_is_locked(struct ocf_request *req,
unsigned index, int rw)
{
if (rw == OCF_WRITE)
return req->map[index].wr_locked;
else
return req->map[index].rd_locked;
}
static void ocf_cl_lock_line_mark_locked(struct ocf_request *req,
unsigned index, int rw, bool locked)
{
if (rw == OCF_WRITE)
req->map[index].wr_locked = locked;
else
req->map[index].rd_locked = locked;
}
static struct ocf_alock_lock_cbs ocf_cline_conc_cbs = {
.line_needs_lock = ocf_cl_lock_line_needs_lock,
.line_is_acting = ocf_cl_lock_line_is_acting,
.line_is_locked = ocf_cl_lock_line_is_locked,
.line_mark_locked = ocf_cl_lock_line_mark_locked
};
bool ocf_cache_line_try_lock_rd(struct ocf_alock *alock,
ocf_cache_line_t line)
{
return ocf_alock_trylock_entry_rd_idle(alock, line);
return ocf_alock_trylock_one_rd(alock, line);
}
/*
*
*/
void ocf_cache_line_unlock_rd(struct ocf_alock *alock, ocf_cache_line_t line)
{
struct ocf_alock_lock_cbs *cbs =
@@ -1303,3 +1243,96 @@ void ocf_cache_line_unlock_wr(struct ocf_alock *alock,
OCF_DEBUG_RQ(alock->cache, "Cache line = %u", line);
ocf_alock_unlock_one_wr(alock, cbs, line);
}
int ocf_req_async_lock_rd(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
return ocf_alock_lock_rd(alock, cbs, req, cmpl);
}
int ocf_req_async_lock_wr(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
return ocf_alock_lock_wr(alock, cbs, req, cmpl);
}
void ocf_req_unlock_rd(struct ocf_alock *alock, struct ocf_request *req)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock_rd(alock, cbs, req);
}
void ocf_req_unlock_wr(struct ocf_alock *alock, struct ocf_request *req)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock_wr(alock, cbs, req);
}
void ocf_req_unlock(struct ocf_alock *alock, struct ocf_request *req)
{
struct ocf_alock_lock_cbs *cbs =
&ocf_cline_conc_cbs;
ocf_alock_unlock(alock, cbs, req);
}
bool ocf_cache_line_are_waiters(struct ocf_alock *alock,
ocf_cache_line_t line)
{
return !ocf_alock_waitlist_is_empty(alock, line);
}
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_alock *alock =
ocf_cache_line_concurrency(cache);
return ocf_alock_is_locked_exclusively(alock, line);
}
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_alock *alock)
{
return ocf_alock_waitlist_count(alock);
}
#define ALLOCATOR_NAME_FMT "ocf_%s_cache_concurrency"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
int ocf_cache_line_concurrency_init(struct ocf_alock **self,
unsigned num_clines, ocf_cache_t cache)
{
char name[ALLOCATOR_NAME_MAX];
int ret;
ret = snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
ocf_cache_get_name(cache));
if (ret < 0)
return ret;
if (ret >= ALLOCATOR_NAME_MAX)
return -ENOSPC;
return ocf_alock_init(self, num_clines, name, cache);
}
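
The cacheline-specific constructor is now just name formatting plus delegation. A hypothetical call at cache start (the concurrency.cache_line destination field is assumed; collision_table_entries appears in the size helper below):

	/* Hypothetical usage; the destination field name is assumed: */
	ret = ocf_cache_line_concurrency_init(
			&cache->device->concurrency.cache_line,
			cache->device->collision_table_entries, cache);
	if (ret)
		goto err;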
void ocf_cache_line_concurrency_deinit(struct ocf_alock **self)
{
ocf_alock_deinit(self);
}
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache)
{
return ocf_alock_size(cache->device->collision_table_entries);
}
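
With init, deinit, and sizing made generic, nothing in ocf_alock refers to cachelines anymore, so another subsystem can reuse the same waiter machinery by supplying its own callback set in place of ocf_cline_conc_cbs. A hypothetical sketch (all my_* names are invented for illustration):

	/* Hypothetical second user of the generic alock: */
	static struct ocf_alock_lock_cbs my_cbs = {
		.line_needs_lock = my_needs_lock,
		.line_is_acting = my_is_acting,
		.line_is_locked = my_is_locked,
		.line_mark_locked = my_mark_locked,
	};

	int my_subsystem_init(struct ocf_alock **self, unsigned num_entries,
			ocf_cache_t cache)
	{
		return ocf_alock_init(self, num_entries, "my_concurrency", cache);
	}

	/* Requests are then locked through the generic API, e.g.:
	 * ocf_alock_lock_wr(lock, &my_cbs, req, my_lock_granted_cb); */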