Merge pull request #453 from arutk/no_cl_gl_lock

Skip cacheline concurrency global lock in fast path
Robert Baldyga 2021-03-04 12:33:50 +01:00 committed by GitHub
commit ac9bd5b094
23 changed files with 754 additions and 233 deletions
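In short: the global read-write semaphore that used to guard the per-cacheline trylock fast path is removed. The fast path now relies only on per-line atomic trylocks, and the slow (contended) path serializes on a plain mutex. A condensed sketch of the before/after flow in ocf_req_async_lock_rd(), assembled from the hunks below (illustrative, not verbatim):

/* Before: even an uncontended request took the global rwsem. */
int lock_rd_before(struct ocf_request *req, ocf_req_async_lock_cb cb)
{
	struct ocf_cache_line_concurrency *c =
			req->cache->device->concurrency.cache_line;
	int lock;

	env_rwsem_down_read(&c->lock);
	lock = _ocf_req_trylock_rd(req);	/* per-line atomic trylocks */
	env_rwsem_up_read(&c->lock);

	if (lock != OCF_LOCK_ACQUIRED) {
		env_rwsem_down_write(&c->lock);	/* global exclusive section */
		lock = _ocf_req_lock_rd(req, cb);
		env_rwsem_up_write(&c->lock);
	}

	return lock;
}

/* After: the trylock fast path touches only per-line atomics; just the
 * contended path (which may add waiters) serializes, now on a mutex. */
int lock_rd_after(struct ocf_cache_line_concurrency *c,
		struct ocf_request *req, ocf_req_async_lock_cb cb)
{
	int lock = _ocf_req_trylock_rd(c, req);

	if (lock != OCF_LOCK_ACQUIRED) {
		env_mutex_lock(&c->lock);
		lock = _ocf_req_lock_rd(c, req, cb);
		env_mutex_unlock(&c->lock);
	}

	return lock;
}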

View File

@@ -396,9 +396,10 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
core_line);
if (info.status == LOOKUP_HIT &&
metadata_test_dirty(cache, info.coll_idx) &&
ocf_cache_line_try_lock_rd(cache, info.coll_idx)) {
locked = true;
metadata_test_dirty(cache, info.coll_idx)) {
locked = ocf_cache_line_try_lock_rd(
cache->device->concurrency.cache_line,
info.coll_idx);
}
ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock, lock_idx, core_id,
@@ -472,7 +473,9 @@ static void _acp_flush_end(void *priv, int error)
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
for (i = 0; i < flush->size; i++) {
ocf_cache_line_unlock_rd(cache, flush->data[i].cache_line);
ocf_cache_line_unlock_rd(
cache->device->concurrency.cache_line,
flush->data[i].cache_line);
ACP_DEBUG_END(acp, flush->data[i].cache_line);
}

View File

@@ -682,8 +682,11 @@ static bool block_is_busy(struct ocf_cache *cache,
if (!cache->core[core_id].opened)
return true;
if (ocf_cache_line_is_used(cache, cache_line))
if (ocf_cache_line_is_used(
cache->device->concurrency.cache_line,
cache_line)) {
return true;
}
return false;
}

View File

@@ -50,10 +50,12 @@ struct __waiters_list {
};
struct ocf_cache_line_concurrency {
env_rwsem lock;
ocf_cache_t cache;
env_mutex lock;
env_atomic *access;
env_atomic waiting;
size_t access_limit;
ocf_cache_line_t num_clines;
env_allocator *allocator;
struct __waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES];
@@ -66,94 +68,103 @@ struct ocf_cache_line_concurrency {
#define ALLOCATOR_NAME_FMT "ocf_%s_cache_concurrency"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
int ocf_cache_line_concurrency_init(struct ocf_cache_line_concurrency **self,
unsigned num_clines, ocf_cache_t cache)
{
uint32_t i;
int error = 0;
struct ocf_cache_line_concurrency *c;
char name[ALLOCATOR_NAME_MAX];
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
cache);
ENV_BUG_ON(cache->device->concurrency.cache_line);
OCF_DEBUG_TRACE(cache);
c = env_vmalloc(sizeof(*c));
c = env_vzalloc(sizeof(*c));
if (!c) {
error = __LINE__;
goto ocf_cache_line_concurrency_init;
goto exit_err;
}
error = env_rwsem_init(&c->lock);
c->cache = cache;
c->num_clines = num_clines;
error = env_mutex_init(&c->lock);
if (error) {
env_vfree(c);
error = __LINE__;
goto ocf_cache_line_concurrency_init;
goto rwsem_err;
}
cache->device->concurrency.cache_line = c;
OCF_REALLOC_INIT(&c->access, &c->access_limit);
OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), line_entries,
OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), num_clines,
&c->access_limit);
if (!c->access) {
error = __LINE__;
goto ocf_cache_line_concurrency_init;
goto allocation_err;
}
if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
ocf_cache_get_name(cache)) < 0) {
error = __LINE__;
goto ocf_cache_line_concurrency_init;
goto allocation_err;
}
c->allocator = env_allocator_create(sizeof(struct __waiter), name);
if (!c->allocator) {
error = __LINE__;
goto ocf_cache_line_concurrency_init;
goto allocation_err;
}
/* Init concurrency control table */
for (i = 0; i < _WAITERS_LIST_ENTRIES; i++) {
INIT_LIST_HEAD(&c->waiters_lsts[i].head);
error = env_spinlock_init(&c->waiters_lsts[i].lock);
if (error)
if (error) {
error = __LINE__;
goto spinlock_err;
}
}
*self = c;
return 0;
spinlock_err:
while (i--)
env_spinlock_destroy(&c->waiters_lsts[i].lock);
ocf_cache_line_concurrency_init:
allocation_err:
if (c->allocator)
env_allocator_destroy(c->allocator);
if (c->access)
OCF_REALLOC_DEINIT(&c->access, &c->access_limit);
rwsem_err:
env_mutex_destroy(&c->lock);
exit_err:
ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, "
"ERROR %d", error);
if (c)
env_vfree(c);
if (cache->device->concurrency.cache_line)
ocf_cache_line_concurrency_deinit(cache);
*self = NULL;
return -1;
}
/*
*
*/
void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
void ocf_cache_line_concurrency_deinit(struct ocf_cache_line_concurrency **self)
{
struct ocf_cache_line_concurrency *concurrency = *self;
int i;
struct ocf_cache_line_concurrency *concurrency;
if (!cache->device->concurrency.cache_line)
if (!concurrency)
return;
OCF_DEBUG_TRACE(cache);
OCF_DEBUG_TRACE(concurrency->cache);
concurrency = cache->device->concurrency.cache_line;
env_rwsem_destroy(&concurrency->lock);
env_mutex_destroy(&concurrency->lock);
for (i = 0; i < _WAITERS_LIST_ENTRIES; i++)
env_spinlock_destroy(&concurrency->waiters_lsts[i].lock);
@@ -166,10 +177,11 @@ void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
env_allocator_destroy(concurrency->allocator);
env_vfree(concurrency);
cache->device->concurrency.cache_line = NULL;
*self = NULL;
}
size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache)
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache)
{
size_t size;
@@ -365,12 +377,11 @@ static inline bool __try_lock_rd2rd(struct ocf_cache_line_concurrency *c,
/*
*
*/
static void _req_on_lock(void *ctx, ocf_req_async_lock_cb cb,
static void _req_on_lock(struct ocf_cache_line_concurrency *c,
void *ctx, ocf_req_async_lock_cb cb,
uint32_t ctx_id, ocf_cache_line_t line, int rw)
{
struct ocf_request *req = ctx;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
if (rw == OCF_READ)
req->map[ctx_id].rd_locked = true;
@@ -396,34 +407,29 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
void *ctx, uint32_t ctx_id)
{
struct __waiter *waiter;
bool locked = false;
bool waiting = false;
unsigned long flags = 0;
ENV_BUG_ON(!cb);
if (__try_lock_wr(c, line)) {
/* No activity before look get */
if (cb)
_req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
/* lock was not owned by anyone */
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_WRITE);
return true;
}
waiter = NULL;
if (cb) {
/* Need to create waiter */
waiter = env_allocator_new(c->allocator);
if (!waiter)
return false;
}
__lock_waiters_list(c, line, flags);
/* At the moment list is protected, double check if the cache line is
* unlocked
*/
if (__try_lock_wr(c, line)) {
/* Look get */
locked = true;
} else if (cb) {
if (__try_lock_wr(c, line))
goto unlock;
/* Setup waiters field */
waiter->line = line;
waiter->ctx = ctx;
@@ -435,16 +441,16 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
/* Add to waiters list */
__add_waiter(c, line, waiter);
waiting = true;
}
unlock:
__unlock_waiters_list(c, line, flags);
if (locked && cb)
_req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
if (!waiting && waiter)
if (!waiting) {
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_WRITE);
env_allocator_del(c->allocator, waiter);
}
return locked || waiting;
return true;
}
/*
@@ -456,20 +462,21 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
void *ctx, uint32_t ctx_id)
{
struct __waiter *waiter;
bool locked = false;
bool waiting = false;
unsigned long flags = 0;
ENV_BUG_ON(!cb);
if (__try_lock_rd_idle(c, line)) {
/* No activity before look get, it is first reader */
if (cb)
_req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
/* lock was not owned by anyone */
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_READ);
return true;
}
waiter = NULL;
waiter = env_allocator_new(c->allocator);
if (!waiter)
return false;
repeat:
/* Lock waiters list */
__lock_waiters_list(c, line, flags);
@@ -479,23 +486,10 @@ repeat:
/* Check if read lock can be obtained */
if (__try_lock_rd(c, line)) {
/* Cache line locked */
locked = true;
goto unlock;
}
}
if (!cb)
goto unlock;
if (!waiter) {
/* Need to create waiters and add it into list */
__unlock_waiters_list(c, line, flags);
waiter = env_allocator_new(c->allocator);
if (!waiter)
goto end;
goto repeat;
}
/* Setup waiters field */
waiter->line = line;
waiter->ctx = ctx;
@@ -511,13 +505,12 @@ repeat:
unlock:
__unlock_waiters_list(c, line, flags);
end:
if (locked && cb)
_req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
if (!waiting && waiter)
if (!waiting) {
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_READ);
env_allocator_del(c->allocator, waiter);
}
return locked || waiting;
return true;
}
static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurrency *c,
@@ -569,7 +562,7 @@ static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurren
exchanged = false;
list_del(iter);
_req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id,
_req_on_lock(c, waiter->ctx, waiter->cb, waiter->ctx_id,
line, waiter->rw);
env_allocator_del(c->allocator, waiter);
@@ -650,7 +643,7 @@ static inline void __unlock_cache_line_wr_common(struct ocf_cache_line_concurren
exchanged = false;
list_del(iter);
_req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id, line,
_req_on_lock(c, waiter->ctx, waiter->cb, waiter->ctx_id, line,
waiter->rw);
env_allocator_del(c->allocator, waiter);
@@ -720,11 +713,10 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr
/* Try to read-lock request without adding waiters. Function should be called
* under read lock, multiple threads may attempt to acquire the lock
* concurrently. */
static int _ocf_req_trylock_rd(struct ocf_request *req)
static int _ocf_req_trylock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_ACQUIRED;
@@ -739,11 +731,11 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
if (__lock_cache_line_rd(c, line, NULL, NULL, 0)) {
if (__try_lock_rd_idle(c, line)) {
/* cache line locked */
req->map[i].rd_locked = true;
} else {
@@ -761,7 +753,7 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
line = req->map[i].coll_idx;
if (req->map[i].rd_locked) {
__unlock_rd(c, line);
__unlock_cache_line_rd(c, line);
req->map[i].rd_locked = false;
}
}
@@ -774,11 +766,10 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
* Read-lock request cache lines. Must be called under cacheline concurrency
* write lock.
*/
static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
static int _ocf_req_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_NOT_ACQUIRED;
@@ -797,7 +788,7 @@ static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
@@ -827,20 +818,17 @@ err:
}
int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
int ocf_req_async_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line;
int lock;
env_rwsem_down_read(&c->lock);
lock = _ocf_req_trylock_rd(req);
env_rwsem_up_read(&c->lock);
lock = _ocf_req_trylock_rd(c, req);
if (lock != OCF_LOCK_ACQUIRED) {
env_rwsem_down_write(&c->lock);
lock = _ocf_req_lock_rd(req, cb);
env_rwsem_up_write(&c->lock);
env_mutex_lock(&c->lock);
lock = _ocf_req_lock_rd(c, req, cb);
env_mutex_unlock(&c->lock);
}
return lock;
@@ -849,11 +837,10 @@ int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
/* Try to write-lock request without adding waiters. Function should be called
* under read lock, multiple threads may attempt to acquire the lock
* concurrently. */
static int _ocf_req_trylock_wr(struct ocf_request *req)
static int _ocf_req_trylock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_ACQUIRED;
@@ -866,11 +853,11 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
if (__lock_cache_line_wr(c, line, NULL, NULL, 0)) {
if (__try_lock_wr(c, line)) {
/* cache line locked */
req->map[i].wr_locked = true;
} else {
@@ -888,7 +875,7 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
line = req->map[i].coll_idx;
if (req->map[i].wr_locked) {
__unlock_wr(c, line);
__unlock_cache_line_wr(c, line);
req->map[i].wr_locked = false;
}
}
@@ -901,11 +888,10 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
* Write-lock request cache lines. Must be called under cacheline concurrency
* write lock.
*/
static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
static int _ocf_req_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_NOT_ACQUIRED;
@@ -925,7 +911,7 @@ static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
@@ -954,20 +940,17 @@ err:
return ret;
}
int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
int ocf_req_async_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line;
int lock;
env_rwsem_down_read(&c->lock);
lock = _ocf_req_trylock_wr(req);
env_rwsem_up_read(&c->lock);
lock = _ocf_req_trylock_wr(c, req);
if (lock != OCF_LOCK_ACQUIRED) {
env_rwsem_down_write(&c->lock);
lock = _ocf_req_lock_wr(req, cb);
env_rwsem_up_write(&c->lock);
env_mutex_lock(&c->lock);
lock = _ocf_req_lock_wr(c, req, cb);
env_mutex_unlock(&c->lock);
}
return lock;
@@ -977,9 +960,8 @@ int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
/*
*
*/
void ocf_req_unlock_rd(struct ocf_request *req)
void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c, struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
int32_t i;
ocf_cache_line_t line;
@@ -995,7 +977,7 @@ void ocf_req_unlock_rd(struct ocf_request *req)
line = req->map[i].coll_idx;
ENV_BUG_ON(!req->map[i].rd_locked);
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
__unlock_cache_line_rd(c, line);
req->map[i].rd_locked = false;
@@ -1005,9 +987,8 @@ void ocf_req_unlock_rd(struct ocf_request *req)
/*
*
*/
void ocf_req_unlock_wr(struct ocf_request *req)
void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c, struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
int32_t i;
ocf_cache_line_t line;
@@ -1023,7 +1004,7 @@ void ocf_req_unlock_wr(struct ocf_request *req)
line = req->map[i].coll_idx;
ENV_BUG_ON(!req->map[i].wr_locked);
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
__unlock_cache_line_wr(c, line);
req->map[i].wr_locked = false;
@@ -1033,9 +1014,8 @@ void ocf_req_unlock_wr(struct ocf_request *req)
/*
*
*/
void ocf_req_unlock(struct ocf_request *req)
void ocf_req_unlock(struct ocf_cache_line_concurrency *c, struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
int32_t i;
ocf_cache_line_t line;
@@ -1049,7 +1029,7 @@ void ocf_req_unlock(struct ocf_request *req)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
if (req->map[i].rd_locked && req->map[i].wr_locked) {
ENV_BUG();
@@ -1068,11 +1048,9 @@ void ocf_req_unlock(struct ocf_request *req)
/*
*
*/
void ocf_req_unlock_entry(struct ocf_cache *cache,
void ocf_req_unlock_entry(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, uint32_t entry)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
ENV_BUG_ON(req->map[entry].status == LOOKUP_MISS);
if (req->map[entry].rd_locked && req->map[entry].wr_locked) {
@@ -1091,17 +1069,15 @@ void ocf_req_unlock_entry(struct ocf_cache *cache,
/*
*
*/
bool ocf_cache_line_is_used(struct ocf_cache *cache,
bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
ENV_BUG_ON(line >= cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
if (env_atomic_read(&(c->access[line])))
return true;
if (ocf_cache_line_are_waiters(cache, line))
if (ocf_cache_line_are_waiters(c, line))
return true;
else
return false;
@@ -1110,14 +1086,13 @@ bool ocf_cache_line_is_used(struct ocf_cache *cache,
/*
*
*/
bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
bool are;
unsigned long flags = 0;
ENV_BUG_ON(line >= cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
/* Lock waiters list */
__lock_waiters_list(c, line, flags);
@@ -1132,42 +1107,35 @@ bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
/*
*
*/
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache)
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache_line_concurrency *c)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
return env_atomic_read(&c->waiting);
}
bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
bool ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
return __lock_cache_line_rd(c, line, NULL, NULL, 0);
return __try_lock_rd_idle(c, line);
}
/*
*
*/
void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
void ocf_cache_line_unlock_rd(struct ocf_cache_line_concurrency *c, ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
OCF_DEBUG_RQ(cache, "Cache line = %u", line);
OCF_DEBUG_RQ(c->cache, "Cache line = %u", line);
__unlock_cache_line_rd(c, line);
}
bool ocf_cache_line_try_lock_wr(struct ocf_cache *cache, ocf_cache_line_t line)
bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
return __lock_cache_line_wr(c, line, NULL, NULL, 0);
return __try_lock_wr(c, line);
}
void ocf_cache_line_unlock_wr(struct ocf_cache *cache, ocf_cache_line_t line)
void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
OCF_DEBUG_RQ(cache, "Cache line = %u", line);
OCF_DEBUG_RQ(c->cache, "Cache line = %u", line);
__unlock_cache_line_wr(c, line);
}
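For context, the trylock fast path above can stay lock-free because each line's lock state is a single env_atomic counter in c->access[]. A hedged sketch of the idea behind __try_lock_rd_idle()/__try_lock_wr(), assuming a simple encoding (0 = idle, n > 0 = n readers, a negative sentinel = exclusive writer); the actual OCF encoding and waiter handling may differ:

#define SKETCH_WRITER (-1)	/* assumed sentinel, not the OCF value */

static bool sketch_try_lock_rd_idle(env_atomic *access)
{
	/* idle -> first reader in one compare-exchange; fails whenever the
	 * line is held, so contended callers fall back to the waiter list */
	return env_atomic_cmpxchg(access, 0, 1) == 0;
}

static bool sketch_try_lock_wr(env_atomic *access)
{
	/* idle -> exclusive writer */
	return env_atomic_cmpxchg(access, 0, SKETCH_WRITER) == 0;
}

static void sketch_unlock_rd(env_atomic *access)
{
	env_atomic_dec(access);	/* last reader returns the line to idle */
}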

View File

@@ -19,27 +19,32 @@ struct ocf_cache_line_concurrency;
/**
* @brief Initialize OCF cache concurrency module
*
* @param self - cacheline concurrency private data
* @param num_clines - cachelines count
* @param cache - OCF cache instance
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_cache_line_concurrency_init(struct ocf_cache *cache);
int ocf_cache_line_concurrency_init(struct ocf_cache_line_concurrency **self,
unsigned num_clines, struct ocf_cache *cache);
/**
* @brief De-initialize OCF cache concurrency module
*
* @param cache - OCF cache instance
* @param self - cacheline concurrency private data
*/
void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache);
void ocf_cache_line_concurrency_deinit(
struct ocf_cache_line_concurrency **self);
/**
* @brief Get number of waiting (suspended) OCF requests due to cache
* line overlapping
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
*
* @return Number of suspended OCF requests
*/
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache_line_concurrency *c);
/**
* @brief Return memory footprint consumed by cache concurrency module
@@ -48,7 +53,7 @@ uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);
*
* @return Memory footprint of cache concurrency module
*/
size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache);
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache);
/* async request cacheline lock acquisition callback */
typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
@@ -56,6 +61,7 @@ typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
/**
* @brief Lock OCF request for write access (Lock all cache lines in map info)
*
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param cb - async lock acquisition callback
*
@@ -65,11 +71,13 @@ typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added to the waiting list. When the lock is acquired, the @cb callback
* will be called
*/
int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb);
/**
* @brief Lock OCF request for read access (Lock all cache lines in map info)
*
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param cb - async lock acquisition callback
*
@@ -79,28 +87,35 @@ int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added to the waiting list. When the lock is acquired, the @cb callback
* will be called
*/
int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb);
/**
* @brief Unlock OCF request from write access
*
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock_wr(struct ocf_request *req);
void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req);
/**
* @brief Unlock OCF request from read access
*
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock_rd(struct ocf_request *req);
void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req);
/**
* @brief Unlock OCF request from read or write access
*
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock(struct ocf_request *req);
void ocf_req_unlock(struct ocf_cache_line_concurrency *c,
struct ocf_request *req);
/**
* @brief Check if cache line is used.
@@ -116,30 +131,30 @@ void ocf_req_unlock(struct ocf_request *req);
* @retval true - cache line is used
* @retval false - cache line is not used
*/
bool ocf_cache_line_is_used(struct ocf_cache *cache,
bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Check if for specified cache line there are waiters
* on the waiting list
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be checked for waiters
*
* @retval true - there are waiters
* @retval false - No waiters
*/
bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Unlock request map info entry from write or read access.
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param entry - request map entry number
*/
void ocf_req_unlock_entry(struct ocf_cache *cache,
void ocf_req_unlock_entry(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, uint32_t entry);
/**
@@ -148,36 +163,40 @@ void ocf_req_unlock_entry(struct ocf_cache *cache,
* @param cache - OCF cache instance
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
void ocf_cache_line_unlock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Attempt to lock cache line for read
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be checked for waiters
*
* @retval true - read lock successfully acquired
* @retval false - failed to acquire read lock
*/
bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
bool ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Release cache line write lock
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_wr(struct ocf_cache *cache, ocf_cache_line_t line);
void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Attempt to lock cache line for write
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be checked for waiters
*
* @retval true - write lock successfully acquired
* @retval false - failed to acquire write lock
*/
bool ocf_cache_line_try_lock_wr(struct ocf_cache *cache, ocf_cache_line_t line);
bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
#endif /* OCF_CONCURRENCY_H_ */
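With the concurrency context now explicit in every prototype above, callers resolve it once from the cache and pass it through; this is the pattern repeated across the engine changes below:

struct ocf_cache_line_concurrency *c =
		req->cache->device->concurrency.cache_line;
int lock;

lock = ocf_req_async_lock_rd(c, req, ocf_engine_on_resume);
if (lock == OCF_LOCK_ACQUIRED) {
	/* all mapped cache lines read-locked, proceed synchronously */
} else if (lock == OCF_LOCK_NOT_ACQUIRED) {
	/* request queued as a waiter; ocf_engine_on_resume() will fire
	 * once every line is locked */
}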

View File

@@ -4,12 +4,16 @@
*/
#include "ocf_concurrency.h"
#include "../metadata/metadata.h"
int ocf_concurrency_init(struct ocf_cache *cache)
{
int result = 0;
result = ocf_cache_line_concurrency_init(cache);
result = ocf_cache_line_concurrency_init(
&cache->device->concurrency.cache_line,
ocf_metadata_collision_table_entries(cache),
cache);
if (result)
ocf_concurrency_deinit(cache);
@@ -19,6 +23,7 @@ int ocf_concurrency_init(struct ocf_cache *cache)
void ocf_concurrency_deinit(struct ocf_cache *cache)
{
ocf_cache_line_concurrency_deinit(cache);
ocf_cache_line_concurrency_deinit(
&cache->device->concurrency.cache_line);
}
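Since init/deinit now work through an explicit handle instead of reaching into the cache, the module can be instantiated on its own, which is what the new unit test at the end of this commit does. A minimal usage sketch (num_clines stands in for the collision table entry count):

struct ocf_cache_line_concurrency *c;
int result;

result = ocf_cache_line_concurrency_init(&c, num_clines, cache);
if (result)
	return result;	/* c is NULL; init already logged the failure */

/* ... per-line locking through c ... */

ocf_cache_line_concurrency_deinit(&c);	/* sets c back to NULL; a repeated
					   call is a harmless no-op */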

View File

@@ -64,7 +64,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
ocf_engine_invalidate(req);
} else {
ocf_req_unlock(req);
ocf_req_unlock(cache->device->concurrency.cache_line, req);
/* put the request at the last point of the completion path */
ocf_req_put(req);

View File

@@ -396,7 +396,8 @@ static void _ocf_engine_clean_end(void *private_data, int error)
req->error |= error;
/* End request and do not processing */
ocf_req_unlock(req);
ocf_req_unlock(req->cache->device->concurrency.cache_line,
req);
/* Complete request */
req->complete(req, error);
@@ -414,12 +415,14 @@ static int lock_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
enum ocf_engine_lock_type lock_type = engine_cbs->get_lock_type(req);
struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line;
switch (lock_type) {
case ocf_engine_lock_write:
return ocf_req_async_lock_wr(req, engine_cbs->resume);
return ocf_req_async_lock_wr(c, req, engine_cbs->resume);
case ocf_engine_lock_read:
return ocf_req_async_lock_rd(req, engine_cbs->resume);
return ocf_req_async_lock_rd(c, req, engine_cbs->resume);
default:
return OCF_LOCK_ACQUIRED;
}
@@ -703,7 +706,8 @@ static int _ocf_engine_refresh(struct ocf_request *req)
req->complete(req, req->error);
/* Release WRITE lock of request */
ocf_req_unlock(req);
ocf_req_unlock(req->cache->device->concurrency.cache_line,
req);
/* Release OCF request */
ocf_req_put(req);

View File

@@ -147,7 +147,7 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "Completion");
/* Release WRITE lock of request */
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
if (req->error) {
ocf_metadata_error(req->cache);
@@ -235,7 +235,9 @@ static int _ocf_discard_step(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
lock = ocf_req_async_lock_wr(
cache->device->concurrency.cache_line,
req, _ocf_discard_on_resume);
} else {
lock = OCF_LOCK_ACQUIRED;
}

View File

@@ -46,7 +46,8 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
ocf_core_stats_cache_error_update(req->core, OCF_READ);
ocf_engine_push_req_front_pt(req);
} else {
ocf_req_unlock(req);
ocf_req_unlock(req->cache->device->concurrency.cache_line,
req);
/* Complete request */
req->complete(req, req->error);
@@ -130,7 +131,9 @@ int ocf_read_fast(struct ocf_request *req)
if (hit && part_has_space) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
lock = ocf_req_async_lock_rd(
req->cache->device->concurrency.cache_line,
req, ocf_engine_on_resume);
}
ocf_hb_req_prot_unlock_rd(req);
@@ -200,7 +203,9 @@ int ocf_write_fast(struct ocf_request *req)
if (mapped && part_has_space) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
lock = ocf_req_async_lock_wr(
req->cache->device->concurrency.cache_line,
req, ocf_engine_on_resume);
}
ocf_hb_req_prot_unlock_rd(req);

View File

@@ -31,7 +31,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
if (req->error)
ocf_engine_error(req, true, "Failed to flush metadata to cache");
ocf_req_unlock(req);
ocf_req_unlock(req->cache->device->concurrency.cache_line, req);
/* Put OCF request - decrease reference counter */
ocf_req_put(req);

View File

@@ -34,7 +34,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
/* Complete request */
req->complete(req, req->error);
ocf_req_unlock_rd(req);
ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
/* Release OCF request */
ocf_req_put(req);
@@ -127,7 +127,10 @@ int ocf_read_pt(struct ocf_request *req)
/* There are mapped cache line,
* lock request for READ access
*/
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
lock = ocf_req_async_lock_rd(
req->cache->device->concurrency.
cache_line,
req, ocf_engine_on_resume);
} else {
/* No mapped cache lines, no need to get lock */
lock = OCF_LOCK_ACQUIRED;

View File

@@ -24,6 +24,9 @@
static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
{
struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line;
if (error)
req->error |= error;
@@ -41,8 +44,7 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
ocf_core_stats_cache_error_update(req->core, OCF_READ);
ocf_engine_push_req_front_pt(req);
} else {
ocf_req_unlock(req);
ocf_req_unlock(c, req);
/* Complete request */
req->complete(req, req->error);

View File

@@ -60,7 +60,7 @@ static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
req->complete(req, req->error);

View File

@@ -25,7 +25,7 @@ static const struct ocf_io_if _io_if_wi_update_metadata = {
int _ocf_write_wi_next_pass(struct ocf_request *req)
{
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
if (req->wi_second_pass) {
req->complete(req, req->error);
@@ -75,7 +75,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
req->complete(req, req->error);
@@ -128,7 +128,8 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "Completion");
if (req->error) {
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
req);
req->complete(req, req->error);
@@ -198,7 +199,9 @@ int ocf_write_wi(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
lock = ocf_req_async_lock_wr(
req->cache->device->concurrency.cache_line,
req, _ocf_write_wi_on_resume);
} else {
lock = OCF_LOCK_ACQUIRED;
}

View File

@@ -33,7 +33,7 @@ static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
if (req->error)
ocf_engine_error(req, true, "Failed to read data from cache");
ocf_req_unlock_rd(req);
ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
/* Complete request */
req->complete(req, req->error);
@@ -169,7 +169,8 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
if (!req->info.dirty_any || req->error) {
OCF_DEBUG_RQ(req, "Completion");
req->complete(req, req->error);
ocf_req_unlock_rd(req);
ocf_req_unlock_rd(req->cache->device->concurrency.cache_line,
req);
ocf_req_put(req);
return;
}
@@ -236,7 +237,9 @@ int ocf_read_wo(struct ocf_request *req)
/* There are mapped cache lines,
* lock request for READ access
*/
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
lock = ocf_req_async_lock_rd(
req->cache->device->concurrency.cache_line,
req, ocf_engine_on_resume);
}
ocf_hb_req_prot_unlock_rd(req); /*- END Metadata RD access -----------------*/

View File

@@ -34,7 +34,8 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
ocf_engine_invalidate(req);
} else {
/* Unlock reqest from WRITE access */
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
req);
/* Complete request */
req->complete(req, req->info.core_error ? req->error : 0);

View File

@@ -31,7 +31,7 @@ static int ocf_zero_purge(struct ocf_request *req)
ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
}
ocf_req_unlock_wr(req);
ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
req->complete(req, req->error);
@@ -152,7 +152,9 @@ void ocf_engine_zero_line(struct ocf_request *req)
req->io_if = &_io_if_ocf_zero_do;
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
lock = ocf_req_async_lock_wr(
req->cache->device->concurrency.cache_line,
req, ocf_engine_on_resume);
if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);

View File

@@ -357,7 +357,9 @@ static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
break;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, cline)) {
if (ocf_cache_line_is_used(
cache->device->concurrency.cache_line,
cline)) {
continue;
}
@@ -490,8 +492,11 @@ uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
break;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, cline))
if (ocf_cache_line_is_used(
cache->device->concurrency.cache_line,
cline)) {
continue;
}
ENV_BUG_ON(metadata_test_dirty(cache, cline));

View File

@@ -51,6 +51,8 @@ int ocf_metadata_actor(struct ocf_cache *cache,
ocf_cache_line_t i, next_i;
uint64_t start_line, end_line;
int ret = 0;
struct ocf_cache_line_concurrency *c =
cache->device->concurrency.cache_line;
start_line = ocf_bytes_2_lines(cache, start_byte);
end_line = ocf_bytes_2_lines(cache, end_byte);
@@ -63,7 +65,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
if (ocf_cache_line_is_used(c, i))
ret = -OCF_ERR_AGAIN;
else
actor(cache, i);
@@ -75,7 +77,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
for (i = 0; i < cache->device->collision_table_entries; ++i) {
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
if (ocf_cache_line_is_used(c, i))
ret = -OCF_ERR_AGAIN;
else
actor(cache, i);

View File

@@ -80,14 +80,18 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
continue;
}
if (!ocf_cache_line_try_lock_wr(cache, curr_cline))
if (!ocf_cache_line_try_lock_wr(
cache->device->concurrency.cache_line,
curr_cline)) {
break;
}
if (metadata_test_dirty(cache, curr_cline))
ocf_purge_cleaning_policy(cache, curr_cline);
ocf_metadata_sparse_cache_line(cache, curr_cline);
ocf_cache_line_unlock_wr(cache, curr_cline);
ocf_cache_line_unlock_wr(cache->device->concurrency.cache_line,
curr_cline);
if (prev_cline != cache->device->collision_table_entries)
curr_cline = ocf_metadata_get_collision_next(cache, prev_cline);

View File

@@ -43,7 +43,9 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
* for this cache line which will use one, clear
* only valid bits
*/
if (!is_valid && !ocf_cache_line_are_waiters(cache, line)) {
if (!is_valid && !ocf_cache_line_are_waiters(
cache->device->concurrency.cache_line,
line)) {
ocf_purge_eviction_policy(cache, line);
ocf_metadata_remove_cache_line(cache, line);
}

View File

@@ -213,7 +213,9 @@ static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache);
return ocf_req_async_lock_rd(req, _ocf_cleaner_on_resume);
return ocf_req_async_lock_rd(
req->cache->device->concurrency.cache_line,
req, _ocf_cleaner_on_resume);
}
/*
@@ -224,7 +226,8 @@ static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req)
{
if (req->info.cleaner_cache_line_lock) {
OCF_DEBUG_TRACE(req->cache);
ocf_req_unlock(req);
ocf_req_unlock(req->cache->device->concurrency.cache_line,
req);
}
}

View File

@@ -0,0 +1,482 @@
/*
* <tested_file_path>src/concurrency/ocf_cache_line_concurrency.c</tested_file_path>
* <tested_function>ocf_req_async_lock_rd</tested_function>
* <functions_to_leave>
* ocf_cache_line_concurrency_init
* ocf_cache_line_concurrency_deinit
* ocf_req_async_lock_rd
* ocf_req_async_lock_wr
* ocf_req_unlock_wr
* ocf_req_unlock_rd
* ocf_req_unlock
* ocf_cache_line_unlock_rd
* ocf_cache_line_unlock_wr
* ocf_cache_line_try_lock_rd
* ocf_cache_line_try_lock_wr
* ocf_cache_line_is_used
* __are_waiters
* __add_waiter
* __try_lock_wr
* __try_lock_rd_idle
* __try_lock_rd
* __unlock_wr
* __unlock_rd
* __try_lock_wr2wr
* __try_lock_wr2rd
* __try_lock_rd2wr
* __try_lock_rd2rd
* __lock_cache_line_wr
* __lock_cache_line_rd
* __unlock_cache_line_rd_common
* __unlock_cache_line_rd
* __unlock_cache_line_wr_common
* __unlock_cache_line_wr
* __remove_line_from_waiters_list
* _ocf_req_needs_cl_lock
* _ocf_req_trylock_rd
* _ocf_req_lock_rd
* _ocf_req_lock_wr
* _ocf_req_trylock_wr
* _req_on_lock
* ocf_cache_line_are_waiters
* </functions_to_leave>
*/
#undef static
#undef inline
#define TEST_MAX_MAP_SIZE 32
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "print_desc.h"
#include "ocf_concurrency.h"
#include "../ocf_priv.h"
#include "../ocf_request.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_realloc.h"
#include "concurrency/ocf_cache_line_concurrency.c/ocf_cache_line_concurrency_generated_wraps.c"
#define LOCK_WAIT_TIMEOUT 5
void __wrap___assert_fail (const char *__assertion, const char *__file,
unsigned int __line, const char *__function)
{
print_message("assertion failure %s in %s:%u %s\n",
__assertion, __file, __line, __function);
}
int __wrap_list_empty(struct list_head *l1)
{
return l1->next == l1;
}
void __wrap_list_del(struct list_head *it)
{
it->next->prev = it->prev;
it->prev->next = it->next;
}
void __wrap_list_add_tail(struct list_head *it, struct list_head *l1)
{
it->prev = l1->prev;
it->next = l1;
l1->prev->next = it;
l1->prev = it;
}
void __wrap_ocf_realloc_cp(void** mem, size_t size, size_t count, size_t *limit)
{
if (*mem)
free(*mem);
if (count == 0)
return;
*mem = malloc(size * count);
memset(*mem, 0, size * count);
*limit = count;
}
const char *__wrap_ocf_cache_get_name(ocf_cache_t cache)
{
return "test";
}
int __wrap_snprintf (char *__restrict __s, size_t __maxlen,
const char *__restrict __format, ...)
{
va_list args;
int ret;
va_start(args, __format);
ret = vsnprintf(__s, __maxlen, __format, args);
va_end(args);
return ret;
}
static inline bool __wrap___lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
void *ctx, uint32_t ctx_id)
{
usleep(rand() % 100);
return __real___lock_cache_line_wr(c, line, cb, ctx, ctx_id);
}
static inline bool __wrap___lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
void *ctx, uint32_t ctx_id)
{
usleep(rand() % 100);
return __real___lock_cache_line_rd(c, line, cb, ctx, ctx_id);
}
int __wrap__ocf_req_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
usleep(rand() % 500);
return __real__ocf_req_lock_wr(c, req, cb);
}
int __wrap__ocf_req_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
usleep(rand() % 500);
return __real__ocf_req_lock_rd(c, req, cb);
}
unsigned long long progress;
pthread_cond_t prog_cond = PTHREAD_COND_INITIALIZER;
pthread_mutex_t prog_mutex = PTHREAD_MUTEX_INITIALIZER;
struct test_req {
struct ocf_request r;
struct ocf_map_info map[TEST_MAX_MAP_SIZE];
pthread_cond_t completion;
pthread_mutex_t completion_mutex;
bool finished;
};
static void req_async_lock_callback(struct ocf_request *req)
{
struct test_req* treq = (struct test_req *)req;
pthread_mutex_lock(&treq->completion_mutex);
treq->finished = true;
pthread_cond_signal(&treq->completion);
pthread_mutex_unlock(&treq->completion_mutex);
}
bool req_lock_sync(struct ocf_cache_line_concurrency *c, struct ocf_request *req,
int (*pfn)(struct ocf_cache_line_concurrency *c, struct ocf_request *req,
void (*cmpl)(struct ocf_request *req)),
volatile int *finish)
{
struct test_req* treq = (struct test_req *)req;
int result;
bool timeout = false;
struct timespec ts;
treq->finished = false;
result = pfn(c, req, req_async_lock_callback);
assert(result >= 0);
if (result == OCF_LOCK_ACQUIRED) {
return true;
}
pthread_mutex_lock(&treq->completion_mutex);
while (!treq->finished && !*finish) {
pthread_cond_wait(&treq->completion,
&treq->completion_mutex);
}
pthread_mutex_unlock(&treq->completion_mutex);
return treq->finished;
}
struct thread_ctx
{
pthread_t t;
struct ocf_cache_line_concurrency *c;
unsigned num_iterations;
unsigned clines;
unsigned max_io_size;
bool timeout;
volatile int finished;
volatile int terminated;
struct test_req treq;
};
void shuffle(unsigned *array, unsigned size)
{
int i, j;
unsigned tmp;
for (i = size - 1; i >= 0; i--)
{
j = rand() % (i + 1);
tmp = array[i];
array[i] = array[j];
array[j] = tmp;
}
}
void *thread(void *_ctx)
{
struct thread_ctx *ctx = _ctx;
struct ocf_cache_line_concurrency *c = ctx->c;
struct ocf_request *req = &ctx->treq.r;
unsigned i;
unsigned cline;
unsigned *permutation;
bool rw;
bool single;
int (*lock_pfn)(struct ocf_cache_line_concurrency *c, struct ocf_request *req,
void (*cmpl)(struct ocf_request *req));
unsigned max_io_size = min(min(TEST_MAX_MAP_SIZE, ctx->clines), ctx->max_io_size);
unsigned line;
bool locked;
ctx->treq.r.map = ctx->treq.map;
pthread_cond_init(&ctx->treq.completion, NULL);
pthread_mutex_init(&ctx->treq.completion_mutex, NULL);
permutation = malloc(ctx->clines * sizeof(unsigned));
for (i = 0; i < ctx->clines; i++)
permutation[i] = i;
i = ctx->num_iterations;
while (i-- && !ctx->terminated)
{
rw = rand() % 2;
single = (rand() % 4 == 0);
if (!single) {
shuffle(permutation, ctx->clines);
req->core_line_count = (rand() % max_io_size) + 1;
for (cline = 0; cline < req->core_line_count; cline++) {
req->map[cline].core_id = 0;
req->map[cline].core_line = 0;
req->map[cline].coll_idx = permutation[cline];
req->map[cline].status = LOOKUP_HIT;
}
lock_pfn = rw ? ocf_req_async_lock_wr : ocf_req_async_lock_rd;
if (req_lock_sync(c, req, lock_pfn, &ctx->terminated)) {
usleep(rand() % 500);
if (rw)
ocf_req_unlock_wr(c, req);
else
ocf_req_unlock_rd(c, req);
usleep(rand() % 500);
}
} else {
line = rand() % ctx->clines;
if (rw)
locked = ocf_cache_line_try_lock_wr(c, line);
else
locked = ocf_cache_line_try_lock_rd(c, line);
usleep(rand() % 500);
if (locked) {
if (rw)
ocf_cache_line_unlock_wr(c, line);
else
ocf_cache_line_unlock_rd(c, line);
usleep(rand() % 500);
}
}
pthread_mutex_lock(&prog_mutex);
progress++;
pthread_cond_signal(&prog_cond);
pthread_mutex_unlock(&prog_mutex);
}
free(permutation);
ctx->finished = 1;
return NULL;
}
int cmp_map(const void *p1, const void *p2)
{
struct ocf_map_info * m1 = *( struct ocf_map_info **)p1;
struct ocf_map_info * m2 = *( struct ocf_map_info **)p2;
if (m1->coll_idx > m2->coll_idx)
return 1;
if (m2->coll_idx > m1->coll_idx)
return -1;
return 0;
}
static void cctest(unsigned num_threads, unsigned num_iterations, unsigned clines,
unsigned max_io_size)
{
struct ocf_cache_line_concurrency *c;
struct thread_ctx *threads;
unsigned i, j;
time_t t;
char desc[1024];
unsigned randseed = (unsigned) time(&t);
unsigned last_progress = 0, curr_progress = 0;
struct timespec ts;
int result;
bool deadlocked = false;
unsigned timeout_s = max_io_size / 10 + 3;
snprintf(desc, sizeof(desc), "cacheline concurrency deadlock detection "
"threads %u iterations %u cache size %u max io size %u randseed %u\n",
num_threads, num_iterations, clines, max_io_size, randseed);
print_test_description(desc);
progress = 0;
pthread_cond_init(&prog_cond, NULL);
pthread_mutex_init(&prog_mutex, NULL);
srand(randseed);
threads = malloc(num_threads * sizeof(threads[0]));
memset(threads, 0, num_threads * sizeof(threads[0]));
assert_int_equal(0, ocf_cache_line_concurrency_init(&c, clines, NULL));
for (i = 0; i < num_threads; i++)
{
threads[i].timeout = false;
threads[i].finished = false;
threads[i].terminated = false;
threads[i].c = c;
threads[i].num_iterations = num_iterations;
threads[i].clines = clines;
threads[i].max_io_size = max_io_size;
pthread_create(&threads[i].t, NULL, thread, &threads[i]);
}
do {
clock_gettime(CLOCK_REALTIME, &ts);
ts.tv_sec += timeout_s;
do {
last_progress = curr_progress;
pthread_mutex_lock(&prog_mutex);
result = pthread_cond_timedwait(&prog_cond,
&prog_mutex, &ts);
curr_progress = progress;
pthread_mutex_unlock(&prog_mutex);
} while (!result && curr_progress == last_progress);
} while (last_progress != curr_progress);
for (i = 0; i < num_threads; i++)
{
if (!threads[i].finished) {
print_message("deadlocked\n");
deadlocked = true;
break;
}
}
if (!deadlocked)
goto join;
/* print locks on which all stuck threads are hanging */
for (i = 0; i < num_threads; i++)
{
if (!threads[i].finished)
{
unsigned num_clines = threads[i].treq.r.core_line_count;
struct ocf_map_info **clines = malloc(num_clines *
sizeof(*clines));
for (j = 0; j < num_clines; j++)
{
clines[j] = &threads[i].treq.r.map[j];
}
qsort(clines, num_clines, sizeof(*clines), cmp_map);
print_message("thread no %u\n", i);
for (j = 0; j < num_clines; j++) {
struct ocf_map_info *map = clines[j];
const char *status = map->rd_locked ? "R" :
map->wr_locked ? "W" : "X";
print_message("[%u] %u %s\n", j, map->coll_idx, status);
}
free(clines);
}
}
/* terminate all waiting threads */
for (i = 0; i < num_threads; i++) {
threads[i].terminated = 1;
pthread_mutex_lock(&threads[i].treq.completion_mutex);
pthread_cond_signal(&threads[i].treq.completion);
pthread_mutex_unlock(&threads[i].treq.completion_mutex);
}
join:
assert_int_equal((int)deadlocked, 0);
for (i = 0; i < num_threads; i++) {
pthread_join(threads[i].t, NULL);
}
ocf_cache_line_concurrency_deinit(&c);
free(threads);
}
static void ocf_req_async_lock_rd_test01(void **state)
{
cctest(8, 10000, 16, 8);
}
static void ocf_req_async_lock_rd_test02(void **state)
{
cctest(64, 1000, 16, 8);
}
static void ocf_req_async_lock_rd_test03(void **state)
{
cctest(64, 1000, 128, 32);
}
static void ocf_req_async_lock_rd_test04(void **state)
{
cctest(64, 1000, 1024, 32);
}
static void ocf_req_async_lock_rd_test05(void **state)
{
cctest((rand() % 64) + 1, 1000, (rand() % 1024) + 1, 32);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_req_async_lock_rd_test01),
cmocka_unit_test(ocf_req_async_lock_rd_test02),
cmocka_unit_test(ocf_req_async_lock_rd_test03),
cmocka_unit_test(ocf_req_async_lock_rd_test04),
cmocka_unit_test(ocf_req_async_lock_rd_test05)
};
print_message("Cacheline concurrency deadlock detection\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}
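The deadlock detection in cctest() above reduces to a progress watchdog: every worker bumps the shared progress counter after each iteration, and the main thread declares a stall only when an entire timeout window passes with no movement, then inspects the per-thread finished flags to tell "all done" from "deadlocked". The same idiom in isolation (a hypothetical helper, not part of the commit):

#include <pthread.h>
#include <time.h>

/* Returns once *counter has stopped changing for a whole timeout_s window. */
static void wait_until_progress_stalls(pthread_mutex_t *mtx,
		pthread_cond_t *cond, unsigned long long *counter,
		unsigned timeout_s)
{
	unsigned long long last, curr;
	struct timespec ts;
	int rc;

	pthread_mutex_lock(mtx);
	curr = *counter;
	do {
		last = curr;
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += timeout_s;
		/* woken by each progress signal; rc != 0 means the window
		 * expired without one */
		rc = pthread_cond_timedwait(cond, mtx, &ts);
		curr = *counter;
	} while (rc == 0 || curr != last);
	pthread_mutex_unlock(mtx);
}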