Use cacheline concurrency ctx instead of cache

The cacheline concurrency functions have had their interfaces
changed so that the cacheline concurrency private context is
passed explicitly in the parameter list, rather than being taken
from cache->device->concurrency.cache_line.

The cache pointer is no longer provided as a parameter to these
functions. The cacheline concurrency context now holds a pointer
to the cache structure (for logging purposes only).

The purpose of this change is to facilitate unit testing.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
Adam Rutkowski
2021-02-24 17:29:39 -06:00
parent 0f34e46375
commit cf5f82b253
22 changed files with 200 additions and 151 deletions

View File

@@ -50,10 +50,12 @@ struct __waiters_list {
};
struct ocf_cache_line_concurrency {
ocf_cache_t cache;
env_mutex lock;
env_atomic *access;
env_atomic waiting;
size_t access_limit;
ocf_cache_line_t num_clines;
env_allocator *allocator;
struct __waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES];
@@ -66,7 +68,8 @@ struct ocf_cache_line_concurrency {
#define ALLOCATOR_NAME_FMT "ocf_%s_cache_concurrency"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
int ocf_cache_line_concurrency_init(struct ocf_cache_line_concurrency **self,
ocf_cache_t cache)
{
uint32_t i;
int error = 0;
@@ -75,8 +78,6 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
cache);
ENV_BUG_ON(cache->device->concurrency.cache_line);
OCF_DEBUG_TRACE(cache);
c = env_vzalloc(sizeof(*c));
@@ -85,14 +86,15 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
goto exit_err;
}
c->cache = cache;
c->num_clines = line_entries;
error = env_mutex_init(&c->lock);
if (error) {
error = __LINE__;
goto rwsem_err;
}
cache->device->concurrency.cache_line = c;
OCF_REALLOC_INIT(&c->access, &c->access_limit);
OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), line_entries,
&c->access_limit);
@@ -124,6 +126,7 @@ int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
}
}
*self = c;
return 0;
spinlock_err:
@@ -146,24 +149,22 @@ exit_err:
if (c)
env_vfree(c);
cache->device->concurrency.cache_line = NULL;
*self = NULL;
return -1;
}
/*
*
*/
void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
void ocf_cache_line_concurrency_deinit(struct ocf_cache_line_concurrency **self)
{
struct ocf_cache_line_concurrency *concurrency = *self;
int i;
struct ocf_cache_line_concurrency *concurrency;
if (!cache->device->concurrency.cache_line)
if (!concurrency)
return;
OCF_DEBUG_TRACE(cache);
concurrency = cache->device->concurrency.cache_line;
OCF_DEBUG_TRACE(concurrency->cache);
env_mutex_destroy(&concurrency->lock);
@@ -178,10 +179,11 @@ void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
env_allocator_destroy(concurrency->allocator);
env_vfree(concurrency);
cache->device->concurrency.cache_line = NULL;
*self = NULL;
}
size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache)
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache)
{
size_t size;
@@ -377,12 +379,11 @@ static inline bool __try_lock_rd2rd(struct ocf_cache_line_concurrency *c,
/*
*
*/
static void _req_on_lock(void *ctx, ocf_req_async_lock_cb cb,
static void _req_on_lock(struct ocf_cache_line_concurrency *c,
void *ctx, ocf_req_async_lock_cb cb,
uint32_t ctx_id, ocf_cache_line_t line, int rw)
{
struct ocf_request *req = ctx;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
if (rw == OCF_READ)
req->map[ctx_id].rd_locked = true;
@@ -415,7 +416,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
if (__try_lock_wr(c, line)) {
/* lock was not owned by anyone */
_req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_WRITE);
return true;
}
@@ -447,7 +448,7 @@ unlock:
__unlock_waiters_list(c, line, flags);
if (!waiting) {
_req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_WRITE);
env_allocator_del(c->allocator, waiter);
}
@@ -470,7 +471,7 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
if( __try_lock_rd_idle(c, line)) {
/* lock was not owned by anyone */
_req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_READ);
return true;
}
@@ -507,7 +508,7 @@ unlock:
__unlock_waiters_list(c, line, flags);
if (!waiting) {
_req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
_req_on_lock(c, ctx, cb, ctx_id, line, OCF_READ);
env_allocator_del(c->allocator, waiter);
}
@@ -563,7 +564,7 @@ static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurren
exchanged = false;
list_del(iter);
_req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id,
_req_on_lock(c, waiter->ctx, waiter->cb, waiter->ctx_id,
line, waiter->rw);
env_allocator_del(c->allocator, waiter);
@@ -644,7 +645,7 @@ static inline void __unlock_cache_line_wr_common(struct ocf_cache_line_concurren
exchanged = false;
list_del(iter);
_req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id, line,
_req_on_lock(c, waiter->ctx, waiter->cb, waiter->ctx_id, line,
waiter->rw);
env_allocator_del(c->allocator, waiter);
@@ -714,11 +715,10 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr
/* Try to read-lock request without adding waiters. Function should be called
* under read lock, multiple threads may attempt to acquire the lock
* concurrently. */
static int _ocf_req_trylock_rd(struct ocf_request *req)
static int _ocf_req_trylock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_ACQUIRED;
@@ -733,7 +733,7 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
@@ -768,11 +768,10 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
* Read-lock request cache lines. Must be called under cacheline concurrency
* write lock.
*/
static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
static int _ocf_req_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_NOT_ACQUIRED;
@@ -791,7 +790,7 @@ static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
@@ -821,17 +820,16 @@ err:
}
int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
int ocf_req_async_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line;
int lock;
lock = _ocf_req_trylock_rd(req);
lock = _ocf_req_trylock_rd(c, req);
if (lock != OCF_LOCK_ACQUIRED) {
env_mutex_lock(&c->lock);
lock = _ocf_req_lock_rd(req, cb);
lock = _ocf_req_lock_rd(c, req, cb);
env_mutex_unlock(&c->lock);
}
@@ -841,11 +839,10 @@ int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
/* Try to write-lock request without adding waiters. Function should be called
* under read lock, multiple threads may attempt to acquire the lock
* concurrently. */
static int _ocf_req_trylock_wr(struct ocf_request *req)
static int _ocf_req_trylock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_ACQUIRED;
@@ -858,7 +855,7 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
@@ -893,11 +890,10 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
* Write-lock request cache lines. Must be called under cacheline concurrency
* write lock.
*/
static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
static int _ocf_req_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
ocf_cache_line_t line;
int ret = OCF_LOCK_NOT_ACQUIRED;
@@ -917,7 +913,7 @@ static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked);
@@ -946,17 +942,16 @@ err:
return ret;
}
int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
int ocf_req_async_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb)
{
struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line;
int lock;
lock = _ocf_req_trylock_wr(req);
lock = _ocf_req_trylock_wr(c, req);
if (lock != OCF_LOCK_ACQUIRED) {
env_mutex_lock(&c->lock);
lock = _ocf_req_lock_wr(req, cb);
lock = _ocf_req_lock_wr(c, req, cb);
env_mutex_unlock(&c->lock);
}
@@ -967,9 +962,8 @@ int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
/*
*
*/
void ocf_req_unlock_rd(struct ocf_request *req)
void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c, struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
int32_t i;
ocf_cache_line_t line;
@@ -985,7 +979,7 @@ void ocf_req_unlock_rd(struct ocf_request *req)
line = req->map[i].coll_idx;
ENV_BUG_ON(!req->map[i].rd_locked);
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
__unlock_cache_line_rd(c, line);
req->map[i].rd_locked = false;
@@ -995,9 +989,8 @@ void ocf_req_unlock_rd(struct ocf_request *req)
/*
*
*/
void ocf_req_unlock_wr(struct ocf_request *req)
void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c, struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
int32_t i;
ocf_cache_line_t line;
@@ -1013,7 +1006,7 @@ void ocf_req_unlock_wr(struct ocf_request *req)
line = req->map[i].coll_idx;
ENV_BUG_ON(!req->map[i].wr_locked);
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
__unlock_cache_line_wr(c, line);
req->map[i].wr_locked = false;
@@ -1023,9 +1016,8 @@ void ocf_req_unlock_wr(struct ocf_request *req)
/*
*
*/
void ocf_req_unlock(struct ocf_request *req)
void ocf_req_unlock(struct ocf_cache_line_concurrency *c, struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
int32_t i;
ocf_cache_line_t line;
@@ -1039,7 +1031,7 @@ void ocf_req_unlock(struct ocf_request *req)
}
line = req->map[i].coll_idx;
ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
if (req->map[i].rd_locked && req->map[i].wr_locked) {
ENV_BUG();
@@ -1058,11 +1050,9 @@ void ocf_req_unlock(struct ocf_request *req)
/*
*
*/
void ocf_req_unlock_entry(struct ocf_cache *cache,
void ocf_req_unlock_entry(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, uint32_t entry)
{
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
ENV_BUG_ON(req->map[entry].status == LOOKUP_MISS);
if (req->map[entry].rd_locked && req->map[entry].wr_locked) {
@@ -1081,17 +1071,15 @@ void ocf_req_unlock_entry(struct ocf_cache *cache,
/*
*
*/
bool ocf_cache_line_is_used(struct ocf_cache *cache,
bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
ENV_BUG_ON(line >= cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
if (env_atomic_read(&(c->access[line])))
return true;
if (ocf_cache_line_are_waiters(cache, line))
if (ocf_cache_line_are_waiters(c, line))
return true;
else
return false;
@@ -1100,14 +1088,13 @@ bool ocf_cache_line_is_used(struct ocf_cache *cache,
/*
*
*/
bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
bool are;
unsigned long flags = 0;
ENV_BUG_ON(line >= cache->device->collision_table_entries);
ENV_BUG_ON(line >= c->num_clines);
/* Lock waiters list */
__lock_waiters_list(c, line, flags);
@@ -1122,42 +1109,35 @@ bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
/*
*
*/
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache)
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache_line_concurrency *c)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
return env_atomic_read(&c->waiting);
}
bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
bool ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
return __try_lock_rd_idle(c, line);
}
/*
*
*/
void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
void ocf_cache_line_unlock_rd(struct ocf_cache_line_concurrency *c, ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
OCF_DEBUG_RQ(cache, "Cache line = %u", line);
OCF_DEBUG_RQ(c->cache, "Cache line = %u", line);
__unlock_cache_line_rd(c, line);
}
bool ocf_cache_line_try_lock_wr(struct ocf_cache *cache, ocf_cache_line_t line)
bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
return __try_lock_wr(c, line);
}
void ocf_cache_line_unlock_wr(struct ocf_cache *cache, ocf_cache_line_t line)
void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
OCF_DEBUG_RQ(cache, "Cache line = %u", line);
OCF_DEBUG_RQ(c->cache, "Cache line = %u", line);
__unlock_cache_line_wr(c, line);
}

View File

@@ -19,27 +19,31 @@ struct ocf_cache_line_concurrency;
/**
* @brief Initialize OCF cache concurrency module
*
* @param self - cacheline concurrency private data
* @param cache - OCF cache instance
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_cache_line_concurrency_init(struct ocf_cache *cache);
int ocf_cache_line_concurrency_init(struct ocf_cache_line_concurrency **self,
struct ocf_cache *cache);
/**
* @brief De-initialize OCF cache concurrency module
*
* @param cache - OCF cache instance
* @param self - cacheline concurrency private data
*/
void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache);
void ocf_cache_line_concurrency_deinit(
struct ocf_cache_line_concurrency **self);
/**
* @brief Get number of waiting (suspended) OCF requests in due to cache
* overlapping
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
*
* @return Number of suspended OCF requests
*/
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache_line_concurrency *c);
/**
* @brief Return memory footprint consumed by cache concurrency module
@@ -48,7 +52,7 @@ uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);
*
* @return Memory footprint of cache concurrency module
*/
size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache);
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache);
/* async request cacheline lock acquisition callback */
typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
@@ -56,6 +60,7 @@ typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
/**
* @brief Lock OCF request for write access (Lock all cache lines in map info)
*
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param cb - async lock acquisition callback
*
@@ -65,11 +70,13 @@ typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When the lock is acquired, the @cb callback will be called
*/
int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb);
/**
* @brief Lock OCF request for read access (Lock all cache lines in map info)
*
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param cb - async lock acquisition callback
*
@@ -79,28 +86,35 @@ int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When the lock is acquired, the @cb callback will be called
*/
int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb);
/**
* @brief Unlock OCF request from write access
*
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock_wr(struct ocf_request *req);
void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req);
/**
* @brief Unlock OCF request from read access
*
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock_rd(struct ocf_request *req);
void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req);
/**
* @brief Unlock OCF request from read or write access
*
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock(struct ocf_request *req);
void ocf_req_unlock(struct ocf_cache_line_concurrency *c,
struct ocf_request *req);
/**
* @Check if cache line is used.
@@ -116,30 +130,30 @@ void ocf_req_unlock(struct ocf_request *req);
* @retval true - cache line is used
* @retval false - cache line is not used
*/
bool ocf_cache_line_is_used(struct ocf_cache *cache,
bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Check if for specified cache line there are waiters
* on the waiting list
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be checked for waiters
*
* @retval true - there are waiters
* @retval false - No waiters
*/
bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Unlock request map info entry from write or read access.
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param entry - request map entry number
*/
void ocf_req_unlock_entry(struct ocf_cache *cache,
void ocf_req_unlock_entry(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, uint32_t entry);
/**
@@ -148,36 +162,40 @@ void ocf_req_unlock_entry(struct ocf_cache *cache,
* @param cache - OCF cache instance
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
void ocf_cache_line_unlock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Attempt to lock cache line for read
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be checked for waiters
*
* @retval true - read lock successfully acquired
* @retval false - failed to acquire read lock
*/
bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
bool ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Release cache line write lock
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_wr(struct ocf_cache *cache, ocf_cache_line_t line);
void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
/**
* @brief Attempt to lock cache line for write
*
* @param cache - OCF cache instance
* @param c - cacheline concurrency private data
* @param line - Cache line to be checked for waiters
*
* @retval true - write lock successfully acquired
* @retval false - failed to acquire write lock
*/
bool ocf_cache_line_try_lock_wr(struct ocf_cache *cache, ocf_cache_line_t line);
bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line);
#endif /* OCF_CONCURRENCY_H_ */

View File

@@ -9,7 +9,9 @@ int ocf_concurrency_init(struct ocf_cache *cache)
{
int result = 0;
result = ocf_cache_line_concurrency_init(cache);
result = ocf_cache_line_concurrency_init(
&cache->device->concurrency.cache_line,
cache);
if (result)
ocf_concurrency_deinit(cache);
@@ -19,6 +21,8 @@ int ocf_concurrency_init(struct ocf_cache *cache)
void ocf_concurrency_deinit(struct ocf_cache *cache)
{
ocf_cache_line_concurrency_deinit(cache);
ocf_cache_line_concurrency_deinit(
&cache->device->concurrency.cache_line);
}