Move resume callback to async lock function params (refactoring)

This is a step towards common async lock interface in OCF.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
Adam Rutkowski 2019-09-10 15:23:49 -04:00
parent 3ced3fb9db
commit f34cacf150
13 changed files with 84 additions and 98 deletions

View File

@ -35,14 +35,11 @@
#define _WAITERS_LIST_ITEM(cache_line) ((cache_line) % _WAITERS_LIST_ENTRIES) #define _WAITERS_LIST_ITEM(cache_line) ((cache_line) % _WAITERS_LIST_ENTRIES)
typedef void (*__on_lock)(void *ctx, uint32_t ctx_id, ocf_cache_line_t line,
int rw);
struct __waiter { struct __waiter {
ocf_cache_line_t line; ocf_cache_line_t line;
void *ctx; void *ctx;
uint32_t ctx_id; uint32_t ctx_id;
__on_lock on_lock; ocf_req_async_lock_cb cb;
struct list_head item; struct list_head item;
int rw; int rw;
}; };
@ -353,11 +350,37 @@ static inline bool __try_lock_rd2rd(struct ocf_cache_line_concurrency *c,
return true; return true;
} }
/*
 * Callback invoked each time a cache line lock is granted for a request.
 * Records the acquired lock type (read/write) in the request's map entry
 * and, once every line of the request has been locked (lock_remaining
 * drops to zero), resumes the request via the caller-supplied callback.
 */
static void _req_on_lock(void *ctx, ocf_req_async_lock_cb cb,
uint32_t ctx_id, ocf_cache_line_t line, int rw)
{
struct ocf_request *req = ctx;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line;
/* Mark which lock flavor this map entry now holds */
if (rw == OCF_READ)
req->map[ctx_id].rd_locked = true;
else if (rw == OCF_WRITE)
req->map[ctx_id].wr_locked = true;
else
ENV_BUG();
if (env_atomic_dec_return(&req->lock_remaining) == 0) {
/* All cache lines locked, resume request */
OCF_DEBUG_RQ(req, "Resume");
/* cb must be set by the async lock caller — resuming without one is a bug */
ENV_BUG_ON(!cb);
env_atomic_dec(&c->waiting);
cb(req);
}
}
/* /*
* *
*/ */
static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c, static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
const ocf_cache_line_t line, __on_lock on_lock, const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
void *ctx, uint32_t ctx_id) void *ctx, uint32_t ctx_id)
{ {
struct __waiter *waiter; struct __waiter *waiter;
@ -367,8 +390,8 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
if (__try_lock_wr(c, line)) { if (__try_lock_wr(c, line)) {
/* No activity before lock get */ /* No activity before lock get */
if (on_lock) if (cb)
on_lock(ctx, ctx_id, line, OCF_WRITE); _req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
return true; return true;
} }
@ -382,7 +405,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
locked = true; locked = true;
} else { } else {
waiter = NULL; waiter = NULL;
if (on_lock != NULL) { if (cb != NULL) {
/* Need to create waiters and add it into list */ /* Need to create waiters and add it into list */
waiter = env_allocator_new(c->allocator); waiter = env_allocator_new(c->allocator);
} }
@ -391,7 +414,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
waiter->line = line; waiter->line = line;
waiter->ctx = ctx; waiter->ctx = ctx;
waiter->ctx_id = ctx_id; waiter->ctx_id = ctx_id;
waiter->on_lock = on_lock; waiter->cb = cb;
waiter->rw = OCF_WRITE; waiter->rw = OCF_WRITE;
INIT_LIST_HEAD(&waiter->item); INIT_LIST_HEAD(&waiter->item);
@ -403,8 +426,8 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
__unlock_waiters_list(c, line, flags); __unlock_waiters_list(c, line, flags);
if (locked && on_lock) if (locked && cb)
on_lock(ctx, ctx_id, line, OCF_WRITE); _req_on_lock(ctx, cb, ctx_id, line, OCF_WRITE);
return locked || waiting; return locked || waiting;
} }
@ -414,7 +437,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
* In case cache line is locked, attempt to add caller on wait list. * In case cache line is locked, attempt to add caller on wait list.
*/ */
static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c, static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
const ocf_cache_line_t line, __on_lock on_lock, const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
void *ctx, uint32_t ctx_id) void *ctx, uint32_t ctx_id)
{ {
struct __waiter *waiter; struct __waiter *waiter;
@ -424,8 +447,8 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
if (__try_lock_rd_idle(c, line)) { if (__try_lock_rd_idle(c, line)) {
/* No activity before lock get, it is first reader */ /* No activity before lock get, it is first reader */
if (on_lock) if (cb)
on_lock(ctx, ctx_id, line, OCF_READ); _req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
return true; return true;
} }
@ -444,7 +467,7 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
if (!locked) { if (!locked) {
waiter = NULL; waiter = NULL;
if (on_lock) { if (cb) {
/* Need to create waiters and add it into list */ /* Need to create waiters and add it into list */
waiter = env_allocator_new(c->allocator); waiter = env_allocator_new(c->allocator);
} }
@ -453,7 +476,7 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
waiter->line = line; waiter->line = line;
waiter->ctx = ctx; waiter->ctx = ctx;
waiter->ctx_id = ctx_id; waiter->ctx_id = ctx_id;
waiter->on_lock = on_lock; waiter->cb = cb;
waiter->rw = OCF_READ; waiter->rw = OCF_READ;
INIT_LIST_HEAD(&waiter->item); INIT_LIST_HEAD(&waiter->item);
@ -465,8 +488,8 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
__unlock_waiters_list(c, line, flags); __unlock_waiters_list(c, line, flags);
if (locked && on_lock) if (locked && cb)
on_lock(ctx, ctx_id, line, OCF_READ); _req_on_lock(ctx, cb, ctx_id, line, OCF_READ);
return locked || waiting; return locked || waiting;
} }
@ -520,8 +543,8 @@ static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurren
exchanged = false; exchanged = false;
list_del(iter); list_del(iter);
waiter->on_lock(waiter->ctx, waiter->ctx_id, line, _req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id,
waiter->rw); line, waiter->rw);
env_allocator_del(c->allocator, waiter); env_allocator_del(c->allocator, waiter);
} else { } else {
@ -601,7 +624,7 @@ static inline void __unlock_cache_line_wr_common(struct ocf_cache_line_concurren
exchanged = false; exchanged = false;
list_del(iter); list_del(iter);
waiter->on_lock(waiter->ctx, waiter->ctx_id, line, _req_on_lock(waiter->ctx, waiter->cb, waiter->ctx_id, line,
waiter->rw); waiter->rw);
env_allocator_del(c->allocator, waiter); env_allocator_del(c->allocator, waiter);
@ -668,31 +691,6 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurr
__unlock_waiters_list(c, line, flags); __unlock_waiters_list(c, line, flags);
} }
/*
*
*/
static void _req_on_lock(void *ctx, uint32_t ctx_id,
ocf_cache_line_t line, int rw)
{
struct ocf_request *req = ctx;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
if (rw == OCF_READ)
req->map[ctx_id].rd_locked = true;
else if (rw == OCF_WRITE)
req->map[ctx_id].wr_locked = true;
else
ENV_BUG();
if (env_atomic_dec_return(&req->lock_remaining) == 0) {
/* All cache line locked, resume request */
OCF_DEBUG_RQ(req, "Resume");
ENV_BUG_ON(!req->io_if->resume);
env_atomic_dec(&c->waiting);
req->io_if->resume(req);
}
}
/* Try to read-lock request without adding waiters. Function should be called /* Try to read-lock request without adding waiters. Function should be called
* under read lock, multiple threads may attempt to acquire the lock * under read lock, multiple threads may attempt to acquire the lock
* concurrently. */ * concurrently. */
@ -750,13 +748,12 @@ static int _ocf_req_trylock_rd(struct ocf_request *req)
* Read-lock request cache lines. Must be called under cacheline concurrency * Read-lock request cache lines. Must be called under cacheline concurrency
* write lock. * write lock.
*/ */
static int _ocf_req_lock_rd(struct ocf_request *req) static int _ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
{ {
int32_t i; int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line; cache_line;
ocf_cache_line_t line; ocf_cache_line_t line;
__on_lock on_lock = _req_on_lock;
int ret = OCF_LOCK_NOT_ACQUIRED; int ret = OCF_LOCK_NOT_ACQUIRED;
ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
@ -778,7 +775,7 @@ static int _ocf_req_lock_rd(struct ocf_request *req)
ENV_BUG_ON(req->map[i].rd_locked); ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked); ENV_BUG_ON(req->map[i].wr_locked);
if (!__lock_cache_line_rd(c, line, on_lock, req, i)) { if (!__lock_cache_line_rd(c, line, cb, req, i)) {
/* lock not acquired and not added to wait list */ /* lock not acquired and not added to wait list */
ret = -OCF_ERR_NO_MEM; ret = -OCF_ERR_NO_MEM;
goto err; goto err;
@ -804,7 +801,7 @@ err:
} }
int ocf_req_trylock_rd(struct ocf_request *req) int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
{ {
struct ocf_cache_line_concurrency *c = struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line; req->cache->device->concurrency.cache_line;
@ -816,7 +813,7 @@ int ocf_req_trylock_rd(struct ocf_request *req)
if (lock != OCF_LOCK_ACQUIRED) { if (lock != OCF_LOCK_ACQUIRED) {
env_rwlock_write_lock(&c->lock); env_rwlock_write_lock(&c->lock);
lock = _ocf_req_lock_rd(req); lock = _ocf_req_lock_rd(req, cb);
env_rwlock_write_unlock(&c->lock); env_rwlock_write_unlock(&c->lock);
} }
@ -878,17 +875,16 @@ static int _ocf_req_trylock_wr(struct ocf_request *req)
* Write-lock request cache lines. Must be called under cacheline concurrency * Write-lock request cache lines. Must be called under cacheline concurrency
* write lock. * write lock.
*/ */
static int _ocf_req_lock_wr(struct ocf_request *req) static int _ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
{ {
int32_t i; int32_t i;
struct ocf_cache_line_concurrency *c = req->cache->device->concurrency. struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.
cache_line; cache_line;
ocf_cache_line_t line; ocf_cache_line_t line;
__on_lock on_lock = _req_on_lock;
int ret = OCF_LOCK_NOT_ACQUIRED; int ret = OCF_LOCK_NOT_ACQUIRED;
ENV_BUG_ON(env_atomic_read(&req->lock_remaining)); ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
ENV_BUG_ON(!req->io_if->resume); ENV_BUG_ON(!cb);
env_atomic_inc(&c->waiting); env_atomic_inc(&c->waiting);
env_atomic_set(&req->lock_remaining, req->core_line_count); env_atomic_set(&req->lock_remaining, req->core_line_count);
@ -907,7 +903,7 @@ static int _ocf_req_lock_wr(struct ocf_request *req)
ENV_BUG_ON(req->map[i].rd_locked); ENV_BUG_ON(req->map[i].rd_locked);
ENV_BUG_ON(req->map[i].wr_locked); ENV_BUG_ON(req->map[i].wr_locked);
if (!__lock_cache_line_wr(c, line, on_lock, req, i)) { if (!__lock_cache_line_wr(c, line, cb, req, i)) {
/* lock not acquired and not added to wait list */ /* lock not acquired and not added to wait list */
ret = -OCF_ERR_NO_MEM; ret = -OCF_ERR_NO_MEM;
goto err; goto err;
@ -932,7 +928,7 @@ err:
return ret; return ret;
} }
int ocf_req_trylock_wr(struct ocf_request *req) int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
{ {
struct ocf_cache_line_concurrency *c = struct ocf_cache_line_concurrency *c =
req->cache->device->concurrency.cache_line; req->cache->device->concurrency.cache_line;
@ -944,7 +940,7 @@ int ocf_req_trylock_wr(struct ocf_request *req)
if (lock != OCF_LOCK_ACQUIRED) { if (lock != OCF_LOCK_ACQUIRED) {
env_rwlock_write_lock(&c->lock); env_rwlock_write_lock(&c->lock);
lock = _ocf_req_lock_wr(req); lock = _ocf_req_lock_wr(req, cb);
env_rwlock_write_unlock(&c->lock); env_rwlock_write_unlock(&c->lock);
} }

View File

@ -50,19 +50,23 @@ uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);
*/ */
size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache); size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache);
/* async request cacheline lock acquisition callback */
typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
/** /**
* @brief Lock OCF request for WRITE access (Lock all cache lines in map info) * @brief Lock OCF request for WRITE access (Lock all cache lines in map info)
* *
* @note io_if->resume callback has to be set * @note io_if->resume callback has to be set
* *
* @param req - OCF request * @param req - OCF request
* @param cb - async lock acquisition callback
* *
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
* *
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When lock will be acquired io_if->resume be called * added into waiting list. When lock will be acquired io_if->resume be called
*/ */
int ocf_req_trylock_wr(struct ocf_request *req); int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);
/** /**
* @brief Lock OCF request for READ access (Lock all cache lines in map info) * @brief Lock OCF request for READ access (Lock all cache lines in map info)
@ -70,13 +74,14 @@ int ocf_req_trylock_wr(struct ocf_request *req);
* @note io_if->resume callback has to be set * @note io_if->resume callback has to be set
* *
* @param req - OCF request * @param req - OCF request
* @param cb - async lock acquisition callback
* *
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
* *
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added into waiting list. When lock will be acquired io_if->resume be called * added into waiting list. When lock will be acquired io_if->resume be called
*/ */
int ocf_req_trylock_rd(struct ocf_request *req); int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb);
/** /**
* @brief Unlock OCF request from WRITE access * @brief Unlock OCF request from WRITE access

View File

@ -37,8 +37,6 @@ struct ocf_io_if {
int (*write)(struct ocf_request *req); int (*write)(struct ocf_request *req);
void (*resume)(struct ocf_request *req);
const char *name; const char *name;
}; };

View File

@ -22,30 +22,25 @@ static int _ocf_discard_step_do(struct ocf_request *req);
static int _ocf_discard_step(struct ocf_request *req); static int _ocf_discard_step(struct ocf_request *req);
static int _ocf_discard_flush_cache(struct ocf_request *req); static int _ocf_discard_flush_cache(struct ocf_request *req);
static int _ocf_discard_core(struct ocf_request *req); static int _ocf_discard_core(struct ocf_request *req);
static void _ocf_discard_on_resume(struct ocf_request *req);
static const struct ocf_io_if _io_if_discard_step = { static const struct ocf_io_if _io_if_discard_step = {
.read = _ocf_discard_step, .read = _ocf_discard_step,
.write = _ocf_discard_step, .write = _ocf_discard_step,
.resume = _ocf_discard_on_resume,
}; };
static const struct ocf_io_if _io_if_discard_step_resume = { static const struct ocf_io_if _io_if_discard_step_resume = {
.read = _ocf_discard_step_do, .read = _ocf_discard_step_do,
.write = _ocf_discard_step_do, .write = _ocf_discard_step_do,
.resume = _ocf_discard_on_resume,
}; };
static const struct ocf_io_if _io_if_discard_flush_cache = { static const struct ocf_io_if _io_if_discard_flush_cache = {
.read = _ocf_discard_flush_cache, .read = _ocf_discard_flush_cache,
.write = _ocf_discard_flush_cache, .write = _ocf_discard_flush_cache,
.resume = _ocf_discard_on_resume,
}; };
static const struct ocf_io_if _io_if_discard_core = { static const struct ocf_io_if _io_if_discard_core = {
.read = _ocf_discard_core, .read = _ocf_discard_core,
.write = _ocf_discard_core, .write = _ocf_discard_core,
.resume = _ocf_discard_on_resume,
}; };
static void _ocf_discard_complete_req(struct ocf_request *req, int error) static void _ocf_discard_complete_req(struct ocf_request *req, int error)
@ -239,7 +234,7 @@ static int _ocf_discard_step(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) { if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */ /* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
} else { } else {
lock = OCF_LOCK_ACQUIRED; lock = OCF_LOCK_ACQUIRED;
} }

View File

@ -104,7 +104,6 @@ static int _ocf_read_fast_do(struct ocf_request *req)
static const struct ocf_io_if _io_if_read_fast_resume = { static const struct ocf_io_if _io_if_read_fast_resume = {
.read = _ocf_read_fast_do, .read = _ocf_read_fast_do,
.write = _ocf_read_fast_do, .write = _ocf_read_fast_do,
.resume = ocf_engine_on_resume,
}; };
int ocf_read_fast(struct ocf_request *req) int ocf_read_fast(struct ocf_request *req)
@ -129,7 +128,7 @@ int ocf_read_fast(struct ocf_request *req)
hit = ocf_engine_is_hit(req); hit = ocf_engine_is_hit(req);
if (hit) { if (hit) {
ocf_io_start(&req->ioi.io); ocf_io_start(&req->ioi.io);
lock = ocf_req_trylock_rd(req); lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_RD(); OCF_METADATA_UNLOCK_RD();
@ -174,7 +173,6 @@ int ocf_read_fast(struct ocf_request *req)
static const struct ocf_io_if _io_if_write_fast_resume = { static const struct ocf_io_if _io_if_write_fast_resume = {
.read = ocf_write_wb_do, .read = ocf_write_wb_do,
.write = ocf_write_wb_do, .write = ocf_write_wb_do,
.resume = ocf_engine_on_resume,
}; };
int ocf_write_fast(struct ocf_request *req) int ocf_write_fast(struct ocf_request *req)
@ -199,7 +197,7 @@ int ocf_write_fast(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req); mapped = ocf_engine_is_mapped(req);
if (mapped) { if (mapped) {
ocf_io_start(&req->ioi.io); ocf_io_start(&req->ioi.io);
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_RD(); OCF_METADATA_UNLOCK_RD();

View File

@ -99,7 +99,6 @@ int ocf_read_pt_do(struct ocf_request *req)
static const struct ocf_io_if _io_if_pt_resume = { static const struct ocf_io_if _io_if_pt_resume = {
.read = ocf_read_pt_do, .read = ocf_read_pt_do,
.write = ocf_read_pt_do, .write = ocf_read_pt_do,
.resume = ocf_engine_on_resume,
}; };
int ocf_read_pt(struct ocf_request *req) int ocf_read_pt(struct ocf_request *req)
@ -130,7 +129,7 @@ int ocf_read_pt(struct ocf_request *req)
/* There are mapped cache line, /* There are mapped cache line,
* lock request for READ access * lock request for READ access
*/ */
lock = ocf_req_trylock_rd(req); lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} else { } else {
/* No mapped cache lines, no need to get lock */ /* No mapped cache lines, no need to get lock */
lock = OCF_LOCK_ACQUIRED; lock = OCF_LOCK_ACQUIRED;

View File

@ -208,7 +208,6 @@ static int _ocf_read_generic_do(struct ocf_request *req)
static const struct ocf_io_if _io_if_read_generic_resume = { static const struct ocf_io_if _io_if_read_generic_resume = {
.read = _ocf_read_generic_do, .read = _ocf_read_generic_do,
.write = _ocf_read_generic_do, .write = _ocf_read_generic_do,
.resume = ocf_engine_on_resume,
}; };
int ocf_read_generic(struct ocf_request *req) int ocf_read_generic(struct ocf_request *req)
@ -243,13 +242,13 @@ int ocf_read_generic(struct ocf_request *req)
/* Request is fully mapped, no need to call eviction */ /* Request is fully mapped, no need to call eviction */
if (ocf_engine_is_hit(req)) { if (ocf_engine_is_hit(req)) {
/* There is a hit, lock request for READ access */ /* There is a hit, lock request for READ access */
lock = ocf_req_trylock_rd(req); lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} else { } else {
/* All cache line mapped, but some sectors are not valid /* All cache line mapped, but some sectors are not valid
* and cache insert will be performed - lock for * and cache insert will be performed - lock for
* WRITE is required * WRITE is required
*/ */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} }
} }
@ -274,12 +273,14 @@ int ocf_read_generic(struct ocf_request *req)
/* After mapping turns out there is hit, /* After mapping turns out there is hit,
* so lock OCF request for read access * so lock OCF request for read access
*/ */
lock = ocf_req_trylock_rd(req); lock = ocf_req_async_lock_rd(req,
ocf_engine_on_resume);
} else { } else {
/* Miss, new cache lines were mapped, /* Miss, new cache lines were mapped,
* need to lock OCF request for write access * need to lock OCF request for write access
*/ */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req,
ocf_engine_on_resume);
} }
} }
OCF_METADATA_UNLOCK_WR(); OCF_METADATA_UNLOCK_WR();

View File

@ -21,7 +21,6 @@
static const struct ocf_io_if _io_if_wb_resume = { static const struct ocf_io_if _io_if_wb_resume = {
.read = ocf_write_wb_do, .read = ocf_write_wb_do,
.write = ocf_write_wb_do, .write = ocf_write_wb_do,
.resume = ocf_engine_on_resume,
}; };
static void _ocf_write_wb_update_bits(struct ocf_request *req) static void _ocf_write_wb_update_bits(struct ocf_request *req)
@ -190,7 +189,7 @@ int ocf_write_wb(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req); mapped = ocf_engine_is_mapped(req);
if (mapped) { if (mapped) {
/* All cache line are mapped, lock request for WRITE access */ /* All cache line are mapped, lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@ -206,7 +205,7 @@ int ocf_write_wb(struct ocf_request *req)
if (!req->info.mapping_error) { if (!req->info.mapping_error) {
/* Lock request for WRITE access */ /* Lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/

View File

@ -130,7 +130,6 @@ static void _ocf_write_wi_on_resume(struct ocf_request *req)
static const struct ocf_io_if _io_if_wi_resume = { static const struct ocf_io_if _io_if_wi_resume = {
.read = _ocf_write_wi_do, .read = _ocf_write_wi_do,
.write = _ocf_write_wi_do, .write = _ocf_write_wi_do,
.resume = _ocf_write_wi_on_resume,
}; };
int ocf_write_wi(struct ocf_request *req) int ocf_write_wi(struct ocf_request *req)
@ -155,7 +154,7 @@ int ocf_write_wi(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) { if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */ /* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
} else { } else {
lock = OCF_LOCK_ACQUIRED; lock = OCF_LOCK_ACQUIRED;
} }

View File

@ -197,7 +197,6 @@ int ocf_read_wo_do(struct ocf_request *req)
static const struct ocf_io_if _io_if_wo_resume = { static const struct ocf_io_if _io_if_wo_resume = {
.read = ocf_read_wo_do, .read = ocf_read_wo_do,
.write = ocf_read_wo_do, .write = ocf_read_wo_do,
.resume = ocf_engine_on_resume,
}; };
int ocf_read_wo(struct ocf_request *req) int ocf_read_wo(struct ocf_request *req)
@ -224,7 +223,7 @@ int ocf_read_wo(struct ocf_request *req)
/* There are mapped cache lines, /* There are mapped cache lines,
* lock request for READ access * lock request for READ access
*/ */
lock = ocf_req_trylock_rd(req); lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/

View File

@ -158,7 +158,6 @@ static int _ocf_write_wt_do(struct ocf_request *req)
static const struct ocf_io_if _io_if_wt_resume = { static const struct ocf_io_if _io_if_wt_resume = {
.read = _ocf_write_wt_do, .read = _ocf_write_wt_do,
.write = _ocf_write_wt_do, .write = _ocf_write_wt_do,
.resume = ocf_engine_on_resume,
}; };
int ocf_write_wt(struct ocf_request *req) int ocf_write_wt(struct ocf_request *req)
@ -183,7 +182,7 @@ int ocf_write_wt(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req); mapped = ocf_engine_is_mapped(req);
if (mapped) { if (mapped) {
/* All cache line are mapped, lock request for WRITE access */ /* All cache line are mapped, lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@ -199,7 +198,7 @@ int ocf_write_wt(struct ocf_request *req)
if (!req->info.mapping_error) { if (!req->info.mapping_error) {
/* Lock request for WRITE access */ /* Lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
} }
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/

View File

@ -129,7 +129,6 @@ static int _ocf_zero_do(struct ocf_request *req)
static const struct ocf_io_if _io_if_ocf_zero_do = { static const struct ocf_io_if _io_if_ocf_zero_do = {
.read = _ocf_zero_do, .read = _ocf_zero_do,
.write = _ocf_zero_do, .write = _ocf_zero_do,
.resume = ocf_engine_on_resume,
}; };
/** /**
@ -151,7 +150,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
req->io_if = &_io_if_ocf_zero_do; req->io_if = &_io_if_ocf_zero_do;
/* Some cache line are mapped, lock request for WRITE access */ /* Some cache line are mapped, lock request for WRITE access */
lock = ocf_req_trylock_wr(req); lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
if (lock >= 0) { if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED); ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);

View File

@ -197,6 +197,12 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req)
cmpl(master->priv, master->error); cmpl(master->priv, master->error);
} }
/* Async lock acquisition callback for cleaner requests: once all cache
 * line locks are obtained, push the request to the front of the engine
 * queue so cleaning proceeds. */
static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
ocf_engine_push_req_front(req, true);
}
/* /*
* cleaner - Cache line lock, function lock cache lines depends on attributes * cleaner - Cache line lock, function lock cache lines depends on attributes
*/ */
@ -207,7 +213,7 @@ static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
OCF_DEBUG_TRACE(req->cache); OCF_DEBUG_TRACE(req->cache);
return ocf_req_trylock_rd(req); return ocf_req_async_lock_rd(req, _ocf_cleaner_on_resume);
} }
/* /*
@ -697,16 +703,9 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)
return 0; return 0;
} }
static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
ocf_engine_push_req_front(req, true);
}
static const struct ocf_io_if _io_if_fire_cache = { static const struct ocf_io_if _io_if_fire_cache = {
.read = _ocf_cleaner_fire_cache, .read = _ocf_cleaner_fire_cache,
.write = _ocf_cleaner_fire_cache, .write = _ocf_cleaner_fire_cache,
.resume = _ocf_cleaner_on_resume,
}; };
static int _ocf_cleaner_fire(struct ocf_request *req) static int _ocf_cleaner_fire(struct ocf_request *req)