Unify req naming convention (rq -> req)
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
parent 131148adac
commit db92083432
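The rename is purely mechanical: every `rq`-spelled identifier gains the `req` spelling, with signatures and behavior unchanged. As a hypothetical caller sketch (the function example_lock_for_read is illustrative only and not part of this patch; the return values and the resume contract are taken from the headers changed below):

    /* Hypothetical caller sketch -- not part of this patch. Only the
     * identifier spellings change with this commit: ocf_rq_trylock_rd()
     * becomes ocf_req_trylock_rd(), and so on. */
    static int example_lock_for_read(struct ocf_request *req)
    {
            int lock;

            /* req->resume must be set before trying to lock (see the
             * @note in the cache concurrency header below). */
            lock = ocf_req_trylock_rd(req);

            if (lock == OCF_LOCK_ACQUIRED) {
                    /* All cache lines mapped by the request are read-locked;
                     * process the request, then drop the locks. */
                    ocf_req_unlock_rd(req);
                    return 0;
            }

            /* OCF_LOCK_NOT_ACQUIRED: the request was queued on the waiters
             * list and req->resume() is called once the lock is granted. */
            return lock;
    }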
@@ -156,7 +156,7 @@ struct ocf_stats_errors {
  *
  * @param cache Cache instance for each statistics will be collected
  * @param usage Usage statistics
- * @param rq Request statistics
+ * @param req Request statistics
  * @param blocks Blocks statistics
  * @param errors Errors statistics
  *
@@ -165,7 +165,7 @@ struct ocf_stats_errors {
  */
 int ocf_stats_collect_cache(ocf_cache_t cache,
 		struct ocf_stats_usage *usage,
-		struct ocf_stats_requests *rq,
+		struct ocf_stats_requests *req,
 		struct ocf_stats_blocks *blocks,
 		struct ocf_stats_errors *errors);
 
@@ -174,7 +174,7 @@ int ocf_stats_collect_cache(ocf_cache_t cache,
  *
  * @param cache Core for each statistics will be collected
  * @param usage Usage statistics
- * @param rq Request statistics
+ * @param req Request statistics
  * @param blocks Blocks statistics
  * @param errors Errors statistics
  *
@@ -183,7 +183,7 @@ int ocf_stats_collect_cache(ocf_cache_t cache,
  */
 int ocf_stats_collect_core(ocf_core_t core,
 		struct ocf_stats_usage *usage,
-		struct ocf_stats_requests *rq,
+		struct ocf_stats_requests *req,
 		struct ocf_stats_blocks *blocks,
 		struct ocf_stats_errors *errors);
 
@@ -9,7 +9,7 @@
 #include "../metadata/metadata.h"
 #include "../utils/utils_cleaner.h"
 #include "../utils/utils_cache_line.h"
-#include "../utils/utils_rq.h"
+#include "../utils/utils_req.h"
 #include "../cleaning/acp.h"
 #include "../engine/engine_common.h"
 #include "../concurrency/ocf_cache_concurrency.h"
@@ -15,14 +15,14 @@
 #define OCF_DEBUG_TRACE(cache) \
 	ocf_cache_log(cache, log_info, "[Concurrency][Cache] %s\n", __func__)
 
-#define OCF_DEBUG_RQ(rq, format, ...) \
-	ocf_cache_log(rq->cache, log_info, "[Concurrency][Cache][%s] %s - " \
-			format"\n", OCF_READ == (rq)->rw ? "RD" : "WR", \
+#define OCF_DEBUG_RQ(req, format, ...) \
+	ocf_cache_log(req->cache, log_info, "[Concurrency][Cache][%s] %s - " \
+			format"\n", OCF_READ == (req)->rw ? "RD" : "WR", \
 			__func__, ##__VA_ARGS__)
 
 #else
 #define OCF_DEBUG_TRACE(cache)
-#define OCF_DEBUG_RQ(rq, format, ...)
+#define OCF_DEBUG_RQ(req, format, ...)
 #endif
 
 #define OCF_CACHE_LINE_ACCESS_WR INT_MAX
@@ -632,9 +632,9 @@ static inline void __unlock_cache_line_wr(struct ocf_cache_concurrency *c,
  * so need to check lock state under a common lock.
  */
 static inline void __remove_line_from_waiters_list(struct ocf_cache_concurrency *c,
-		struct ocf_request *rq, int i, void *ctx, int rw)
+		struct ocf_request *req, int i, void *ctx, int rw)
 {
-	ocf_cache_line_t line = rq->map[i].coll_idx;
+	ocf_cache_line_t line = req->map[i].coll_idx;
 	uint32_t idx = _WAITERS_LIST_ITEM(line);
 	struct __waiters_list *lst = &c->waiters_lsts[idx];
 	struct list_head *iter, *next;
@@ -643,12 +643,12 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_concurrency
 
 	__lock_waiters_list(c, line, flags);
 
-	if (rw == OCF_READ && rq->map[i].rd_locked) {
+	if (rw == OCF_READ && req->map[i].rd_locked) {
 		__unlock_cache_line_rd_common(c, line);
-		rq->map[i].rd_locked = false;
-	} else if (rw == OCF_WRITE && rq->map[i].wr_locked) {
+		req->map[i].rd_locked = false;
+	} else if (rw == OCF_WRITE && req->map[i].wr_locked) {
 		__unlock_cache_line_wr_common(c, line);
-		rq->map[i].wr_locked = false;
+		req->map[i].wr_locked = false;
 	} else {
 		list_for_each_safe(iter, next, &lst->head) {
 			waiter = list_entry(iter, struct __waiter, item);
@@ -665,17 +665,17 @@ static inline void __remove_line_from_waiters_list(struct ocf_cache_concurrency
 /*
  *
  */
-static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context,
+static int _ocf_req_lock_rd_common(struct ocf_request *req, void *context,
 		__on_lock on_lock)
 {
 	bool locked, waiting;
 	int32_t i;
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 	ocf_cache_line_t line;
 
-	OCF_DEBUG_RQ(rq, "Lock");
+	OCF_DEBUG_RQ(req, "Lock");
 
-	ENV_BUG_ON(env_atomic_read(&rq->lock_remaining));
+	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
 	ENV_BUG_ON(!on_lock);
 
 	/* Try lock request without adding waiters */
@@ -684,25 +684,25 @@ static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context,
 	/* At this point we have many thread that tries get lock for request */
 
 	locked = true;
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
-		ENV_BUG_ON(rq->map[i].rd_locked);
-		ENV_BUG_ON(rq->map[i].wr_locked);
+		line = req->map[i].coll_idx;
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+		ENV_BUG_ON(req->map[i].rd_locked);
+		ENV_BUG_ON(req->map[i].wr_locked);
 
 		if (__lock_cache_line_rd(c, line, NULL, NULL, 0)) {
 			/* cache line locked */
-			rq->map[i].rd_locked = true;
+			req->map[i].rd_locked = true;
 		} else {
 			/* Not possible to lock all request */
 			locked = false;
-			OCF_DEBUG_RQ(rq, "NO Lock, cache line = %u", line);
+			OCF_DEBUG_RQ(req, "NO Lock, cache line = %u", line);
 			break;
 		}
 	}
@@ -711,11 +711,11 @@ static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context,
 	if (!locked) {
 		/* Request is not locked, discard acquired locks */
 		for (; i >= 0; i--) {
-			line = rq->map[i].coll_idx;
+			line = req->map[i].coll_idx;
 
-			if (rq->map[i].rd_locked) {
+			if (req->map[i].rd_locked) {
 				__unlock_rd(c, line);
-				rq->map[i].rd_locked = false;
+				req->map[i].rd_locked = false;
 			}
 		}
 	}
@@ -727,27 +727,27 @@ static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context,
 		return OCF_LOCK_ACQUIRED;
 	}
 
-	env_atomic_set(&rq->lock_remaining, rq->core_line_count);
-	env_atomic_inc(&rq->lock_remaining);
+	env_atomic_set(&req->lock_remaining, req->core_line_count);
+	env_atomic_inc(&req->lock_remaining);
 
 	env_rwlock_write_lock(&c->lock);
 	/* At this point one thread tries to get locks */
 
-	OCF_DEBUG_RQ(rq, "Exclusive");
+	OCF_DEBUG_RQ(req, "Exclusive");
 
 	waiting = true;
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
-			env_atomic_dec(&rq->lock_remaining);
+			env_atomic_dec(&req->lock_remaining);
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
-		ENV_BUG_ON(rq->map[i].rd_locked);
-		ENV_BUG_ON(rq->map[i].wr_locked);
+		line = req->map[i].coll_idx;
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+		ENV_BUG_ON(req->map[i].rd_locked);
+		ENV_BUG_ON(req->map[i].wr_locked);
 
 		if (!__lock_cache_line_rd(c, line, on_lock, context, i)) {
 			/* lock not acquired and not added to wait list */
@@ -758,14 +758,14 @@ static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context,
 
 	if (!waiting) {
 		for (; i >= 0; i--)
-			__remove_line_from_waiters_list(c, rq, i, context, OCF_READ);
+			__remove_line_from_waiters_list(c, req, i, context, OCF_READ);
 	}
 
-	OCF_DEBUG_RQ(rq, "Exclusive END");
+	OCF_DEBUG_RQ(req, "Exclusive END");
 
 	env_rwlock_write_unlock(&c->lock);
 
-	if (env_atomic_dec_return(&rq->lock_remaining) == 0)
+	if (env_atomic_dec_return(&req->lock_remaining) == 0)
 		return OCF_LOCK_ACQUIRED;
 
 	if (waiting) {
@@ -779,59 +779,59 @@ static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context,
 /*
  *
  */
-static void _rq_on_lock(void *ctx, uint32_t ctx_id,
+static void _req_on_lock(void *ctx, uint32_t ctx_id,
 		ocf_cache_line_t line, int rw)
 {
-	struct ocf_request *rq = ctx;
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_request *req = ctx;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 
 	if (rw == OCF_READ)
-		rq->map[ctx_id].rd_locked = true;
+		req->map[ctx_id].rd_locked = true;
 	else if (rw == OCF_WRITE)
-		rq->map[ctx_id].wr_locked = true;
+		req->map[ctx_id].wr_locked = true;
 	else
 		ENV_BUG();
 
-	if (env_atomic_dec_return(&rq->lock_remaining) == 0) {
+	if (env_atomic_dec_return(&req->lock_remaining) == 0) {
 		/* All cache line locked, resume request */
-		OCF_DEBUG_RQ(rq, "Resume");
-		OCF_CHECK_NULL(rq->resume);
+		OCF_DEBUG_RQ(req, "Resume");
+		OCF_CHECK_NULL(req->resume);
 		env_atomic_dec(&c->waiting);
-		rq->resume(rq);
+		req->resume(req);
 	}
 }
 
 /*
  *
  */
-int ocf_rq_trylock_rd(struct ocf_request *rq)
+int ocf_req_trylock_rd(struct ocf_request *req)
 {
-	OCF_CHECK_NULL(rq->resume);
-	return _ocf_rq_lock_rd_common(rq, rq, _rq_on_lock);
+	OCF_CHECK_NULL(req->resume);
+	return _ocf_req_lock_rd_common(req, req, _req_on_lock);
 }
 
 /*
  * Lock wait request context
  */
-struct _rq_wait_context {
-	struct ocf_request *rq;
+struct _req_wait_context {
+	struct ocf_request *req;
 	env_completion cmpl;
 };
 
 /*
  *
  */
-static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context,
+static int _ocf_req_lock_wr_common(struct ocf_request *req, void *context,
 		__on_lock on_lock)
 {
 	bool locked, waiting;
 	int32_t i;
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 	ocf_cache_line_t line;
 
-	OCF_DEBUG_RQ(rq, "Lock");
+	OCF_DEBUG_RQ(req, "Lock");
 
-	ENV_BUG_ON(env_atomic_read(&rq->lock_remaining));
+	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
 
 	/* Try lock request without adding waiters */
 
@@ -839,25 +839,25 @@ static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context,
 	/* At this point many thread that tries getting lock for request */
 
 	locked = true;
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
-		ENV_BUG_ON(rq->map[i].rd_locked);
-		ENV_BUG_ON(rq->map[i].wr_locked);
+		line = req->map[i].coll_idx;
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+		ENV_BUG_ON(req->map[i].rd_locked);
+		ENV_BUG_ON(req->map[i].wr_locked);
 
 		if (__lock_cache_line_wr(c, line, NULL, NULL, 0)) {
 			/* cache line locked */
-			rq->map[i].wr_locked = true;
+			req->map[i].wr_locked = true;
 		} else {
 			/* Not possible to lock all request */
 			locked = false;
-			OCF_DEBUG_RQ(rq, "NO Lock, cache line = %u", line);
+			OCF_DEBUG_RQ(req, "NO Lock, cache line = %u", line);
 			break;
 		}
 	}
@@ -866,11 +866,11 @@ static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context,
 	if (!locked) {
 		/* Request is not locked, discard acquired locks */
 		for (; i >= 0; i--) {
-			line = rq->map[i].coll_idx;
+			line = req->map[i].coll_idx;
 
-			if (rq->map[i].wr_locked) {
+			if (req->map[i].wr_locked) {
 				__unlock_wr(c, line);
-				rq->map[i].wr_locked = false;
+				req->map[i].wr_locked = false;
 			}
 		}
 	}
@@ -882,27 +882,27 @@ static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context,
 		return OCF_LOCK_ACQUIRED;
 	}
 
-	env_atomic_set(&rq->lock_remaining, rq->core_line_count);
-	env_atomic_inc(&rq->lock_remaining);
+	env_atomic_set(&req->lock_remaining, req->core_line_count);
+	env_atomic_inc(&req->lock_remaining);
 
 	env_rwlock_write_lock(&c->lock);
 	/* At this point one thread tires getting locks */
 
-	OCF_DEBUG_RQ(rq, "Exclusive");
+	OCF_DEBUG_RQ(req, "Exclusive");
 
 	waiting = true;
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
-			env_atomic_dec(&rq->lock_remaining);
+			env_atomic_dec(&req->lock_remaining);
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
-		ENV_BUG_ON(rq->map[i].rd_locked);
-		ENV_BUG_ON(rq->map[i].wr_locked);
+		line = req->map[i].coll_idx;
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
+		ENV_BUG_ON(req->map[i].rd_locked);
+		ENV_BUG_ON(req->map[i].wr_locked);
 
 		if (!__lock_cache_line_wr(c, line, on_lock, context, i)) {
 			/* lock not acquired and not added to wait list */
@@ -913,14 +913,14 @@ static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context,
 
 	if (!waiting) {
 		for (; i >= 0; i--)
-			__remove_line_from_waiters_list(c, rq, i, context, OCF_WRITE);
+			__remove_line_from_waiters_list(c, req, i, context, OCF_WRITE);
 	}
 
-	OCF_DEBUG_RQ(rq, "Exclusive END");
+	OCF_DEBUG_RQ(req, "Exclusive END");
 
 	env_rwlock_write_unlock(&c->lock);
 
-	if (env_atomic_dec_return(&rq->lock_remaining) == 0)
+	if (env_atomic_dec_return(&req->lock_remaining) == 0)
 		return OCF_LOCK_ACQUIRED;
 
 	if (waiting) {
@@ -934,97 +934,97 @@ static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context,
 /*
  *
  */
-int ocf_rq_trylock_wr(struct ocf_request *rq)
+int ocf_req_trylock_wr(struct ocf_request *req)
 {
-	OCF_CHECK_NULL(rq->resume);
-	return _ocf_rq_lock_wr_common(rq, rq, _rq_on_lock);
+	OCF_CHECK_NULL(req->resume);
+	return _ocf_req_lock_wr_common(req, req, _req_on_lock);
 }
 
 /*
  *
  */
-void ocf_rq_unlock_rd(struct ocf_request *rq)
+void ocf_req_unlock_rd(struct ocf_request *req)
 {
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 	int32_t i;
 	ocf_cache_line_t line;
 
-	OCF_DEBUG_RQ(rq, "Unlock");
+	OCF_DEBUG_RQ(req, "Unlock");
 
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
+		line = req->map[i].coll_idx;
 
-		ENV_BUG_ON(!rq->map[i].rd_locked);
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
+		ENV_BUG_ON(!req->map[i].rd_locked);
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
 
 		__unlock_cache_line_rd(c, line);
-		rq->map[i].rd_locked = false;
+		req->map[i].rd_locked = false;
 	}
 }
 
 /*
  *
  */
-void ocf_rq_unlock_wr(struct ocf_request *rq)
+void ocf_req_unlock_wr(struct ocf_request *req)
 {
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 	int32_t i;
 	ocf_cache_line_t line;
 
-	OCF_DEBUG_RQ(rq, "Unlock");
+	OCF_DEBUG_RQ(req, "Unlock");
 
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
+		line = req->map[i].coll_idx;
 
-		ENV_BUG_ON(!rq->map[i].wr_locked);
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
+		ENV_BUG_ON(!req->map[i].wr_locked);
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
 
 		__unlock_cache_line_wr(c, line);
-		rq->map[i].wr_locked = false;
+		req->map[i].wr_locked = false;
 	}
 }
 
 /*
  *
  */
-void ocf_rq_unlock(struct ocf_request *rq)
+void ocf_req_unlock(struct ocf_request *req)
 {
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 	int32_t i;
 	ocf_cache_line_t line;
 
-	OCF_DEBUG_RQ(rq, "Unlock");
+	OCF_DEBUG_RQ(req, "Unlock");
 
-	for (i = 0; i < rq->core_line_count; i++) {
+	for (i = 0; i < req->core_line_count; i++) {
 
-		if (rq->map[i].status == LOOKUP_MISS) {
+		if (req->map[i].status == LOOKUP_MISS) {
 			/* MISS nothing to lock */
 			continue;
 		}
 
-		line = rq->map[i].coll_idx;
-		ENV_BUG_ON(line >= rq->cache->device->collision_table_entries);
+		line = req->map[i].coll_idx;
+		ENV_BUG_ON(line >= req->cache->device->collision_table_entries);
 
-		if (rq->map[i].rd_locked && rq->map[i].wr_locked) {
+		if (req->map[i].rd_locked && req->map[i].wr_locked) {
 			ENV_BUG();
-		} else if (rq->map[i].rd_locked) {
+		} else if (req->map[i].rd_locked) {
 			__unlock_cache_line_rd(c, line);
-			rq->map[i].rd_locked = false;
-		} else if (rq->map[i].wr_locked) {
+			req->map[i].rd_locked = false;
+		} else if (req->map[i].wr_locked) {
 			__unlock_cache_line_wr(c, line);
-			rq->map[i].wr_locked = false;
+			req->map[i].wr_locked = false;
 		} else {
 			ENV_BUG();
 		}
@@ -1034,21 +1034,21 @@ void ocf_rq_unlock(struct ocf_request *rq)
 /*
  *
  */
-void ocf_rq_unlock_entry(struct ocf_cache *cache,
-		struct ocf_request *rq, uint32_t entry)
+void ocf_req_unlock_entry(struct ocf_cache *cache,
+		struct ocf_request *req, uint32_t entry)
 {
-	struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache;
+	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
 
-	ENV_BUG_ON(rq->map[entry].status == LOOKUP_MISS);
+	ENV_BUG_ON(req->map[entry].status == LOOKUP_MISS);
 
-	if (rq->map[entry].rd_locked && rq->map[entry].wr_locked) {
+	if (req->map[entry].rd_locked && req->map[entry].wr_locked) {
 		ENV_BUG();
-	} else if (rq->map[entry].rd_locked) {
-		__unlock_cache_line_rd(c, rq->map[entry].coll_idx);
-		rq->map[entry].rd_locked = false;
-	} else if (rq->map[entry].wr_locked) {
-		__unlock_cache_line_wr(c, rq->map[entry].coll_idx);
-		rq->map[entry].wr_locked = false;
+	} else if (req->map[entry].rd_locked) {
+		__unlock_cache_line_rd(c, req->map[entry].coll_idx);
+		req->map[entry].rd_locked = false;
+	} else if (req->map[entry].wr_locked) {
+		__unlock_cache_line_wr(c, req->map[entry].coll_idx);
+		req->map[entry].wr_locked = false;
 	} else {
 		ENV_BUG();
 	}
@@ -7,7 +7,7 @@
 #define OCF_CACHE_CONCURRENCY_H_
 
 /**
- * @file utils_rq.h
+ * @file utils_req.h
  * @brief OCF cache concurrency module
  */
 
@@ -53,66 +53,66 @@ size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache);
 /**
  * @brief Lock OCF request for WRITE access (Lock all cache lines in map info)
  *
- * @note rq->resume callback has to be set
+ * @note req->resume callback has to be set
  *
- * @param rq - OCF request
+ * @param req - OCF request
  *
  * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
  *
  * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
- * added into waiting list. When lock will be acquired rq->resume be called
+ * added into waiting list. When lock will be acquired req->resume be called
  */
-int ocf_rq_trylock_wr(struct ocf_request *rq);
+int ocf_req_trylock_wr(struct ocf_request *req);
 
 /**
  * @brief Try complete lock of OCF request for WRITE access (Lock cache lines
  * that marked as invalid)
  *
- * @param rq - OCF request
+ * @param req - OCF request
  *
  * @note If request not locked it will be added into waiting list
  *
  * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
  *
 * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
- * added into waiting list. When lock will be acquired rq->resume be called
+ * added into waiting list. When lock will be acquired req->resume be called
  */
-int ocf_rq_retrylock_wr(struct ocf_request *rq);
+int ocf_req_retrylock_wr(struct ocf_request *req);
 
 /**
  * @brief Lock OCF request for READ access (Lock all cache lines in map info)
  *
- * @note rq->resume callback has to be set
+ * @note req->resume callback has to be set
  *
- * @param rq - OCF request
+ * @param req - OCF request
  *
  * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
  *
 * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
- * added into waiting list. When lock will be acquired rq->resume be called
+ * added into waiting list. When lock will be acquired req->resume be called
  */
-int ocf_rq_trylock_rd(struct ocf_request *rq);
+int ocf_req_trylock_rd(struct ocf_request *req);
 
 /**
  * @brief Unlock OCF request from WRITE access
  *
- * @param rq - OCF request
+ * @param req - OCF request
  */
-void ocf_rq_unlock_wr(struct ocf_request *rq);
+void ocf_req_unlock_wr(struct ocf_request *req);
 
 /**
  * @brief Unlock OCF request from READ access
  *
- * @param rq - OCF request
+ * @param req - OCF request
  */
-void ocf_rq_unlock_rd(struct ocf_request *rq);
+void ocf_req_unlock_rd(struct ocf_request *req);
 
 /**
  * @brief Unlock OCF request from READ or WRITE access
  *
- * @param rq - OCF request
+ * @param req - OCF request
  */
-void ocf_rq_unlock(struct ocf_request *rq);
+void ocf_req_unlock(struct ocf_request *req);
 
 /**
  * @Check if cache line is used.
@@ -148,11 +148,11 @@ bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
  * @brief un_lock request map info entry from from WRITE or READ access.
  *
  * @param cache - OCF cache instance
- * @param rq - OCF request
+ * @param req - OCF request
  * @param entry - request map entry number
  */
-void ocf_rq_unlock_entry(struct ocf_cache *cache,
-		struct ocf_request *rq, uint32_t entry);
+void ocf_req_unlock_entry(struct ocf_cache *cache,
+		struct ocf_request *req, uint32_t entry);
 
 /**
  * @brief Release cache line read lock
@@ -9,7 +9,7 @@
 #include "../ocf_cache_priv.h"
 
 /**
- * @file utils_rq.h
+ * @file utils_req.h
  * @brief OCF concurrency
  */
 
@@ -20,7 +20,7 @@
 #include "engine_d2c.h"
 #include "engine_ops.h"
 #include "../utils/utils_part.h"
-#include "../utils/utils_rq.h"
+#include "../utils/utils_req.h"
 #include "../metadata/metadata.h"
 #include "../layer_space_management.h"
 
@@ -106,11 +106,11 @@ const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
 	return cache_mode_io_if_map[req_cache_mode];
 }
 
-struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
+struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
 		struct ocf_queue *q)
 {
 	unsigned long lock_flags;
-	struct ocf_request *rq;
+	struct ocf_request *req;
 
 	OCF_CHECK_NULL(q);
 
@@ -125,22 +125,22 @@ struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
 	}
 
 	/* Get the first request and remove it from the list */
-	rq = list_first_entry(&q->io_list, struct ocf_request, list);
+	req = list_first_entry(&q->io_list, struct ocf_request, list);
 
 	env_atomic_dec(&q->io_no);
-	list_del(&rq->list);
+	list_del(&req->list);
 
 	/* UNLOCK */
 	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
 
-	OCF_CHECK_NULL(rq);
+	OCF_CHECK_NULL(req);
 
-	if (ocf_rq_alloc_map(rq)) {
-		rq->complete(rq, rq->error);
+	if (ocf_req_alloc_map(req)) {
+		req->complete(req, req->error);
 		return NULL;
 	}
 
-	return rq;
+	return req;
 }
 
 bool ocf_fallback_pt_is_on(ocf_cache_t cache)
@@ -218,7 +218,7 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
 {
 	ocf_cache_mode_t mode;
 
-	if (cache->pt_unaligned_io && !ocf_rq_is_4k(io->addr, io->bytes))
+	if (cache->pt_unaligned_io && !ocf_req_is_4k(io->addr, io->bytes))
 		return ocf_cache_mode_pt;
 
 	mode = ocf_part_get_cache_mode(cache,
@@ -239,27 +239,27 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
 	return mode;
 }
 
-int ocf_engine_hndl_rq(struct ocf_request *rq,
+int ocf_engine_hndl_req(struct ocf_request *req,
 		ocf_req_cache_mode_t req_cache_mode)
 {
-	ocf_cache_t cache = rq->cache;
+	ocf_cache_t cache = req->cache;
 
 	OCF_CHECK_NULL(cache);
 
-	rq->io_if = ocf_get_io_if(req_cache_mode);
-	if (!rq->io_if)
+	req->io_if = ocf_get_io_if(req_cache_mode);
+	if (!req->io_if)
 		return -EINVAL;
 
 	/* Till OCF engine is not synchronous fully need to push OCF request
 	 * to into OCF workers
 	 */
 
-	ocf_engine_push_rq_back(rq, true);
+	ocf_engine_push_req_back(req, true);
 
 	return 0;
 }
 
-int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
+int ocf_engine_hndl_fast_req(struct ocf_request *req,
 		ocf_req_cache_mode_t req_cache_mode)
 {
 	const struct ocf_io_if *io_if;
@@ -268,47 +268,47 @@ int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
 	if (!io_if)
 		return -EINVAL;
 
-	switch (rq->rw) {
+	switch (req->rw) {
 	case OCF_READ:
-		return io_if->read(rq);
+		return io_if->read(req);
 	case OCF_WRITE:
-		return io_if->write(rq);
+		return io_if->write(req);
 	default:
 		return OCF_FAST_PATH_NO;
 	}
 }
 
-static void ocf_engine_hndl_2dc_rq(struct ocf_request *rq)
+static void ocf_engine_hndl_2dc_req(struct ocf_request *req)
 {
-	if (OCF_READ == rq->rw)
-		IO_IFS[OCF_IO_D2C_IF].read(rq);
-	else if (OCF_WRITE == rq->rw)
-		IO_IFS[OCF_IO_D2C_IF].write(rq);
+	if (OCF_READ == req->rw)
+		IO_IFS[OCF_IO_D2C_IF].read(req);
+	else if (OCF_WRITE == req->rw)
+		IO_IFS[OCF_IO_D2C_IF].write(req);
 	else
 		ENV_BUG();
 }
 
-void ocf_engine_hndl_discard_rq(struct ocf_request *rq)
+void ocf_engine_hndl_discard_req(struct ocf_request *req)
 {
-	if (rq->d2c) {
-		ocf_engine_hndl_2dc_rq(rq);
+	if (req->d2c) {
+		ocf_engine_hndl_2dc_req(req);
 		return;
 	}
 
-	if (OCF_READ == rq->rw)
-		IO_IFS[OCF_IO_DISCARD_IF].read(rq);
-	else if (OCF_WRITE == rq->rw)
-		IO_IFS[OCF_IO_DISCARD_IF].write(rq);
+	if (OCF_READ == req->rw)
+		IO_IFS[OCF_IO_DISCARD_IF].read(req);
+	else if (OCF_WRITE == req->rw)
+		IO_IFS[OCF_IO_DISCARD_IF].write(req);
 	else
 		ENV_BUG();
 }
 
-void ocf_engine_hndl_ops_rq(struct ocf_request *rq)
+void ocf_engine_hndl_ops_req(struct ocf_request *req)
 {
-	if (rq->d2c)
-		rq->io_if = &IO_IFS[OCF_IO_D2C_IF];
+	if (req->d2c)
+		req->io_if = &IO_IFS[OCF_IO_D2C_IF];
 	else
-		rq->io_if = &IO_IFS[OCF_IO_OPS_IF];
+		req->io_if = &IO_IFS[OCF_IO_OPS_IF];
 
-	ocf_engine_push_rq_back(rq, true);
+	ocf_engine_push_req_back(req, true);
 }
@@ -63,20 +63,20 @@ bool ocf_fallback_pt_is_on(ocf_cache_t cache);
 bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
 		uint64_t bytes);
 
-struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
+struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
 		struct ocf_queue *q);
 
-int ocf_engine_hndl_rq(struct ocf_request *rq,
+int ocf_engine_hndl_req(struct ocf_request *req,
 		ocf_req_cache_mode_t req_cache_mode);
 
 #define OCF_FAST_PATH_YES 7
 #define OCF_FAST_PATH_NO 13
 
-int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
+int ocf_engine_hndl_fast_req(struct ocf_request *req,
 		ocf_req_cache_mode_t req_cache_mode);
 
-void ocf_engine_hndl_discard_rq(struct ocf_request *rq);
+void ocf_engine_hndl_discard_req(struct ocf_request *req);
 
-void ocf_engine_hndl_ops_rq(struct ocf_request *rq);
+void ocf_engine_hndl_ops_req(struct ocf_request *req);
 
 #endif
@@ -10,7 +10,7 @@
 #include "engine_inv.h"
 #include "engine_common.h"
 #include "cache_engine.h"
-#include "../utils/utils_rq.h"
+#include "../utils/utils_req.h"
 #include "../utils/utils_io.h"
 #include "../concurrency/ocf_concurrency.h"
@@ -37,56 +37,56 @@ static inline void backfill_queue_inc_block(struct ocf_cache *cache)
 		env_atomic_set(&cache->pending_read_misses_list_blocked, 1);
 }
 
-static void _ocf_backfill_do_io(struct ocf_request *rq, int error)
+static void _ocf_backfill_do_io(struct ocf_request *req, int error)
 {
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 
 	if (error)
-		rq->error = error;
+		req->error = error;
 
-	if (rq->error)
-		inc_fallback_pt_error_counter(rq->cache);
+	if (req->error)
+		inc_fallback_pt_error_counter(req->cache);
 
 	/* Handle callback-caller race to let only one of the two complete the
 	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
 	 */
-	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
+	if (env_atomic_dec_return(&req->req_remaining) == 0) {
 		/* We must free the pages we have allocated */
-		ctx_data_secure_erase(cache->owner, rq->data);
-		ctx_data_munlock(cache->owner, rq->data);
-		ctx_data_free(cache->owner, rq->data);
-		rq->data = NULL;
+		ctx_data_secure_erase(cache->owner, req->data);
+		ctx_data_munlock(cache->owner, req->data);
+		ctx_data_free(cache->owner, req->data);
+		req->data = NULL;
 
-		if (rq->error) {
-			env_atomic_inc(&cache->core_obj[rq->core_id].
+		if (req->error) {
+			env_atomic_inc(&cache->core_obj[req->core_id].
 					counters->cache_errors.write);
-			ocf_engine_invalidate(rq);
+			ocf_engine_invalidate(req);
 		} else {
-			ocf_rq_unlock(rq);
+			ocf_req_unlock(req);
 
 			/* always free the request at the last point
 			 * of the completion path
 			 */
-			ocf_rq_put(rq);
+			ocf_req_put(req);
 		}
 	}
 }
 
-static int _ocf_backfill_do(struct ocf_request *rq)
+static int _ocf_backfill_do(struct ocf_request *req)
 {
 	unsigned int reqs_to_issue;
 
-	backfill_queue_dec_unblock(rq->cache);
+	backfill_queue_dec_unblock(req->cache);
 
-	reqs_to_issue = ocf_engine_io_count(rq);
+	reqs_to_issue = ocf_engine_io_count(req);
 
 	/* There will be #reqs_to_issue completions */
-	env_atomic_set(&rq->req_remaining, reqs_to_issue);
+	env_atomic_set(&req->req_remaining, reqs_to_issue);
 
-	rq->data = rq->cp_data;
+	req->data = req->cp_data;
 
-	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_WRITE, reqs_to_issue,
+	ocf_submit_cache_reqs(req->cache, req->map, req, OCF_WRITE, reqs_to_issue,
 			_ocf_backfill_do_io);
 
 	return 0;
@@ -97,8 +97,8 @@ static const struct ocf_io_if _io_if_backfill = {
 	.write = _ocf_backfill_do,
 };
 
-void ocf_engine_backfill(struct ocf_request *rq)
+void ocf_engine_backfill(struct ocf_request *req)
 {
-	backfill_queue_inc_block(rq->cache);
-	ocf_engine_push_rq_front_if(rq, &_io_if_backfill, true);
+	backfill_queue_inc_block(req->cache);
+	ocf_engine_push_req_front_if(req, &_io_if_backfill, true);
 }
@@ -6,6 +6,6 @@
 #ifndef ENGINE_BF_H_
 #define ENGINE_BF_H_
 
-void ocf_engine_backfill(struct ocf_request *rq);
+void ocf_engine_backfill(struct ocf_request *req);
 
 #endif /* ENGINE_BF_H_ */
@@ -11,22 +11,22 @@
 #define OCF_ENGINE_DEBUG_IO_NAME "common"
 #include "engine_debug.h"
 #include "../utils/utils_cache_line.h"
-#include "../utils/utils_rq.h"
+#include "../utils/utils_req.h"
 #include "../utils/utils_cleaner.h"
 #include "../metadata/metadata.h"
 #include "../layer_space_management.h"
 
-void ocf_engine_error(struct ocf_request *rq,
+void ocf_engine_error(struct ocf_request *req,
 		bool stop_cache, const char *msg)
 {
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 
 	if (stop_cache)
 		env_bit_clear(ocf_cache_state_running, &cache->cache_state);
 
-	ocf_core_log(&cache->core_obj[rq->core_id], log_err,
+	ocf_core_log(&cache->core_obj[req->core_id], log_err,
 			"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
-			BYTES_TO_SECTORS(rq->byte_position), rq->byte_length);
+			BYTES_TO_SECTORS(req->byte_position), req->byte_length);
 }
 
 void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
@@ -85,21 +85,21 @@ static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
 	return -1;
 }
 
-void ocf_engine_update_rq_info(struct ocf_cache *cache,
-		struct ocf_request *rq, uint32_t entry)
+void ocf_engine_update_req_info(struct ocf_cache *cache,
+		struct ocf_request *req, uint32_t entry)
 {
 	uint8_t start_sector = 0;
 	uint8_t end_sector = ocf_line_end_sector(cache);
-	struct ocf_map_info *_entry = &(rq->map[entry]);
+	struct ocf_map_info *_entry = &(req->map[entry]);
 
 	if (entry == 0) {
-		start_sector = BYTES_TO_SECTORS(rq->byte_position)
+		start_sector = BYTES_TO_SECTORS(req->byte_position)
 				% ocf_line_sectors(cache);
 	}
 
-	if (entry == rq->core_line_count - 1) {
-		end_sector = BYTES_TO_SECTORS(rq->byte_position +
-				rq->byte_length - 1)% ocf_line_sectors(cache);
+	if (entry == req->core_line_count - 1) {
+		end_sector = BYTES_TO_SECTORS(req->byte_position +
+				req->byte_length - 1)% ocf_line_sectors(cache);
 	}
 
 	/* Handle return value */
@@ -107,31 +107,31 @@ void ocf_engine_update_rq_info(struct ocf_cache *cache,
 	case LOOKUP_HIT:
 		if (metadata_test_valid_sec(cache, _entry->coll_idx,
 				start_sector, end_sector)) {
-			rq->info.hit_no++;
+			req->info.hit_no++;
 		} else {
-			rq->info.invalid_no++;
+			req->info.invalid_no++;
 		}
 
 		/* Check request is dirty */
 		if (metadata_test_dirty(cache, _entry->coll_idx)) {
-			rq->info.dirty_any++;
+			req->info.dirty_any++;
 
 			/* Check if cache line is fully dirty */
 			if (metadata_test_dirty_all(cache, _entry->coll_idx))
-				rq->info.dirty_all++;
+				req->info.dirty_all++;
 		}
 
-		if (rq->part_id != ocf_metadata_get_partition_id(cache,
+		if (req->part_id != ocf_metadata_get_partition_id(cache,
 				_entry->coll_idx)) {
 			/*
 			 * Need to move this cache line into other partition
 			 */
-			_entry->re_part = rq->info.re_part = true;
+			_entry->re_part = req->info.re_part = true;
 		}
 
 		break;
 	case LOOKUP_MISS:
-		rq->info.seq_req = false;
+		req->info.seq_req = false;
 		break;
 	case LOOKUP_MAPPED:
 		break;
@@ -141,39 +141,39 @@ void ocf_engine_update_rq_info(struct ocf_cache *cache,
 	}
 
 	/* Check if cache hit is sequential */
-	if (rq->info.seq_req && entry) {
+	if (req->info.seq_req && entry) {
 		if (ocf_metadata_map_lg2phy(cache,
-				(rq->map[entry - 1].coll_idx)) + 1 !=
+				(req->map[entry - 1].coll_idx)) + 1 !=
 				ocf_metadata_map_lg2phy(cache,
 				_entry->coll_idx)) {
-			rq->info.seq_req = false;
+			req->info.seq_req = false;
 		}
 	}
 }
 
-void ocf_engine_traverse(struct ocf_request *rq)
+void ocf_engine_traverse(struct ocf_request *req)
 {
 	uint32_t i;
 	uint64_t core_line;
 
-	struct ocf_cache *cache = rq->cache;
-	ocf_core_id_t core_id = rq->core_id;
+	struct ocf_cache *cache = req->cache;
+	ocf_core_id_t core_id = req->core_id;
 
-	OCF_DEBUG_TRACE(rq->cache);
+	OCF_DEBUG_TRACE(req->cache);
 
-	ocf_rq_clear_info(rq);
-	rq->info.seq_req = true;
+	ocf_req_clear_info(req);
+	req->info.seq_req = true;
 
-	for (i = 0, core_line = rq->core_line_first;
-			core_line <= rq->core_line_last; core_line++, i++) {
+	for (i = 0, core_line = req->core_line_first;
+			core_line <= req->core_line_last; core_line++, i++) {
 
-		struct ocf_map_info *entry = &(rq->map[i]);
+		struct ocf_map_info *entry = &(req->map[i]);
 
 		ocf_engine_lookup_map_entry(cache, entry, core_id,
 				core_line);
 
 		if (entry->status != LOOKUP_HIT) {
-			rq->info.seq_req = false;
+			req->info.seq_req = false;
 			/* There is miss then lookup for next map entry */
 			OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
 					entry->core_line);
@@ -186,40 +186,40 @@ void ocf_engine_traverse(struct ocf_request *rq)
 		/* Update eviction (LRU) */
 		ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
 
-		ocf_engine_update_rq_info(cache, rq, i);
+		ocf_engine_update_req_info(cache, req, i);
 	}
 
-	OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
+	OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
 			"Yes" : "No");
 }
 
-int ocf_engine_check(struct ocf_request *rq)
+int ocf_engine_check(struct ocf_request *req)
 {
 	int result = 0;
 	uint32_t i;
 	uint64_t core_line;
 
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 
-	OCF_DEBUG_TRACE(rq->cache);
+	OCF_DEBUG_TRACE(req->cache);
 
-	ocf_rq_clear_info(rq);
-	rq->info.seq_req = true;
+	ocf_req_clear_info(req);
+	req->info.seq_req = true;
 
-	for (i = 0, core_line = rq->core_line_first;
-			core_line <= rq->core_line_last; core_line++, i++) {
+	for (i = 0, core_line = req->core_line_first;
+			core_line <= req->core_line_last; core_line++, i++) {
 
-		struct ocf_map_info *entry = &(rq->map[i]);
+		struct ocf_map_info *entry = &(req->map[i]);
 
 		if (entry->status == LOOKUP_MISS) {
-			rq->info.seq_req = false;
+			req->info.seq_req = false;
 			continue;
 		}
 
-		if (_ocf_engine_check_map_entry(cache, entry, rq->core_id)) {
+		if (_ocf_engine_check_map_entry(cache, entry, req->core_id)) {
 			/* Mapping is invalid */
 			entry->invalid = true;
-			rq->info.seq_req = false;
+			req->info.seq_req = false;
 
 			OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
 					entry->coll_idx);
@@ -231,26 +231,26 @@ int ocf_engine_check(struct ocf_request *rq)
 			OCF_DEBUG_PARAM(cache, "Valid, Cache line %u",
 					entry->coll_idx);
 
-			ocf_engine_update_rq_info(cache, rq, i);
+			ocf_engine_update_req_info(cache, req, i);
 		}
 	}
 
-	OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
+	OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
 			"Yes" : "No");
 
 	return result;
 }
 
-static void ocf_engine_map_cache_line(struct ocf_request *rq,
+static void ocf_engine_map_cache_line(struct ocf_request *req,
 		uint64_t core_line, unsigned int hash_index,
 		ocf_cache_line_t *cache_line)
 {
-	struct ocf_cache *cache = rq->cache;
-	ocf_part_id_t part_id = rq->part_id;
+	struct ocf_cache *cache = req->cache;
+	ocf_part_id_t part_id = req->part_id;
 	ocf_cleaning_t clean_policy_type;
 
 	if (cache->device->freelist_part->curr_size == 0) {
-		rq->info.eviction_error = 1;
+		req->info.eviction_error = 1;
 		return;
 	}
@@ -265,7 +265,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *rq,
 	ocf_metadata_add_to_partition(cache, part_id, *cache_line);
 
 	/* Add the block to the corresponding collision list */
-	ocf_metadata_add_to_collision(cache, rq->core_id, core_line, hash_index,
+	ocf_metadata_add_to_collision(cache, req->core_id, core_line, hash_index,
 			*cache_line);
 
 	ocf_eviction_init_cache_line(cache, *cache_line, part_id);
@@ -284,13 +284,13 @@ static void ocf_engine_map_cache_line(struct ocf_request *rq,
 }
 
 static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
-		struct ocf_request *rq)
+		struct ocf_request *req)
 {
 	uint32_t i;
 	struct ocf_map_info *entry;
 
-	for (i = 0; i < rq->core_line_count; i++) {
-		entry = &(rq->map[i]);
+	for (i = 0; i < req->core_line_count; i++) {
+		entry = &(req->map[i]);
 
 		switch (entry->status) {
 		case LOOKUP_HIT:
@@ -298,7 +298,7 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
 			break;
 
 		case LOOKUP_MAPPED:
-			OCF_DEBUG_RQ(rq, "Canceling cache line %u",
+			OCF_DEBUG_RQ(req, "Canceling cache line %u",
 					entry->coll_idx);
 			set_cache_line_invalid_no_flush(cache, 0,
 					ocf_line_end_sector(cache),
@@ -312,83 +312,83 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
 	}
 }
 
-void ocf_engine_map(struct ocf_request *rq)
+void ocf_engine_map(struct ocf_request *req)
 {
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 	uint32_t i;
 	struct ocf_map_info *entry;
 	uint64_t core_line;
 	int status = LOOKUP_MAPPED;
-	ocf_core_id_t core_id = rq->core_id;
+	ocf_core_id_t core_id = req->core_id;
 
-	if (ocf_engine_unmapped_count(rq))
-		status = space_managment_evict_do(cache, rq,
-				ocf_engine_unmapped_count(rq));
+	if (ocf_engine_unmapped_count(req))
+		status = space_managment_evict_do(cache, req,
+				ocf_engine_unmapped_count(req));
 
-	if (rq->info.eviction_error)
+	if (req->info.eviction_error)
 		return;
 
-	ocf_rq_clear_info(rq);
-	rq->info.seq_req = true;
+	ocf_req_clear_info(req);
+	req->info.seq_req = true;
 
-	OCF_DEBUG_TRACE(rq->cache);
+	OCF_DEBUG_TRACE(req->cache);
 
-	for (i = 0, core_line = rq->core_line_first;
-			core_line <= rq->core_line_last; core_line++, i++) {
-		entry = &(rq->map[i]);
+	for (i = 0, core_line = req->core_line_first;
+			core_line <= req->core_line_last; core_line++, i++) {
+		entry = &(req->map[i]);
 
 		ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);
 
 		if (entry->status != LOOKUP_HIT) {
-			ocf_engine_map_cache_line(rq, entry->core_line,
+			ocf_engine_map_cache_line(req, entry->core_line,
 					entry->hash_key, &entry->coll_idx);
 
-			if (rq->info.eviction_error) {
+			if (req->info.eviction_error) {
 				/*
 				 * Eviction error (mapping error), need to
 				 * clean, return and do pass through
 				 */
-				OCF_DEBUG_RQ(rq, "Eviction ERROR when mapping");
-				ocf_engine_map_hndl_error(cache, rq);
+				OCF_DEBUG_RQ(req, "Eviction ERROR when mapping");
+				ocf_engine_map_hndl_error(cache, req);
 				break;
 			}
 
 			entry->status = status;
 		}
 
-		OCF_DEBUG_PARAM(rq->cache,
+		OCF_DEBUG_PARAM(req->cache,
 				"%s, cache line %u, core line = %llu",
 				entry->status == LOOKUP_HIT ? "Hit" : "Map",
 				entry->coll_idx, entry->core_line);
 
-		ocf_engine_update_rq_info(cache, rq, i);
+		ocf_engine_update_req_info(cache, req, i);
 
 	}
 
-	OCF_DEBUG_PARAM(rq->cache, "Sequential - %s", rq->info.seq_req ?
+	OCF_DEBUG_PARAM(req->cache, "Sequential - %s", req->info.seq_req ?
 			"Yes" : "No");
 }
 
 static void _ocf_engine_clean_end(void *private_data, int error)
 {
-	struct ocf_request *rq = private_data;
+	struct ocf_request *req = private_data;
 
 	if (error) {
-		OCF_DEBUG_RQ(rq, "Cleaning ERROR");
-		rq->error |= error;
+		OCF_DEBUG_RQ(req, "Cleaning ERROR");
+		req->error |= error;
 
 		/* End request and do not processing */
-		ocf_rq_unlock(rq);
+		ocf_req_unlock(req);
 
 		/* Complete request */
-		rq->complete(rq, error);
+		req->complete(req, error);
 
 		/* Release OCF request */
-		ocf_rq_put(rq);
+		ocf_req_put(req);
 	} else {
-		rq->info.dirty_any = 0;
-		rq->info.dirty_all = 0;
-		ocf_engine_push_rq_front(rq, true);
+		req->info.dirty_any = 0;
+		req->info.dirty_all = 0;
+		ocf_engine_push_req_front(req, true);
 	}
 }
@@ -396,12 +396,12 @@ static int _ocf_engine_clean_getter(struct ocf_cache *cache,
 		void *getter_context, uint32_t item, ocf_cache_line_t *line)
 {
 	struct ocf_cleaner_attribs *attribs = getter_context;
-	struct ocf_request *rq = attribs->cmpl_context;
+	struct ocf_request *req = attribs->cmpl_context;
 
-	for (; attribs->getter_item < rq->core_line_count;
+	for (; attribs->getter_item < req->core_line_count;
 			attribs->getter_item++) {
 
-		struct ocf_map_info *entry = &rq->map[attribs->getter_item];
+		struct ocf_map_info *entry = &req->map[attribs->getter_item];
 
 		if (entry->status != LOOKUP_HIT)
 			continue;
@@ -418,53 +418,53 @@ static int _ocf_engine_clean_getter(struct ocf_cache *cache,
 	return -1;
 }
 
-void ocf_engine_clean(struct ocf_request *rq)
+void ocf_engine_clean(struct ocf_request *req)
 {
 	/* Initialize attributes for cleaner */
 	struct ocf_cleaner_attribs attribs = {
 		.cache_line_lock = false,
 
-		.cmpl_context = rq,
+		.cmpl_context = req,
 		.cmpl_fn = _ocf_engine_clean_end,
 
 		.getter = _ocf_engine_clean_getter,
 		.getter_context = &attribs,
 		.getter_item = 0,
 
-		.count = rq->info.dirty_any,
-		.io_queue = rq->io_queue
+		.count = req->info.dirty_any,
+		.io_queue = req->io_queue
 	};
 
 	/* Start cleaning */
-	ocf_cleaner_fire(rq->cache, &attribs);
+	ocf_cleaner_fire(req->cache, &attribs);
 }
 
-void ocf_engine_update_block_stats(struct ocf_request *rq)
+void ocf_engine_update_block_stats(struct ocf_request *req)
 {
-	struct ocf_cache *cache = rq->cache;
-	ocf_core_id_t core_id = rq->core_id;
-	ocf_part_id_t part_id = rq->part_id;
+	struct ocf_cache *cache = req->cache;
+	ocf_core_id_t core_id = req->core_id;
+	ocf_part_id_t part_id = req->part_id;
 	struct ocf_counters_block *blocks;
 
 	blocks = &cache->core_obj[core_id].counters->
 			part_counters[part_id].blocks;
 
-	if (rq->rw == OCF_READ)
-		env_atomic64_add(rq->byte_length, &blocks->read_bytes);
-	else if (rq->rw == OCF_WRITE)
-		env_atomic64_add(rq->byte_length, &blocks->write_bytes);
+	if (req->rw == OCF_READ)
+		env_atomic64_add(req->byte_length, &blocks->read_bytes);
+	else if (req->rw == OCF_WRITE)
+		env_atomic64_add(req->byte_length, &blocks->write_bytes);
 	else
 		ENV_BUG();
 }
 
-void ocf_engine_update_request_stats(struct ocf_request *rq)
+void ocf_engine_update_request_stats(struct ocf_request *req)
 {
-	struct ocf_cache *cache = rq->cache;
-	ocf_core_id_t core_id = rq->core_id;
-	ocf_part_id_t part_id = rq->part_id;
+	struct ocf_cache *cache = req->cache;
+	ocf_core_id_t core_id = req->core_id;
+	ocf_part_id_t part_id = req->part_id;
 	struct ocf_counters_req *reqs;
 
-	switch (rq->rw) {
+	switch (req->rw) {
 	case OCF_READ:
 		reqs = &cache->core_obj[core_id].counters->
 				part_counters[part_id].read_reqs;
@@ -479,69 +479,69 @@ void ocf_engine_update_request_stats(struct ocf_request *rq)
 
 	env_atomic64_inc(&reqs->total);
 
-	if (rq->info.hit_no == 0)
+	if (req->info.hit_no == 0)
 		env_atomic64_inc(&reqs->full_miss);
-	else if (rq->info.hit_no < rq->core_line_count)
+	else if (req->info.hit_no < req->core_line_count)
 		env_atomic64_inc(&reqs->partial_miss);
 }
 
-void ocf_engine_push_rq_back(struct ocf_request *rq, bool allow_sync)
+void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
 {
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 	struct ocf_queue *q = NULL;
 	unsigned long lock_flags;
 
-	INIT_LIST_HEAD(&rq->list);
+	INIT_LIST_HEAD(&req->list);
 
-	ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
-	q = &cache->io_queues[rq->io_queue];
+	ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
+	q = &cache->io_queues[req->io_queue];
 
 	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
 
-	list_add_tail(&rq->list, &q->io_list);
+	list_add_tail(&req->list, &q->io_list);
 	env_atomic_inc(&q->io_no);
 
 	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
 
-	if (!rq->info.internal)
+	if (!req->info.internal)
 		env_atomic_set(&cache->last_access_ms,
 				env_ticks_to_msecs(env_get_tick_count()));
 
 	ctx_queue_kick(cache->owner, q, allow_sync);
 }
 
-void ocf_engine_push_rq_front(struct ocf_request *rq, bool allow_sync)
+void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
 {
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 	struct ocf_queue *q = NULL;
 	unsigned long lock_flags;
 
-	INIT_LIST_HEAD(&rq->list);
+	INIT_LIST_HEAD(&req->list);
 
-	ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
-	q = &cache->io_queues[rq->io_queue];
+	ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
+	q = &cache->io_queues[req->io_queue];
 
 	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
 
-	list_add(&rq->list, &q->io_list);
+	list_add(&req->list, &q->io_list);
 	env_atomic_inc(&q->io_no);
 
 	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
 
-	if (!rq->info.internal)
+	if (!req->info.internal)
 		env_atomic_set(&cache->last_access_ms,
 				env_ticks_to_msecs(env_get_tick_count()));
 
 	ctx_queue_kick(cache->owner, q, allow_sync);
 }
 
-void ocf_engine_push_rq_front_if(struct ocf_request *rq,
+void ocf_engine_push_req_front_if(struct ocf_request *req,
 		const struct ocf_io_if *io_if,
 		bool allow_sync)
 {
-	rq->error = 0; /* Please explain why!!! */
-	rq->io_if = io_if;
-	ocf_engine_push_rq_front(rq, allow_sync);
+	req->error = 0; /* Please explain why!!! */
+	req->io_if = io_if;
+	ocf_engine_push_req_front(req, allow_sync);
 }
 
 void inc_fallback_pt_error_counter(ocf_cache_t cache)
@@ -558,44 +558,44 @@ void inc_fallback_pt_error_counter(ocf_cache_t cache)
 	}
 }
 
-static int _ocf_engine_refresh(struct ocf_request *rq)
+static int _ocf_engine_refresh(struct ocf_request *req)
 {
-	struct ocf_cache *cache = rq->cache;
+	struct ocf_cache *cache = req->cache;
 	int result;
 
 	OCF_METADATA_LOCK_RD();
 	/* Check under metadata RD lock */
 
-	result = ocf_engine_check(rq);
+	result = ocf_engine_check(req);
 
 	OCF_METADATA_UNLOCK_RD();
 
 	if (result == 0) {
 
 		/* Refresh successful, can process with original IO interface */
-		rq->io_if = rq->priv;
+		req->io_if = req->priv;
 
-		rq->resume = NULL;
-		rq->priv = NULL;
+		req->resume = NULL;
+		req->priv = NULL;
 
-		if (rq->rw == OCF_READ)
-			rq->io_if->read(rq);
-		else if (rq->rw == OCF_WRITE)
-			rq->io_if->write(rq);
+		if (req->rw == OCF_READ)
+			req->io_if->read(req);
+		else if (req->rw == OCF_WRITE)
+			req->io_if->write(req);
 		else
 			ENV_BUG();
 	} else {
 		ENV_WARN(true, "Inconsistent request");
-		rq->error = -EINVAL;
+		req->error = -EINVAL;
 
 		/* Complete request */
-		rq->complete(rq, rq->error);
+		req->complete(req, req->error);
 
 		/* Release WRITE lock of request */
-		ocf_rq_unlock(rq);
+		ocf_req_unlock(req);
 
 		/* Release OCF request */
-		ocf_rq_put(rq);
+		ocf_req_put(req);
 	}
 
 	return 0;
@@ -606,16 +606,16 @@ static const struct ocf_io_if _io_if_refresh = {
 	.write = _ocf_engine_refresh,
 };
 
-void ocf_engine_on_resume(struct ocf_request *rq)
+void ocf_engine_on_resume(struct ocf_request *req)
 {
-	ENV_BUG_ON(rq->priv);
-	ENV_BUG_ON(ocf_engine_on_resume != rq->resume);
-	OCF_CHECK_NULL(rq->io_if);
+	ENV_BUG_ON(req->priv);
+	ENV_BUG_ON(ocf_engine_on_resume != req->resume);
+	OCF_CHECK_NULL(req->io_if);
 
 	/* Exchange IO interface */
-	rq->priv = (void *)rq->io_if;
+	req->priv = (void *)req->io_if;
 
-	OCF_DEBUG_RQ(rq, "On resume");
+	OCF_DEBUG_RQ(req, "On resume");
 
-	ocf_engine_push_rq_front_if(rq, &_io_if_refresh, false);
+	ocf_engine_push_req_front_if(req, &_io_if_refresh, false);
 }
@@ -16,103 +16,103 @@
 /**
  * @brief Signal and handle OCF request error
  *
- * @param rq OCF request
+ * @param req OCF request
  * @param stop_cache Indicates if OCF cache engine need to be stopped
  * @param msg Error message to be printed into log
  */
-void ocf_engine_error(struct ocf_request *rq, bool stop_cache,
+void ocf_engine_error(struct ocf_request *req, bool stop_cache,
 		const char *msg);
 
 /**
  * @brief Check if OCF request is hit
  *
- * @param rq OCF request
+ * @param req OCF request
  *
  * @retval true HIT
  * @retval false MISS
  */
-static inline bool ocf_engine_is_hit(struct ocf_request *rq)
+static inline bool ocf_engine_is_hit(struct ocf_request *req)
 {
-	return rq->info.hit_no == rq->core_line_count;
+	return req->info.hit_no == req->core_line_count;
 }
 
 /**
  * @brief Check if OCF request is miss
  *
- * @param rq OCF request
+ * @param req OCF request
  *
  * @retval true MISS
  * @retval false HIT
  */
-#define ocf_engine_is_miss(rq) (!ocf_engine_is_hit(rq))
+#define ocf_engine_is_miss(req) (!ocf_engine_is_hit(req))
 
 /**
  * @brief Check if all cache lines are mapped fully
  *
- * @param rq OCF request
+ * @param req OCF request
 *
 * @retval true request is mapped fully
 * @retval false request is not mapped fully and eviction might be run in
 * order to complete mapping
 */
-static inline bool ocf_engine_is_mapped(struct ocf_request *rq)
+static inline bool ocf_engine_is_mapped(struct ocf_request *req)
 {
-	return rq->info.hit_no + rq->info.invalid_no == rq->core_line_count;
+	return req->info.hit_no + req->info.invalid_no == req->core_line_count;
 }
 
 /**
  * @brief Check if all cache lines are dirty
  *
- * @param rq OCF request
+ * @param req OCF request
 *
 * @retval true request is dirty fully
 * @retval false request is not dirty fully
 */
-static inline bool ocf_engine_is_dirty_all(struct ocf_request *rq)
+static inline bool ocf_engine_is_dirty_all(struct ocf_request *req)
 {
-	return rq->info.dirty_all == rq->core_line_count;
+	return req->info.dirty_all == req->core_line_count;
 }
 
 /**
  * @brief Get number of mapped cache lines
  *
- * @param rq OCF request
+ * @param req OCF request
 *
 * @return Number of mapped cache lines
 */
-static inline uint32_t ocf_engine_mapped_count(struct ocf_request *rq)
+static inline uint32_t ocf_engine_mapped_count(struct ocf_request *req)
 {
-	return rq->info.hit_no + rq->info.invalid_no;
+	return req->info.hit_no + req->info.invalid_no;
 }
 
 /**
  * @brief Get number of unmapped cache lines
  *
- * @param rq OCF request
+ * @param req OCF request
 *
 * @return Number of unmapped cache lines
 */
-static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *rq)
+static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req)
 {
-	return rq->core_line_count - (rq->info.hit_no + rq->info.invalid_no);
+	return req->core_line_count - (req->info.hit_no + req->info.invalid_no);
 }
 
 /**
 * @brief Get number of IOs to perform cache read or write
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*
|
||||
* @return Count of cache IOs
|
||||
*/
|
||||
static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
|
||||
static inline uint32_t ocf_engine_io_count(struct ocf_request *req)
|
||||
{
|
||||
return rq->info.seq_req ? 1 : rq->core_line_count;
|
||||
return req->info.seq_req ? 1 : req->core_line_count;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Clean request (flush dirty data to the core device)
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*
|
||||
* @note After successful cleaning:
|
||||
* - Dirty status bits in request info will be cleared
|
||||
@ -123,7 +123,7 @@ static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
|
||||
* - complete request to the application
|
||||
* - free request
|
||||
*/
|
||||
void ocf_engine_clean(struct ocf_request *rq);
|
||||
void ocf_engine_clean(struct ocf_request *req);
|
||||
|
||||
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
|
||||
struct ocf_map_info *entry, ocf_core_id_t core_id,
|
||||
@ -135,9 +135,9 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
|
||||
*
|
||||
* @note This function CALL EVICTION
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*/
|
||||
void ocf_engine_map(struct ocf_request *rq);
|
||||
void ocf_engine_map(struct ocf_request *req);
|
||||
|
||||
/**
|
||||
* @brief Traverse OCF request (lookup cache)
|
||||
@ -145,79 +145,79 @@ void ocf_engine_map(struct ocf_request *rq);
|
||||
* @note This function DO NOT CALL EVICTION. Only lookup in metadata is
|
||||
* performed. Main purpose of this function is to check if there is a HIT.
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*/
|
||||
void ocf_engine_traverse(struct ocf_request *rq);
|
||||
void ocf_engine_traverse(struct ocf_request *req);
|
||||
|
||||
/**
|
||||
* @brief Check if OCF request mapping is still valid
|
||||
*
|
||||
* @note If mapping entries is invalid it will be marked
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*
|
||||
* @retval 0 - OCF request mapping is valid
|
||||
* @return Non zero - OCF request mapping is invalid and need to call re-mapping
|
||||
*/
|
||||
int ocf_engine_check(struct ocf_request *rq);
|
||||
int ocf_engine_check(struct ocf_request *req);
|
||||
|
||||
/**
|
||||
* @brief Update OCF request info
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*/
|
||||
void ocf_engine_update_rq_info(struct ocf_cache *cache,
|
||||
struct ocf_request *rq, uint32_t entry);
|
||||
void ocf_engine_update_req_info(struct ocf_cache *cache,
|
||||
struct ocf_request *req, uint32_t entry);
|
||||
|
||||
/**
|
||||
* @brief Update OCF request block statistics for an exported object
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*/
|
||||
void ocf_engine_update_block_stats(struct ocf_request *rq);
|
||||
void ocf_engine_update_block_stats(struct ocf_request *req);
|
||||
|
||||
/**
|
||||
* @brief Update OCF request request statistics for an exported object
|
||||
* (not applicable to write wi and to read wt
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
*/
|
||||
void ocf_engine_update_request_stats(struct ocf_request *rq);
|
||||
void ocf_engine_update_request_stats(struct ocf_request *req);
|
||||
|
||||
/**
|
||||
* @brief Push front OCF request to the OCF thread worker queue
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
* @param allow_sync caller allows for request from queue to be ran immediately
|
||||
from push function in caller context
|
||||
*/
|
||||
void ocf_engine_push_rq_back(struct ocf_request *rq,
|
||||
void ocf_engine_push_req_back(struct ocf_request *req,
|
||||
bool allow_sync);
|
||||
|
||||
/**
|
||||
* @brief Push back OCF request to the OCF thread worker queue
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
* @param allow_sync caller allows for request from queue to be ran immediately
|
||||
from push function in caller context
|
||||
*/
|
||||
void ocf_engine_push_rq_front(struct ocf_request *rq,
|
||||
void ocf_engine_push_req_front(struct ocf_request *req,
|
||||
bool allow_sync);
|
||||
|
||||
/**
|
||||
* @brief Set interface and push from request to the OCF thread worker queue
|
||||
*
|
||||
* @param rq OCF request
|
||||
* @param req OCF request
|
||||
* @param io_if IO interface
|
||||
* @param allow_sync caller allows for request from queue to be ran immediately
|
||||
from push function in caller context
|
||||
*/
|
||||
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
|
||||
void ocf_engine_push_req_front_if(struct ocf_request *req,
|
||||
const struct ocf_io_if *io_if,
|
||||
bool allow_sync);
|
||||
|
||||
void inc_fallback_pt_error_counter(ocf_cache_t cache);
|
||||
|
||||
void ocf_engine_on_resume(struct ocf_request *rq);
|
||||
void ocf_engine_on_resume(struct ocf_request *req);
|
||||
|
||||
#endif /* ENGINE_COMMON_H_ */
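
The hit/miss/mapped predicates declared above are pure arithmetic over the request's traversal counters, which a short worked example makes concrete. A hedged sketch, with a stripped-down stand-in for struct ocf_request and invented counter values:

/* Worked example of the counter arithmetic behind ocf_engine_is_hit(),
 * ocf_engine_is_mapped() and ocf_engine_unmapped_count(). */
#include <assert.h>
#include <stdint.h>

struct req_info_stub { uint32_t hit_no; uint32_t invalid_no; };
struct req_stub { struct req_info_stub info; uint32_t core_line_count; };

int main(void)
{
	/* 8 core lines: 5 hits, 1 invalid-but-mapped, 2 unmapped */
	struct req_stub req = {
		.info = { .hit_no = 5, .invalid_no = 1 },
		.core_line_count = 8,
	};

	/* "hit" only when every core line is a hit */
	assert((req.info.hit_no == req.core_line_count) == 0);

	/* mapped = hits + invalid entries; here 6 of 8 are mapped */
	assert(req.info.hit_no + req.info.invalid_no == 6);

	/* the remaining 2 lines would need mapping (possibly eviction) */
	assert(req.core_line_count
			- (req.info.hit_no + req.info.invalid_no) == 2);

	return 0;
}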

@ -7,7 +7,7 @@
#include "engine_d2c.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"

@ -33,7 +33,7 @@ static void _ocf_d2c_completion(struct ocf_request *req, int error)
	req->complete(req, req->error);

	/* Release OCF request */
	ocf_rq_put(req);
	ocf_req_put(req);
}

int ocf_io_d2c(struct ocf_request *req)
@ -46,7 +46,7 @@ int ocf_io_d2c(struct ocf_request *req)
	ocf_io_start(req->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(req);
	ocf_req_get(req);

	ocf_submit_obj_req(&core->obj, req, _ocf_d2c_completion);

@ -61,7 +61,7 @@ int ocf_io_d2c(struct ocf_request *req)
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(req);
	ocf_req_put(req);

	return 0;

@ -6,6 +6,6 @@
#ifndef ENGINE_2DC_H_
#define ENGINE_2DC_H_

int ocf_io_d2c(struct ocf_request *rq);
int ocf_io_d2c(struct ocf_request *req);

#endif /* ENGINE_2DC_H_ */

@ -30,11 +30,11 @@
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
		##__VA_ARGS__)

#define OCF_DEBUG_RQ(rq, format, ...) \
	ocf_cache_log(rq->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
#define OCF_DEBUG_RQ(req, format, ...) \
	ocf_cache_log(req->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
			format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
			OCF_READ == (rq)->rw ? "RD" : "WR", rq->byte_position, \
			rq->byte_length, __func__, ##__VA_ARGS__)
			OCF_READ == (req)->rw ? "RD" : "WR", req->byte_position, \
			req->byte_length, __func__, ##__VA_ARGS__)

#else
#define OCF_DEBUG_PREFIX
@ -42,7 +42,7 @@
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define OCF_DEBUG_RQ(rq, format, ...)
#define OCF_DEBUG_RQ(req, format, ...)
#endif

#endif /* ENGINE_DEBUG_H_ */

@ -8,7 +8,7 @@
#include "engine_common.h"
#include "engine_discard.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
@ -18,10 +18,10 @@
#define OCF_ENGINE_DEBUG_IO_NAME "discard"
#include "engine_debug.h"

static int _ocf_discard_step_do(struct ocf_request *rq);
static int _ocf_discard_step(struct ocf_request *rq);
static int _ocf_discard_flush_cache(struct ocf_request *rq);
static int _ocf_discard_core(struct ocf_request *rq);
static int _ocf_discard_step_do(struct ocf_request *req);
static int _ocf_discard_step(struct ocf_request *req);
static int _ocf_discard_flush_cache(struct ocf_request *req);
static int _ocf_discard_core(struct ocf_request *req);

static const struct ocf_io_if _io_if_discard_step = {
	.read = _ocf_discard_step,
@ -43,19 +43,19 @@ static const struct ocf_io_if _io_if_discard_core = {
	.write = _ocf_discard_core
};

static void _ocf_discard_complete_rq(struct ocf_request *req, int error)
static void _ocf_discard_complete_req(struct ocf_request *req, int error)
{
	req->complete(req, error);

	ocf_rq_put(req);
	ocf_req_put(req);
}
static void _ocf_discard_core_io(struct ocf_io *io, int error)
{
	struct ocf_request *rq = io->priv1;
	struct ocf_request *req = io->priv1;

	OCF_DEBUG_RQ(rq, "Core DISCARD Completion");
	OCF_DEBUG_RQ(req, "Core DISCARD Completion");

	_ocf_discard_complete_rq(rq, error);
	_ocf_discard_complete_req(req, error);
}

static int _ocf_discard_core(struct ocf_request *req)
@ -65,7 +65,7 @@ static int _ocf_discard_core(struct ocf_request *req)

	io = ocf_dobj_new_io(&cache->core_obj[req->core_id].obj);
	if (!io) {
		_ocf_discard_complete_rq(req, -ENOMEM);
		_ocf_discard_complete_req(req, -ENOMEM);
		return -ENOMEM;
	}

@ -83,16 +83,16 @@ static int _ocf_discard_core(struct ocf_request *req)

static void _ocf_discard_cache_flush_io_cmpl(struct ocf_io *io, int error)
{
	struct ocf_request *rq = io->priv1;
	struct ocf_request *req = io->priv1;

	if (error) {
		ocf_metadata_error(rq->cache);
		_ocf_discard_complete_rq(rq, error);
		ocf_metadata_error(req->cache);
		_ocf_discard_complete_req(req, error);
		return;
	}

	rq->io_if = &_io_if_discard_core;
	ocf_engine_push_rq_front(rq, true);
	req->io_if = &_io_if_discard_core;
	ocf_engine_push_req_front(req, true);
}

static int _ocf_discard_flush_cache(struct ocf_request *req)
@ -102,7 +102,7 @@ static int _ocf_discard_flush_cache(struct ocf_request *req)
	io = ocf_dobj_new_io(&req->cache->device->obj);
	if (!io) {
		ocf_metadata_error(req->cache);
		_ocf_discard_complete_rq(req, -ENOMEM);
		_ocf_discard_complete_req(req, -ENOMEM);
		return -ENOMEM;
	}

@ -114,111 +114,111 @@ static int _ocf_discard_flush_cache(struct ocf_request *req)
	return 0;
}

static void _ocf_discard_finish_step(struct ocf_request *rq)
static void _ocf_discard_finish_step(struct ocf_request *req)
{
	rq->discard.handled += BYTES_TO_SECTORS(rq->byte_length);
	req->discard.handled += BYTES_TO_SECTORS(req->byte_length);

	if (rq->discard.handled < rq->discard.nr_sects)
		rq->io_if = &_io_if_discard_step;
	else if (rq->cache->device->init_mode != ocf_init_mode_metadata_volatile)
		rq->io_if = &_io_if_discard_flush_cache;
	if (req->discard.handled < req->discard.nr_sects)
		req->io_if = &_io_if_discard_step;
	else if (req->cache->device->init_mode != ocf_init_mode_metadata_volatile)
		req->io_if = &_io_if_discard_flush_cache;
	else
		rq->io_if = &_io_if_discard_core;
		req->io_if = &_io_if_discard_core;

	ocf_engine_push_rq_front(rq, true);
	ocf_engine_push_req_front(req, true);
}

static void _ocf_discard_step_io(struct ocf_request *rq, int error)
static void _ocf_discard_step_io(struct ocf_request *req, int error)
{
	if (error)
		rq->error |= error;
		req->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");
	OCF_DEBUG_RQ(req, "Completion");

	/* Release WRITE lock of request */
	ocf_rq_unlock_wr(rq);
	ocf_req_unlock_wr(req);

	if (rq->error) {
		ocf_metadata_error(rq->cache);
		_ocf_discard_complete_rq(rq, rq->error);
	if (req->error) {
		ocf_metadata_error(req->cache);
		_ocf_discard_complete_req(req, req->error);
		return;
	}

	_ocf_discard_finish_step(rq);
	_ocf_discard_finish_step(req);
}

int _ocf_discard_step_do(struct ocf_request *rq)
int _ocf_discard_step_do(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */
	env_atomic_set(&req->req_remaining, 1); /* One core IO */

	if (ocf_engine_mapped_count(rq)) {
	if (ocf_engine_mapped_count(req)) {
		/* There are mapped cache line, need to remove them */

		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(rq);
		ocf_purge_map_info(req);

		if (rq->info.flush_metadata) {
		if (req->info.flush_metadata) {
			/* Request was dirty and need to flush metadata */
			ocf_metadata_flush_do_asynch(cache, rq,
			ocf_metadata_flush_do_asynch(cache, req,
					_ocf_discard_step_io);
		}

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	OCF_DEBUG_RQ(rq, "Discard");
	_ocf_discard_step_io(rq, 0);
	OCF_DEBUG_RQ(req, "Discard");
	_ocf_discard_step_io(req, 0);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}

static void _ocf_discard_on_resume(struct ocf_request *rq)
static void _ocf_discard_on_resume(struct ocf_request *req)
{
	OCF_DEBUG_RQ(rq, "On resume");
	ocf_engine_push_rq_front(rq, true);
	OCF_DEBUG_RQ(req, "On resume");
	ocf_engine_push_req_front(req, true);
}

static int _ocf_discard_step(struct ocf_request *rq)
static int _ocf_discard_step(struct ocf_request *req)
{
	int lock;
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	OCF_DEBUG_TRACE(rq->cache);
	OCF_DEBUG_TRACE(req->cache);

	rq->byte_position = SECTORS_TO_BYTES(rq->discard.sector +
			rq->discard.handled);
	rq->byte_length = MIN(SECTORS_TO_BYTES(rq->discard.nr_sects -
			rq->discard.handled), MAX_TRIM_RQ_SIZE);
	rq->core_line_first = ocf_bytes_2_lines(cache, rq->byte_position);
	rq->core_line_last =
		ocf_bytes_2_lines(cache, rq->byte_position + rq->byte_length - 1);
	rq->core_line_count = rq->core_line_last - rq->core_line_first + 1;
	rq->io_if = &_io_if_discard_step_resume;
	req->byte_position = SECTORS_TO_BYTES(req->discard.sector +
			req->discard.handled);
	req->byte_length = MIN(SECTORS_TO_BYTES(req->discard.nr_sects -
			req->discard.handled), MAX_TRIM_RQ_SIZE);
	req->core_line_first = ocf_bytes_2_lines(cache, req->byte_position);
	req->core_line_last =
		ocf_bytes_2_lines(cache, req->byte_position + req->byte_length - 1);
	req->core_line_count = req->core_line_last - req->core_line_first + 1;
	req->io_if = &_io_if_discard_step_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	ENV_BUG_ON(env_memset(rq->map, sizeof(*rq->map) * rq->core_line_count,
	ENV_BUG_ON(env_memset(req->map, sizeof(*req->map) * req->core_line_count,
			0));

	/* Travers to check if request is mapped fully */
	ocf_engine_traverse(rq);
	ocf_engine_traverse(req);

	if (ocf_engine_mapped_count(rq)) {
	if (ocf_engine_mapped_count(req)) {
		/* Some cache line are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
		lock = ocf_req_trylock_wr(req);
	} else {
		lock = OCF_LOCK_ACQUIRED;
	}
@ -227,15 +227,15 @@ static int _ocf_discard_step(struct ocf_request *rq)

	if (lock >= 0) {
		if (OCF_LOCK_ACQUIRED == lock) {
			_ocf_discard_step_do(rq);
			_ocf_discard_step_do(req);
		} else {
			/* WR lock was not acquired, need to wait for resume */
			OCF_DEBUG_RQ(rq, "NO LOCK")
			OCF_DEBUG_RQ(req, "NO LOCK")
		}
	} else {
		OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
		rq->error |= lock;
		_ocf_discard_finish_step(rq);
		OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
		req->error |= lock;
		_ocf_discard_finish_step(req);
	}

	env_cond_resched();
@ -243,27 +243,27 @@ static int _ocf_discard_step(struct ocf_request *rq)
	return 0;
}

int ocf_discard(struct ocf_request *rq)
int ocf_discard(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(rq->cache);
	OCF_DEBUG_TRACE(req->cache);

	ocf_io_start(rq->io);
	ocf_io_start(req->io);

	if (rq->rw == OCF_READ) {
		rq->complete(rq, -EINVAL);
	if (req->rw == OCF_READ) {
		req->complete(req, -EINVAL);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	/* Set resume call backs */
	rq->resume = _ocf_discard_on_resume;
	req->resume = _ocf_discard_on_resume;

	_ocf_discard_step(rq);
	_ocf_discard_step(req);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}

@ -6,6 +6,6 @@
#ifndef __ENGINE_DISCARD_H__
#define __ENGINE_DISCARD_H__

int ocf_discard(struct ocf_request *rq);
int ocf_discard(struct ocf_request *req);

#endif
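
_ocf_discard_step() above slices a large discard into chunks of at most MAX_TRIM_RQ_SIZE bytes, advancing discard.handled after each step until the whole range is covered. A minimal sketch of just that slicing arithmetic follows; SECTOR_SIZE, the 512 KiB cap, and the struct are simplified assumptions, not OCF definitions:

/* Sketch of the discard chunking loop, matching the byte_position /
 * byte_length math in _ocf_discard_step(). Stand-in constants only. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE		512ULL
#define MAX_TRIM_RQ_SIZE	(512ULL * 1024)	/* assumed 512 KiB cap */
#define SECTORS_TO_BYTES(s)	((s) * SECTOR_SIZE)
#define BYTES_TO_SECTORS(b)	((b) / SECTOR_SIZE)
#define MIN(a, b)		((a) < (b) ? (a) : (b))

struct discard_stub {
	uint64_t sector;	/* start of the whole discard */
	uint64_t nr_sects;	/* total length in sectors */
	uint64_t handled;	/* sectors finished so far */
};

int main(void)
{
	struct discard_stub d = { .sector = 0, .nr_sects = 4096, .handled = 0 };

	/* 4096 sectors = 2 MiB, so this prints four 512 KiB steps */
	while (d.handled < d.nr_sects) {
		uint64_t pos = SECTORS_TO_BYTES(d.sector + d.handled);
		uint64_t len = MIN(SECTORS_TO_BYTES(d.nr_sects - d.handled),
				MAX_TRIM_RQ_SIZE);

		printf("step: %llu bytes at offset %llu\n",
				(unsigned long long)len,
				(unsigned long long)pos);

		d.handled += BYTES_TO_SECTORS(len);
	}

	return 0;
}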

@ -9,7 +9,7 @@
#include "engine_common.h"
#include "engine_pt.h"
#include "engine_wb.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_part.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
@ -28,76 +28,76 @@
 * |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
 */

static void _ocf_read_fast_io(struct ocf_request *rq, int error)
static void _ocf_read_fast_io(struct ocf_request *req, int error)
{
	if (error)
		rq->error |= error;
		req->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining)) {
	if (env_atomic_dec_return(&req->req_remaining)) {
		/* Not all requests finished */
		return;
	}

	OCF_DEBUG_RQ(rq, "HIT completion");
	OCF_DEBUG_RQ(req, "HIT completion");

	if (rq->error) {
		OCF_DEBUG_RQ(rq, "ERROR");
	if (req->error) {
		OCF_DEBUG_RQ(req, "ERROR");

		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
		env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
				cache_errors.read);
		ocf_engine_push_rq_front_pt(rq);
		ocf_engine_push_req_front_pt(req);
	} else {
		ocf_rq_unlock(rq);
		ocf_req_unlock(req);

		/* Complete request */
		rq->complete(rq, rq->error);
		req->complete(req, req->error);

		/* Free the request at the last point of the completion path */
		ocf_rq_put(rq);
		ocf_req_put(req);
	}
}

static int _ocf_read_fast_do(struct ocf_request *rq)
static int _ocf_read_fast_do(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	if (ocf_engine_is_miss(rq)) {
	if (ocf_engine_is_miss(req)) {
		/* It seams that after resume, now request is MISS, do PT */
		OCF_DEBUG_RQ(rq, "Switching to read PT");
		ocf_read_pt_do(rq);
		OCF_DEBUG_RQ(req, "Switching to read PT");
		ocf_read_pt_do(req);
		return 0;

	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
	if (req->info.re_part) {
		OCF_DEBUG_RQ(req, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);
		ocf_part_move(req);

		OCF_METADATA_UNLOCK_WR();
	}

	/* Submit IO */
	OCF_DEBUG_RQ(rq, "Submit");
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
			ocf_engine_io_count(rq), _ocf_read_fast_io);
	OCF_DEBUG_RQ(req, "Submit");
	env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
	ocf_submit_cache_reqs(req->cache, req->map, req, OCF_READ,
			ocf_engine_io_count(req), _ocf_read_fast_io);


	/* Updata statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);
	ocf_engine_update_request_stats(req);
	ocf_engine_update_block_stats(req);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}
@ -107,56 +107,56 @@ static const struct ocf_io_if _io_if_read_fast_resume = {
	.write = _ocf_read_fast_do,
};

int ocf_read_fast(struct ocf_request *rq)
int ocf_read_fast(struct ocf_request *req)
{
	bool hit;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_read_fast_resume;
	req->resume = ocf_engine_on_resume;
	req->io_if = &_io_if_read_fast_resume;

	/*- Metadata RD access -----------------------------------------------*/

	OCF_METADATA_LOCK_RD();

	/* Traverse request to cache if there is hit */
	ocf_engine_traverse(rq);
	ocf_engine_traverse(req);

	hit = ocf_engine_is_hit(rq);
	hit = ocf_engine_is_hit(req);
	if (hit) {
		ocf_io_start(rq->io);
		lock = ocf_rq_trylock_rd(rq);
		ocf_io_start(req->io);
		lock = ocf_req_trylock_rd(req);
	}

	OCF_METADATA_UNLOCK_RD();

	if (hit) {
		OCF_DEBUG_RQ(rq, "Fast path success");
		OCF_DEBUG_RQ(req, "Fast path success");

		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
				OCF_DEBUG_RQ(req, "NO LOCK");
			} else {
				/* Lock was acquired can perform IO */
				_ocf_read_fast_do(rq);
				_ocf_read_fast_do(req);
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR");
			rq->complete(rq, lock);
			ocf_rq_put(rq);
			OCF_DEBUG_RQ(req, "LOCK ERROR");
			req->complete(req, lock);
			ocf_req_put(req);
		}
	} else {
		OCF_DEBUG_RQ(rq, "Fast path failure");
		OCF_DEBUG_RQ(req, "Fast path failure");
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	if (hit)
		return OCF_FAST_PATH_YES;
@ -177,56 +177,56 @@ static const struct ocf_io_if _io_if_write_fast_resume = {
	.write = ocf_write_wb_do,
};

int ocf_write_fast(struct ocf_request *rq)
int ocf_write_fast(struct ocf_request *req)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_write_fast_resume;
	req->resume = ocf_engine_on_resume;
	req->io_if = &_io_if_write_fast_resume;

	/*- Metadata RD access -----------------------------------------------*/

	OCF_METADATA_LOCK_RD();

	/* Traverse request to cache if there is hit */
	ocf_engine_traverse(rq);
	ocf_engine_traverse(req);

	mapped = ocf_engine_is_mapped(rq);
	mapped = ocf_engine_is_mapped(req);
	if (mapped) {
		ocf_io_start(rq->io);
		lock = ocf_rq_trylock_wr(rq);
		ocf_io_start(req->io);
		lock = ocf_req_trylock_wr(req);
	}

	OCF_METADATA_UNLOCK_RD();

	if (mapped) {
		if (lock >= 0) {
			OCF_DEBUG_RQ(rq, "Fast path success");
			OCF_DEBUG_RQ(req, "Fast path success");

			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
				OCF_DEBUG_RQ(req, "NO LOCK");
			} else {
				/* Lock was acquired can perform IO */
				ocf_write_wb_do(rq);
				ocf_write_wb_do(req);
			}
		} else {
			OCF_DEBUG_RQ(rq, "Fast path lock failure");
			rq->complete(rq, lock);
			ocf_rq_put(rq);
			OCF_DEBUG_RQ(req, "Fast path lock failure");
			req->complete(req, lock);
			ocf_req_put(req);
		}
	} else {
		OCF_DEBUG_RQ(rq, "Fast path failure");
		OCF_DEBUG_RQ(req, "Fast path failure");
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;

@ -6,7 +6,7 @@
#ifndef ENGINE_FAST_H_
#define ENGINE_FAST_H_

int ocf_read_fast(struct ocf_request *rq);
int ocf_write_fast(struct ocf_request *rq);
int ocf_read_fast(struct ocf_request *req);
int ocf_write_fast(struct ocf_request *req);

#endif /* ENGINE_WI_H_ */
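
Both fast-path entry points above share one shape: traverse metadata under the read lock, attempt the cache-line lock only on a full hit (read) or full mapping (write), and report OCF_FAST_PATH_NO otherwise so the caller falls back to the generic engine. A condensed control-flow sketch, with all names and return codes as simplified stand-ins:

/* Skeleton of the ocf_read_fast()/ocf_write_fast() decision: fast path
 * succeeds only when traversal proves a full hit/mapping AND the
 * per-line lock is obtainable or queueable; everything else falls back. */
#include <stdio.h>

enum { FAST_PATH_YES, FAST_PATH_NO };
enum { LOCK_ACQUIRED = 0, LOCK_NOT_ACQUIRED = 1, LOCK_ERROR = -1 };

struct req_stub { int full_hit; int lock_result; };

static int fast_path(struct req_stub *req)
{
	/* 1. traverse metadata (read lock): is every line already there? */
	if (!req->full_hit)
		return FAST_PATH_NO;	/* generic engine must map/evict */

	/* 2. try to lock the mapped lines without blocking */
	if (req->lock_result == LOCK_ACQUIRED)
		printf("submit IO immediately\n");
	else if (req->lock_result == LOCK_NOT_ACQUIRED)
		printf("queued: resume callback will submit later\n");
	else
		printf("lock error: complete request with error\n");

	/* even a queued request counts as fast-path success */
	return FAST_PATH_YES;
}

int main(void)
{
	struct req_stub hit = { .full_hit = 1, .lock_result = LOCK_ACQUIRED };
	struct req_stub miss = { .full_hit = 0, .lock_result = LOCK_ERROR };

	printf("hit -> %d, miss -> %d\n", fast_path(&hit), fast_path(&miss));
	return 0;
}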

@ -8,7 +8,7 @@
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
@ -16,47 +16,47 @@
#define OCF_ENGINE_DEBUG_IO_NAME "inv"
#include "engine_debug.h"

static void _ocf_invalidate_rq(struct ocf_request *rq, int error)
static void _ocf_invalidate_req(struct ocf_request *req, int error)
{
	if (error) {
		rq->error = error;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
		req->error = error;
		env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
				cache_errors.write);
	}

	if (env_atomic_dec_return(&rq->req_remaining))
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");
	OCF_DEBUG_RQ(req, "Completion");

	if (rq->error)
		ocf_engine_error(rq, true, "Failed to flush metadata to cache");
	if (req->error)
		ocf_engine_error(req, true, "Failed to flush metadata to cache");

	ocf_rq_unlock(rq);
	ocf_req_unlock(req);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);
}

static int _ocf_invalidate_do(struct ocf_request *rq)
static int _ocf_invalidate_do(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	ENV_BUG_ON(env_atomic_read(&rq->req_remaining));
	ENV_BUG_ON(env_atomic_read(&req->req_remaining));

	OCF_METADATA_LOCK_WR();
	ocf_purge_map_info(rq);
	ocf_purge_map_info(req);
	OCF_METADATA_UNLOCK_WR();

	env_atomic_inc(&rq->req_remaining);
	env_atomic_inc(&req->req_remaining);

	if (ocf_data_obj_is_atomic(&cache->device->obj) &&
			rq->info.flush_metadata) {
			req->info.flush_metadata) {
		/* Metadata flush IO */
		ocf_metadata_flush_do_asynch(cache, rq, _ocf_invalidate_rq);
		ocf_metadata_flush_do_asynch(cache, req, _ocf_invalidate_req);
	}

	_ocf_invalidate_rq(rq, 0);
	_ocf_invalidate_req(req, 0);

	return 0;
}
@ -66,7 +66,7 @@ static const struct ocf_io_if _io_if_invalidate = {
	.write = _ocf_invalidate_do,
};

void ocf_engine_invalidate(struct ocf_request *rq)
void ocf_engine_invalidate(struct ocf_request *req)
{
	ocf_engine_push_rq_front_if(rq, &_io_if_invalidate, true);
	ocf_engine_push_req_front_if(req, &_io_if_invalidate, true);
}

@ -6,6 +6,6 @@
#ifndef ENGINE_INV_H_
#define ENGINE_INV_H_

void ocf_engine_invalidate(struct ocf_request *rq);
void ocf_engine_invalidate(struct ocf_request *req);

#endif /* ENGINE_INV_H_ */

@ -7,55 +7,55 @@
#include "engine_common.h"
#include "cache_engine.h"
#include "engine_ops.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"

#define OCF_ENGINE_DEBUG_IO_NAME "ops"
#include "engine_debug.h"

static void _ocf_engine_ops_io(struct ocf_request *rq, int error)
static void _ocf_engine_ops_io(struct ocf_request *req, int error)
{
	if (error)
		rq->error |= error;
		req->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");
	OCF_DEBUG_RQ(req, "Completion");

	if (rq->error) {
	if (req->error) {
		/* An error occured */
		ocf_engine_error(rq, false, "Core operation failure");
		ocf_engine_error(req, false, "Core operation failure");
	}

	/* Complete requests - both to cache and to core*/
	rq->complete(rq, rq->error);
	req->complete(req, req->error);

	/* Release OCF request */
	ocf_rq_put(rq);
	ocf_req_put(req);
}

int ocf_engine_ops(struct ocf_request *rq)
int ocf_engine_ops(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	OCF_DEBUG_TRACE(rq->cache);
	OCF_DEBUG_TRACE(req->cache);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	/* IO to the core device and to the cache device */
	env_atomic_set(&rq->req_remaining, 2);
	env_atomic_set(&req->req_remaining, 2);

	/* Submit operation into core device */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
	ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
			_ocf_engine_ops_io);

	ocf_submit_cache_reqs(cache, rq->map, rq, rq->rw,
	ocf_submit_cache_reqs(cache, req->map, req, req->rw,
			1, _ocf_engine_ops_io);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}

@ -6,6 +6,6 @@
#ifndef __CACHE_ENGINE_OPS_H_
#define __CACHE_ENGINE_OPS_H_

int ocf_engine_ops(struct ocf_request *rq);
int ocf_engine_ops(struct ocf_request *req);

#endif /* __CACHE_ENGINE_OPS_H_ */
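
ocf_engine_ops() above is the clearest example of the fan-out pattern used throughout these engines: set req_remaining to the number of sub-IOs (here two, one to the core device and one to the cache), let each completion callback decrement it, and only the decrement that reaches zero finishes the master request. A minimal sketch, with plain C11 atomics standing in for the env_atomic wrappers:

/* Sketch of the req_remaining fan-out/fan-in. The submitter sets the
 * counter to the sub-IO count; whoever decrements it to zero completes
 * the master request. Stand-in types only. */
#include <stdatomic.h>
#include <stdio.h>

struct req_stub {
	atomic_int req_remaining;
	int error;
};

static void sub_io_completion(struct req_stub *req, int error)
{
	if (error)
		req->error = error;

	/* fetch_sub returns the pre-decrement value, so any value other
	 * than 1 means another sub-IO is still in flight */
	if (atomic_fetch_sub(&req->req_remaining, 1) != 1)
		return;

	printf("master request complete, error=%d\n", req->error);
}

int main(void)
{
	struct req_stub req = { .error = 0 };

	/* two sub-IOs: e.g. one to the core device, one to the cache */
	atomic_store(&req.req_remaining, 2);

	sub_io_completion(&req, 0);	/* first callback: just decrements */
	sub_io_completion(&req, 0);	/* second callback: completes */
	return 0;
}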

@ -7,7 +7,7 @@
#include "engine_pt.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
@ -16,86 +16,86 @@
#define OCF_ENGINE_DEBUG_IO_NAME "pt"
#include "engine_debug.h"

static void _ocf_read_pt_io(struct ocf_request *rq, int error)
static void _ocf_read_pt_io(struct ocf_request *req, int error)
{
	if (error)
		rq->error |= error;
		req->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
	if (env_atomic_dec_return(&req->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");
	OCF_DEBUG_RQ(req, "Completion");

	if (rq->error) {
		rq->info.core_error = 1;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
	if (req->error) {
		req->info.core_error = 1;
		env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
				core_errors.read);
	}

	/* Complete request */
	rq->complete(rq, rq->error);
	req->complete(req, req->error);

	ocf_rq_unlock_rd(rq);
	ocf_req_unlock_rd(req);

	/* Release OCF request */
	ocf_rq_put(rq);
	ocf_req_put(req);
}

static inline void _ocf_read_pt_submit(struct ocf_request *rq)
static inline void _ocf_read_pt_submit(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	env_atomic_set(&rq->req_remaining, 1); /* Core device IO */
	env_atomic_set(&req->req_remaining, 1); /* Core device IO */

	OCF_DEBUG_RQ(rq, "Submit");
	OCF_DEBUG_RQ(req, "Submit");

	/* Core read */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
	ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
			_ocf_read_pt_io);
}

int ocf_read_pt_do(struct ocf_request *rq)
int ocf_read_pt_do(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	if (rq->info.dirty_any) {
	if (req->info.dirty_any) {
		OCF_METADATA_LOCK_RD();
		/* Need to clean, start it */
		ocf_engine_clean(rq);
		ocf_engine_clean(req);
		OCF_METADATA_UNLOCK_RD();

		/* Do not processing, because first we need to clean request */
		ocf_rq_put(rq);
		ocf_req_put(req);

		return 0;
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
	if (req->info.re_part) {
		OCF_DEBUG_RQ(req, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);
		ocf_part_move(req);

		OCF_METADATA_UNLOCK_WR();
	}

	/* Submit read IO to the core */
	_ocf_read_pt_submit(rq);
	_ocf_read_pt_submit(req);

	/* Update statistics */
	ocf_engine_update_block_stats(rq);
	env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
			part_counters[rq->part_id].read_reqs.pass_through);
	ocf_engine_update_block_stats(req);
	env_atomic64_inc(&cache->core_obj[req->core_id].counters->
			part_counters[req->part_id].read_reqs.pass_through);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}
@ -105,36 +105,36 @@ static const struct ocf_io_if _io_if_pt_resume = {
	.write = ocf_read_pt_do,
};

int ocf_read_pt(struct ocf_request *rq)
int ocf_read_pt(struct ocf_request *req)
{
	bool use_cache = false;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	OCF_DEBUG_TRACE(rq->cache);
	OCF_DEBUG_TRACE(req->cache);

	ocf_io_start(rq->io);
	ocf_io_start(req->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_pt_resume;
	req->resume = ocf_engine_on_resume;
	req->io_if = &_io_if_pt_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/

	/* Traverse request to check if there are mapped cache lines */
	ocf_engine_traverse(rq);
	ocf_engine_traverse(req);

	if (rq->info.seq_cutoff && ocf_engine_is_dirty_all(rq)) {
	if (req->info.seq_cutoff && ocf_engine_is_dirty_all(req)) {
		use_cache = true;
	} else {
		if (ocf_engine_mapped_count(rq)) {
		if (ocf_engine_mapped_count(req)) {
			/* There are mapped cache line,
			 * lock request for READ access
			 */
			lock = ocf_rq_trylock_rd(rq);
			lock = ocf_req_trylock_rd(req);
		} else {
			/* No mapped cache lines, no need to get lock */
			lock = OCF_LOCK_ACQUIRED;
@ -148,32 +148,32 @@ int ocf_read_pt(struct ocf_request *rq)
		 * There is dirt HIT, and sequential cut off,
		 * because of this force read data from cache
		 */
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_wt)->read(rq);
		ocf_req_clear(req);
		ocf_get_io_if(ocf_cache_mode_wt)->read(req);
	} else {
		if (lock >= 0) {
			if (lock == OCF_LOCK_ACQUIRED) {
				/* Lock acquired perform read off operations */
				ocf_read_pt_do(rq);
				ocf_read_pt_do(req);
			} else {
				/* WR lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
				OCF_DEBUG_RQ(req, "NO LOCK");
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
			OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
			req->complete(req, lock);
			ocf_req_put(req);
		}
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}

void ocf_engine_push_rq_front_pt(struct ocf_request *rq)
void ocf_engine_push_req_front_pt(struct ocf_request *req)
{
	ocf_engine_push_rq_front_if(rq, &_io_if_pt_resume, true);
	ocf_engine_push_req_front_if(req, &_io_if_pt_resume, true);
}

@ -6,10 +6,10 @@
#ifndef ENGINE_OFF_H_
#define ENGINE_OFF_H_

int ocf_read_pt(struct ocf_request *rq);
int ocf_read_pt(struct ocf_request *req);

int ocf_read_pt_do(struct ocf_request *rq);
int ocf_read_pt_do(struct ocf_request *req);

void ocf_engine_push_rq_front_pt(struct ocf_request *rq);
void ocf_engine_push_req_front_pt(struct ocf_request *req);

#endif /* ENGINE_OFF_H_ */

@ -13,7 +13,7 @@
#include "cache_engine.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
@ -22,70 +22,70 @@
#define OCF_ENGINE_DEBUG_IO_NAME "rd"
#include "engine_debug.h"

static void _ocf_read_generic_hit_io(struct ocf_request *rq, int error)
static void _ocf_read_generic_hit_io(struct ocf_request *req, int error)
{
	if (error)
		rq->error |= error;
		req->error |= error;

	if (rq->error)
		inc_fallback_pt_error_counter(rq->cache);
	if (req->error)
		inc_fallback_pt_error_counter(req->cache);

	/* Handle callback-caller race to let only one of the two complete the
	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
	 */
	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
		OCF_DEBUG_RQ(rq, "HIT completion");
	if (env_atomic_dec_return(&req->req_remaining) == 0) {
		OCF_DEBUG_RQ(req, "HIT completion");

		if (rq->error) {
			env_atomic_inc(&rq->cache->core_obj[rq->core_id].
		if (req->error) {
			env_atomic_inc(&req->cache->core_obj[req->core_id].
					counters->cache_errors.read);
			ocf_engine_push_rq_front_pt(rq);
			ocf_engine_push_req_front_pt(req);
		} else {

			ocf_rq_unlock(rq);
			ocf_req_unlock(req);

			/* Complete request */
			rq->complete(rq, rq->error);
			req->complete(req, req->error);

			/* Free the request at the last point
			 * of the completion path
			 */
			ocf_rq_put(rq);
			ocf_req_put(req);
		}
	}
}

static void _ocf_read_generic_miss_io(struct ocf_request *rq, int error)
static void _ocf_read_generic_miss_io(struct ocf_request *req, int error)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	if (error)
		rq->error = error;
		req->error = error;

	/* Handle callback-caller race to let only one of the two complete the
	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
	 */
	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
		OCF_DEBUG_RQ(rq, "MISS completion");
	if (env_atomic_dec_return(&req->req_remaining) == 0) {
		OCF_DEBUG_RQ(req, "MISS completion");

		if (rq->error) {
		if (req->error) {
			/*
			 * --- Do not submit this request to write-back-thread.
			 * Stop it here ---
			 */
			rq->complete(rq, rq->error);
			req->complete(req, req->error);

			rq->info.core_error = 1;
			env_atomic_inc(&cache->core_obj[rq->core_id].
			req->info.core_error = 1;
			env_atomic_inc(&cache->core_obj[req->core_id].
					counters->core_errors.read);

			ctx_data_free(cache->owner, rq->cp_data);
			rq->cp_data = NULL;
			ctx_data_free(cache->owner, req->cp_data);
			req->cp_data = NULL;

			/* Invalidate metadata */
			ocf_engine_invalidate(rq);
			ocf_engine_invalidate(req);

			return;
		}
@ -93,77 +93,77 @@ static void _ocf_read_generic_miss_io(struct ocf_request *rq, int error)
		/* Copy pages to copy vec, since this is the one needed
		 * by the above layer
		 */
		ctx_data_cpy(cache->owner, rq->cp_data, rq->data, 0, 0,
				rq->byte_length);
		ctx_data_cpy(cache->owner, req->cp_data, req->data, 0, 0,
				req->byte_length);

		/* Complete request */
		rq->complete(rq, rq->error);
		req->complete(req, req->error);

		ocf_engine_backfill(rq);
		ocf_engine_backfill(req);
	}
}

static inline void _ocf_read_generic_submit_hit(struct ocf_request *rq)
static inline void _ocf_read_generic_submit_hit(struct ocf_request *req)
{
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
	env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));

	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
			ocf_engine_io_count(rq), _ocf_read_generic_hit_io);
	ocf_submit_cache_reqs(req->cache, req->map, req, OCF_READ,
			ocf_engine_io_count(req), _ocf_read_generic_hit_io);
}

static inline void _ocf_read_generic_submit_miss(struct ocf_request *rq)
static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;
	int ret;

	env_atomic_set(&rq->req_remaining, 1);
	env_atomic_set(&req->req_remaining, 1);

	rq->cp_data = ctx_data_alloc(cache->owner,
			BYTES_TO_PAGES(rq->byte_length));
	if (!rq->cp_data)
	req->cp_data = ctx_data_alloc(cache->owner,
			BYTES_TO_PAGES(req->byte_length));
	if (!req->cp_data)
		goto err_alloc;

	ret = ctx_data_mlock(cache->owner, rq->cp_data);
	ret = ctx_data_mlock(cache->owner, req->cp_data);
	if (ret)
		goto err_alloc;

	/* Submit read request to core device. */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
	ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
			_ocf_read_generic_miss_io);

	return;

err_alloc:
	_ocf_read_generic_miss_io(rq, -ENOMEM);
	_ocf_read_generic_miss_io(req, -ENOMEM);
}

static int _ocf_read_generic_do(struct ocf_request *rq)
static int _ocf_read_generic_do(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	if (ocf_engine_is_miss(rq) && rq->map->rd_locked) {
	if (ocf_engine_is_miss(req) && req->map->rd_locked) {
		/* Miss can be handled only on write locks.
		 * Need to switch to PT
		 */
		OCF_DEBUG_RQ(rq, "Switching to PT");
		ocf_read_pt_do(rq);
		OCF_DEBUG_RQ(req, "Switching to PT");
		ocf_read_pt_do(req);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	if (ocf_engine_is_miss(rq)) {
		if (rq->info.dirty_any) {
	if (ocf_engine_is_miss(req)) {
		if (req->info.dirty_any) {
			OCF_METADATA_LOCK_RD();

			/* Request is dirty need to clean request */
			ocf_engine_clean(rq);
			ocf_engine_clean(req);

			OCF_METADATA_UNLOCK_RD();

			/* We need to clean request before processing, return */
			ocf_rq_put(rq);
			ocf_req_put(req);

			return 0;
		}
@ -171,38 +171,38 @@ static int _ocf_read_generic_do(struct ocf_request *rq)
		OCF_METADATA_LOCK_RD();

		/* Set valid status bits map */
		ocf_set_valid_map_info(rq);
		ocf_set_valid_map_info(req);

		OCF_METADATA_UNLOCK_RD();
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");
	if (req->info.re_part) {
		OCF_DEBUG_RQ(req, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned into wrong
		 * partition. Need to move it to new one
		 */
		ocf_part_move(rq);
		ocf_part_move(req);

		OCF_METADATA_UNLOCK_WR();
	}

	OCF_DEBUG_RQ(rq, "Submit");
	OCF_DEBUG_RQ(req, "Submit");

	/* Submit IO */
	if (ocf_engine_is_hit(rq))
		_ocf_read_generic_submit_hit(rq);
	if (ocf_engine_is_hit(req))
		_ocf_read_generic_submit_hit(req);
	else
		_ocf_read_generic_submit_miss(rq);
		_ocf_read_generic_submit_miss(req);

	/* Updata statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);
	ocf_engine_update_request_stats(req);
	ocf_engine_update_block_stats(req);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}
@ -212,46 +212,46 @@ static const struct ocf_io_if _io_if_read_generic_resume = {
	.write = _ocf_read_generic_do,
};

int ocf_read_generic(struct ocf_request *rq)
int ocf_read_generic(struct ocf_request *req)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;

	ocf_io_start(rq->io);
	ocf_io_start(req->io);

	if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
		/* There are conditions to bypass IO */
		ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->read(req);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);
	ocf_req_get(req);

	/* Set resume call backs */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_read_generic_resume;
	req->resume = ocf_engine_on_resume;
	req->io_if = &_io_if_read_generic_resume;

	/*- Metadata RD access -----------------------------------------------*/

	OCF_METADATA_LOCK_RD();

	/* Traverse request to cache if there is hit */
	ocf_engine_traverse(rq);
	ocf_engine_traverse(req);

	mapped = ocf_engine_is_mapped(rq);
	mapped = ocf_engine_is_mapped(req);
	if (mapped) {
		/* Request is fully mapped, no need to call eviction */
		if (ocf_engine_is_hit(rq)) {
		if (ocf_engine_is_hit(req)) {
			/* There is a hit, lock request for READ access */
			lock = ocf_rq_trylock_rd(rq);
			lock = ocf_req_trylock_rd(req);
		} else {
			/* All cache line mapped, but some sectors are not valid
			 * and cache insert will be performed - lock for
			 * WRITE is required
			 */
			lock = ocf_rq_trylock_wr(rq);
			lock = ocf_req_trylock_wr(req);
		}
	}

@ -269,19 +269,19 @@ int ocf_read_generic(struct ocf_request *rq)
		 * again. If there are misses need to call eviction. This
		 * process is called 'mapping'.
		 */
		ocf_engine_map(rq);
		ocf_engine_map(req);

		if (!rq->info.eviction_error) {
			if (ocf_engine_is_hit(rq)) {
		if (!req->info.eviction_error) {
			if (ocf_engine_is_hit(req)) {
				/* After mapping turns out there is hit,
				 * so lock OCF request for read access
				 */
				lock = ocf_rq_trylock_rd(rq);
				lock = ocf_req_trylock_rd(req);
			} else {
				/* Miss, new cache lines were mapped,
				 * need to lock OCF request for write access
				 */
				lock = ocf_rq_trylock_wr(rq);
				lock = ocf_req_trylock_wr(req);
			}
		}
		OCF_METADATA_UNLOCK_WR();
@ -289,28 +289,28 @@ int ocf_read_generic(struct ocf_request *rq)
		/*- END Metadata WR access -----------------------------------*/
	}

	if (!rq->info.eviction_error) {
	if (!req->info.eviction_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
				OCF_DEBUG_RQ(req, "NO LOCK");
			} else {
				/* Lock was acquired can perform IO */
				_ocf_read_generic_do(rq);
				_ocf_read_generic_do(req);
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
			OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
			req->complete(req, lock);
			ocf_req_put(req);
		}
	} else {
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
		ocf_req_clear(req);
		ocf_get_io_if(ocf_cache_mode_pt)->read(req);
	}


	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
	ocf_req_put(req);

	return 0;
}

@ -6,6 +6,6 @@
#ifndef ENGINE_RD_H_
#define ENGINE_RD_H_

int ocf_read_generic(struct ocf_request *rq);
int ocf_read_generic(struct ocf_request *req);

#endif /* ENGINE_RD_H_ */
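
ocf_read_generic() above picks its cache-line lock from the traversal result: a full hit only reads from the cache and needs the read lock, while any line that will be inserted (partial hit, or a miss after mapping) needs the write lock. A small decision sketch of that rule, with all names as simplified stand-ins:

/* Sketch of the lock-selection rule in ocf_read_generic(): RD lock for
 * a pure hit, WR lock whenever cache lines will be written to. */
#include <stdio.h>

enum lock_kind { LOCK_RD, LOCK_WR };

struct req_stub { int hit_no; int core_line_count; };

static enum lock_kind pick_lock(const struct req_stub *req)
{
	/* full hit: data is only read from the cache, RD lock suffices */
	if (req->hit_no == req->core_line_count)
		return LOCK_RD;

	/* anything else inserts data into mapped lines, so WR lock */
	return LOCK_WR;
}

int main(void)
{
	struct req_stub full_hit = { .hit_no = 4, .core_line_count = 4 };
	struct req_stub partial = { .hit_no = 2, .core_line_count = 4 };

	printf("full hit -> %s\n",
			pick_lock(&full_hit) == LOCK_RD ? "RD" : "WR");
	printf("partial  -> %s\n",
			pick_lock(&partial) == LOCK_RD ? "RD" : "WR");
	return 0;
}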
|
||||
|
@ -7,82 +7,82 @@
#include "engine_wa.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"

#define OCF_ENGINE_DEBUG_IO_NAME "wa"
#include "engine_debug.h"

static void _ocf_read_wa_io(struct ocf_request *rq, int error)
static void _ocf_read_wa_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;

if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
if (req->error) {
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.write);
}

/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);

OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");

/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}

int ocf_write_wa(struct ocf_request *rq)
int ocf_write_wa(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

ocf_io_start(rq->io);
ocf_io_start(req->io);

/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/

/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);

OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/

if (ocf_engine_is_hit(rq)) {
ocf_rq_clear(rq);
if (ocf_engine_is_hit(req)) {
ocf_req_clear(req);

/* There is HIT, do WT */
ocf_get_io_if(ocf_cache_mode_wt)->write(rq);
ocf_get_io_if(ocf_cache_mode_wt)->write(req);

} else if (ocf_engine_mapped_count(rq)) {
ocf_rq_clear(rq);
} else if (ocf_engine_mapped_count(req)) {
ocf_req_clear(req);

/* Partial MISS, do WI */
ocf_get_io_if(ocf_cache_mode_wi)->write(rq);
ocf_get_io_if(ocf_cache_mode_wi)->write(req);
} else {

/* There is no mapped cache line, write directly into core */

OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");

/* Submit write IO to the core */
env_atomic_set(&rq->req_remaining, 1);
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
env_atomic_set(&req->req_remaining, 1);
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_read_wa_io);

/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
ocf_engine_update_block_stats(req);
env_atomic64_inc(&cache->core_obj[req->core_id].counters->
part_counters[req->part_id].write_reqs.pass_through);
}

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
|
@ -6,6 +6,6 @@
#ifndef ENGINE_WA_H_
#define ENGINE_WA_H_

int ocf_write_wa(struct ocf_request *rq);
int ocf_write_wa(struct ocf_request *req);

#endif /* ENGINE_WA_H_ */
|
@ -9,7 +9,7 @@
#include "engine_common.h"
#include "engine_wb.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
@ -23,59 +23,59 @@ static const struct ocf_io_if _io_if_wb_resume = {
.write = ocf_write_wb_do,
};

static void _ocf_write_wb_update_bits(struct ocf_request *rq)
static void _ocf_write_wb_update_bits(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

if (ocf_engine_is_miss(rq)) {
if (ocf_engine_is_miss(req)) {
OCF_METADATA_LOCK_RD();
/* Update valid status bits */
ocf_set_valid_map_info(rq);
ocf_set_valid_map_info(req);

OCF_METADATA_UNLOCK_RD();
}

if (!ocf_engine_is_dirty_all(rq)) {
if (!ocf_engine_is_dirty_all(req)) {
OCF_METADATA_LOCK_WR();

/* set dirty bits, and mark if metadata flushing is required */
ocf_set_dirty_map_info(rq);
ocf_set_dirty_map_info(req);

OCF_METADATA_UNLOCK_WR();
}
}

static void _ocf_write_wb_io_flush_metadata(struct ocf_request *rq, int error)
static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
{
if (error)
rq->error = error;
req->error = error;

if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");

ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);

rq->complete(rq, rq->error);
req->complete(req, req->error);

ocf_rq_put(rq);
ocf_req_put(req);
}

static int ocf_write_wb_do_flush_metadata(struct ocf_request *rq)
static int ocf_write_wb_do_flush_metadata(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */

if (rq->info.flush_metadata) {
OCF_DEBUG_RQ(rq, "Flush metadata");
ocf_metadata_flush_do_asynch(cache, rq,
if (req->info.flush_metadata) {
OCF_DEBUG_RQ(req, "Flush metadata");
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wb_io_flush_metadata);
}

_ocf_write_wb_io_flush_metadata(rq, 0);
_ocf_write_wb_io_flush_metadata(req, 0);

return 0;
}
@ -85,39 +85,39 @@ static const struct ocf_io_if _io_if_wb_flush_metadata = {
.write = ocf_write_wb_do_flush_metadata,
};

static void _ocf_write_wb_io(struct ocf_request *rq, int error)
static void _ocf_write_wb_io(struct ocf_request *req, int error)
{
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
rq->error |= error;
req->error |= error;
}

if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");

if (rq->error) {
ocf_engine_error(rq, true, "Failed to write data to cache");
if (req->error) {
ocf_engine_error(req, true, "Failed to write data to cache");

ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);

rq->complete(rq, rq->error);
req->complete(req, req->error);

ocf_rq_put(rq);
ocf_req_put(req);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wb_flush_metadata,
ocf_engine_push_req_front_if(req, &_io_if_wb_flush_metadata,
true);
}
}


static inline void _ocf_write_wb_submit(struct ocf_request *rq)
static inline void _ocf_write_wb_submit(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));

/*
* 1. Submit data
@ -125,73 +125,73 @@ static inline void _ocf_write_wb_submit(struct ocf_request *rq)
* 3. Then continue processing request (flush metadata)
*/

if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");

OCF_METADATA_LOCK_WR();

/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);

OCF_METADATA_UNLOCK_WR();
}

OCF_DEBUG_RQ(rq, "Submit Data");
OCF_DEBUG_RQ(req, "Submit Data");

/* Data IO */
ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
ocf_engine_io_count(rq), _ocf_write_wb_io);
ocf_submit_cache_reqs(cache, req->map, req, OCF_WRITE,
ocf_engine_io_count(req), _ocf_write_wb_io);
}

int ocf_write_wb_do(struct ocf_request *rq)
int ocf_write_wb_do(struct ocf_request *req)
{
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

/* Updata status bits */
_ocf_write_wb_update_bits(rq);
_ocf_write_wb_update_bits(req);

/* Submit IO */
_ocf_write_wb_submit(rq);
_ocf_write_wb_submit(req);

/* Updata statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
ocf_engine_update_request_stats(req);
ocf_engine_update_block_stats(req);

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}

int ocf_write_wb(struct ocf_request *rq)
int ocf_write_wb(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

ocf_io_start(rq->io);
ocf_io_start(req->io);

/* Not sure if we need this. */
ocf_rq_get(rq);
ocf_req_get(req);

/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_wb_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_wb_resume;

/* TODO: Handle fits into dirty */

OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);

mapped = ocf_engine_is_mapped(rq);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* All cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}

OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@ -203,36 +203,36 @@ int ocf_write_wb(struct ocf_request *rq)
* again. If there are misses need to call eviction. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
ocf_engine_map(req);

if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
/* Lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}

OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}

if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
ocf_write_wb_do(rq);
ocf_write_wb_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
ocf_req_clear(req);
ocf_get_io_if(ocf_cache_mode_pt)->write(req);
}

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
|
@ -5,8 +5,8 @@
#ifndef ENGINE_WB_H_
#define ENGINE_WB_H_

int ocf_write_wb(struct ocf_request *rq);
int ocf_write_wb(struct ocf_request *req);

int ocf_write_wb_do(struct ocf_request *rq);
int ocf_write_wb_do(struct ocf_request *req);

#endif /* ENGINE_WI_H_ */
|
@ -8,7 +8,7 @@
#include "engine_wi.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
@ -16,119 +16,119 @@
#define OCF_ENGINE_DEBUG_IO_NAME "wi"
#include "engine_debug.h"

static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq);
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req);

static const struct ocf_io_if _io_if_wi_flush_metadata = {
.read = ocf_write_wi_update_and_flush_metadata,
.write = ocf_write_wi_update_and_flush_metadata,
};

static void _ocf_write_wi_io_flush_metadata(struct ocf_request *rq, int error)
static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
{
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
rq->error |= error;
req->error |= error;
}

if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");

ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);

rq->complete(rq, rq->error);
req->complete(req, req->error);

ocf_rq_put(rq);
ocf_req_put(req);
}

static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq)
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */

if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* There are mapped cache line, need to remove them */

OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
ocf_purge_map_info(req);

OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/

if (rq->info.flush_metadata) {
if (req->info.flush_metadata) {
/* Request was dirty and need to flush metadata */
ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wi_io_flush_metadata);
}

}

_ocf_write_wi_io_flush_metadata(rq, 0);
_ocf_write_wi_io_flush_metadata(req, 0);

return 0;
}

static void _ocf_write_wi_core_io(struct ocf_request *rq, int error)
static void _ocf_write_wi_core_io(struct ocf_request *req, int error)
{
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = error;
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.write);
}

if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");

if (rq->error) {
ocf_rq_unlock_wr(rq);
if (req->error) {
ocf_req_unlock_wr(req);

rq->complete(rq, rq->error);
req->complete(req, req->error);

ocf_rq_put(rq);
ocf_req_put(req);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wi_flush_metadata,
ocf_engine_push_req_front_if(req, &_io_if_wi_flush_metadata,
true);
}
}

static int _ocf_write_wi_do(struct ocf_request *rq)
static int _ocf_write_wi_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */

OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");

/* Submit write IO to the core */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_write_wi_core_io);

/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
ocf_engine_update_block_stats(req);
env_atomic64_inc(&cache->core_obj[req->core_id].counters->
part_counters[req->part_id].write_reqs.pass_through);

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}

static void _ocf_write_wi_on_resume(struct ocf_request *rq)
static void _ocf_write_wi_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(rq, "On resume");
ocf_engine_push_rq_front(rq, true);
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true);
}

static const struct ocf_io_if _io_if_wi_resume = {
@ -136,30 +136,30 @@ static const struct ocf_io_if _io_if_wi_resume = {
.write = _ocf_write_wi_do,
};

int ocf_write_wi(struct ocf_request *rq)
int ocf_write_wi(struct ocf_request *req)
{
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);

ocf_io_start(rq->io);
ocf_io_start(req->io);

/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

/* Set resume call backs */
rq->resume = _ocf_write_wi_on_resume;
rq->io_if = &_io_if_wi_resume;
req->resume = _ocf_write_wi_on_resume;
req->io_if = &_io_if_wi_resume;

OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);

if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
} else {
lock = OCF_LOCK_ACQUIRED;
}
@ -168,19 +168,19 @@ int ocf_write_wi(struct ocf_request *rq)

if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {
_ocf_write_wi_do(rq);
_ocf_write_wi_do(req);
} else {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
|
@ -6,6 +6,6 @@
#ifndef ENGINE_WI_H_
#define ENGINE_WI_H_

int ocf_write_wi(struct ocf_request *rq);
int ocf_write_wi(struct ocf_request *req);

#endif /* ENGINE_WI_H_ */
|
@ -8,7 +8,7 @@
#include "engine_wt.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
@ -18,141 +18,141 @@
#define OCF_ENGINE_DEBUG_IO_NAME "wt"
#include "engine_debug.h"

static void _ocf_write_wt_io(struct ocf_request *rq)
static void _ocf_write_wt_io(struct ocf_request *req)
{
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");

if (rq->error) {
if (req->error) {
/* An error occured */

/* Complete request */
rq->complete(rq, rq->info.core_error ? rq->error : 0);
req->complete(req, req->info.core_error ? req->error : 0);

ocf_engine_invalidate(rq);
ocf_engine_invalidate(req);
} else {
/* Unlock reqest from WRITE access */
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);

/* Complete request */
rq->complete(rq, rq->info.core_error ? rq->error : 0);
req->complete(req, req->info.core_error ? req->error : 0);

/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}
}

static void _ocf_write_wt_cache_io(struct ocf_request *rq, int error)
static void _ocf_write_wt_cache_io(struct ocf_request *req, int error)
{
if (error) {
rq->error = rq->error ?: error;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = req->error ?: error;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);

if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
if (req->error)
inc_fallback_pt_error_counter(req->cache);
}

_ocf_write_wt_io(rq);
_ocf_write_wt_io(req);
}

static void _ocf_write_wt_core_io(struct ocf_request *rq, int error)
static void _ocf_write_wt_core_io(struct ocf_request *req, int error)
{
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = error;
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.write);
}

_ocf_write_wt_io(rq);
_ocf_write_wt_io(req);
}

static inline void _ocf_write_wt_submit(struct ocf_request *rq)
static inline void _ocf_write_wt_submit(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

/* Submit IOs */
OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");

/* Calculate how many IOs need to be submited */
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); /* Cache IO */
env_atomic_inc(&rq->req_remaining); /* Core device IO */
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req)); /* Cache IO */
env_atomic_inc(&req->req_remaining); /* Core device IO */

if (rq->info.flush_metadata) {
if (req->info.flush_metadata) {
/* Metadata flush IO */

ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wt_cache_io);
}

/* To cache */
ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
ocf_engine_io_count(rq), _ocf_write_wt_cache_io);
ocf_submit_cache_reqs(cache, req->map, req, OCF_WRITE,
ocf_engine_io_count(req), _ocf_write_wt_cache_io);

/* To core */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_write_wt_core_io);
}

static void _ocf_write_wt_update_bits(struct ocf_request *rq)
static void _ocf_write_wt_update_bits(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

if (ocf_engine_is_miss(rq)) {
if (ocf_engine_is_miss(req)) {
OCF_METADATA_LOCK_RD();

/* Update valid status bits */
ocf_set_valid_map_info(rq);
ocf_set_valid_map_info(req);

OCF_METADATA_UNLOCK_RD();
}

if (rq->info.dirty_any) {
if (req->info.dirty_any) {
OCF_METADATA_LOCK_WR();

/* Writes goes to SDD and HDD, need to update status bits from
* dirty to clean
*/

ocf_set_clean_map_info(rq);
ocf_set_clean_map_info(req);

OCF_METADATA_UNLOCK_WR();
}

if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");

OCF_METADATA_LOCK_WR();

/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);

OCF_METADATA_UNLOCK_WR();
}
}

static int _ocf_write_wt_do(struct ocf_request *rq)
static int _ocf_write_wt_do(struct ocf_request *req)
{
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

/* Update status bits */
_ocf_write_wt_update_bits(rq);
_ocf_write_wt_update_bits(req);

/* Submit IO */
_ocf_write_wt_submit(rq);
_ocf_write_wt_submit(req);

/* Updata statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
ocf_engine_update_request_stats(req);
ocf_engine_update_block_stats(req);

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
@ -162,30 +162,30 @@ static const struct ocf_io_if _io_if_wt_resume = {
.write = _ocf_write_wt_do,
};

int ocf_write_wt(struct ocf_request *rq)
int ocf_write_wt(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

ocf_io_start(rq->io);
ocf_io_start(req->io);

/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_wt_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_wt_resume;

OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);

mapped = ocf_engine_is_mapped(rq);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* All cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}

OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@ -197,36 +197,36 @@ int ocf_write_wt(struct ocf_request *rq)
* again. If there are misses need to call eviction. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
ocf_engine_map(req);

if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
/* Lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}

OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}

if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
_ocf_write_wt_do(rq);
_ocf_write_wt_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d\n", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d\n", lock);
req->complete(req, lock);
ocf_req_put(req);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
ocf_req_clear(req);
ocf_get_io_if(ocf_cache_mode_pt)->write(req);
}

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
|
@ -6,6 +6,6 @@
#ifndef ENGINE_WT_H_
#define ENGINE_WT_H_

int ocf_write_wt(struct ocf_request *rq);
int ocf_write_wt(struct ocf_request *req);

#endif /* ENGINE_WT_H_ */
|
@ -8,7 +8,7 @@
#include "engine_zero.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
@ -16,28 +16,28 @@
#define OCF_ENGINE_DEBUG_IO_NAME "zero"
#include "engine_debug.h"

static int ocf_zero_purge(struct ocf_request *rq)
static int ocf_zero_purge(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

if (rq->error) {
ocf_engine_error(rq, true, "Failed to discard data on cache");
if (req->error) {
ocf_engine_error(req, true, "Failed to discard data on cache");
} else {
/* There are mapped cache line, need to remove them */

OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
ocf_purge_map_info(req);

OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}

ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);

rq->complete(rq, rq->error);
req->complete(req, req->error);

ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
@ -47,28 +47,28 @@ static const struct ocf_io_if _io_if_zero_purge = {
.write = ocf_zero_purge,
};

static void _ocf_zero_io_flush_metadata(struct ocf_request *rq, int error)
static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
{
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
rq->error = error;
req->error = error;
}

if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;

ocf_engine_push_rq_front_if(rq, &_io_if_zero_purge, true);
ocf_engine_push_req_front_if(req, &_io_if_zero_purge, true);
}

static inline void ocf_zero_map_info(struct ocf_request *rq)
static inline void ocf_zero_map_info(struct ocf_request *req)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_map_info *map = rq->map;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
struct ocf_map_info *map = req->map;
struct ocf_cache *cache = req->cache;
uint32_t count = req->core_line_count;

/* Purge range on the basis of map info
*
@ -86,43 +86,43 @@ static inline void ocf_zero_map_info(struct ocf_request *rq)

if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
}

if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1) %
end_bit = BYTES_TO_SECTORS(req->byte_position +
req->byte_length - 1) %
ocf_line_sectors(cache);
}

ocf_metadata_flush_mark(cache, rq, map_idx, INVALID,
ocf_metadata_flush_mark(cache, req, map_idx, INVALID,
start_bit, end_bit);
}
}

static int _ocf_zero_do(struct ocf_request *rq)
static int _ocf_zero_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;

/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);

/* Mark cache lines for zeroing/discarding */
ocf_zero_map_info(rq);
ocf_zero_map_info(req);

/* Discard marked cache lines */
env_atomic_set(&rq->req_remaining, 1);
if (rq->info.flush_metadata) {
env_atomic_set(&req->req_remaining, 1);
if (req->info.flush_metadata) {
/* Request was dirty and need to flush metadata */
ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_zero_io_flush_metadata);
}
_ocf_zero_io_flush_metadata(rq, 0);
_ocf_zero_io_flush_metadata(req, 0);

/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);

return 0;
}
@ -137,30 +137,30 @@ static const struct ocf_io_if _io_if_ocf_zero_do = {
* - Caller has to have metadata write lock
* - Core line has to be mapped
*/
void ocf_engine_zero_line(struct ocf_request *rq)
void ocf_engine_zero_line(struct ocf_request *req)
{
int lock = OCF_LOCK_NOT_ACQUIRED;

ENV_BUG_ON(rq->core_line_count != 1);
ENV_BUG_ON(req->core_line_count != 1);

/* Traverse to check if request is mapped */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);

ENV_BUG_ON(!ocf_engine_is_mapped(rq));
ENV_BUG_ON(!ocf_engine_is_mapped(req));

rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_ocf_zero_do;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_ocf_zero_do;

/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);

if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
ocf_engine_push_rq_front_if(rq, &_io_if_ocf_zero_do, true);
ocf_engine_push_req_front_if(req, &_io_if_ocf_zero_do, true);
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
}

|
@ -6,6 +6,6 @@
#ifndef ENGINE_ZERO_H_
#define ENGINE_ZERO_H_

void ocf_engine_zero_line(struct ocf_request *rq);
void ocf_engine_zero_line(struct ocf_request *req);

#endif /* ENGINE_ZERO_H_ */
|
@ -11,7 +11,7 @@
#include "../concurrency/ocf_concurrency.h"
#include "../mngt/ocf_mngt_common.h"
#include "../engine/engine_zero.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"

#define OCF_EVICTION_MAX_SCAN 1024

@ -345,22 +345,22 @@ static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
static void evp_lru_zero_line(struct ocf_cache *cache, uint32_t io_queue,
ocf_cache_line_t line)
{
struct ocf_request *rq;
struct ocf_request *req;
ocf_core_id_t id;
uint64_t addr, core_line;

ocf_metadata_get_core_info(cache, line, &id, &core_line);
addr = core_line * ocf_line_size(cache);

rq = ocf_rq_new(cache, id, addr, ocf_line_size(cache), OCF_WRITE);
if (rq) {
rq->info.internal = true;
rq->complete = evp_lru_zero_line_complete;
rq->io_queue = io_queue;
req = ocf_req_new(cache, id, addr, ocf_line_size(cache), OCF_WRITE);
if (req) {
req->info.internal = true;
req->complete = evp_lru_zero_line_complete;
req->io_queue = io_queue;

env_atomic_inc(&cache->pending_eviction_clines);

ocf_engine_zero_line(rq);
ocf_engine_zero_line(req);
}
}

|
@ -139,17 +139,17 @@ int ocf_metadata_load_recovery(struct ocf_cache *cache)
return cache->metadata.iface.load_recovery(cache);
}

void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
cache->metadata.iface.flush_mark(cache, rq, map_idx, to_state,
cache->metadata.iface.flush_mark(cache, req, map_idx, to_state,
start, stop);
}

void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_req_end_t complete)
struct ocf_request *req, ocf_req_end_t complete)
{
cache->metadata.iface.flush_do_asynch(cache, rq, complete);
cache->metadata.iface.flush_do_asynch(cache, req, complete);
}

static inline int ocf_metadata_check_properties(void)
|
@ -244,7 +244,7 @@ void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
* @param[in] cache - Cache instance
* @param[in] line - cache line which to be flushed
*/
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);

/**
@ -257,7 +257,7 @@ void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
* @param context - context that will be passed into callback
*/
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_req_end_t complete);
struct ocf_request *req, ocf_req_end_t complete);

/**
* @brief Load metadata
|
@ -1207,7 +1207,7 @@ static void ocf_metadata_hash_flush(struct ocf_cache *cache,
* Flush specified cache line
*/
static void ocf_metadata_hash_flush_mark(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t map_idx, int to_state,
struct ocf_request *req, uint32_t map_idx, int to_state,
uint8_t start, uint8_t stop)
{
struct ocf_metadata_hash_ctrl *ctrl = NULL;
@ -1224,14 +1224,14 @@ static void ocf_metadata_hash_flush_mark(struct ocf_cache *cache,
/* Collision table to get mapping cache line to HDD sector*/
ocf_metadata_raw_flush_mark(cache,
&(ctrl->raw_desc[metadata_segment_collision]),
rq, map_idx, to_state, start, stop);
req, map_idx, to_state, start, stop);
}

/*
* Flush specified cache lines asynchronously
*/
static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_req_end_t complete)
struct ocf_request *req, ocf_req_end_t complete)
{
int result = 0;
struct ocf_metadata_hash_ctrl *ctrl = NULL;
@ -1245,9 +1245,9 @@ static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache,
* line persistent in case of recovery
*/

env_atomic_inc(&rq->req_remaining); /* Core device IO */
env_atomic_inc(&req->req_remaining); /* Core device IO */

result |= ocf_metadata_raw_flush_do_asynch(cache, rq,
result |= ocf_metadata_raw_flush_do_asynch(cache, req,
&(ctrl->raw_desc[metadata_segment_collision]),
complete);

|
@ -455,7 +455,7 @@ out:
ctx_data_free(cache->owner, data);
ocf_io_put(io);

if (env_atomic_dec_return(&mio->rq_remaining))
if (env_atomic_dec_return(&mio->req_remaining))
return;

env_completion_complete(&mio->completion);
@ -505,7 +505,7 @@ static int metadata_submit_io(
goto free_data;

/* Submit IO */
env_atomic_inc(&mio->rq_remaining);
env_atomic_inc(&mio->req_remaining);
ocf_dobj_submit_io(io);

return 0;
@ -542,7 +542,7 @@ static int metadata_io(struct metadata_io *mio)
return -EINVAL;
}

env_atomic_set(&mio->rq_remaining, 1);
env_atomic_set(&mio->req_remaining, 1);
env_completion_init(&mio->completion);

while (count) {
@ -559,7 +559,7 @@ static int metadata_io(struct metadata_io *mio)
OCF_COND_RESCHED(step, 128);
}

if (env_atomic_dec_return(&mio->rq_remaining) == 0)
if (env_atomic_dec_return(&mio->req_remaining) == 0)
env_completion_complete(&mio->completion);

/* Wait for all IO to be finished */
|
@ -79,7 +79,7 @@ struct metadata_io {
uint32_t page;
uint32_t count;
env_completion completion;
env_atomic rq_remaining;
env_atomic req_remaining;
ocf_metadata_io_event_t hndl_fn;
void *hndl_cntx;
};
|
@ -291,12 +291,12 @@ static int _raw_ram_flush_all(struct ocf_cache *cache,
* RAM RAM Implementation - Mark to Flush
*/
static void _raw_ram_flush_mark(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t map_idx, int to_state,
struct ocf_request *req, uint32_t map_idx, int to_state,
uint8_t start, uint8_t stop)
{
if (to_state == DIRTY || to_state == CLEAN) {
rq->map[map_idx].flush = true;
rq->info.flush_metadata = true;
req->map[map_idx].flush = true;
req->info.flush_metadata = true;
}
}

@ -305,7 +305,7 @@ static void _raw_ram_flush_mark(struct ocf_cache *cache,
******************************************************************************/
struct _raw_ram_flush_ctx {
struct ocf_metadata_raw *raw;
struct ocf_request *rq;
struct ocf_request *req;
ocf_req_end_t complete;
env_atomic flush_req_cnt;
int error;
@ -327,8 +327,8 @@ static void _raw_ram_flush_do_asynch_io_complete(struct ocf_cache *cache,
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");

/* Call metadata flush completed call back */
ctx->rq->error |= ctx->error;
ctx->complete(ctx->rq, ctx->error);
ctx->req->error |= ctx->error;
ctx->complete(ctx->req, ctx->error);

env_free(ctx);
}
@ -382,15 +382,15 @@ int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
return 0;
}

static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
uint32_t *pages_tab, struct ocf_metadata_raw *raw,
int *pages_to_flush) {
int i, j = 0;
int line_no = rq->core_line_count;
int line_no = req->core_line_count;
struct ocf_map_info *map;

for (i = 0; i < line_no; i++) {
map = &rq->map[i];
map = &req->map[i];
if (map->flush) {
pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
j++;
@ -401,13 +401,13 @@ static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
}

static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
struct ocf_request *req, struct ocf_metadata_raw *raw,
ocf_req_end_t complete)
{
int result = 0, i;
uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
uint32_t *pages_tab;
int line_no = rq->core_line_count;
int line_no = req->core_line_count;
int pages_to_flush;
uint32_t start_page = 0;
uint32_t count = 0;
@ -417,19 +417,19 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,

OCF_DEBUG_TRACE(cache);

if (!rq->info.flush_metadata) {
if (!req->info.flush_metadata) {
/* Nothing to flush call flush callback */
complete(rq, 0);
complete(req, 0);
return 0;
}

ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
complete(rq, -ENOMEM);
complete(req, -ENOMEM);
return -ENOMEM;
}

ctx->rq = rq;
ctx->req = req;
ctx->complete = complete;
ctx->raw = raw;
env_atomic_set(&ctx->flush_req_cnt, 1);
@ -440,7 +440,7 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
if (!pages_tab) {
env_free(ctx);
complete(rq, -ENOMEM);
complete(req, -ENOMEM);
return -ENOMEM;
}
}
@ -449,7 +449,7 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
* to prevent freeing of asynchronous context
*/

__raw_ram_flush_do_asynch_add_pages(rq, pages_tab, raw,
__raw_ram_flush_do_asynch_add_pages(req, pages_tab, raw,
&pages_to_flush);

env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
@ -479,7 +479,7 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,

env_atomic_inc(&ctx->flush_req_cnt);

result |= metadata_io_write_i_asynch(cache, rq->io_queue, ctx,
result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
raw->ssd_pages_offset + start_page, count,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete);
|
@ -130,11 +130,11 @@ struct raw_iface {
int (*flush_all)(struct ocf_cache *cache,
struct ocf_metadata_raw *raw);

void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start,
uint8_t stop);

int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *rq,
int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *req,
struct ocf_metadata_raw *raw,
ocf_req_end_t complete);
};
@ -308,17 +308,17 @@ static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache,


static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, struct ocf_request *rq,
struct ocf_metadata_raw *raw, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
raw->iface->flush_mark(cache, rq, map_idx, to_state, start, stop);
raw->iface->flush_mark(cache, req, map_idx, to_state, start, stop);
}

static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
struct ocf_request *req, struct ocf_metadata_raw *raw,
ocf_req_end_t complete)
{
return raw->iface->flush_do_asynch(cache, rq, raw, complete);
return raw->iface->flush_do_asynch(cache, req, raw, complete);
}

/*
|
@ -32,7 +32,7 @@
#endif

struct _raw_atomic_flush_ctx {
struct ocf_request *rq;
struct ocf_request *req;
ocf_req_end_t complete;
env_atomic flush_req_cnt;
};
@ -41,18 +41,18 @@ static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx,
int error)
{
if (error)
ctx->rq->error = error;
ctx->req->error = error;

if (env_atomic_dec_return(&ctx->flush_req_cnt))
return;

if (ctx->rq->error)
ocf_metadata_error(ctx->rq->cache);
if (ctx->req->error)
ocf_metadata_error(ctx->req->cache);

/* Call metadata flush completed call back */
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");

ctx->complete(ctx->rq, ctx->rq->error);
ctx->complete(ctx->req, ctx->req->error);

env_free(ctx);
}
@ -69,12 +69,12 @@ static void _raw_atomic_io_discard_end(struct ocf_io *io, int error)
static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
{
struct ocf_request *rq = context;
struct ocf_request *req = context;
struct ocf_io *io = ocf_new_cache_io(cache);

if (!io) {
rq->error = -ENOMEM;
return rq->error;
req->error = -ENOMEM;
return req->error;
}

OCF_DEBUG_PARAM(cache, "Page to flushing = %u, count of pages = %u",
@ -90,17 +90,17 @@ static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
else
ocf_dobj_submit_write_zeroes(io);

return rq->error;
return req->error;
}

void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
if (to_state == INVALID) {
rq->map[map_idx].flush = true;
rq->map[map_idx].start_flush = start;
rq->map[map_idx].stop_flush = stop;
rq->info.flush_metadata = true;
req->map[map_idx].flush = true;
req->map[map_idx].start_flush = start;
req->map[map_idx].stop_flush = stop;
req->info.flush_metadata = true;
}
}

@ -114,10 +114,10 @@ static inline void _raw_atomic_add_page(struct ocf_cache *cache,
}

static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
struct ocf_request *rq, int map_idx,
struct ocf_request *req, int map_idx,
struct _raw_atomic_flush_ctx *ctx)
{
struct ocf_map_info *map = &rq->map[map_idx];
struct ocf_map_info *map = &req->map[map_idx];
uint32_t len = 0;
uint64_t start_addr;
int result = 0;
@ -130,12 +130,12 @@ static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush);
len += SECTORS_TO_BYTES(1);

result = _raw_atomic_io_discard_do(cache, rq, start_addr, len, ctx);
result = _raw_atomic_io_discard_do(cache, req, start_addr, len, ctx);

return result;
}

int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete)
{
int result = 0, i;
@ -143,33 +143,33 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t *clines_tab;
int clines_to_flush = 0;
uint32_t len = 0;
int line_no = rq->core_line_count;
int line_no = req->core_line_count;
struct ocf_map_info *map;
uint64_t start_addr;
struct _raw_atomic_flush_ctx *ctx;

ENV_BUG_ON(!complete);

if (!rq->info.flush_metadata) {
if (!req->info.flush_metadata) {
/* Nothing to flush call flush callback */
complete(rq, 0);
complete(req, 0);
return 0;
}

ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
complete(rq, -ENOMEM);
complete(req, -ENOMEM);
return -ENOMEM;
}

ctx->rq = rq;
ctx->req = req;
ctx->complete = complete;
env_atomic_set(&ctx->flush_req_cnt, 1);

if (line_no == 1) {
map = &rq->map[0];
map = &req->map[0];
if (map->flush && map->status != LOOKUP_MISS) {
result = _raw_atomic_flush_do_asynch_sec(cache, rq,
result = _raw_atomic_flush_do_asynch_sec(cache, req,
0, ctx);
}
_raw_atomic_io_discard_cmpl(ctx, result);
@ -182,14 +182,14 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
clines_tab = env_zalloc(sizeof(*clines_tab) * line_no,
ENV_MEM_NOIO);
if (!clines_tab) {
complete(rq, -ENOMEM);
complete(req, -ENOMEM);
env_free(ctx);
return -ENOMEM;
}
}

for (i = 0; i < line_no; i++) {
map = &rq->map[i];
map = &req->map[i];

if (!map->flush || map->status == LOOKUP_MISS)
continue;
@ -197,7 +197,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
if (i == 0) {
/* First */
if (map->start_flush) {
_raw_atomic_flush_do_asynch_sec(cache, rq, i,
_raw_atomic_flush_do_asynch_sec(cache, req, i,
ctx);
} else {
_raw_atomic_add_page(cache, clines_tab,
@ -206,7 +206,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
} else if (i == (line_no - 1)) {
/* Last */
if (map->stop_flush != ocf_line_end_sector(cache)) {
_raw_atomic_flush_do_asynch_sec(cache, rq,
_raw_atomic_flush_do_asynch_sec(cache, req,
i, ctx);
} else {
_raw_atomic_add_page(cache, clines_tab,
@ -241,7 +241,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
len += ocf_line_size(cache);
}

result |= _raw_atomic_io_discard_do(cache, rq, start_addr,
result |= _raw_atomic_io_discard_do(cache, req, start_addr,
len, ctx);

if (result)
|
@ -6,10 +6,10 @@
#ifndef __METADATA_RAW_ATOMIC_H__
#define __METADATA_RAW_ATOMIC_H__

void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);

int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete);

#endif /* __METADATA_RAW_ATOMIC_H__ */
|
@ -428,7 +428,7 @@ int raw_dynamic_flush_all(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - Mark to Flush
*/
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
ENV_BUG();
@ -437,7 +437,7 @@ void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
/*
* RAM DYNAMIC Implementation - Do flushing asynchronously
*/
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete)
{
ENV_BUG();
|
@ -92,13 +92,13 @@ int raw_dynamic_flush_all(struct ocf_cache *cache,
/*
* RAW DYNAMIC - Mark specified entry to be flushed
*/
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);

/*
* DYNAMIC Implementation - Do Flush Asynchronously
*/
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete);


|
@ -57,7 +57,7 @@ int raw_volatile_flush_all(struct ocf_cache *cache,
|
||||
/*
|
||||
* RAM RAM Implementation - Mark to Flush
|
||||
*/
|
||||
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
|
||||
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
|
||||
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
|
||||
{
|
||||
}
|
||||
@ -66,9 +66,9 @@ void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
|
||||
* RAM RAM Implementation - Do Flush asynchronously
|
||||
*/
|
||||
int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
|
||||
struct ocf_request *rq, struct ocf_metadata_raw *raw,
|
||||
struct ocf_request *req, struct ocf_metadata_raw *raw,
|
||||
ocf_req_end_t complete)
|
||||
{
|
||||
complete(rq, 0);
|
||||
complete(req, 0);
|
||||
return 0;
|
||||
}
|
||||
|
@ -39,14 +39,14 @@ int raw_volatile_flush_all(struct ocf_cache *cache,
|
||||
/*
|
||||
* RAM RAW volatile Implementation - Mark to Flush
|
||||
*/
|
||||
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
|
||||
void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
|
||||
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
|
||||
|
||||
/*
|
||||
* RAM RAW volatile Implementation - Do Flush asynchronously
|
||||
*/
|
||||
int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
|
||||
struct ocf_request *rq, struct ocf_metadata_raw *raw,
|
||||
struct ocf_request *req, struct ocf_metadata_raw *raw,
|
||||
ocf_req_end_t complete);
|
||||
|
||||
#endif /* __METADATA_RAW_VOLATILE_H__ */
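The volatile variant illustrates the contract these interfaces appear to share: flush_do_asynch() invokes the completion callback exactly once, even when there is nothing to flush, so callers never need a synchronous special case. A sketch under that assumption:

    raw_volatile_flush_do_asynch(cache, req, raw, my_flush_end);
    /* my_flush_end(req, 0) has either run already (volatile: inline)
     * or will run when the backing IO completes (atomic: later). */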
@@ -195,7 +195,7 @@ struct ocf_metadata_iface {
* @param[in] cache - Cache instance
* @param[in] line - cache line which to be flushed
*/
-void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
+void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start,
uint8_t stop);

@@ -209,7 +209,7 @@ struct ocf_metadata_iface {
* @param context - context that will be passed into callback
*/
void (*flush_do_asynch)(struct ocf_cache *cache,
-struct ocf_request *rq, ocf_req_end_t complete);
+struct ocf_request *req, ocf_req_end_t complete);

/* TODO Provide documentation below */

@@ -142,7 +142,7 @@ uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
}
env_mutex_unlock(&syncher->lock);
if (ret == 0)
-ocf_engine_push_rq_front(&curr->fl_req, true);
+ocf_engine_push_req_front(&curr->fl_req, true);
env_cond_resched();
env_mutex_lock(&syncher->lock);
}

@@ -10,7 +10,7 @@
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
-#include "../utils/utils_rq.h"
+#include "../utils/utils_req.h"
#include "../utils/utils_device.h"
#include "../eviction/ops.h"
#include "../ocf_logger_priv.h"
@@ -437,12 +437,12 @@ int ocf_mngt_cache_visit_reverse(ocf_ctx_t ocf_ctx,

void ocf_mngt_wait_for_io_finish(ocf_cache_t cache)
{
-uint32_t rq_active = 0;
+uint32_t req_active = 0;

do {
-rq_active = ocf_rq_get_allocated(cache);
-if (rq_active)
+req_active = ocf_req_get_allocated(cache);
+if (req_active)
env_msleep(500);
-} while (rq_active);
+} while (req_active);
}

@@ -9,7 +9,7 @@
#include "ocf_io_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
-#include "utils/utils_rq.h"
+#include "utils/utils_req.h"
#include "utils/utils_part.h"
#include "utils/utils_device.h"
#include "ocf_request.h"
@@ -122,29 +122,29 @@ int ocf_core_set_uuid(ocf_core_t core, const struct ocf_data_obj_uuid *uuid)
return result;
}

-static inline void inc_dirty_rq_counter(struct ocf_core_io *core_io,
+static inline void inc_dirty_req_counter(struct ocf_core_io *core_io,
ocf_cache_t cache)
{
core_io->dirty = 1;
env_atomic_inc(&cache->pending_dirty_requests);
}

-static inline void dec_counter_if_rq_was_dirty(struct ocf_core_io *core_io,
+static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
ocf_cache_t cache)
{
-int pending_dirty_rq_count;
+int pending_dirty_req_count;

if (!core_io->dirty)
return;

-pending_dirty_rq_count =
+pending_dirty_req_count =
env_atomic_dec_return(&cache->pending_dirty_requests);

-ENV_BUG_ON(pending_dirty_rq_count < 0);
+ENV_BUG_ON(pending_dirty_req_count < 0);

core_io->dirty = 0;

-if (!pending_dirty_rq_count)
+if (!pending_dirty_req_count)
env_waitqueue_wake_up(&cache->pending_dirty_wq);
}
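These two helpers are exercised below in a check/increment/recheck sequence; the double read of the cache mode (see the inline comments in the following hunks) closes a race with flushing. The pattern, as used by ocf_submit_io_mode() and ocf_submit_io_fast():

    req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
    if (req_cache_mode == ocf_req_cache_mode_wb) {
            inc_dirty_req_counter(core_io, cache);
            /* Re-read the mode after raising the counter: a concurrent
             * flush either sees this pending dirty request, or has
             * already switched the mode and the counter is backed out. */
            req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
            if (req_cache_mode != ocf_req_cache_mode_wb)
                    dec_counter_if_req_was_dirty(core_io, cache);
    }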
@@ -332,7 +332,7 @@ static void ocf_req_complete(struct ocf_request *req, int error)
/* Complete IO */
ocf_io_end(req->io, error);

-dec_counter_if_rq_was_dirty(ocf_io_to_core_io(req->io), req->cache);
+dec_counter_if_req_was_dirty(ocf_io_to_core_io(req->io), req->cache);

/* Invalidate OCF IO, it is not valid after completion */
ocf_core_io_put(req->io);
@@ -398,22 +398,22 @@ int ocf_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);

if (req_cache_mode == ocf_req_cache_mode_wb) {
-inc_dirty_rq_counter(core_io, cache);
+inc_dirty_req_counter(core_io, cache);

//Double cache mode check prevents sending WB request
//while flushing is performed.
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode != ocf_req_cache_mode_wb)
-dec_counter_if_rq_was_dirty(core_io, cache);
+dec_counter_if_req_was_dirty(core_io, cache);
}

if (cache->conf_meta->valid_parts_no <= 1)
io->class = 0;

-core_io->req = ocf_rq_new(cache, ocf_core_get_id(core),
+core_io->req = ocf_req_new(cache, ocf_core_get_id(core),
io->addr, io->bytes, io->dir);
if (!core_io->req) {
-dec_counter_if_rq_was_dirty(core_io, cache);
+dec_counter_if_req_was_dirty(core_io, cache);
io->end(io, -ENOMEM);
return 0;
}
@@ -432,10 +432,10 @@ int ocf_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
ocf_core_update_stats(core, io);

ocf_core_io_get(io);
-ret = ocf_engine_hndl_rq(core_io->req, req_cache_mode);
+ret = ocf_engine_hndl_req(core_io->req, req_cache_mode);
if (ret) {
-dec_counter_if_rq_was_dirty(core_io, cache);
-ocf_rq_put(core_io->req);
+dec_counter_if_req_was_dirty(core_io, cache);
+ocf_req_put(core_io->req);
io->end(io, ret);
}

@@ -472,13 +472,13 @@ int ocf_submit_io_fast(struct ocf_io *io)

req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode == ocf_req_cache_mode_wb) {
-inc_dirty_rq_counter(core_io, cache);
+inc_dirty_req_counter(core_io, cache);

//Double cache mode check prevents sending WB request
//while flushing is performed.
req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
if (req_cache_mode != ocf_req_cache_mode_wb)
-dec_counter_if_rq_was_dirty(core_io, cache);
+dec_counter_if_req_was_dirty(core_io, cache);
}

switch (req_cache_mode) {
@@ -499,20 +499,20 @@ int ocf_submit_io_fast(struct ocf_io *io)
if (cache->conf_meta->valid_parts_no <= 1)
io->class = 0;

-core_io->req = ocf_rq_new_extended(cache, ocf_core_get_id(core),
+core_io->req = ocf_req_new_extended(cache, ocf_core_get_id(core),
io->addr, io->bytes, io->dir);
// We need additional pointer to req in case completion arrives before
// we leave this function and core_io is freed
req = core_io->req;

if (!req) {
-dec_counter_if_rq_was_dirty(core_io, cache);
+dec_counter_if_req_was_dirty(core_io, cache);
io->end(io, -ENOMEM);
return 0;
}
if (req->d2c) {
-dec_counter_if_rq_was_dirty(core_io, cache);
-ocf_rq_put(req);
+dec_counter_if_req_was_dirty(core_io, cache);
+ocf_req_put(req);
return -EIO;
}

@@ -525,16 +525,16 @@ int ocf_submit_io_fast(struct ocf_io *io)
ocf_core_update_stats(core, io);
ocf_core_io_get(io);

-fast = ocf_engine_hndl_fast_rq(req, req_cache_mode);
+fast = ocf_engine_hndl_fast_req(req, req_cache_mode);
if (fast != OCF_FAST_PATH_NO) {
ocf_seq_cutoff_update(core, req);
return 0;
}

-dec_counter_if_rq_was_dirty(core_io, cache);
+dec_counter_if_req_was_dirty(core_io, cache);

ocf_core_io_put(io);
-ocf_rq_put(req);
+ocf_req_put(req);
return -EIO;
}

@@ -563,7 +563,7 @@ int ocf_submit_flush(struct ocf_io *io)
return 0;
}

-core_io->req = ocf_rq_new(cache, ocf_core_get_id(core),
+core_io->req = ocf_req_new(cache, ocf_core_get_id(core),
io->addr, io->bytes, io->dir);
if (!core_io->req) {
ocf_io_end(io, -ENOMEM);
@@ -576,7 +576,7 @@ int ocf_submit_flush(struct ocf_io *io)
core_io->req->data = core_io->data;

ocf_core_io_get(io);
-ocf_engine_hndl_ops_rq(core_io->req);
+ocf_engine_hndl_ops_req(core_io->req);

return 0;
}
@@ -606,7 +606,7 @@ int ocf_submit_discard(struct ocf_io *io)
return 0;
}

-core_io->req = ocf_rq_new_discard(cache, ocf_core_get_id(core),
+core_io->req = ocf_req_new_discard(cache, ocf_core_get_id(core),
io->addr, io->bytes, OCF_WRITE);
if (!core_io->req) {
ocf_io_end(io, -ENOMEM);
@@ -619,7 +619,7 @@ int ocf_submit_discard(struct ocf_io *io)
core_io->req->data = core_io->data;

ocf_core_io_get(io);
-ocf_engine_hndl_discard_rq(core_io->req);
+ocf_engine_hndl_discard_req(core_io->req);

return 0;
}

@@ -27,7 +27,7 @@ struct ocf_ctx {
} core_pool;

struct {
-struct ocf_rq_allocator *rq;
+struct ocf_req_allocator *req;
env_allocator *core_io_allocator;
} resources;
};

@@ -101,7 +101,7 @@ void ocf_queue_run(ocf_queue_t q)

while (env_atomic_read(&q->io_no) > 0) {
/* Make sure a request is dequeued. */
-io_req = ocf_engine_pop_rq(cache, q);
+io_req = ocf_engine_pop_req(cache, q);

if (!io_req)
continue;

@@ -77,10 +77,10 @@ struct ocf_map_info {
/*!< This bit indicates if cache line need to be flushed */

uint8_t start_flush;
-/*!< If rq need flush, contain first sector of range to flush */
+/*!< If req need flush, contain first sector of range to flush */

uint8_t stop_flush;
-/*!< If rq need flush, contain last sector of range to flush */
+/*!< If req need flush, contain last sector of range to flush */
};

/**
@@ -126,7 +126,7 @@ struct ocf_request {
const struct ocf_io_if *io_if;
/*!< IO interface */

-void (*resume)(struct ocf_request *rq);
+void (*resume)(struct ocf_request *req);
/*!< OCF request resume callback */

ocf_core_id_t core_id;
@@ -199,6 +199,6 @@ struct ocf_request {
struct ocf_map_info __map[];
};

-typedef void (*ocf_req_end_t)(struct ocf_request *rq, int error);
+typedef void (*ocf_req_end_t)(struct ocf_request *req, int error);

#endif
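The renamed resume member and ocf_req_end_t typedef define how a blocked request re-enters the engine. A minimal resume hook, assuming the pattern the cleaner adopts later in this diff:

    static void my_resume(struct ocf_request *req)
    {
            /* re-queue the request at the front of its IO queue */
            ocf_engine_push_req_front(req, true);
    }

    /* installed before attempting to take cache line locks: */
    req->resume = my_resume;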
@@ -66,7 +66,7 @@ static void _set(struct ocf_stat *stat, uint64_t value, uint64_t denominator)
stat->percent = _percentage(value, denominator);
}

-static void _fill_rq(struct ocf_stats_requests *rq, struct ocf_stats_core *s)
+static void _fill_req(struct ocf_stats_requests *req, struct ocf_stats_core *s)
{
uint64_t serviced = s->read_reqs.total + s->write_reqs.total;
uint64_t total = serviced + s->read_reqs.pass_through +
@@ -76,26 +76,26 @@ static void _fill_rq(struct ocf_stats_requests *rq, struct ocf_stats_core *s)
/* Reads Section */
hit = s->read_reqs.total - (s->read_reqs.full_miss +
s->read_reqs.partial_miss);
-_set(&rq->rd_hits, hit, total);
-_set(&rq->rd_partial_misses, s->read_reqs.partial_miss, total);
-_set(&rq->rd_full_misses, s->read_reqs.full_miss, total);
-_set(&rq->rd_total, s->read_reqs.total, total);
+_set(&req->rd_hits, hit, total);
+_set(&req->rd_partial_misses, s->read_reqs.partial_miss, total);
+_set(&req->rd_full_misses, s->read_reqs.full_miss, total);
+_set(&req->rd_total, s->read_reqs.total, total);

/* Write Section */
hit = s->write_reqs.total - (s->write_reqs.full_miss +
s->write_reqs.partial_miss);
-_set(&rq->wr_hits, hit, total);
-_set(&rq->wr_partial_misses, s->write_reqs.partial_miss, total);
-_set(&rq->wr_full_misses, s->write_reqs.full_miss, total);
-_set(&rq->wr_total, s->write_reqs.total, total);
+_set(&req->wr_hits, hit, total);
+_set(&req->wr_partial_misses, s->write_reqs.partial_miss, total);
+_set(&req->wr_full_misses, s->write_reqs.full_miss, total);
+_set(&req->wr_total, s->write_reqs.total, total);

/* Pass-Through section */
-_set(&rq->rd_pt, s->read_reqs.pass_through, total);
-_set(&rq->wr_pt, s->write_reqs.pass_through, total);
+_set(&req->rd_pt, s->read_reqs.pass_through, total);
+_set(&req->wr_pt, s->write_reqs.pass_through, total);

/* Summary */
-_set(&rq->serviced, serviced, total);
-_set(&rq->total, total, total);
+_set(&req->serviced, serviced, total);
+_set(&req->total, total, total);
}
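A sketch of collecting the renamed request statistics through the public API (prototypes as declared in the stats header; as the if (req) / if (blocks) checks below suggest, an output pointer may be NULL to skip that group):

    struct ocf_stats_usage usage;
    struct ocf_stats_requests req_stats;    /* formerly the "rq" argument */
    struct ocf_stats_blocks blocks;
    struct ocf_stats_errors errors;

    int result = ocf_stats_collect_core(core, &usage, &req_stats,
                    &blocks, &errors);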

static void _fill_blocks(struct ocf_stats_blocks *blocks,
@@ -155,7 +155,7 @@ static void _fill_errors(struct ocf_stats_errors *errors,

int ocf_stats_collect_core(ocf_core_t core,
struct ocf_stats_usage *usage,
-struct ocf_stats_requests *rq,
+struct ocf_stats_requests *req,
struct ocf_stats_blocks *blocks,
struct ocf_stats_errors *errors)
{
@@ -176,7 +176,7 @@ int ocf_stats_collect_core(ocf_core_t core,
cache_occupancy = _get_cache_occupancy(cache);

_ocf_stats_zero(usage);
-_ocf_stats_zero(rq);
+_ocf_stats_zero(req);
_ocf_stats_zero(blocks);
_ocf_stats_zero(errors);

@@ -198,8 +198,8 @@ int ocf_stats_collect_core(ocf_core_t core,
_lines4k(s.cache_occupancy, cache_line_size));
}

-if (rq)
-_fill_rq(rq, &s);
+if (req)
+_fill_req(req, &s);

if (blocks)
_fill_blocks(blocks, &s);
@@ -257,7 +257,7 @@ static int _accumulate_stats(ocf_core_t core, void *cntx)

int ocf_stats_collect_cache(ocf_cache_t cache,
struct ocf_stats_usage *usage,
-struct ocf_stats_requests *rq,
+struct ocf_stats_requests *req,
struct ocf_stats_blocks *blocks,
struct ocf_stats_errors *errors)
{
@@ -275,7 +275,7 @@ int ocf_stats_collect_cache(ocf_cache_t cache,
cache_line_size = ocf_cache_get_line_size(cache);

_ocf_stats_zero(usage);
-_ocf_stats_zero(rq);
+_ocf_stats_zero(req);
_ocf_stats_zero(blocks);
_ocf_stats_zero(errors);

@@ -301,8 +301,8 @@ int ocf_stats_collect_cache(ocf_cache_t cache,
_lines4k(info.size, cache_line_size));
}

-if (rq)
-_fill_rq(rq, &s);
+if (req)
+_fill_req(req, &s);

if (blocks)
_fill_blocks(blocks, &s);

@@ -5,7 +5,7 @@

#include "ocf/ocf.h"
#include "ocf_cache_priv.h"
-#include "utils/utils_rq.h"
+#include "utils/utils_req.h"
#include "ocf_utils.h"
#include "ocf_ctx_priv.h"

@@ -13,7 +13,7 @@ int ocf_utils_init(struct ocf_ctx *ocf_ctx)
{
int result;

-result = ocf_rq_allocator_init(ocf_ctx);
+result = ocf_req_allocator_init(ocf_ctx);
if (result)
goto ocf_utils_init_ERROR;

@@ -34,7 +34,7 @@ ocf_utils_init_ERROR:

void ocf_utils_deinit(struct ocf_ctx *ocf_ctx)
{
-ocf_rq_allocator_deinit(ocf_ctx);
+ocf_req_allocator_deinit(ocf_ctx);

if (ocf_ctx->resources.core_io_allocator) {
env_allocator_destroy(ocf_ctx->resources.core_io_allocator);

@@ -48,21 +48,21 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
}

void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
-ocf_cache_line_t line = rq->map[map_idx].coll_idx;
+ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id;
ocf_core_id_t core_id;

-ENV_BUG_ON(!rq);
+ENV_BUG_ON(!req);

part_id = ocf_metadata_get_partition_id(cache, line);
-core_id = rq->core_id;
+core_id = req->core_id;

__set_cache_line_invalid(cache, start_bit, end_bit, line, core_id,
part_id);

-ocf_metadata_flush_mark(cache, rq, map_idx, INVALID, start_bit,
+ocf_metadata_flush_mark(cache, req, map_idx, INVALID, start_bit,
end_bit);
}

@@ -79,10 +79,10 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
}

void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
-ocf_core_id_t core_id = rq->core_id;
-ocf_cache_line_t line = rq->map[map_idx].coll_idx;
+ocf_core_id_t core_id = req->core_id;
+ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);

ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
@@ -99,10 +99,10 @@ void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
}

void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
-ocf_core_id_t core_id = rq->core_id;
-ocf_cache_line_t line = rq->map[map_idx].coll_idx;
+ocf_core_id_t core_id = req->core_id;
+ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
uint8_t evp_type = cache->conf_meta->eviction_policy_type;

@@ -135,14 +135,14 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
ocf_purge_cleaning_policy(cache, line);
}

-ocf_metadata_flush_mark(cache, rq, map_idx, CLEAN, start_bit, end_bit);
+ocf_metadata_flush_mark(cache, req, map_idx, CLEAN, start_bit, end_bit);
}

void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx)
{
-ocf_core_id_t core_id = rq->core_id;
-ocf_cache_line_t line = rq->map[map_idx].coll_idx;
+ocf_core_id_t core_id = req->core_id;
+ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
uint8_t evp_type = cache->conf_meta->eviction_policy_type;

@@ -173,5 +173,5 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,

ocf_cleaning_set_hot_cache_line(cache, line);

-ocf_metadata_flush_mark(cache, rq, map_idx, DIRTY, start_bit, end_bit);
+ocf_metadata_flush_mark(cache, req, map_idx, DIRTY, start_bit, end_bit);
}

@@ -75,11 +75,11 @@ static inline uint64_t ocf_lines_2_bytes(struct ocf_cache *cache,
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
-* @param rq OCF request
+* @param req OCF request
* @param map_idx Array index to map containing cache line to invalid
*/
void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);

/**
@@ -99,11 +99,11 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
-* @param rq OCF request
+* @param req OCF request
* @param map_idx Array index to map containing cache line to invalid
*/
void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);

/**
* @brief Set cache line clean
@@ -111,11 +111,11 @@ void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
-* @param rq OCF request
+* @param req OCF request
* @param map_idx Array index to map containing cache line to invalid
*/
void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);

/**
* @brief Set cache line dirty
@@ -123,11 +123,11 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
-* @param rq OCF request
+* @param req OCF request
* @param map_idx Array index to map containing cache line to invalid
*/
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
-uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
+uint8_t end_bit, struct ocf_request *req, uint32_t map_idx);

/**
* @brief Remove cache line from cleaning policy
@@ -166,33 +166,33 @@ static inline void ocf_purge_eviction_policy(struct ocf_cache *cache,
* @param cache Cache instance
* @param start Start bit of range in cache line to purge
* @param end End bit of range in cache line to purge
-* @param rq OCF request
+* @param req OCF request
* @param map_idx Array index to map containing cache line to purge
*/
static inline void _ocf_purge_cache_line_sec(struct ocf_cache *cache,
-uint8_t start, uint8_t stop, struct ocf_request *rq,
+uint8_t start, uint8_t stop, struct ocf_request *req,
uint32_t map_idx)
{

-set_cache_line_clean(cache, start, stop, rq, map_idx);
+set_cache_line_clean(cache, start, stop, req, map_idx);

-set_cache_line_invalid(cache, start, stop, rq, map_idx);
+set_cache_line_invalid(cache, start, stop, req, map_idx);
}

/**
* @brief Purge cache line (remove completely, form collision, move to free
* partition, from cleaning policy and eviction policy)
*
-* @param rq - OCF request to purge
+* @param req - OCF request to purge
*/
-static inline void ocf_purge_map_info(struct ocf_request *rq)
+static inline void ocf_purge_map_info(struct ocf_request *req)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
-struct ocf_map_info *map = rq->map;
-struct ocf_cache *cache = rq->cache;
-uint32_t count = rq->core_line_count;
+struct ocf_map_info *map = req->map;
+struct ocf_cache *cache = req->cache;
+uint32_t count = req->core_line_count;

/* Purge range on the basis of map info
*
@@ -211,7 +211,7 @@ static inline void ocf_purge_map_info(struct ocf_request *rq)
if (map_idx == 0) {
/* First */

-start_bit = BYTES_TO_SECTORS(rq->byte_position)
+start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);

}
@@ -219,24 +219,24 @@ static inline void ocf_purge_map_info(struct ocf_request *rq)
if (map_idx == (count - 1)) {
/* Last */

-end_bit = BYTES_TO_SECTORS(rq->byte_position +
-rq->byte_length - 1) %
+end_bit = BYTES_TO_SECTORS(req->byte_position +
+req->byte_length - 1) %
ocf_line_sectors(cache);
}

-_ocf_purge_cache_line_sec(cache, start_bit, end_bit, rq,
+_ocf_purge_cache_line_sec(cache, start_bit, end_bit, req,
map_idx);
}
}
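The first/last start_bit/end_bit arithmetic above is easiest to see with numbers. Assuming 512-byte sectors and a 4 KiB cache line (so ocf_line_sectors() == 8):

    /* req->byte_position = 1536, req->byte_length = 2048:
     *   start_bit = BYTES_TO_SECTORS(1536) % 8        = 3
     *   end_bit   = BYTES_TO_SECTORS(1536 + 2048 - 1) % 8
     *             = BYTES_TO_SECTORS(3583) % 8        = 6
     * so sectors 3..6 of the affected cache line are purged. */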

-static inline void ocf_set_valid_map_info(struct ocf_request *rq)
+static inline void ocf_set_valid_map_info(struct ocf_request *req)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
-struct ocf_cache *cache = rq->cache;
-uint32_t count = rq->core_line_count;
-struct ocf_map_info *map = rq->map;
+struct ocf_cache *cache = req->cache;
+uint32_t count = req->core_line_count;
+struct ocf_map_info *map = req->map;

/* Set valid bits for sectors on the basis of map info
*
@@ -254,29 +254,29 @@ static inline void ocf_set_valid_map_info(struct ocf_request *rq)
if (map_idx == 0) {
/* First */

-start_bit = BYTES_TO_SECTORS(rq->byte_position)
+start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
}

if (map_idx == (count - 1)) {
/* Last */

-end_bit = BYTES_TO_SECTORS(rq->byte_position +
-rq->byte_length - 1)
+end_bit = BYTES_TO_SECTORS(req->byte_position +
+req->byte_length - 1)
% ocf_line_sectors(cache);
}

-set_cache_line_valid(cache, start_bit, end_bit, rq, map_idx);
+set_cache_line_valid(cache, start_bit, end_bit, req, map_idx);
}
}

-static inline void ocf_set_dirty_map_info(struct ocf_request *rq)
+static inline void ocf_set_dirty_map_info(struct ocf_request *req)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
-struct ocf_cache *cache = rq->cache;
-uint32_t count = rq->core_line_count;
+struct ocf_cache *cache = req->cache;
+uint32_t count = req->core_line_count;

/* Set valid bits for sectors on the basis of map info
*
@@ -292,29 +292,29 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *rq)
if (map_idx == 0) {
/* First */

-start_bit = BYTES_TO_SECTORS(rq->byte_position)
+start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
}

if (map_idx == (count - 1)) {
/* Last */

-end_bit = BYTES_TO_SECTORS(rq->byte_position +
-rq->byte_length - 1) %
+end_bit = BYTES_TO_SECTORS(req->byte_position +
+req->byte_length - 1) %
ocf_line_sectors(cache);
}

-set_cache_line_dirty(cache, start_bit, end_bit, rq, map_idx);
+set_cache_line_dirty(cache, start_bit, end_bit, req, map_idx);
}
}

-static inline void ocf_set_clean_map_info(struct ocf_request *rq)
+static inline void ocf_set_clean_map_info(struct ocf_request *req)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
-struct ocf_cache *cache = rq->cache;
-uint32_t count = rq->core_line_count;
+struct ocf_cache *cache = req->cache;
+uint32_t count = req->core_line_count;

/* Set valid bits for sectors on the basis of map info
*
@@ -330,20 +330,20 @@ static inline void ocf_set_clean_map_info(struct ocf_request *rq)
if (map_idx == 0) {
/* First */

-start_bit = BYTES_TO_SECTORS(rq->byte_position)
+start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
}

if (map_idx == (count - 1)) {
/* Last */

-end_bit = BYTES_TO_SECTORS(rq->byte_position +
-rq->byte_length - 1) %
+end_bit = BYTES_TO_SECTORS(req->byte_position +
+req->byte_length - 1) %
ocf_line_sectors(cache);

}

-set_cache_line_clean(cache, start_bit, end_bit, rq, map_idx);
+set_cache_line_clean(cache, start_bit, end_bit, req, map_idx);
}
}

@@ -8,7 +8,7 @@
#include "../engine/engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "utils_cleaner.h"
-#include "utils_rq.h"
+#include "utils_req.h"
#include "utils_io.h"
#include "utils_cache_line.h"

@@ -40,131 +40,131 @@ struct ocf_cleaner_sync {
/*
* Allocate cleaning request
*/
-static struct ocf_request *_ocf_cleaner_alloc_rq(struct ocf_cache *cache,
+static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
-struct ocf_request *rq = ocf_rq_new_extended(cache, 0, 0,
+struct ocf_request *req = ocf_req_new_extended(cache, 0, 0,
count * ocf_line_size(cache), OCF_READ);
int ret;

-if (!rq)
+if (!req)
return NULL;

-rq->info.internal = true;
-rq->info.cleaner_cache_line_lock = attribs->cache_line_lock;
+req->info.internal = true;
+req->info.cleaner_cache_line_lock = attribs->cache_line_lock;

/* Allocate pages for cleaning IO */
-rq->data = ctx_data_alloc(cache->owner,
+req->data = ctx_data_alloc(cache->owner,
ocf_line_size(cache) / PAGE_SIZE * count);
-if (!rq->data) {
-ocf_rq_put(rq);
+if (!req->data) {
+ocf_req_put(req);
return NULL;
}

-ret = ctx_data_mlock(cache->owner, rq->data);
+ret = ctx_data_mlock(cache->owner, req->data);
if (ret) {
-ctx_data_free(cache->owner, rq->data);
-ocf_rq_put(rq);
+ctx_data_free(cache->owner, req->data);
+ocf_req_put(req);
return NULL;
}

-rq->io_queue = attribs->io_queue;
+req->io_queue = attribs->io_queue;

-return rq;
+return req;
}

enum {
-ocf_cleaner_rq_type_master = 1,
-ocf_cleaner_rq_type_slave = 2
+ocf_cleaner_req_type_master = 1,
+ocf_cleaner_req_type_slave = 2
};

-static struct ocf_request *_ocf_cleaner_alloc_master_rq(
+static struct ocf_request *_ocf_cleaner_alloc_master_req(
struct ocf_cache *cache, uint32_t count,
const struct ocf_cleaner_attribs *attribs)
{
-struct ocf_request *rq = _ocf_cleaner_alloc_rq(cache, count, attribs);
+struct ocf_request *req = _ocf_cleaner_alloc_req(cache, count, attribs);

-if (rq) {
+if (req) {
/* Set type of cleaning request */
-rq->master_io_req_type = ocf_cleaner_rq_type_master;
+req->master_io_req_type = ocf_cleaner_req_type_master;

/* In master, save completion context and function */
-rq->priv = attribs->cmpl_context;
-rq->master_io_req = attribs->cmpl_fn;
+req->priv = attribs->cmpl_context;
+req->master_io_req = attribs->cmpl_fn;

/* The count of all requests */
-env_atomic_set(&rq->master_remaining, 1);
+env_atomic_set(&req->master_remaining, 1);

OCF_DEBUG_PARAM(cache, "New master request, count = %u",
count);
}
-return rq;
+return req;
}

-static struct ocf_request *_ocf_cleaner_alloc_slave_rq(
+static struct ocf_request *_ocf_cleaner_alloc_slave_req(
struct ocf_request *master,
uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
-struct ocf_request *rq = _ocf_cleaner_alloc_rq(
+struct ocf_request *req = _ocf_cleaner_alloc_req(
master->cache, count, attribs);

-if (rq) {
+if (req) {
/* Set type of cleaning request */
-rq->master_io_req_type = ocf_cleaner_rq_type_slave;
+req->master_io_req_type = ocf_cleaner_req_type_slave;

/* Slave refers to master request, get its reference counter */
-ocf_rq_get(master);
+ocf_req_get(master);

/* Slave request contains reference to master */
-rq->master_io_req = master;
+req->master_io_req = master;

/* One more additional slave request, increase global counter
* of requests count
*/
env_atomic_inc(&master->master_remaining);

-OCF_DEBUG_PARAM(rq->cache,
+OCF_DEBUG_PARAM(req->cache,
"New slave request, count = %u,all requests count = %d",
count, env_atomic_read(&master->master_remaining));
}
-return rq;
+return req;
}
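How the master/slave requests compose (hypothetical driver code, not part of this change): the master starts with master_remaining == 1 so it cannot complete before all slaves exist, and each slave takes a reference on the master and bumps that counter:

    struct ocf_request *master =
            _ocf_cleaner_alloc_master_req(cache, count, attribs); /* remaining = 1 */
    struct ocf_request *slave =
            _ocf_cleaner_alloc_slave_req(master, count, attribs); /* remaining = 2 */
    /* Each completed request decrements master_remaining (see
     * _ocf_cleaner_complete_req() below); attribs->cmpl_fn runs only
     * once the count reaches zero. */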

-static void _ocf_cleaner_dealloc_rq(struct ocf_request *rq)
+static void _ocf_cleaner_dealloc_req(struct ocf_request *req)
{
-if (ocf_cleaner_rq_type_slave == rq->master_io_req_type) {
+if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
/* Slave contains reference to the master request,
* release reference counter
*/
-struct ocf_request *master = rq->master_io_req;
+struct ocf_request *master = req->master_io_req;

-OCF_DEBUG_MSG(rq->cache, "Put master request by slave");
-ocf_rq_put(master);
+OCF_DEBUG_MSG(req->cache, "Put master request by slave");
+ocf_req_put(master);

-OCF_DEBUG_MSG(rq->cache, "Free slave request");
-} else if (ocf_cleaner_rq_type_master == rq->master_io_req_type) {
-OCF_DEBUG_MSG(rq->cache, "Free master request");
+OCF_DEBUG_MSG(req->cache, "Free slave request");
+} else if (ocf_cleaner_req_type_master == req->master_io_req_type) {
+OCF_DEBUG_MSG(req->cache, "Free master request");
} else {
ENV_BUG();
}

-ctx_data_secure_erase(rq->cache->owner, rq->data);
-ctx_data_munlock(rq->cache->owner, rq->data);
-ctx_data_free(rq->cache->owner, rq->data);
-ocf_rq_put(rq);
+ctx_data_secure_erase(req->cache->owner, req->data);
+ctx_data_munlock(req->cache->owner, req->data);
+ctx_data_free(req->cache->owner, req->data);
+ocf_req_put(req);
}

/*
* cleaner - Get clean result
*/
-static void _ocf_cleaner_set_error(struct ocf_request *rq)
+static void _ocf_cleaner_set_error(struct ocf_request *req)
{
struct ocf_request *master = NULL;

-if (ocf_cleaner_rq_type_master == rq->master_io_req_type) {
-master = rq;
-} else if (ocf_cleaner_rq_type_slave == rq->master_io_req_type) {
-master = rq->master_io_req;
+if (ocf_cleaner_req_type_master == req->master_io_req_type) {
+master = req;
+} else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
+master = req->master_io_req;
} else {
ENV_BUG();
return;
@@ -173,23 +173,23 @@ static void _ocf_cleaner_set_error(struct ocf_request *rq)
master->error = -EIO;
}

-static void _ocf_cleaner_complete_rq(struct ocf_request *rq)
+static void _ocf_cleaner_complete_req(struct ocf_request *req)
{
struct ocf_request *master = NULL;
ocf_req_end_t cmpl;

-if (ocf_cleaner_rq_type_master == rq->master_io_req_type) {
-OCF_DEBUG_MSG(rq->cache, "Master completion");
-master = rq;
-} else if (ocf_cleaner_rq_type_slave == rq->master_io_req_type) {
-OCF_DEBUG_MSG(rq->cache, "Slave completion");
-master = rq->master_io_req;
+if (ocf_cleaner_req_type_master == req->master_io_req_type) {
+OCF_DEBUG_MSG(req->cache, "Master completion");
+master = req;
+} else if (ocf_cleaner_req_type_slave == req->master_io_req_type) {
+OCF_DEBUG_MSG(req->cache, "Slave completion");
+master = req->master_io_req;
} else {
ENV_BUG();
return;
}

-OCF_DEBUG_PARAM(rq->cache, "Master requests remaining = %d",
+OCF_DEBUG_PARAM(req->cache, "Master requests remaining = %d",
env_atomic_read(&master->master_remaining));

if (env_atomic_dec_return(&master->master_remaining)) {
@@ -197,7 +197,7 @@ static void _ocf_cleaner_complete_rq(struct ocf_request *rq)
return;
}

-OCF_DEBUG_MSG(rq->cache, "All cleaning request completed");
+OCF_DEBUG_MSG(req->cache, "All cleaning request completed");

/* Only master contains completion function and completion context */
cmpl = master->master_io_req;
@@ -207,25 +207,25 @@ static void _ocf_cleaner_complete_rq(struct ocf_request *rq)
/*
* cleaner - Cache line lock, function lock cache lines depends on attributes
*/
-static int _ocf_cleaner_cache_line_lock(struct ocf_request *rq)
+static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
{
-if (!rq->info.cleaner_cache_line_lock)
+if (!req->info.cleaner_cache_line_lock)
return OCF_LOCK_ACQUIRED;

-OCF_DEBUG_TRACE(rq->cache);
+OCF_DEBUG_TRACE(req->cache);

-return ocf_rq_trylock_rd(rq);
+return ocf_req_trylock_rd(req);
}

/*
* cleaner - Cache line unlock, function unlock cache lines
* depends on attributes
*/
-static void _ocf_cleaner_cache_line_unlock(struct ocf_request *rq)
+static void _ocf_cleaner_cache_line_unlock(struct ocf_request *req)
{
-if (rq->info.cleaner_cache_line_lock) {
-OCF_DEBUG_TRACE(rq->cache);
-ocf_rq_unlock(rq);
+if (req->info.cleaner_cache_line_lock) {
+OCF_DEBUG_TRACE(req->cache);
+ocf_req_unlock(req);
}
}

@@ -243,47 +243,47 @@ static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache,
return valid ? dirty : false;
}

-static void _ocf_cleaner_finish_rq(struct ocf_request *rq)
+static void _ocf_cleaner_finish_req(struct ocf_request *req)
{
/* Handle cache lines unlocks */
-_ocf_cleaner_cache_line_unlock(rq);
+_ocf_cleaner_cache_line_unlock(req);

/* Signal completion to the caller of cleaning */
-_ocf_cleaner_complete_rq(rq);
+_ocf_cleaner_complete_req(req);

/* Free allocated resources */
-_ocf_cleaner_dealloc_rq(rq);
+_ocf_cleaner_dealloc_req(req);
}

static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
{
-struct ocf_request *rq = io->priv1;
+struct ocf_request *req = io->priv1;

if (error) {
-ocf_metadata_error(rq->cache);
-rq->error = error;
+ocf_metadata_error(req->cache);
+req->error = error;
}

-OCF_DEBUG_MSG(rq->cache, "Cache flush finished");
+OCF_DEBUG_MSG(req->cache, "Cache flush finished");

-_ocf_cleaner_finish_rq(rq);
+_ocf_cleaner_finish_req(req);
}

-static int _ocf_cleaner_fire_flush_cache(struct ocf_request *rq)
+static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
{
struct ocf_io *io;

-OCF_DEBUG_TRACE(rq->cache);
+OCF_DEBUG_TRACE(req->cache);

-io = ocf_dobj_new_io(&rq->cache->device->obj);
+io = ocf_dobj_new_io(&req->cache->device->obj);
if (!io) {
-ocf_metadata_error(rq->cache);
-rq->error = -ENOMEM;
+ocf_metadata_error(req->cache);
+req->error = -ENOMEM;
return -ENOMEM;
}

ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
-ocf_io_set_cmpl(io, rq, NULL, _ocf_cleaner_flush_cache_io_end);
+ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);

ocf_dobj_submit_flush(io);

@@ -295,33 +295,33 @@ static const struct ocf_io_if _io_if_flush_cache = {
.write = _ocf_cleaner_fire_flush_cache,
};

-static void _ocf_cleaner_metadata_io_end(struct ocf_request *rq, int error)
+static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
{
if (error) {
-ocf_metadata_error(rq->cache);
-rq->error = error;
-_ocf_cleaner_finish_rq(rq);
+ocf_metadata_error(req->cache);
+req->error = error;
+_ocf_cleaner_finish_req(req);
return;
}

-OCF_DEBUG_MSG(rq->cache, "Metadata flush finished");
+OCF_DEBUG_MSG(req->cache, "Metadata flush finished");

-rq->io_if = &_io_if_flush_cache;
-ocf_engine_push_rq_front(rq, true);
+req->io_if = &_io_if_flush_cache;
+ocf_engine_push_req_front(req, true);
}

-static int _ocf_cleaner_update_metadata(struct ocf_request *rq)
+static int _ocf_cleaner_update_metadata(struct ocf_request *req)
{
-struct ocf_cache *cache = rq->cache;
-const struct ocf_map_info *iter = rq->map;
+struct ocf_cache *cache = req->cache;
+const struct ocf_map_info *iter = req->map;
uint32_t i;
ocf_cache_line_t cache_line;

-OCF_DEBUG_TRACE(rq->cache);
+OCF_DEBUG_TRACE(req->cache);

OCF_METADATA_LOCK_WR();
/* Update metadata */
-for (i = 0; i < rq->core_line_count; i++, iter++) {
+for (i = 0; i < req->core_line_count; i++, iter++) {
if (iter->status == LOOKUP_MISS)
continue;

@@ -336,13 +336,13 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *rq)
continue;

ocf_metadata_get_core_and_part_id(cache, cache_line,
-&rq->core_id, &rq->part_id);
+&req->core_id, &req->part_id);

-set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), rq,
+set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), req,
i);
}

-ocf_metadata_flush_do_asynch(cache, rq, _ocf_cleaner_metadata_io_end);
+ocf_metadata_flush_do_asynch(cache, req, _ocf_cleaner_metadata_io_end);
OCF_METADATA_UNLOCK_WR();

return 0;
@@ -354,14 +354,14 @@ static const struct ocf_io_if _io_if_update_metadata = {
};

static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
-struct ocf_request *rq, int error)
+struct ocf_request *req, int error)
{
uint32_t i;
-struct ocf_map_info *iter = rq->map;
+struct ocf_map_info *iter = req->map;

if (error) {
/* Flush error, set error for all cache line of this core */
-for (i = 0; i < rq->core_line_count; i++, iter++) {
+for (i = 0; i < req->core_line_count; i++, iter++) {
if (iter->status == LOOKUP_MISS)
continue;

@@ -369,19 +369,19 @@ static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
iter->invalid = true;
}

-_ocf_cleaner_set_error(rq);
+_ocf_cleaner_set_error(req);
}

-if (env_atomic_dec_return(&rq->req_remaining))
+if (env_atomic_dec_return(&req->req_remaining))
return;

-OCF_DEBUG_MSG(rq->cache, "Core flush finished");
+OCF_DEBUG_MSG(req->cache, "Core flush finished");

/*
* All core writes done, switch to post cleaning activities
*/
-rq->io_if = &_io_if_update_metadata;
-ocf_engine_push_rq_front(rq, true);
+req->io_if = &_io_if_update_metadata;
+ocf_engine_push_req_front(req, true);
}

static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
@@ -391,21 +391,21 @@ static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
ocf_io_put(io);
}

-static int _ocf_cleaner_fire_flush_cores(struct ocf_request *rq)
+static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
{
uint32_t i;
ocf_core_id_t core_id = OCF_CORE_MAX;
-struct ocf_cache *cache = rq->cache;
-struct ocf_map_info *iter = rq->map;
+struct ocf_cache *cache = req->cache;
+struct ocf_map_info *iter = req->map;
struct ocf_io *io;

-OCF_DEBUG_TRACE(rq->cache);
+OCF_DEBUG_TRACE(req->cache);

/* Protect IO completion race */
-env_atomic_set(&rq->req_remaining, 1);
+env_atomic_set(&req->req_remaining, 1);

/* Submit flush requests */
-for (i = 0; i < rq->core_line_count; i++, iter++) {
+for (i = 0; i < req->core_line_count; i++, iter++) {
if (iter->invalid) {
/* IO error, skip this item */
continue;
@@ -419,22 +419,22 @@ static int _ocf_cleaner_fire_flush_cores(struct ocf_request *rq)

core_id = iter->core_id;

-env_atomic_inc(&rq->req_remaining);
+env_atomic_inc(&req->req_remaining);

io = ocf_new_core_io(cache, core_id);
if (!io) {
-_ocf_cleaner_flush_cores_io_end(iter, rq, -ENOMEM);
+_ocf_cleaner_flush_cores_io_end(iter, req, -ENOMEM);
continue;
}

ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
-ocf_io_set_cmpl(io, iter, rq, _ocf_cleaner_flush_cores_io_cmpl);
+ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);

ocf_dobj_submit_flush(io);
}

/* Protect IO completion race */
-_ocf_cleaner_flush_cores_io_end(NULL, rq, 0);
+_ocf_cleaner_flush_cores_io_end(NULL, req, 0);

return 0;
}
@@ -444,44 +444,44 @@ static const struct ocf_io_if _io_if_flush_cores = {
.write = _ocf_cleaner_fire_flush_cores,
};

-static void _ocf_cleaner_core_io_end(struct ocf_request *rq)
+static void _ocf_cleaner_core_io_end(struct ocf_request *req)
{
-if (env_atomic_dec_return(&rq->req_remaining))
+if (env_atomic_dec_return(&req->req_remaining))
return;

-OCF_DEBUG_MSG(rq->cache, "Core writes finished");
+OCF_DEBUG_MSG(req->cache, "Core writes finished");

/*
* All cache read requests done, now we can submit writes to cores,
* Move processing to thread, where IO will be (and can be) submitted
*/
-rq->io_if = &_io_if_flush_cores;
-ocf_engine_push_rq_front(rq, true);
+req->io_if = &_io_if_flush_cores;
+ocf_engine_push_req_front(req, true);
}

static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
{
struct ocf_map_info *map = io->priv1;
-struct ocf_request *rq = io->priv2;
+struct ocf_request *req = io->priv2;

if (error) {
map->invalid |= 1;
-_ocf_cleaner_set_error(rq);
-env_atomic_inc(&rq->cache->core_obj[map->core_id].counters->
+_ocf_cleaner_set_error(req);
+env_atomic_inc(&req->cache->core_obj[map->core_id].counters->
core_errors.write);
}

-_ocf_cleaner_core_io_end(rq);
+_ocf_cleaner_core_io_end(req);

ocf_io_put(io);
}

-static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *rq,
+static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
struct ocf_map_info *iter, uint64_t begin, uint64_t end)
{
uint64_t addr, offset;
int err;
-struct ocf_cache *cache = rq->cache;
+struct ocf_cache *cache = req->cache;
struct ocf_io *io;
struct ocf_counters_block *core_stats =
&cache->core_obj[iter->core_id].counters->core_blocks;
@@ -499,22 +499,22 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *rq,

ocf_io_configure(io, addr, SECTORS_TO_BYTES(end - begin), OCF_WRITE,
part_id, 0);
-err = ocf_io_set_data(io, rq->data, offset);
+err = ocf_io_set_data(io, req->data, offset);
if (err) {
ocf_io_put(io);
goto error;
}

-ocf_io_set_cmpl(io, iter, rq, _ocf_cleaner_core_io_cmpl);
+ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);

env_atomic64_add(SECTORS_TO_BYTES(end - begin), &core_stats->write_bytes);

-OCF_DEBUG_PARAM(rq->cache, "Core write, line = %llu, "
+OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
"sector = %llu, count = %llu", iter->core_line, begin,
end - begin);

/* Increase IO counter to be processed */
-env_atomic_inc(&rq->req_remaining);
+env_atomic_inc(&req->req_remaining);

/* Send IO */
ocf_dobj_submit_io(io);
@@ -522,21 +522,21 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *rq,
return;
error:
iter->invalid = true;
-_ocf_cleaner_set_error(rq);
+_ocf_cleaner_set_error(req);
}

-static void _ocf_cleaner_core_submit_io(struct ocf_request *rq,
+static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
struct ocf_map_info *iter)
{
uint64_t i, dirty_start = 0;
-struct ocf_cache *cache = rq->cache;
+struct ocf_cache *cache = req->cache;
bool counting_dirty = false;

/* Check integrity of entry to be cleaned */
if (metadata_test_valid(cache, iter->coll_idx)
&& metadata_test_dirty(cache, iter->coll_idx)) {

-_ocf_cleaner_core_io_for_dirty_range(rq, iter, 0,
+_ocf_cleaner_core_io_for_dirty_range(req, iter, 0,
ocf_line_sectors(cache));

return;
@@ -547,7 +547,7 @@ static void _ocf_cleaner_core_submit_io(struct ocf_request *rq,
if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) {
if (counting_dirty) {
counting_dirty = false;
-_ocf_cleaner_core_io_for_dirty_range(rq, iter,
+_ocf_cleaner_core_io_for_dirty_range(req, iter,
dirty_start, i);
}

@@ -562,22 +562,22 @@ static void _ocf_cleaner_core_submit_io(struct ocf_request *rq,
}

if (counting_dirty)
-_ocf_cleaner_core_io_for_dirty_range(rq, iter, dirty_start, i);
+_ocf_cleaner_core_io_for_dirty_range(req, iter, dirty_start, i);
}
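The scan above batches contiguous dirty sectors into single core writes; _ocf_cleaner_core_io_for_dirty_range() takes a half-open [begin, end) range. For example:

    /* For a line whose per-sector dirty bits are 0 1 1 0 1 1 1 0
     * (sectors 0..7), the loop issues two core writes:
     *   _ocf_cleaner_core_io_for_dirty_range(req, iter, 1, 3);
     *   _ocf_cleaner_core_io_for_dirty_range(req, iter, 4, 7);
     */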
|
||||
|
||||
static int _ocf_cleaner_fire_core(struct ocf_request *rq)
|
||||
static int _ocf_cleaner_fire_core(struct ocf_request *req)
|
||||
{
|
||||
uint32_t i;
|
||||
struct ocf_map_info *iter;
|
||||
|
||||
OCF_DEBUG_TRACE(rq->cache);
|
||||
OCF_DEBUG_TRACE(req->cache);
|
||||
|
||||
/* Protect IO completion race */
|
||||
env_atomic_set(&rq->req_remaining, 1);
|
||||
env_atomic_set(&req->req_remaining, 1);
|
||||
|
||||
/* Submits writes to the core */
|
||||
for (i = 0; i < rq->core_line_count; i++) {
|
||||
iter = &(rq->map[i]);
|
||||
for (i = 0; i < req->core_line_count; i++) {
|
||||
iter = &(req->map[i]);
|
||||
|
||||
if (iter->invalid) {
|
||||
/* IO read error on cache, skip this item */
|
||||
@ -587,11 +587,11 @@ static int _ocf_cleaner_fire_core(struct ocf_request *rq)
|
||||
if (iter->status == LOOKUP_MISS)
|
||||
continue;
|
||||
|
||||
_ocf_cleaner_core_submit_io(rq, iter);
|
||||
_ocf_cleaner_core_submit_io(req, iter);
|
||||
}
|
||||
|
||||
/* Protect IO completion race */
|
||||
_ocf_cleaner_core_io_end(rq);
|
||||
_ocf_cleaner_core_io_end(req);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -601,34 +601,34 @@ static const struct ocf_io_if _io_if_fire_core = {
|
||||
.write = _ocf_cleaner_fire_core,
|
||||
};
|
||||
|
||||
static void _ocf_cleaner_cache_io_end(struct ocf_request *rq)
|
||||
static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
|
||||
{
|
||||
if (env_atomic_dec_return(&rq->req_remaining))
|
||||
if (env_atomic_dec_return(&req->req_remaining))
|
||||
return;
|
||||
|
||||
/*
|
||||
* All cache read requests done, now we can submit writes to cores,
|
||||
* Move processing to thread, where IO will be (and can be) submitted
|
||||
*/
|
||||
rq->io_if = &_io_if_fire_core;
|
||||
ocf_engine_push_rq_front(rq, true);
|
||||
req->io_if = &_io_if_fire_core;
|
||||
ocf_engine_push_req_front(req, true);
|
||||
|
||||
OCF_DEBUG_MSG(rq->cache, "Cache reads finished");
|
||||
OCF_DEBUG_MSG(req->cache, "Cache reads finished");
|
||||
}
|
||||
|
||||
static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
|
||||
{
|
||||
struct ocf_map_info *map = io->priv1;
|
||||
struct ocf_request *rq = io->priv2;
|
||||
struct ocf_request *req = io->priv2;
|
||||
|
||||
if (error) {
|
||||
map->invalid |= 1;
|
||||
_ocf_cleaner_set_error(rq);
|
||||
env_atomic_inc(&rq->cache->core_obj[map->core_id].counters->
|
||||
_ocf_cleaner_set_error(req);
|
||||
env_atomic_inc(&req->cache->core_obj[map->core_id].counters->
|
||||
cache_errors.read);
|
||||
}
|
||||
|
||||
_ocf_cleaner_cache_io_end(rq);
|
||||
_ocf_cleaner_cache_io_end(req);
|
||||
|
||||
ocf_io_put(io);
|
||||
}
|
||||
@ -637,11 +637,11 @@ static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
 * cleaner - Traverse cache lines to be cleaned, detect sequential IO, and
 * perform cache reads and core writes
 */
static int _ocf_cleaner_fire_cache(struct ocf_request *rq)
static int _ocf_cleaner_fire_cache(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;
	uint32_t i;
	struct ocf_map_info *iter = rq->map;
	struct ocf_map_info *iter = req->map;
	uint64_t addr, offset;
	ocf_part_id_t part_id;
	struct ocf_io *io;
@ -649,9 +649,9 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *rq)
	struct ocf_counters_block *cache_stats;

	/* Protect IO completion race */
	env_atomic_inc(&rq->req_remaining);
	env_atomic_inc(&req->req_remaining);

	for (i = 0; i < rq->core_line_count; i++, iter++) {
	for (i = 0; i < req->core_line_count; i++, iter++) {
		if (iter->core_id == OCF_CORE_MAX)
			continue;
		if (iter->status == LOOKUP_MISS)
@ -664,11 +664,11 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *rq)
		if (!io) {
			/* Allocation error */
			iter->invalid = true;
			_ocf_cleaner_set_error(rq);
			_ocf_cleaner_set_error(req);
			continue;
		}

		OCF_DEBUG_PARAM(rq->cache, "Cache read, line = %u",
		OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
				iter->coll_idx);

		addr = ocf_metadata_map_lg2phy(cache,
@ -680,14 +680,14 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *rq)

		part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);

		ocf_io_set_cmpl(io, iter, rq, _ocf_cleaner_cache_io_cmpl);
		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
		ocf_io_configure(io, addr, ocf_line_size(cache), OCF_READ,
				part_id, 0);
		err = ocf_io_set_data(io, rq->data, offset);
		err = ocf_io_set_data(io, req->data, offset);
		if (err) {
			ocf_io_put(io);
			iter->invalid = true;
			_ocf_cleaner_set_error(rq);
			_ocf_cleaner_set_error(req);
			continue;
		}

@ -697,7 +697,7 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *rq)
	}

	/* Protect IO completion race */
	_ocf_cleaner_cache_io_end(rq);
	_ocf_cleaner_cache_io_end(req);

	return 0;
}
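The two `/* Protect IO completion race */` sites above form a completion guard: req_remaining is bumped once before the submit loop and dropped once after it, so the zero-crossing in _ocf_cleaner_cache_io_end() cannot happen until every IO has actually been issued. A standalone sketch of the same idiom (hypothetical names, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int remaining;

static void io_end(void)
{
	if (atomic_fetch_sub(&remaining, 1) > 1)
		return;				/* IOs still in flight */
	printf("all IOs done\n");		/* runs exactly once */
}

static void submit_io(int i)
{
	io_end();	/* a synchronous backend: completes immediately */
}

int main(void)
{
	atomic_store(&remaining, 1);		/* the guard reference */
	for (int i = 0; i < 4; i++) {
		atomic_fetch_add(&remaining, 1);
		submit_io(i);
	}
	io_end();				/* drop the guard */
	return 0;
}

Without the guard reference, four synchronous completions would reach zero inside the loop and the "all done" path could run before the last IO was submitted.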
@ -707,33 +707,33 @@ static const struct ocf_io_if _io_if_fire_cache = {
	.write = _ocf_cleaner_fire_cache,
};

static void _ocf_cleaner_on_resume(struct ocf_request *rq)
static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(rq->cache);
	ocf_engine_push_rq_front(rq, true);
	OCF_DEBUG_TRACE(req->cache);
	ocf_engine_push_req_front(req, true);
}

static int _ocf_cleaner_fire(struct ocf_request *rq)
static int _ocf_cleaner_fire(struct ocf_request *req)
{
	int result;

	/* Set resume callbacks */
	rq->resume = _ocf_cleaner_on_resume;
	rq->io_if = &_io_if_fire_cache;
	req->resume = _ocf_cleaner_on_resume;
	req->io_if = &_io_if_fire_cache;

	/* Handle cache lines locks */
	result = _ocf_cleaner_cache_line_lock(rq);
	result = _ocf_cleaner_cache_line_lock(req);

	if (result >= 0) {
		if (result == OCF_LOCK_ACQUIRED) {
			OCF_DEBUG_MSG(rq->cache, "Lock acquired");
			_ocf_cleaner_fire_cache(rq);
			OCF_DEBUG_MSG(req->cache, "Lock acquired");
			_ocf_cleaner_fire_cache(req);
		} else {
			OCF_DEBUG_MSG(rq->cache, "NO Lock");
			OCF_DEBUG_MSG(req->cache, "NO Lock");
		}
		return 0;
	} else {
		OCF_DEBUG_MSG(rq->cache, "Lock error");
		OCF_DEBUG_MSG(req->cache, "Lock error");
	}

	return result;
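_ocf_cleaner_cache_line_lock() either acquires all cache-line locks at once (OCF_LOCK_ACQUIRED, fire immediately) or leaves the request parked; when the locks become available later, the resume callback set above pushes the request back onto the engine queue and _io_if_fire_cache fires it from there. A simplified, hypothetical model of that lock-or-park protocol:

#include <stdio.h>

typedef void (*resume_fn)(void *req);

static int lock_taken;
static resume_fn waiter_cb;
static void *waiter_req;

static int try_lock(void *req, resume_fn resume)
{
	if (!lock_taken) {
		lock_taken = 1;
		return 1;			/* acquired: fire now */
	}
	waiter_cb = resume;			/* parked: fire later */
	waiter_req = req;
	return 0;
}

static void unlock(void)
{
	lock_taken = 0;
	if (waiter_cb) {
		resume_fn cb = waiter_cb;

		waiter_cb = NULL;
		cb(waiter_req);			/* re-queue the request */
	}
}

static void resume(void *req) { printf("resumed: %s\n", (char *)req); }

int main(void)
{
	try_lock((void *)"first", resume);	/* acquired immediately */
	try_lock((void *)"second", resume);	/* parked */
	unlock();				/* prints "resumed: second" */
	return 0;
}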
@ -758,40 +758,40 @@ static int _ocf_cleaner_cmp_private(const void *a, const void *b)
/**
 * Prepare cleaning request to be fired
 *
 * @param rq cleaning request
 * @param req cleaning request
 * @param i_out number of already filled map entries (the remaining entries
 * are filled with misses)
 */
static int _ocf_cleaner_do_fire(struct ocf_request *rq, uint32_t i_out,
static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out,
		bool do_sort)
{
	uint32_t i;
	/* Set counts of cache IOs */
	env_atomic_set(&rq->req_remaining, i_out);
	env_atomic_set(&req->req_remaining, i_out);

	/* fill tail of a request with fake MISSes so that it won't
	 * be cleaned
	 */
	for (; i_out < rq->core_line_count; ++i_out) {
		rq->map[i_out].core_id = OCF_CORE_MAX;
		rq->map[i_out].core_line = ULLONG_MAX;
		rq->map[i_out].status = LOOKUP_MISS;
		rq->map[i_out].hash_key = i_out;
	for (; i_out < req->core_line_count; ++i_out) {
		req->map[i_out].core_id = OCF_CORE_MAX;
		req->map[i_out].core_line = ULLONG_MAX;
		req->map[i_out].status = LOOKUP_MISS;
		req->map[i_out].hash_key = i_out;
	}

	if (do_sort) {
		/* Sort by core id and core line */
		env_sort(rq->map, rq->core_line_count, sizeof(rq->map[0]),
		env_sort(req->map, req->core_line_count, sizeof(req->map[0]),
				_ocf_cleaner_cmp_private, NULL);
		for (i = 0; i < rq->core_line_count; i++)
			rq->map[i].hash_key = i;
		for (i = 0; i < req->core_line_count; i++)
			req->map[i].hash_key = i;
	}

	/* issue actual request */
	return _ocf_cleaner_fire(rq);
	return _ocf_cleaner_fire(req);
}

static inline uint32_t _ocf_cleaner_get_rq_max_count(uint32_t count,
static inline uint32_t _ocf_cleaner_get_req_max_count(uint32_t count,
		bool low_mem)
{
	if (low_mem || count <= 4096)
@ -801,11 +801,11 @@ static inline uint32_t _ocf_cleaner_get_rq_max_count(uint32_t count,
}

static void _ocf_cleaner_fire_error(struct ocf_request *master,
		struct ocf_request *rq, int err)
		struct ocf_request *req, int err)
{
	master->error = err;
	_ocf_cleaner_complete_rq(rq);
	_ocf_cleaner_dealloc_rq(rq);
	_ocf_cleaner_complete_req(req);
	_ocf_cleaner_dealloc_req(req);
}

/*
@ -820,28 +820,28 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
	 * optimal number, but for smaller 1024 is too large to benefit from
	 * cleaning request overlapping
	 */
	uint32_t max = _ocf_cleaner_get_rq_max_count(count, false);
	uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
	ocf_cache_line_t cache_line;
	/* It is possible that more than one cleaning request will be generated
	 * for each cleaning order, thus multiple allocations. At the end of
	 * the loop, rq is set to zero and NOT deallocated, as deallocation is
	 * the loop, req is set to zero and NOT deallocated, as deallocation is
	 * handled in completion.
	 * In addition, the first request is called the master and contains the
	 * completion contexts; each succeeding request is a slave request
	 * holding a reference to the master request.
	 */
	struct ocf_request *rq = NULL, *master;
	struct ocf_request *req = NULL, *master;
	int err;
	ocf_core_id_t core_id;
	uint64_t core_sector;

	/* Allocate master request */
	master = _ocf_cleaner_alloc_master_rq(cache, max, attribs);
	master = _ocf_cleaner_alloc_master_req(cache, max, attribs);

	if (!master) {
		/* Some memory allocation error, try re-allocate request */
		max = _ocf_cleaner_get_rq_max_count(count, true);
		master = _ocf_cleaner_alloc_master_rq(cache, max, attribs);
		max = _ocf_cleaner_get_req_max_count(count, true);
		master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
	}

	if (!master) {
@ -849,34 +849,34 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
		return;
	}

	rq = master;
	req = master;

	/* prevent cleaning completion race */
	ocf_rq_get(master);
	ocf_req_get(master);
	env_atomic_inc(&master->master_remaining);

	for (i = 0; i < count; i++) {

		/* when request hasn't yet been allocated or is just issued */
		if (!rq) {
		if (!req) {
			if (max > count - i) {
				/* less than max left */
				max = count - i;
			}

			rq = _ocf_cleaner_alloc_slave_rq(master, max, attribs);
			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
		}

		if (!rq) {
		if (!req) {
			/* Some memory allocation error,
			 * try re-allocate request
			 */
			max = _ocf_cleaner_get_rq_max_count(max, true);
			rq = _ocf_cleaner_alloc_slave_rq(master, max, attribs);
			max = _ocf_cleaner_get_req_max_count(max, true);
			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
		}

		/* when request allocation failed stop processing */
		if (!rq) {
		if (!req) {
			master->error = -ENOMEM;
			break;
		}
@ -915,43 +915,43 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
			continue;
		}

		rq->map[i_out].core_id = core_id;
		rq->map[i_out].core_line = core_sector;
		rq->map[i_out].coll_idx = cache_line;
		rq->map[i_out].status = LOOKUP_HIT;
		rq->map[i_out].hash_key = i_out;
		req->map[i_out].core_id = core_id;
		req->map[i_out].core_line = core_sector;
		req->map[i_out].coll_idx = cache_line;
		req->map[i_out].status = LOOKUP_HIT;
		req->map[i_out].hash_key = i_out;
		i_out++;

		if (max == i_out) {
			err = _ocf_cleaner_do_fire(rq, i_out, attribs->do_sort);
			err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
			if (err) {
				_ocf_cleaner_fire_error(master, rq, err);
				rq = NULL;
				_ocf_cleaner_fire_error(master, req, err);
				req = NULL;
				break;
			}
			i_out = 0;
			rq = NULL;
			req = NULL;
		}
	}

	if (rq) {
		err = _ocf_cleaner_do_fire(rq, i_out, attribs->do_sort);
	if (req) {
		err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
		if (err)
			_ocf_cleaner_fire_error(master, rq, err);
		rq = NULL;
			_ocf_cleaner_fire_error(master, req, err);
		req = NULL;
		i_out = 0;
	}

	/* prevent cleaning completion race */
	_ocf_cleaner_complete_rq(master);
	ocf_rq_put(master);
	_ocf_cleaner_complete_req(master);
	ocf_req_put(master);
}
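To make the chunking concrete: assuming the partially elided _ocf_cleaner_get_req_max_count() returns 1024 for a large count in the non-low-memory case (an assumption; its return values sit outside this hunk), a cleaning order of 2500 lines would be split across the master and two slave requests, as in this hedged sketch:

#include <stdio.h>

int main(void)
{
	unsigned count = 2500, max = 1024, issued = 0;

	while (issued < count) {
		unsigned chunk = count - issued < max ? count - issued : max;

		/* the first chunk rides the master request, the rest slaves */
		printf("%s request: %u lines\n",
				issued ? "slave" : "master", chunk);
		issued += chunk;
	}
	return 0;	/* prints 1024, 1024, 452 */
}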

static void ocf_cleaner_sync_end(void *private_data, int error)
{
	struct ocf_cleaner_sync *sync = private_data;

	OCF_DEBUG_TRACE(rq->cache);
	OCF_DEBUG_TRACE(req->cache);
	if (error)
		sync->error = error;

@ -14,7 +14,7 @@
struct ocf_submit_io_wait_context {
	env_completion complete;
	int error;
	env_atomic rq_remaining;
	env_atomic req_remaining;
};

/*
@ -39,7 +39,7 @@ int ocf_submit_obj_flush_wait(ocf_data_obj_t obj)
	struct ocf_submit_io_wait_context cntx = { };
	struct ocf_io *io;

	env_atomic_set(&cntx.rq_remaining, 1);
	env_atomic_set(&cntx.req_remaining, 1);
	env_completion_init(&cntx.complete);

	io = ocf_dobj_new_io(obj);
@ -66,7 +66,7 @@ static void ocf_submit_obj_discard_wait_io(struct ocf_io *io, int error)

	ocf_io_put(io); /* Release IO */

	if (env_atomic_dec_return(&cntx->rq_remaining))
	if (env_atomic_dec_return(&cntx->req_remaining))
		return;

	/* All discard IO handled, signal it by setting completion */
@ -81,7 +81,7 @@ int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr,
	uint64_t max_length = (uint32_t)~0;

	ENV_BUG_ON(env_memset(&cntx, sizeof(cntx), 0));
	env_atomic_set(&cntx.rq_remaining, 1);
	env_atomic_set(&cntx.req_remaining, 1);
	env_completion_init(&cntx.complete);

	while (length) {
@ -94,7 +94,7 @@ int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr,

		bytes = min(length, max_length);

		env_atomic_inc(&cntx.rq_remaining);
		env_atomic_inc(&cntx.req_remaining);

		ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
		ocf_io_set_cmpl(io, &cntx, NULL,
@ -105,7 +105,7 @@ int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr,
		length -= bytes;
	}

	if (env_atomic_dec_return(&cntx.rq_remaining) == 0)
	if (env_atomic_dec_return(&cntx.req_remaining) == 0)
		env_completion_complete(&cntx.complete);

	env_completion_wait(&cntx.complete);
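The bytes = min(length, max_length) step above caps each discard IO at what fits in 32 bits; a longer discard simply goes out as several chunks, all counted by the same req_remaining guard. A worked example of the split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t length = 0x180000000ULL;	/* a 6 GiB discard */
	uint64_t max_length = (uint32_t)~0;	/* per-IO cap */

	while (length) {
		uint64_t bytes = length < max_length ? length : max_length;

		printf("chunk: %llu bytes\n", (unsigned long long)bytes);
		length -= bytes;
	}
	return 0;	/* two chunks: 4294967295 and 2147483649 */
}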
@ -201,10 +201,10 @@ end:

static void ocf_submit_obj_req_cmpl(struct ocf_io *io, int error)
{
	struct ocf_request *rq = io->priv1;
	struct ocf_request *req = io->priv1;
	ocf_req_end_t callback = io->priv2;

	callback(rq, error);
	callback(req, error);
}

void ocf_submit_cache_reqs(struct ocf_cache *cache,
@ -87,9 +87,9 @@ int ocf_part_init(struct ocf_cache *cache)
	return 0;
}

void ocf_part_move(struct ocf_request *rq)
void ocf_part_move(struct ocf_request *req)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_cache *cache = req->cache;
	struct ocf_map_info *entry;
	ocf_cache_line_t line;
	ocf_part_id_t id_old, id_new;
@ -98,8 +98,8 @@ void ocf_part_move(struct ocf_request *rq)

	ENV_BUG_ON(type >= ocf_cleaning_max);

	entry = &rq->map[0];
	for (i = 0; i < rq->core_line_count; i++, entry++) {
	entry = &req->map[0];
	for (i = 0; i < req->core_line_count; i++, entry++) {
		if (!entry->re_part) {
			/* Changing partition not required */
			continue;
@ -112,7 +112,7 @@ void ocf_part_move(struct ocf_request *rq)

		line = entry->coll_idx;
		id_old = ocf_metadata_get_partition_id(cache, line);
		id_new = rq->part_id;
		id_new = req->part_id;

		ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX ||
				id_new >= OCF_IO_CLASS_MAX);
@ -157,15 +157,15 @@ void ocf_part_move(struct ocf_request *rq)
			cleaning_policy_ops[type].
					set_hot_cache_line(cache, line);

			env_atomic_inc(&cache->core_runtime_meta[rq->core_id].
			env_atomic_inc(&cache->core_runtime_meta[req->core_id].
					part_counters[id_new].dirty_clines);
			env_atomic_dec(&cache->core_runtime_meta[rq->core_id].
			env_atomic_dec(&cache->core_runtime_meta[req->core_id].
					part_counters[id_old].dirty_clines);
		}

		env_atomic_inc(&cache->core_runtime_meta[rq->core_id].
		env_atomic_inc(&cache->core_runtime_meta[req->core_id].
				part_counters[id_new].cached_clines);
		env_atomic_dec(&cache->core_runtime_meta[rq->core_id].
		env_atomic_dec(&cache->core_runtime_meta[req->core_id].
				part_counters[id_old].cached_clines);

	/* DONE */
@ -50,7 +50,7 @@ static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
	return PARTITION_DEFAULT;
}

void ocf_part_move(struct ocf_request *rq);
void ocf_part_move(struct ocf_request *req);

#define for_each_part(cache, part, id) \
	for_each_lst_entry(&cache->lst_part, part, id, \
316
src/utils/utils_req.c
Normal file
@ -0,0 +1,316 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "utils_req.h"
#include "utils_cache_line.h"
#include "../ocf_request.h"
#include "../ocf_cache_priv.h"

#define OCF_UTILS_RQ_DEBUG 0

#if 1 == OCF_UTILS_RQ_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Utils][RQ] %s\n", __func__)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Utils][RQ] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

enum ocf_req_size {
	ocf_req_size_1 = 0,
	ocf_req_size_2,
	ocf_req_size_4,
	ocf_req_size_8,
	ocf_req_size_16,
	ocf_req_size_32,
	ocf_req_size_64,
	ocf_req_size_128,
	ocf_req_size_max,
};

struct ocf_req_allocator {
	env_allocator *allocator[ocf_req_size_max];
	size_t size[ocf_req_size_max];
};

static inline size_t ocf_req_sizeof_map(struct ocf_request *req)
{
	uint32_t lines = req->alloc_core_line_count;
	size_t size = (lines * sizeof(struct ocf_map_info));

	ENV_BUG_ON(lines == 0);
	return size;
}

static inline size_t ocf_req_sizeof(uint32_t lines)
{
	size_t size = sizeof(struct ocf_request) +
			(lines * sizeof(struct ocf_map_info));

	ENV_BUG_ON(lines == 0);
	return size;
}

#define ALLOCATOR_NAME_FMT "ocf_req_%u"
/* Max number of digits in decimal representation of unsigned int is 10 */
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10)

int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx)
{
	int i;
	struct ocf_req_allocator *req;
	char name[ALLOCATOR_NAME_MAX] = { '\0' };

	OCF_DEBUG_TRACE(cache);

	ocf_ctx->resources.req = env_zalloc(sizeof(*(ocf_ctx->resources.req)),
			ENV_MEM_NORMAL);
	req = ocf_ctx->resources.req;

	if (!req)
		goto ocf_utils_req_init_ERROR;

	for (i = 0; i < ARRAY_SIZE(req->allocator); i++) {
		req->size[i] = ocf_req_sizeof(1 << i);

		if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
				(1 << i)) < 0) {
			goto ocf_utils_req_init_ERROR;
		}

		req->allocator[i] = env_allocator_create(req->size[i], name);

		if (!req->allocator[i])
			goto ocf_utils_req_init_ERROR;

		OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, "
				"size = %lu", 1 << i, req->size[i]);
	}

	return 0;

ocf_utils_req_init_ERROR:

	ocf_req_allocator_deinit(ocf_ctx);

	return -1;
}

void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx)
{
	int i;
	struct ocf_req_allocator *req;

	OCF_DEBUG_TRACE(cache);


	if (!ocf_ctx->resources.req)
		return;

	req = ocf_ctx->resources.req;

	for (i = 0; i < ARRAY_SIZE(req->allocator); i++) {
		if (req->allocator[i]) {
			env_allocator_destroy(req->allocator[i]);
			req->allocator[i] = NULL;
		}
	}

	env_free(req);
	ocf_ctx->resources.req = NULL;
}

static inline env_allocator *_ocf_req_get_allocator_1(
		struct ocf_cache *cache)
{
	return cache->owner->resources.req->allocator[0];
}

static env_allocator *_ocf_req_get_allocator(
		struct ocf_cache *cache, uint32_t count)
{
	struct ocf_ctx *ocf_ctx = cache->owner;
	unsigned int idx = 31 - __builtin_clz(count);

	if (__builtin_ffs(count) <= idx)
		idx++;

	ENV_BUG_ON(count == 0);

	if (idx >= ocf_req_size_max)
		return NULL;

	return ocf_ctx->resources.req->allocator[idx];
}
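The index arithmetic in _ocf_req_get_allocator() computes ceil(log2(count)): 31 - __builtin_clz(count) is floor(log2(count)), and the __builtin_ffs() test bumps it by one exactly when count is not a power of two, so every request lands in the smallest power-of-two bucket that fits it. A small driver (hypothetical, not part of the patch) confirms the mapping:

#include <stdio.h>

int main(void)
{
	for (unsigned count = 1; count <= 9; count++) {
		unsigned idx = 31 - __builtin_clz(count);

		if ((unsigned)__builtin_ffs(count) <= idx)
			idx++;
		printf("count=%u -> allocator[%u] (%u lines)\n",
				count, idx, 1u << idx);
	}
	return 0;	/* 1->0, 2->1, 3..4->2, 5..8->3, 9->4 */
}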

static void start_cache_req(struct ocf_request *req)
{
	ocf_cache_t cache = req->cache;

	req->d2c = 1;
	if (env_atomic_read(&cache->attached)) {
		req->d2c = 0;
		env_atomic_inc(&cache->pending_cache_requests);
		if (!env_atomic_read(&cache->attached)) {
			req->d2c = 1;
			env_atomic_dec(&cache->pending_cache_requests);
		}
	}
}
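start_cache_req() re-reads cache->attached after registering the request to guard against a detach racing with the first check: if the cache detached in between, the registration is backed out and the request falls back to direct-to-core (d2c). The same check/register/recheck shape in a generic, hypothetical form:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool attached;
static atomic_int pending;

/* Returns true when the request was safely counted against the cache;
 * false means it must be handled direct-to-core. */
static bool register_cache_request(void)
{
	if (!atomic_load(&attached))
		return false;
	atomic_fetch_add(&pending, 1);
	if (!atomic_load(&attached)) {		/* detached in between? */
		atomic_fetch_sub(&pending, 1);	/* back out */
		return false;
	}
	return true;
}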

struct ocf_request *ocf_req_new(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
	uint64_t core_line_first, core_line_last, core_line_count;
	struct ocf_request *req;
	env_allocator *allocator;

	if (likely(bytes)) {
		core_line_first = ocf_bytes_2_lines(cache, addr);
		core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
		core_line_count = core_line_last - core_line_first + 1;
	} else {
		core_line_first = ocf_bytes_2_lines(cache, addr);
		core_line_last = core_line_first;
		core_line_count = 1;
	}

	allocator = _ocf_req_get_allocator(cache, core_line_count);
	if (allocator) {
		req = env_allocator_new(allocator);
	} else {
		req = env_allocator_new(_ocf_req_get_allocator_1(cache));
	}

	if (unlikely(!req))
		return NULL;

	if (allocator)
		req->map = req->__map;

	OCF_DEBUG_TRACE(cache);

	req->cache = cache;

	env_atomic_inc(&cache->pending_requests);
	start_cache_req(req);

	req->io_queue = 0;
	env_atomic_set(&req->ref_count, 1);
	req->core_id = core_id;

	req->byte_position = addr;
	req->byte_length = bytes;
	req->core_line_first = core_line_first;
	req->core_line_last = core_line_last;
	req->core_line_count = core_line_count;
	req->alloc_core_line_count = core_line_count;
	req->rw = rw;
	req->part_id = PARTITION_DEFAULT;

	return req;
}
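A request returned by ocf_req_new() carries an initial reference count of 1, so the caller owns exactly one reference and releases it through ocf_req_put() further down. A hedged usage sketch (assumes an initialized cache and a valid core id; not a complete program):

	struct ocf_request *req;

	req = ocf_req_new(cache, core_id, 0 /* addr */, 4096 /* bytes */,
			OCF_READ);
	if (!req)
		return -ENOMEM;

	/* ... fill req->map and submit through an engine ... */

	ocf_req_put(req);	/* drops the reference taken by ocf_req_new() */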

int ocf_req_alloc_map(struct ocf_request *req)
{
	if (req->map)
		return 0;

	req->map = env_zalloc(ocf_req_sizeof_map(req), ENV_MEM_NOIO);
	if (!req->map) {
		req->error = -ENOMEM;
		return -ENOMEM;
	}

	return 0;
}

struct ocf_request *ocf_req_new_extended(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
	struct ocf_request *req;

	req = ocf_req_new(cache, core_id, addr, bytes, rw);

	if (likely(req) && ocf_req_alloc_map(req)) {
		ocf_req_put(req);
		return NULL;
	}

	return req;
}

struct ocf_request *ocf_req_new_discard(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
	struct ocf_request *req;

	req = ocf_req_new_extended(cache, core_id, addr,
			MIN(bytes, MAX_TRIM_RQ_SIZE), rw);

	if (!req)
		return NULL;

	req->discard.sector = BYTES_TO_SECTORS(addr);
	req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
	req->discard.handled = 0;

	return req;
}

void ocf_req_get(struct ocf_request *req)
{
	OCF_DEBUG_TRACE(req->cache);

	env_atomic_inc(&req->ref_count);
}

void ocf_req_put(struct ocf_request *req)
{
	env_allocator *allocator;

	if (env_atomic_dec_return(&req->ref_count))
		return;

	OCF_DEBUG_TRACE(req->cache);

	if (!req->d2c && !env_atomic_dec_return(
			&req->cache->pending_cache_requests)) {
		env_waitqueue_wake_up(&req->cache->pending_cache_wq);
	}

	env_atomic_dec(&req->cache->pending_requests);

	allocator = _ocf_req_get_allocator(req->cache,
			req->alloc_core_line_count);
	if (allocator) {
		env_allocator_del(allocator, req);
	} else {
		env_free(req->map);
		env_allocator_del(_ocf_req_get_allocator_1(req->cache), req);
	}
}

void ocf_req_clear_info(struct ocf_request *req)
{
	ENV_BUG_ON(env_memset(&req->info, sizeof(req->info), 0));
}

void ocf_req_clear_map(struct ocf_request *req)
{
	if (likely(req->map))
		ENV_BUG_ON(env_memset(req->map,
				sizeof(req->map[0]) * req->core_line_count, 0));
}

uint32_t ocf_req_get_allocated(struct ocf_cache *cache)
{
	return env_atomic_read(&cache->pending_requests);
}
@ -9,11 +9,11 @@
#include "../ocf_request.h"

/**
 * @file utils_rq.h
 * @file utils_req.h
 * @brief OCF request allocation utilities
 */

struct ocf_rq_allocator;
struct ocf_req_allocator;

/**
 * @brief Initialize OCF request allocation utility
@ -21,14 +21,14 @@ struct ocf_rq_allocator;
 * @param cache - OCF cache instance
 * @return Operation status 0 - successful, non-zero failure
 */
int ocf_rq_allocator_init(struct ocf_ctx *ocf_ctx);
int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx);

/**
 * @brief De-initialize OCF request allocation utility
 *
 * @param cache - OCF cache instance
 */
void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx);
void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx);

/**
 * @brief Allocate new OCF request
@ -41,18 +41,18 @@ void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx);
 *
 * @return new OCF request
 */
struct ocf_request *ocf_rq_new(struct ocf_cache *cache,
struct ocf_request *ocf_req_new(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw);

/**
 * @brief Allocate OCF request map
 *
 * @param rq OCF request
 * @param req OCF request
 *
 * @retval 0 Allocation succeed
 * @retval non-zero Allocation failed
 */
int ocf_rq_alloc_map(struct ocf_request *rq);
int ocf_req_alloc_map(struct ocf_request *req);

/**
 * @brief Allocate new OCF request with NOIO map allocation for huge request
@ -66,7 +66,7 @@ int ocf_rq_alloc_map(struct ocf_request *rq);
 * @return new OCF request
 */

struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache,
struct ocf_request *ocf_req_new_extended(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw);

/**
@ -80,7 +80,7 @@ struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache,
 *
 * @return new OCF request
 */
struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache,
struct ocf_request *ocf_req_new_discard(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw);

/**
@ -90,63 +90,63 @@ struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache,
 *
 * @return Number of allocated requests
 */
uint32_t ocf_rq_get_allocated(struct ocf_cache *cache);
uint32_t ocf_req_get_allocated(struct ocf_cache *cache);

/**
 * @brief Increment OCF request reference count
 *
 * @param rq - OCF request
 * @param req - OCF request
 */
void ocf_rq_get(struct ocf_request *rq);
void ocf_req_get(struct ocf_request *req);

/**
 * @brief Decrement OCF request reference. If reference is 0 then request will
 * be deallocated
 *
 * @param rq - OCF request
 * @param req - OCF request
 */
void ocf_rq_put(struct ocf_request *rq);
void ocf_req_put(struct ocf_request *req);

/**
 * @brief Clear OCF request info
 *
 * @param rq - OCF request
 * @param req - OCF request
 */
void ocf_rq_clear_info(struct ocf_request *rq);
void ocf_req_clear_info(struct ocf_request *req);

/**
 * @brief Clear OCF request map
 *
 * @param rq - OCF request
 * @param req - OCF request
 */
void ocf_rq_clear_map(struct ocf_request *rq);
void ocf_req_clear_map(struct ocf_request *req);

/**
 * @brief Clear OCF request
 *
 * @param rq - OCF request
 * @param req - OCF request
 */
static inline void ocf_rq_clear(struct ocf_request *rq)
static inline void ocf_req_clear(struct ocf_request *req)
{
	ocf_rq_clear_info(rq);
	ocf_rq_clear_map(rq);
	ocf_req_clear_info(req);
	ocf_req_clear_map(req);

	env_atomic_set(&rq->lock_remaining, 0);
	env_atomic_set(&rq->req_remaining, 0);
	env_atomic_set(&req->lock_remaining, 0);
	env_atomic_set(&req->req_remaining, 0);
}

/**
 * @brief Return OCF request reference count
 *
 * @param rq - OCF request
 * @param req - OCF request
 * @return OCF request reference count
 */
static inline int ocf_rq_ref_count(struct ocf_request *rq)
static inline int ocf_req_ref_count(struct ocf_request *req)
{
	return env_atomic_read(&rq->ref_count);
	return env_atomic_read(&req->ref_count);
}

static inline bool ocf_rq_is_4k(uint64_t addr, uint32_t bytes)
static inline bool ocf_req_is_4k(uint64_t addr, uint32_t bytes)
{
	return !((addr % PAGE_SIZE) || (bytes % PAGE_SIZE));
}
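For reference, ocf_req_is_4k() accepts a request only when both the start address and the length are whole pages; assuming PAGE_SIZE == 4096:

	/* ocf_req_is_4k(0, 4096)    -> true  (aligned start, page-sized)
	 * ocf_req_is_4k(512, 4096)  -> false (start not page-aligned)
	 * ocf_req_is_4k(4096, 6144) -> false (length not a page multiple) */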
@ -1,316 +0,0 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "utils_rq.h"
#include "utils_cache_line.h"
#include "../ocf_request.h"
#include "../ocf_cache_priv.h"

#define OCF_UTILS_RQ_DEBUG 0

#if 1 == OCF_UTILS_RQ_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Utils][RQ] %s\n", __func__)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Utils][RQ] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

enum ocf_rq_size {
	ocf_rq_size_1 = 0,
	ocf_rq_size_2,
	ocf_rq_size_4,
	ocf_rq_size_8,
	ocf_rq_size_16,
	ocf_rq_size_32,
	ocf_rq_size_64,
	ocf_rq_size_128,
	ocf_rq_size_max,
};

struct ocf_rq_allocator {
	env_allocator *allocator[ocf_rq_size_max];
	size_t size[ocf_rq_size_max];
};

static inline size_t ocf_rq_sizeof_map(struct ocf_request *rq)
{
	uint32_t lines = rq->alloc_core_line_count;
	size_t size = (lines * sizeof(struct ocf_map_info));

	ENV_BUG_ON(lines == 0);
	return size;
}

static inline size_t ocf_rq_sizeof(uint32_t lines)
{
	size_t size = sizeof(struct ocf_request) +
			(lines * sizeof(struct ocf_map_info));

	ENV_BUG_ON(lines == 0);
	return size;
}

#define ALLOCATOR_NAME_FMT "ocf_rq_%u"
/* Max number of digits in decimal representation of unsigned int is 10 */
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10)

int ocf_rq_allocator_init(struct ocf_ctx *ocf_ctx)
{
	int i;
	struct ocf_rq_allocator *rq;
	char name[ALLOCATOR_NAME_MAX] = { '\0' };

	OCF_DEBUG_TRACE(cache);

	ocf_ctx->resources.rq = env_zalloc(sizeof(*(ocf_ctx->resources.rq)),
			ENV_MEM_NORMAL);
	rq = ocf_ctx->resources.rq;

	if (!rq)
		goto ocf_utils_rq_init_ERROR;

	for (i = 0; i < ARRAY_SIZE(rq->allocator); i++) {
		rq->size[i] = ocf_rq_sizeof(1 << i);

		if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
				(1 << i)) < 0) {
			goto ocf_utils_rq_init_ERROR;
		}

		rq->allocator[i] = env_allocator_create(rq->size[i], name);

		if (!rq->allocator[i])
			goto ocf_utils_rq_init_ERROR;

		OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, "
				"size = %lu", 1 << i, rq->size[i]);
	}

	return 0;

ocf_utils_rq_init_ERROR:

	ocf_rq_allocator_deinit(ocf_ctx);

	return -1;
}

void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx)
{
	int i;
	struct ocf_rq_allocator *rq;

	OCF_DEBUG_TRACE(cache);


	if (!ocf_ctx->resources.rq)
		return;

	rq = ocf_ctx->resources.rq;

	for (i = 0; i < ARRAY_SIZE(rq->allocator); i++) {
		if (rq->allocator[i]) {
			env_allocator_destroy(rq->allocator[i]);
			rq->allocator[i] = NULL;
		}
	}

	env_free(rq);
	ocf_ctx->resources.rq = NULL;
}

static inline env_allocator *_ocf_rq_get_allocator_1(
		struct ocf_cache *cache)
{
	return cache->owner->resources.rq->allocator[0];
}

static env_allocator *_ocf_rq_get_allocator(
		struct ocf_cache *cache, uint32_t count)
{
	struct ocf_ctx *ocf_ctx = cache->owner;
	unsigned int idx = 31 - __builtin_clz(count);

	if (__builtin_ffs(count) <= idx)
		idx++;

	ENV_BUG_ON(count == 0);

	if (idx >= ocf_rq_size_max)
		return NULL;

	return ocf_ctx->resources.rq->allocator[idx];
}

static void start_cache_req(struct ocf_request *rq)
{
	ocf_cache_t cache = rq->cache;

	rq->d2c = 1;
	if (env_atomic_read(&cache->attached)) {
		rq->d2c = 0;
		env_atomic_inc(&cache->pending_cache_requests);
		if (!env_atomic_read(&cache->attached)) {
			rq->d2c = 1;
			env_atomic_dec(&cache->pending_cache_requests);
		}
	}
}

struct ocf_request *ocf_rq_new(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
	uint64_t core_line_first, core_line_last, core_line_count;
	struct ocf_request *rq;
	env_allocator *allocator;

	if (likely(bytes)) {
		core_line_first = ocf_bytes_2_lines(cache, addr);
		core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
		core_line_count = core_line_last - core_line_first + 1;
	} else {
		core_line_first = ocf_bytes_2_lines(cache, addr);
		core_line_last = core_line_first;
		core_line_count = 1;
	}

	allocator = _ocf_rq_get_allocator(cache, core_line_count);
	if (allocator) {
		rq = env_allocator_new(allocator);
	} else {
		rq = env_allocator_new(_ocf_rq_get_allocator_1(cache));
	}

	if (unlikely(!rq))
		return NULL;

	if (allocator)
		rq->map = rq->__map;

	OCF_DEBUG_TRACE(cache);

	rq->cache = cache;

	env_atomic_inc(&cache->pending_requests);
	start_cache_req(rq);

	rq->io_queue = 0;
	env_atomic_set(&rq->ref_count, 1);
	rq->core_id = core_id;

	rq->byte_position = addr;
	rq->byte_length = bytes;
	rq->core_line_first = core_line_first;
	rq->core_line_last = core_line_last;
	rq->core_line_count = core_line_count;
	rq->alloc_core_line_count = core_line_count;
	rq->rw = rw;
	rq->part_id = PARTITION_DEFAULT;

	return rq;
}

int ocf_rq_alloc_map(struct ocf_request *rq)
{
	if (rq->map)
		return 0;

	rq->map = env_zalloc(ocf_rq_sizeof_map(rq), ENV_MEM_NOIO);
	if (!rq->map) {
		rq->error = -ENOMEM;
		return -ENOMEM;
	}

	return 0;
}

struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
	struct ocf_request *rq;

	rq = ocf_rq_new(cache, core_id, addr, bytes, rw);

	if (likely(rq) && ocf_rq_alloc_map(rq)) {
		ocf_rq_put(rq);
		return NULL;
	}

	return rq;
}

struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
	struct ocf_request *rq;

	rq = ocf_rq_new_extended(cache, core_id, addr,
			MIN(bytes, MAX_TRIM_RQ_SIZE), rw);

	if (!rq)
		return NULL;

	rq->discard.sector = BYTES_TO_SECTORS(addr);
	rq->discard.nr_sects = BYTES_TO_SECTORS(bytes);
	rq->discard.handled = 0;

	return rq;
}

void ocf_rq_get(struct ocf_request *rq)
{
	OCF_DEBUG_TRACE(rq->cache);

	env_atomic_inc(&rq->ref_count);
}

void ocf_rq_put(struct ocf_request *rq)
{
	env_allocator *allocator;

	if (env_atomic_dec_return(&rq->ref_count))
		return;

	OCF_DEBUG_TRACE(rq->cache);

	if (!rq->d2c && !env_atomic_dec_return(
			&rq->cache->pending_cache_requests)) {
		env_waitqueue_wake_up(&rq->cache->pending_cache_wq);
	}

	env_atomic_dec(&rq->cache->pending_requests);

	allocator = _ocf_rq_get_allocator(rq->cache,
			rq->alloc_core_line_count);
	if (allocator) {
		env_allocator_del(allocator, rq);
	} else {
		env_free(rq->map);
		env_allocator_del(_ocf_rq_get_allocator_1(rq->cache), rq);
	}
}

void ocf_rq_clear_info(struct ocf_request *rq)
{
	ENV_BUG_ON(env_memset(&rq->info, sizeof(rq->info), 0));
}

void ocf_rq_clear_map(struct ocf_request *rq)
{
	if (likely(rq->map))
		ENV_BUG_ON(env_memset(rq->map,
				sizeof(rq->map[0]) * rq->core_line_count, 0));
}

uint32_t ocf_rq_get_allocated(struct ocf_cache *cache)
{
	return env_atomic_read(&cache->pending_requests);
}
@ -75,7 +75,7 @@ int __wrap_cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id
int __wrap_cleaning_policy_acp_remove_core(ocf_cache_t cache,
		ocf_core_id_t core_id){}

void __wrap_cleaning_policy_acp_request_pending(struct ocf_request *rq){
void __wrap_cleaning_policy_acp_request_pending(struct ocf_request *req){
}

void cleaning_policy_acp_setup(struct ocf_cache *cache)
@ -42,7 +42,7 @@ void __wrap_env_cond_resched(void)
{
}

void __wrap_ocf_engine_push_rq_front(struct ocf_request *rq)
void __wrap_ocf_engine_push_req_front(struct ocf_request *req)
{
}