Unify req naming convention (rq -> req)

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
Robert Baldyga
2018-12-12 13:25:26 +01:00
parent 131148adac
commit db92083432
70 changed files with 1815 additions and 1815 deletions

View File

@@ -20,7 +20,7 @@
#include "engine_d2c.h"
#include "engine_ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"
@@ -106,11 +106,11 @@ const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
return cache_mode_io_if_map[req_cache_mode];
}
struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
struct ocf_queue *q)
{
unsigned long lock_flags;
struct ocf_request *rq;
struct ocf_request *req;
OCF_CHECK_NULL(q);
@@ -125,22 +125,22 @@ struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
}
/* Get the first request and remove it from the list */
rq = list_first_entry(&q->io_list, struct ocf_request, list);
req = list_first_entry(&q->io_list, struct ocf_request, list);
env_atomic_dec(&q->io_no);
list_del(&rq->list);
list_del(&req->list);
/* UNLOCK */
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
OCF_CHECK_NULL(rq);
OCF_CHECK_NULL(req);
if (ocf_rq_alloc_map(rq)) {
rq->complete(rq, rq->error);
if (ocf_req_alloc_map(req)) {
req->complete(req, req->error);
return NULL;
}
return rq;
return req;
}
bool ocf_fallback_pt_is_on(ocf_cache_t cache)
@@ -218,7 +218,7 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
{
ocf_cache_mode_t mode;
if (cache->pt_unaligned_io && !ocf_rq_is_4k(io->addr, io->bytes))
if (cache->pt_unaligned_io && !ocf_req_is_4k(io->addr, io->bytes))
return ocf_cache_mode_pt;
mode = ocf_part_get_cache_mode(cache,
@@ -239,27 +239,27 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
return mode;
}
int ocf_engine_hndl_rq(struct ocf_request *rq,
int ocf_engine_hndl_req(struct ocf_request *req,
ocf_req_cache_mode_t req_cache_mode)
{
ocf_cache_t cache = rq->cache;
ocf_cache_t cache = req->cache;
OCF_CHECK_NULL(cache);
rq->io_if = ocf_get_io_if(req_cache_mode);
if (!rq->io_if)
req->io_if = ocf_get_io_if(req_cache_mode);
if (!req->io_if)
return -EINVAL;
/* Until the OCF engine is fully synchronous, the OCF request needs
 * to be pushed to the OCF workers
 */
ocf_engine_push_rq_back(rq, true);
ocf_engine_push_req_back(req, true);
return 0;
}
int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
int ocf_engine_hndl_fast_req(struct ocf_request *req,
ocf_req_cache_mode_t req_cache_mode)
{
const struct ocf_io_if *io_if;
@@ -268,47 +268,47 @@ int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
if (!io_if)
return -EINVAL;
switch (rq->rw) {
switch (req->rw) {
case OCF_READ:
return io_if->read(rq);
return io_if->read(req);
case OCF_WRITE:
return io_if->write(rq);
return io_if->write(req);
default:
return OCF_FAST_PATH_NO;
}
}
static void ocf_engine_hndl_2dc_rq(struct ocf_request *rq)
static void ocf_engine_hndl_2dc_req(struct ocf_request *req)
{
if (OCF_READ == rq->rw)
IO_IFS[OCF_IO_D2C_IF].read(rq);
else if (OCF_WRITE == rq->rw)
IO_IFS[OCF_IO_D2C_IF].write(rq);
if (OCF_READ == req->rw)
IO_IFS[OCF_IO_D2C_IF].read(req);
else if (OCF_WRITE == req->rw)
IO_IFS[OCF_IO_D2C_IF].write(req);
else
ENV_BUG();
}
void ocf_engine_hndl_discard_rq(struct ocf_request *rq)
void ocf_engine_hndl_discard_req(struct ocf_request *req)
{
if (rq->d2c) {
ocf_engine_hndl_2dc_rq(rq);
if (req->d2c) {
ocf_engine_hndl_2dc_req(req);
return;
}
if (OCF_READ == rq->rw)
IO_IFS[OCF_IO_DISCARD_IF].read(rq);
else if (OCF_WRITE == rq->rw)
IO_IFS[OCF_IO_DISCARD_IF].write(rq);
if (OCF_READ == req->rw)
IO_IFS[OCF_IO_DISCARD_IF].read(req);
else if (OCF_WRITE == req->rw)
IO_IFS[OCF_IO_DISCARD_IF].write(req);
else
ENV_BUG();
}
void ocf_engine_hndl_ops_rq(struct ocf_request *rq)
void ocf_engine_hndl_ops_req(struct ocf_request *req)
{
if (rq->d2c)
rq->io_if = &IO_IFS[OCF_IO_D2C_IF];
if (req->d2c)
req->io_if = &IO_IFS[OCF_IO_D2C_IF];
else
rq->io_if = &IO_IFS[OCF_IO_OPS_IF];
req->io_if = &IO_IFS[OCF_IO_OPS_IF];
ocf_engine_push_rq_back(rq, true);
ocf_engine_push_req_back(req, true);
}

View File

@@ -63,20 +63,20 @@ bool ocf_fallback_pt_is_on(ocf_cache_t cache);
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes);
struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
struct ocf_queue *q);
int ocf_engine_hndl_rq(struct ocf_request *rq,
int ocf_engine_hndl_req(struct ocf_request *req,
ocf_req_cache_mode_t req_cache_mode);
#define OCF_FAST_PATH_YES 7
#define OCF_FAST_PATH_NO 13
int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
int ocf_engine_hndl_fast_req(struct ocf_request *req,
ocf_req_cache_mode_t req_cache_mode);
void ocf_engine_hndl_discard_rq(struct ocf_request *rq);
void ocf_engine_hndl_discard_req(struct ocf_request *req);
void ocf_engine_hndl_ops_rq(struct ocf_request *rq);
void ocf_engine_hndl_ops_req(struct ocf_request *req);
#endif

View File

@@ -10,7 +10,7 @@
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
@@ -37,56 +37,56 @@ static inline void backfill_queue_inc_block(struct ocf_cache *cache)
env_atomic_set(&cache->pending_read_misses_list_blocked, 1);
}
static void _ocf_backfill_do_io(struct ocf_request *rq, int error)
static void _ocf_backfill_do_io(struct ocf_request *req, int error)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (error)
rq->error = error;
req->error = error;
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
if (req->error)
inc_fallback_pt_error_counter(req->cache);
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
if (env_atomic_dec_return(&req->req_remaining) == 0) {
/* We must free the pages we have allocated */
ctx_data_secure_erase(cache->owner, rq->data);
ctx_data_munlock(cache->owner, rq->data);
ctx_data_free(cache->owner, rq->data);
rq->data = NULL;
ctx_data_secure_erase(cache->owner, req->data);
ctx_data_munlock(cache->owner, req->data);
ctx_data_free(cache->owner, req->data);
req->data = NULL;
if (rq->error) {
env_atomic_inc(&cache->core_obj[rq->core_id].
if (req->error) {
env_atomic_inc(&cache->core_obj[req->core_id].
counters->cache_errors.write);
ocf_engine_invalidate(rq);
ocf_engine_invalidate(req);
} else {
ocf_rq_unlock(rq);
ocf_req_unlock(req);
/* always free the request at the last point
* of the completion path
*/
ocf_rq_put(rq);
ocf_req_put(req);
}
}
}
static int _ocf_backfill_do(struct ocf_request *rq)
static int _ocf_backfill_do(struct ocf_request *req)
{
unsigned int reqs_to_issue;
backfill_queue_dec_unblock(rq->cache);
backfill_queue_dec_unblock(req->cache);
reqs_to_issue = ocf_engine_io_count(rq);
reqs_to_issue = ocf_engine_io_count(req);
/* There will be #reqs_to_issue completions */
env_atomic_set(&rq->req_remaining, reqs_to_issue);
env_atomic_set(&req->req_remaining, reqs_to_issue);
rq->data = rq->cp_data;
req->data = req->cp_data;
ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_WRITE, reqs_to_issue,
ocf_submit_cache_reqs(req->cache, req->map, req, OCF_WRITE, reqs_to_issue,
_ocf_backfill_do_io);
return 0;
@@ -97,8 +97,8 @@ static const struct ocf_io_if _io_if_backfill = {
.write = _ocf_backfill_do,
};
void ocf_engine_backfill(struct ocf_request *rq)
void ocf_engine_backfill(struct ocf_request *req)
{
backfill_queue_inc_block(rq->cache);
ocf_engine_push_rq_front_if(rq, &_io_if_backfill, true);
backfill_queue_inc_block(req->cache);
ocf_engine_push_req_front_if(req, &_io_if_backfill, true);
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_BF_H_
#define ENGINE_BF_H_
void ocf_engine_backfill(struct ocf_request *rq);
void ocf_engine_backfill(struct ocf_request *req);
#endif /* ENGINE_BF_H_ */

View File

@@ -11,22 +11,22 @@
#define OCF_ENGINE_DEBUG_IO_NAME "common"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cleaner.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"
void ocf_engine_error(struct ocf_request *rq,
void ocf_engine_error(struct ocf_request *req,
bool stop_cache, const char *msg)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (stop_cache)
env_bit_clear(ocf_cache_state_running, &cache->cache_state);
ocf_core_log(&cache->core_obj[rq->core_id], log_err,
ocf_core_log(&cache->core_obj[req->core_id], log_err,
"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
BYTES_TO_SECTORS(rq->byte_position), rq->byte_length);
BYTES_TO_SECTORS(req->byte_position), req->byte_length);
}
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
@@ -85,21 +85,21 @@ static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
return -1;
}
void ocf_engine_update_rq_info(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry)
void ocf_engine_update_req_info(struct ocf_cache *cache,
struct ocf_request *req, uint32_t entry)
{
uint8_t start_sector = 0;
uint8_t end_sector = ocf_line_end_sector(cache);
struct ocf_map_info *_entry = &(rq->map[entry]);
struct ocf_map_info *_entry = &(req->map[entry]);
if (entry == 0) {
start_sector = BYTES_TO_SECTORS(rq->byte_position)
start_sector = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
}
if (entry == rq->core_line_count - 1) {
end_sector = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1)% ocf_line_sectors(cache);
if (entry == req->core_line_count - 1) {
end_sector = BYTES_TO_SECTORS(req->byte_position +
req->byte_length - 1)% ocf_line_sectors(cache);
}
/* Handle return value */
@@ -107,31 +107,31 @@ void ocf_engine_update_rq_info(struct ocf_cache *cache,
case LOOKUP_HIT:
if (metadata_test_valid_sec(cache, _entry->coll_idx,
start_sector, end_sector)) {
rq->info.hit_no++;
req->info.hit_no++;
} else {
rq->info.invalid_no++;
req->info.invalid_no++;
}
/* Check request is dirty */
if (metadata_test_dirty(cache, _entry->coll_idx)) {
rq->info.dirty_any++;
req->info.dirty_any++;
/* Check if cache line is fully dirty */
if (metadata_test_dirty_all(cache, _entry->coll_idx))
rq->info.dirty_all++;
req->info.dirty_all++;
}
if (rq->part_id != ocf_metadata_get_partition_id(cache,
if (req->part_id != ocf_metadata_get_partition_id(cache,
_entry->coll_idx)) {
/*
* Need to move this cache line into other partition
*/
_entry->re_part = rq->info.re_part = true;
_entry->re_part = req->info.re_part = true;
}
break;
case LOOKUP_MISS:
rq->info.seq_req = false;
req->info.seq_req = false;
break;
case LOOKUP_MAPPED:
break;
@@ -141,39 +141,39 @@ void ocf_engine_update_rq_info(struct ocf_cache *cache,
}
/* Check if cache hit is sequential */
if (rq->info.seq_req && entry) {
if (req->info.seq_req && entry) {
if (ocf_metadata_map_lg2phy(cache,
(rq->map[entry - 1].coll_idx)) + 1 !=
(req->map[entry - 1].coll_idx)) + 1 !=
ocf_metadata_map_lg2phy(cache,
_entry->coll_idx)) {
rq->info.seq_req = false;
req->info.seq_req = false;
}
}
}
void ocf_engine_traverse(struct ocf_request *rq)
void ocf_engine_traverse(struct ocf_request *req)
{
uint32_t i;
uint64_t core_line;
struct ocf_cache *cache = rq->cache;
ocf_core_id_t core_id = rq->core_id;
struct ocf_cache *cache = req->cache;
ocf_core_id_t core_id = req->core_id;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
ocf_rq_clear_info(rq);
rq->info.seq_req = true;
ocf_req_clear_info(req);
req->info.seq_req = true;
for (i = 0, core_line = rq->core_line_first;
core_line <= rq->core_line_last; core_line++, i++) {
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(rq->map[i]);
struct ocf_map_info *entry = &(req->map[i]);
ocf_engine_lookup_map_entry(cache, entry, core_id,
core_line);
if (entry->status != LOOKUP_HIT) {
rq->info.seq_req = false;
req->info.seq_req = false;
/* There is miss then lookup for next map entry */
OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
entry->core_line);
@@ -186,40 +186,40 @@ void ocf_engine_traverse(struct ocf_request *rq)
/* Update eviction (LRU) */
ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
ocf_engine_update_rq_info(cache, rq, i);
ocf_engine_update_req_info(cache, req, i);
}
OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
"Yes" : "No");
}
int ocf_engine_check(struct ocf_request *rq)
int ocf_engine_check(struct ocf_request *req)
{
int result = 0;
uint32_t i;
uint64_t core_line;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
ocf_rq_clear_info(rq);
rq->info.seq_req = true;
ocf_req_clear_info(req);
req->info.seq_req = true;
for (i = 0, core_line = rq->core_line_first;
core_line <= rq->core_line_last; core_line++, i++) {
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(rq->map[i]);
struct ocf_map_info *entry = &(req->map[i]);
if (entry->status == LOOKUP_MISS) {
rq->info.seq_req = false;
req->info.seq_req = false;
continue;
}
if (_ocf_engine_check_map_entry(cache, entry, rq->core_id)) {
if (_ocf_engine_check_map_entry(cache, entry, req->core_id)) {
/* Mapping is invalid */
entry->invalid = true;
rq->info.seq_req = false;
req->info.seq_req = false;
OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
entry->coll_idx);
@@ -231,26 +231,26 @@ int ocf_engine_check(struct ocf_request *rq)
OCF_DEBUG_PARAM(cache, "Valid, Cache line %u",
entry->coll_idx);
ocf_engine_update_rq_info(cache, rq, i);
ocf_engine_update_req_info(cache, req, i);
}
}
OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
OCF_DEBUG_PARAM(cache, "Sequential - %s", req->info.seq_req ?
"Yes" : "No");
return result;
}
static void ocf_engine_map_cache_line(struct ocf_request *rq,
static void ocf_engine_map_cache_line(struct ocf_request *req,
uint64_t core_line, unsigned int hash_index,
ocf_cache_line_t *cache_line)
{
struct ocf_cache *cache = rq->cache;
ocf_part_id_t part_id = rq->part_id;
struct ocf_cache *cache = req->cache;
ocf_part_id_t part_id = req->part_id;
ocf_cleaning_t clean_policy_type;
if (cache->device->freelist_part->curr_size == 0) {
rq->info.eviction_error = 1;
req->info.eviction_error = 1;
return;
}
@@ -265,7 +265,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *rq,
ocf_metadata_add_to_partition(cache, part_id, *cache_line);
/* Add the block to the corresponding collision list */
ocf_metadata_add_to_collision(cache, rq->core_id, core_line, hash_index,
ocf_metadata_add_to_collision(cache, req->core_id, core_line, hash_index,
*cache_line);
ocf_eviction_init_cache_line(cache, *cache_line, part_id);
@@ -284,13 +284,13 @@ static void ocf_engine_map_cache_line(struct ocf_request *rq,
}
static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
struct ocf_request *rq)
struct ocf_request *req)
{
uint32_t i;
struct ocf_map_info *entry;
for (i = 0; i < rq->core_line_count; i++) {
entry = &(rq->map[i]);
for (i = 0; i < req->core_line_count; i++) {
entry = &(req->map[i]);
switch (entry->status) {
case LOOKUP_HIT:
@@ -298,7 +298,7 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
break;
case LOOKUP_MAPPED:
OCF_DEBUG_RQ(rq, "Canceling cache line %u",
OCF_DEBUG_RQ(req, "Canceling cache line %u",
entry->coll_idx);
set_cache_line_invalid_no_flush(cache, 0,
ocf_line_end_sector(cache),
@@ -312,83 +312,83 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
}
}
void ocf_engine_map(struct ocf_request *rq)
void ocf_engine_map(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
uint32_t i;
struct ocf_map_info *entry;
uint64_t core_line;
int status = LOOKUP_MAPPED;
ocf_core_id_t core_id = rq->core_id;
ocf_core_id_t core_id = req->core_id;
if (ocf_engine_unmapped_count(rq))
status = space_managment_evict_do(cache, rq,
ocf_engine_unmapped_count(rq));
if (ocf_engine_unmapped_count(req))
status = space_managment_evict_do(cache, req,
ocf_engine_unmapped_count(req));
if (rq->info.eviction_error)
if (req->info.eviction_error)
return;
ocf_rq_clear_info(rq);
rq->info.seq_req = true;
ocf_req_clear_info(req);
req->info.seq_req = true;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
for (i = 0, core_line = rq->core_line_first;
core_line <= rq->core_line_last; core_line++, i++) {
entry = &(rq->map[i]);
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
entry = &(req->map[i]);
ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);
if (entry->status != LOOKUP_HIT) {
ocf_engine_map_cache_line(rq, entry->core_line,
ocf_engine_map_cache_line(req, entry->core_line,
entry->hash_key, &entry->coll_idx);
if (rq->info.eviction_error) {
if (req->info.eviction_error) {
/*
* Eviction error (mapping error), need to
* clean, return and do pass through
*/
OCF_DEBUG_RQ(rq, "Eviction ERROR when mapping");
ocf_engine_map_hndl_error(cache, rq);
OCF_DEBUG_RQ(req, "Eviction ERROR when mapping");
ocf_engine_map_hndl_error(cache, req);
break;
}
entry->status = status;
}
OCF_DEBUG_PARAM(rq->cache,
OCF_DEBUG_PARAM(req->cache,
"%s, cache line %u, core line = %llu",
entry->status == LOOKUP_HIT ? "Hit" : "Map",
entry->coll_idx, entry->core_line);
ocf_engine_update_rq_info(cache, rq, i);
ocf_engine_update_req_info(cache, req, i);
}
OCF_DEBUG_PARAM(rq->cache, "Sequential - %s", rq->info.seq_req ?
OCF_DEBUG_PARAM(req->cache, "Sequential - %s", req->info.seq_req ?
"Yes" : "No");
}
static void _ocf_engine_clean_end(void *private_data, int error)
{
struct ocf_request *rq = private_data;
struct ocf_request *req = private_data;
if (error) {
OCF_DEBUG_RQ(rq, "Cleaning ERROR");
rq->error |= error;
OCF_DEBUG_RQ(req, "Cleaning ERROR");
req->error |= error;
/* End the request and do not continue processing */
ocf_rq_unlock(rq);
ocf_req_unlock(req);
/* Complete request */
rq->complete(rq, error);
req->complete(req, error);
/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
} else {
rq->info.dirty_any = 0;
rq->info.dirty_all = 0;
ocf_engine_push_rq_front(rq, true);
req->info.dirty_any = 0;
req->info.dirty_all = 0;
ocf_engine_push_req_front(req, true);
}
}
@@ -396,12 +396,12 @@ static int _ocf_engine_clean_getter(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
struct ocf_cleaner_attribs *attribs = getter_context;
struct ocf_request *rq = attribs->cmpl_context;
struct ocf_request *req = attribs->cmpl_context;
for (; attribs->getter_item < rq->core_line_count;
for (; attribs->getter_item < req->core_line_count;
attribs->getter_item++) {
struct ocf_map_info *entry = &rq->map[attribs->getter_item];
struct ocf_map_info *entry = &req->map[attribs->getter_item];
if (entry->status != LOOKUP_HIT)
continue;
@@ -418,53 +418,53 @@ static int _ocf_engine_clean_getter(struct ocf_cache *cache,
return -1;
}
void ocf_engine_clean(struct ocf_request *rq)
void ocf_engine_clean(struct ocf_request *req)
{
/* Initialize attributes for cleaner */
struct ocf_cleaner_attribs attribs = {
.cache_line_lock = false,
.cmpl_context = rq,
.cmpl_context = req,
.cmpl_fn = _ocf_engine_clean_end,
.getter = _ocf_engine_clean_getter,
.getter_context = &attribs,
.getter_item = 0,
.count = rq->info.dirty_any,
.io_queue = rq->io_queue
.count = req->info.dirty_any,
.io_queue = req->io_queue
};
/* Start cleaning */
ocf_cleaner_fire(rq->cache, &attribs);
ocf_cleaner_fire(req->cache, &attribs);
}
void ocf_engine_update_block_stats(struct ocf_request *rq)
void ocf_engine_update_block_stats(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
ocf_core_id_t core_id = rq->core_id;
ocf_part_id_t part_id = rq->part_id;
struct ocf_cache *cache = req->cache;
ocf_core_id_t core_id = req->core_id;
ocf_part_id_t part_id = req->part_id;
struct ocf_counters_block *blocks;
blocks = &cache->core_obj[core_id].counters->
part_counters[part_id].blocks;
if (rq->rw == OCF_READ)
env_atomic64_add(rq->byte_length, &blocks->read_bytes);
else if (rq->rw == OCF_WRITE)
env_atomic64_add(rq->byte_length, &blocks->write_bytes);
if (req->rw == OCF_READ)
env_atomic64_add(req->byte_length, &blocks->read_bytes);
else if (req->rw == OCF_WRITE)
env_atomic64_add(req->byte_length, &blocks->write_bytes);
else
ENV_BUG();
}
void ocf_engine_update_request_stats(struct ocf_request *rq)
void ocf_engine_update_request_stats(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
ocf_core_id_t core_id = rq->core_id;
ocf_part_id_t part_id = rq->part_id;
struct ocf_cache *cache = req->cache;
ocf_core_id_t core_id = req->core_id;
ocf_part_id_t part_id = req->part_id;
struct ocf_counters_req *reqs;
switch (rq->rw) {
switch (req->rw) {
case OCF_READ:
reqs = &cache->core_obj[core_id].counters->
part_counters[part_id].read_reqs;
@@ -479,69 +479,69 @@ void ocf_engine_update_request_stats(struct ocf_request *rq)
env_atomic64_inc(&reqs->total);
if (rq->info.hit_no == 0)
if (req->info.hit_no == 0)
env_atomic64_inc(&reqs->full_miss);
else if (rq->info.hit_no < rq->core_line_count)
else if (req->info.hit_no < req->core_line_count)
env_atomic64_inc(&reqs->partial_miss);
}
void ocf_engine_push_rq_back(struct ocf_request *rq, bool allow_sync)
void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
struct ocf_queue *q = NULL;
unsigned long lock_flags;
INIT_LIST_HEAD(&rq->list);
INIT_LIST_HEAD(&req->list);
ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
q = &cache->io_queues[rq->io_queue];
ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
q = &cache->io_queues[req->io_queue];
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add_tail(&rq->list, &q->io_list);
list_add_tail(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
if (!rq->info.internal)
if (!req->info.internal)
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
ctx_queue_kick(cache->owner, q, allow_sync);
}
void ocf_engine_push_rq_front(struct ocf_request *rq, bool allow_sync)
void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
struct ocf_queue *q = NULL;
unsigned long lock_flags;
INIT_LIST_HEAD(&rq->list);
INIT_LIST_HEAD(&req->list);
ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
q = &cache->io_queues[rq->io_queue];
ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
q = &cache->io_queues[req->io_queue];
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add(&rq->list, &q->io_list);
list_add(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
if (!rq->info.internal)
if (!req->info.internal)
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
ctx_queue_kick(cache->owner, q, allow_sync);
}
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
void ocf_engine_push_req_front_if(struct ocf_request *req,
const struct ocf_io_if *io_if,
bool allow_sync)
{
rq->error = 0; /* Please explain why!!! */
rq->io_if = io_if;
ocf_engine_push_rq_front(rq, allow_sync);
req->error = 0; /* Please explain why!!! */
req->io_if = io_if;
ocf_engine_push_req_front(req, allow_sync);
}
void inc_fallback_pt_error_counter(ocf_cache_t cache)
@@ -558,44 +558,44 @@ void inc_fallback_pt_error_counter(ocf_cache_t cache)
}
}
static int _ocf_engine_refresh(struct ocf_request *rq)
static int _ocf_engine_refresh(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
int result;
OCF_METADATA_LOCK_RD();
/* Check under metadata RD lock */
result = ocf_engine_check(rq);
result = ocf_engine_check(req);
OCF_METADATA_UNLOCK_RD();
if (result == 0) {
/* Refresh successful, can process with original IO interface */
rq->io_if = rq->priv;
req->io_if = req->priv;
rq->resume = NULL;
rq->priv = NULL;
req->resume = NULL;
req->priv = NULL;
if (rq->rw == OCF_READ)
rq->io_if->read(rq);
else if (rq->rw == OCF_WRITE)
rq->io_if->write(rq);
if (req->rw == OCF_READ)
req->io_if->read(req);
else if (req->rw == OCF_WRITE)
req->io_if->write(req);
else
ENV_BUG();
} else {
ENV_WARN(true, "Inconsistent request");
rq->error = -EINVAL;
req->error = -EINVAL;
/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);
/* Release WRITE lock of request */
ocf_rq_unlock(rq);
ocf_req_unlock(req);
/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}
return 0;
@@ -606,16 +606,16 @@ static const struct ocf_io_if _io_if_refresh = {
.write = _ocf_engine_refresh,
};
void ocf_engine_on_resume(struct ocf_request *rq)
void ocf_engine_on_resume(struct ocf_request *req)
{
ENV_BUG_ON(rq->priv);
ENV_BUG_ON(ocf_engine_on_resume != rq->resume);
OCF_CHECK_NULL(rq->io_if);
ENV_BUG_ON(req->priv);
ENV_BUG_ON(ocf_engine_on_resume != req->resume);
OCF_CHECK_NULL(req->io_if);
/* Exchange IO interface */
rq->priv = (void *)rq->io_if;
req->priv = (void *)req->io_if;
OCF_DEBUG_RQ(rq, "On resume");
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_rq_front_if(rq, &_io_if_refresh, false);
ocf_engine_push_req_front_if(req, &_io_if_refresh, false);
}

View File

@@ -16,103 +16,103 @@
/**
* @brief Signal and handle OCF request error
*
* @param rq OCF request
* @param req OCF request
* @param stop_cache Indicates if OCF cache engine need to be stopped
* @param msg Error message to be printed into log
*/
void ocf_engine_error(struct ocf_request *rq, bool stop_cache,
void ocf_engine_error(struct ocf_request *req, bool stop_cache,
const char *msg);
/**
* @brief Check if OCF request is hit
*
* @param rq OCF request
* @param req OCF request
*
* @retval true HIT
* @retval false MISS
*/
static inline bool ocf_engine_is_hit(struct ocf_request *rq)
static inline bool ocf_engine_is_hit(struct ocf_request *req)
{
return rq->info.hit_no == rq->core_line_count;
return req->info.hit_no == req->core_line_count;
}
/**
* @brief Check if OCF request is miss
*
* @param rq OCF request
* @param req OCF request
*
* @retval true MISS
* @retval false HIT
*/
#define ocf_engine_is_miss(rq) (!ocf_engine_is_hit(rq))
#define ocf_engine_is_miss(req) (!ocf_engine_is_hit(req))
/**
* @brief Check if all cache lines are mapped fully
*
* @param rq OCF request
* @param req OCF request
*
* @retval true request is mapped fully
* @retval false request is not mapped fully and eviction might be run in
* order to complete mapping
*/
static inline bool ocf_engine_is_mapped(struct ocf_request *rq)
static inline bool ocf_engine_is_mapped(struct ocf_request *req)
{
return rq->info.hit_no + rq->info.invalid_no == rq->core_line_count;
return req->info.hit_no + req->info.invalid_no == req->core_line_count;
}
/**
* @brief Check if all cache lines are dirty
*
* @param rq OCF request
* @param req OCF request
*
* @retval true request is dirty fully
* @retval false request is not dirty fully
*/
static inline bool ocf_engine_is_dirty_all(struct ocf_request *rq)
static inline bool ocf_engine_is_dirty_all(struct ocf_request *req)
{
return rq->info.dirty_all == rq->core_line_count;
return req->info.dirty_all == req->core_line_count;
}
/**
* @brief Get number of mapped cache lines
*
* @param rq OCF request
* @param req OCF request
*
* @return Number of mapped cache lines
*/
static inline uint32_t ocf_engine_mapped_count(struct ocf_request *rq)
static inline uint32_t ocf_engine_mapped_count(struct ocf_request *req)
{
return rq->info.hit_no + rq->info.invalid_no;
return req->info.hit_no + req->info.invalid_no;
}
/**
* @brief Get number of unmapped cache lines
*
* @param rq OCF request
* @param req OCF request
*
* @return Number of unmapped cache lines
*/
static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *rq)
static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req)
{
return rq->core_line_count - (rq->info.hit_no + rq->info.invalid_no);
return req->core_line_count - (req->info.hit_no + req->info.invalid_no);
}
/**
* @brief Get number of IOs to perform cache read or write
*
* @param rq OCF request
* @param req OCF request
*
* @return Count of cache IOs
*/
static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
static inline uint32_t ocf_engine_io_count(struct ocf_request *req)
{
return rq->info.seq_req ? 1 : rq->core_line_count;
return req->info.seq_req ? 1 : req->core_line_count;
}
/**
* @brief Clean request (flush dirty data to the core device)
*
* @param rq OCF request
* @param req OCF request
*
* @note After successful cleaning:
* - Dirty status bits in request info will be cleared
@@ -123,7 +123,7 @@ static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
* - complete request to the application
* - free request
*/
void ocf_engine_clean(struct ocf_request *rq);
void ocf_engine_clean(struct ocf_request *req);
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id,
@@ -135,9 +135,9 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
*
* @note This function CALL EVICTION
*
* @param rq OCF request
* @param req OCF request
*/
void ocf_engine_map(struct ocf_request *rq);
void ocf_engine_map(struct ocf_request *req);
/**
* @brief Traverse OCF request (lookup cache)
@@ -145,79 +145,79 @@ void ocf_engine_map(struct ocf_request *rq);
* @note This function DO NOT CALL EVICTION. Only lookup in metadata is
* performed. Main purpose of this function is to check if there is a HIT.
*
* @param rq OCF request
* @param req OCF request
*/
void ocf_engine_traverse(struct ocf_request *rq);
void ocf_engine_traverse(struct ocf_request *req);
/**
* @brief Check if OCF request mapping is still valid
*
* @note If mapping entries is invalid it will be marked
*
* @param rq OCF request
* @param req OCF request
*
* @retval 0 - OCF request mapping is valid
* @return Non zero - OCF request mapping is invalid and need to call re-mapping
*/
int ocf_engine_check(struct ocf_request *rq);
int ocf_engine_check(struct ocf_request *req);
/**
* @brief Update OCF request info
*
* @param rq OCF request
* @param req OCF request
*/
void ocf_engine_update_rq_info(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry);
void ocf_engine_update_req_info(struct ocf_cache *cache,
struct ocf_request *req, uint32_t entry);
/**
* @brief Update OCF request block statistics for an exported object
*
* @param rq OCF request
* @param req OCF request
*/
void ocf_engine_update_block_stats(struct ocf_request *rq);
void ocf_engine_update_block_stats(struct ocf_request *req);
/**
* @brief Update OCF request statistics for an exported object
* (not applicable to write WI and to read WT)
*
* @param rq OCF request
* @param req OCF request
*/
void ocf_engine_update_request_stats(struct ocf_request *rq);
void ocf_engine_update_request_stats(struct ocf_request *req);
/**
* @brief Push OCF request to the back of the OCF thread worker queue
*
* @param rq OCF request
* @param req OCF request
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_rq_back(struct ocf_request *rq,
void ocf_engine_push_req_back(struct ocf_request *req,
bool allow_sync);
/**
* @brief Push OCF request to the front of the OCF thread worker queue
*
* @param rq OCF request
* @param req OCF request
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_rq_front(struct ocf_request *rq,
void ocf_engine_push_req_front(struct ocf_request *req,
bool allow_sync);
/**
* @brief Set interface and push from request to the OCF thread worker queue
*
* @param rq OCF request
* @param req OCF request
* @param io_if IO interface
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
void ocf_engine_push_req_front_if(struct ocf_request *req,
const struct ocf_io_if *io_if,
bool allow_sync);
void inc_fallback_pt_error_counter(ocf_cache_t cache);
void ocf_engine_on_resume(struct ocf_request *rq);
void ocf_engine_on_resume(struct ocf_request *req);
#endif /* ENGINE_COMMON_H_ */

View File

@@ -7,7 +7,7 @@
#include "engine_d2c.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
@@ -33,7 +33,7 @@ static void _ocf_d2c_completion(struct ocf_request *req, int error)
req->complete(req, req->error);
/* Release OCF request */
ocf_rq_put(req);
ocf_req_put(req);
}
int ocf_io_d2c(struct ocf_request *req)
@@ -46,7 +46,7 @@ int ocf_io_d2c(struct ocf_request *req)
ocf_io_start(req->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(req);
ocf_req_get(req);
ocf_submit_obj_req(&core->obj, req, _ocf_d2c_completion);
@@ -61,7 +61,7 @@ int ocf_io_d2c(struct ocf_request *req)
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(req);
ocf_req_put(req);
return 0;

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_2DC_H_
#define ENGINE_2DC_H_
int ocf_io_d2c(struct ocf_request *rq);
int ocf_io_d2c(struct ocf_request *req);
#endif /* ENGINE_2DC_H_ */

View File

@@ -30,11 +30,11 @@
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
##__VA_ARGS__)
#define OCF_DEBUG_RQ(rq, format, ...) \
ocf_cache_log(rq->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
#define OCF_DEBUG_RQ(req, format, ...) \
ocf_cache_log(req->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
OCF_READ == (rq)->rw ? "RD" : "WR", rq->byte_position, \
rq->byte_length, __func__, ##__VA_ARGS__)
OCF_READ == (req)->rw ? "RD" : "WR", req->byte_position, \
req->byte_length, __func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_PREFIX
@@ -42,7 +42,7 @@
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define OCF_DEBUG_RQ(rq, format, ...)
#define OCF_DEBUG_RQ(req, format, ...)
#endif
#endif /* ENGINE_DEBUG_H_ */

View File

@@ -8,7 +8,7 @@
#include "engine_common.h"
#include "engine_discard.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
@@ -18,10 +18,10 @@
#define OCF_ENGINE_DEBUG_IO_NAME "discard"
#include "engine_debug.h"
static int _ocf_discard_step_do(struct ocf_request *rq);
static int _ocf_discard_step(struct ocf_request *rq);
static int _ocf_discard_flush_cache(struct ocf_request *rq);
static int _ocf_discard_core(struct ocf_request *rq);
static int _ocf_discard_step_do(struct ocf_request *req);
static int _ocf_discard_step(struct ocf_request *req);
static int _ocf_discard_flush_cache(struct ocf_request *req);
static int _ocf_discard_core(struct ocf_request *req);
static const struct ocf_io_if _io_if_discard_step = {
.read = _ocf_discard_step,
@@ -43,19 +43,19 @@ static const struct ocf_io_if _io_if_discard_core = {
.write = _ocf_discard_core
};
static void _ocf_discard_complete_rq(struct ocf_request *req, int error)
static void _ocf_discard_complete_req(struct ocf_request *req, int error)
{
req->complete(req, error);
ocf_rq_put(req);
ocf_req_put(req);
}
static void _ocf_discard_core_io(struct ocf_io *io, int error)
{
struct ocf_request *rq = io->priv1;
struct ocf_request *req = io->priv1;
OCF_DEBUG_RQ(rq, "Core DISCARD Completion");
OCF_DEBUG_RQ(req, "Core DISCARD Completion");
_ocf_discard_complete_rq(rq, error);
_ocf_discard_complete_req(req, error);
}
static int _ocf_discard_core(struct ocf_request *req)
@@ -65,7 +65,7 @@ static int _ocf_discard_core(struct ocf_request *req)
io = ocf_dobj_new_io(&cache->core_obj[req->core_id].obj);
if (!io) {
_ocf_discard_complete_rq(req, -ENOMEM);
_ocf_discard_complete_req(req, -ENOMEM);
return -ENOMEM;
}
@@ -83,16 +83,16 @@ static int _ocf_discard_core(struct ocf_request *req)
static void _ocf_discard_cache_flush_io_cmpl(struct ocf_io *io, int error)
{
struct ocf_request *rq = io->priv1;
struct ocf_request *req = io->priv1;
if (error) {
ocf_metadata_error(rq->cache);
_ocf_discard_complete_rq(rq, error);
ocf_metadata_error(req->cache);
_ocf_discard_complete_req(req, error);
return;
}
rq->io_if = &_io_if_discard_core;
ocf_engine_push_rq_front(rq, true);
req->io_if = &_io_if_discard_core;
ocf_engine_push_req_front(req, true);
}
static int _ocf_discard_flush_cache(struct ocf_request *req)
@@ -102,7 +102,7 @@ static int _ocf_discard_flush_cache(struct ocf_request *req)
io = ocf_dobj_new_io(&req->cache->device->obj);
if (!io) {
ocf_metadata_error(req->cache);
_ocf_discard_complete_rq(req, -ENOMEM);
_ocf_discard_complete_req(req, -ENOMEM);
return -ENOMEM;
}
@@ -114,111 +114,111 @@ static int _ocf_discard_flush_cache(struct ocf_request *req)
return 0;
}
static void _ocf_discard_finish_step(struct ocf_request *rq)
static void _ocf_discard_finish_step(struct ocf_request *req)
{
rq->discard.handled += BYTES_TO_SECTORS(rq->byte_length);
req->discard.handled += BYTES_TO_SECTORS(req->byte_length);
if (rq->discard.handled < rq->discard.nr_sects)
rq->io_if = &_io_if_discard_step;
else if (rq->cache->device->init_mode != ocf_init_mode_metadata_volatile)
rq->io_if = &_io_if_discard_flush_cache;
if (req->discard.handled < req->discard.nr_sects)
req->io_if = &_io_if_discard_step;
else if (req->cache->device->init_mode != ocf_init_mode_metadata_volatile)
req->io_if = &_io_if_discard_flush_cache;
else
rq->io_if = &_io_if_discard_core;
req->io_if = &_io_if_discard_core;
ocf_engine_push_rq_front(rq, true);
ocf_engine_push_req_front(req, true);
}
static void _ocf_discard_step_io(struct ocf_request *rq, int error)
static void _ocf_discard_step_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
/* Release WRITE lock of request */
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);
if (rq->error) {
ocf_metadata_error(rq->cache);
_ocf_discard_complete_rq(rq, rq->error);
if (req->error) {
ocf_metadata_error(req->cache);
_ocf_discard_complete_req(req, req->error);
return;
}
_ocf_discard_finish_step(rq);
_ocf_discard_finish_step(req);
}
int _ocf_discard_step_do(struct ocf_request *rq)
int _ocf_discard_step_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */
if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* There are mapped cache line, need to remove them */
OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
ocf_purge_map_info(req);
if (rq->info.flush_metadata) {
if (req->info.flush_metadata) {
/* Request was dirty and need to flush metadata */
ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_discard_step_io);
}
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
OCF_DEBUG_RQ(rq, "Discard");
_ocf_discard_step_io(rq, 0);
OCF_DEBUG_RQ(req, "Discard");
_ocf_discard_step_io(req, 0);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
static void _ocf_discard_on_resume(struct ocf_request *rq)
static void _ocf_discard_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(rq, "On resume");
ocf_engine_push_rq_front(rq, true);
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true);
}
static int _ocf_discard_step(struct ocf_request *rq)
static int _ocf_discard_step(struct ocf_request *req)
{
int lock;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
rq->byte_position = SECTORS_TO_BYTES(rq->discard.sector +
rq->discard.handled);
rq->byte_length = MIN(SECTORS_TO_BYTES(rq->discard.nr_sects -
rq->discard.handled), MAX_TRIM_RQ_SIZE);
rq->core_line_first = ocf_bytes_2_lines(cache, rq->byte_position);
rq->core_line_last =
ocf_bytes_2_lines(cache, rq->byte_position + rq->byte_length - 1);
rq->core_line_count = rq->core_line_last - rq->core_line_first + 1;
rq->io_if = &_io_if_discard_step_resume;
req->byte_position = SECTORS_TO_BYTES(req->discard.sector +
req->discard.handled);
req->byte_length = MIN(SECTORS_TO_BYTES(req->discard.nr_sects -
req->discard.handled), MAX_TRIM_RQ_SIZE);
req->core_line_first = ocf_bytes_2_lines(cache, req->byte_position);
req->core_line_last =
ocf_bytes_2_lines(cache, req->byte_position + req->byte_length - 1);
req->core_line_count = req->core_line_last - req->core_line_first + 1;
req->io_if = &_io_if_discard_step_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
ENV_BUG_ON(env_memset(rq->map, sizeof(*rq->map) * rq->core_line_count,
ENV_BUG_ON(env_memset(req->map, sizeof(*req->map) * req->core_line_count,
0));
/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
} else {
lock = OCF_LOCK_ACQUIRED;
}
@@ -227,15 +227,15 @@ static int _ocf_discard_step(struct ocf_request *rq)
if (lock >= 0) {
if (OCF_LOCK_ACQUIRED == lock) {
_ocf_discard_step_do(rq);
_ocf_discard_step_do(req);
} else {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK")
OCF_DEBUG_RQ(req, "NO LOCK")
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->error |= lock;
_ocf_discard_finish_step(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->error |= lock;
_ocf_discard_finish_step(req);
}
env_cond_resched();
@@ -243,27 +243,27 @@ static int _ocf_discard_step(struct ocf_request *rq)
return 0;
}
int ocf_discard(struct ocf_request *rq)
int ocf_discard(struct ocf_request *req)
{
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(rq->io);
ocf_io_start(req->io);
if (rq->rw == OCF_READ) {
rq->complete(rq, -EINVAL);
if (req->rw == OCF_READ) {
req->complete(req, -EINVAL);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = _ocf_discard_on_resume;
req->resume = _ocf_discard_on_resume;
_ocf_discard_step(rq);
_ocf_discard_step(req);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -6,6 +6,6 @@
#ifndef __ENGINE_DISCARD_H__
#define __ENGINE_DISCARD_H__
int ocf_discard(struct ocf_request *rq);
int ocf_discard(struct ocf_request *req);
#endif

View File

@@ -9,7 +9,7 @@
#include "engine_common.h"
#include "engine_pt.h"
#include "engine_wb.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_part.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
@@ -28,76 +28,76 @@
* |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
*/
static void _ocf_read_fast_io(struct ocf_request *rq, int error)
static void _ocf_read_fast_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;
if (env_atomic_dec_return(&rq->req_remaining)) {
if (env_atomic_dec_return(&req->req_remaining)) {
/* Not all requests finished */
return;
}
OCF_DEBUG_RQ(rq, "HIT completion");
OCF_DEBUG_RQ(req, "HIT completion");
if (rq->error) {
OCF_DEBUG_RQ(rq, "ERROR");
if (req->error) {
OCF_DEBUG_RQ(req, "ERROR");
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.read);
ocf_engine_push_rq_front_pt(rq);
ocf_engine_push_req_front_pt(req);
} else {
ocf_rq_unlock(rq);
ocf_req_unlock(req);
/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);
/* Free the request at the last point of the completion path */
ocf_rq_put(rq);
ocf_req_put(req);
}
}
static int _ocf_read_fast_do(struct ocf_request *rq)
static int _ocf_read_fast_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (ocf_engine_is_miss(rq)) {
if (ocf_engine_is_miss(req)) {
/* It seams that after resume, now request is MISS, do PT */
OCF_DEBUG_RQ(rq, "Switching to read PT");
ocf_read_pt_do(rq);
OCF_DEBUG_RQ(req, "Switching to read PT");
ocf_read_pt_do(req);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);
OCF_METADATA_UNLOCK_WR();
}
/* Submit IO */
OCF_DEBUG_RQ(rq, "Submit");
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
ocf_engine_io_count(rq), _ocf_read_fast_io);
OCF_DEBUG_RQ(req, "Submit");
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
ocf_submit_cache_reqs(req->cache, req->map, req, OCF_READ,
ocf_engine_io_count(req), _ocf_read_fast_io);
/* Updata statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
ocf_engine_update_request_stats(req);
ocf_engine_update_block_stats(req);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -107,56 +107,56 @@ static const struct ocf_io_if _io_if_read_fast_resume = {
.write = _ocf_read_fast_do,
};
int ocf_read_fast(struct ocf_request *rq)
int ocf_read_fast(struct ocf_request *req)
{
bool hit;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_read_fast_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_read_fast_resume;
/*- Metadata RD access -----------------------------------------------*/
OCF_METADATA_LOCK_RD();
/* Traverse request to cache if there is hit */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
hit = ocf_engine_is_hit(rq);
hit = ocf_engine_is_hit(req);
if (hit) {
ocf_io_start(rq->io);
lock = ocf_rq_trylock_rd(rq);
ocf_io_start(req->io);
lock = ocf_req_trylock_rd(req);
}
OCF_METADATA_UNLOCK_RD();
if (hit) {
OCF_DEBUG_RQ(rq, "Fast path success");
OCF_DEBUG_RQ(req, "Fast path success");
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
/* Lock was acquired can perform IO */
_ocf_read_fast_do(rq);
_ocf_read_fast_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR");
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR");
req->complete(req, lock);
ocf_req_put(req);
}
} else {
OCF_DEBUG_RQ(rq, "Fast path failure");
OCF_DEBUG_RQ(req, "Fast path failure");
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
if (hit)
return OCF_FAST_PATH_YES;
@@ -177,56 +177,56 @@ static const struct ocf_io_if _io_if_write_fast_resume = {
.write = ocf_write_wb_do,
};
int ocf_write_fast(struct ocf_request *rq)
int ocf_write_fast(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_write_fast_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_write_fast_resume;
/*- Metadata RD access -----------------------------------------------*/
OCF_METADATA_LOCK_RD();
/* Traverse request to cache if there is hit */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(rq);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
ocf_io_start(rq->io);
lock = ocf_rq_trylock_wr(rq);
ocf_io_start(req->io);
lock = ocf_req_trylock_wr(req);
}
OCF_METADATA_UNLOCK_RD();
if (mapped) {
if (lock >= 0) {
OCF_DEBUG_RQ(rq, "Fast path success");
OCF_DEBUG_RQ(req, "Fast path success");
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
/* Lock was acquired can perform IO */
ocf_write_wb_do(rq);
ocf_write_wb_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "Fast path lock failure");
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "Fast path lock failure");
req->complete(req, lock);
ocf_req_put(req);
}
} else {
OCF_DEBUG_RQ(rq, "Fast path failure");
OCF_DEBUG_RQ(req, "Fast path failure");
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;

View File

@@ -6,7 +6,7 @@
#ifndef ENGINE_FAST_H_
#define ENGINE_FAST_H_
int ocf_read_fast(struct ocf_request *rq);
int ocf_write_fast(struct ocf_request *rq);
int ocf_read_fast(struct ocf_request *req);
int ocf_write_fast(struct ocf_request *req);
#endif /* ENGINE_WI_H_ */

View File

@@ -8,7 +8,7 @@
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
@@ -16,47 +16,47 @@
#define OCF_ENGINE_DEBUG_IO_NAME "inv"
#include "engine_debug.h"
static void _ocf_invalidate_rq(struct ocf_request *rq, int error)
static void _ocf_invalidate_req(struct ocf_request *req, int error)
{
if (error) {
rq->error = error;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = error;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
}
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
if (rq->error)
ocf_engine_error(rq, true, "Failed to flush metadata to cache");
if (req->error)
ocf_engine_error(req, true, "Failed to flush metadata to cache");
ocf_rq_unlock(rq);
ocf_req_unlock(req);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
}
static int _ocf_invalidate_do(struct ocf_request *rq)
static int _ocf_invalidate_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
ENV_BUG_ON(env_atomic_read(&rq->req_remaining));
ENV_BUG_ON(env_atomic_read(&req->req_remaining));
OCF_METADATA_LOCK_WR();
ocf_purge_map_info(rq);
ocf_purge_map_info(req);
OCF_METADATA_UNLOCK_WR();
env_atomic_inc(&rq->req_remaining);
env_atomic_inc(&req->req_remaining);
if (ocf_data_obj_is_atomic(&cache->device->obj) &&
rq->info.flush_metadata) {
req->info.flush_metadata) {
/* Metadata flush IO */
ocf_metadata_flush_do_asynch(cache, rq, _ocf_invalidate_rq);
ocf_metadata_flush_do_asynch(cache, req, _ocf_invalidate_req);
}
_ocf_invalidate_rq(rq, 0);
_ocf_invalidate_req(req, 0);
return 0;
}
@@ -66,7 +66,7 @@ static const struct ocf_io_if _io_if_invalidate = {
.write = _ocf_invalidate_do,
};
void ocf_engine_invalidate(struct ocf_request *rq)
void ocf_engine_invalidate(struct ocf_request *req)
{
ocf_engine_push_rq_front_if(rq, &_io_if_invalidate, true);
ocf_engine_push_req_front_if(req, &_io_if_invalidate, true);
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_INV_H_
#define ENGINE_INV_H_
void ocf_engine_invalidate(struct ocf_request *rq);
void ocf_engine_invalidate(struct ocf_request *req);
#endif /* ENGINE_INV_H_ */

View File

@@ -7,55 +7,55 @@
#include "engine_common.h"
#include "cache_engine.h"
#include "engine_ops.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#define OCF_ENGINE_DEBUG_IO_NAME "ops"
#include "engine_debug.h"
static void _ocf_engine_ops_io(struct ocf_request *rq, int error)
static void _ocf_engine_ops_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
if (rq->error) {
if (req->error) {
/* An error occured */
ocf_engine_error(rq, false, "Core operation failure");
ocf_engine_error(req, false, "Core operation failure");
}
/* Complete requests - both to cache and to core*/
rq->complete(rq, rq->error);
req->complete(req, req->error);
/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}
int ocf_engine_ops(struct ocf_request *rq)
int ocf_engine_ops(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* IO to the core device and to the cache device */
env_atomic_set(&rq->req_remaining, 2);
env_atomic_set(&req->req_remaining, 2);
/* Submit operation into core device */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_engine_ops_io);
ocf_submit_cache_reqs(cache, rq->map, rq, rq->rw,
ocf_submit_cache_reqs(cache, req->map, req, req->rw,
1, _ocf_engine_ops_io);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -6,6 +6,6 @@
#ifndef __CACHE_ENGINE_OPS_H_
#define __CACHE_ENGINE_OPS_H_
int ocf_engine_ops(struct ocf_request *rq);
int ocf_engine_ops(struct ocf_request *req);
#endif /* __CACHE_ENGINE_OPS_H_ */

View File

@@ -7,7 +7,7 @@
#include "engine_pt.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
@@ -16,86 +16,86 @@
#define OCF_ENGINE_DEBUG_IO_NAME "pt"
#include "engine_debug.h"
static void _ocf_read_pt_io(struct ocf_request *rq, int error)
static void _ocf_read_pt_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
if (req->error) {
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.read);
}
/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_rq_unlock_rd(rq);
ocf_req_unlock_rd(req);
/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}
static inline void _ocf_read_pt_submit(struct ocf_request *rq)
static inline void _ocf_read_pt_submit(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
env_atomic_set(&rq->req_remaining, 1); /* Core device IO */
env_atomic_set(&req->req_remaining, 1); /* Core device IO */
OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");
/* Core read */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_read_pt_io);
}
int ocf_read_pt_do(struct ocf_request *rq)
int ocf_read_pt_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
if (rq->info.dirty_any) {
if (req->info.dirty_any) {
OCF_METADATA_LOCK_RD();
/* Need to clean, start it */
ocf_engine_clean(rq);
ocf_engine_clean(req);
OCF_METADATA_UNLOCK_RD();
/* Do not processing, because first we need to clean request */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);
OCF_METADATA_UNLOCK_WR();
}
/* Submit read IO to the core */
_ocf_read_pt_submit(rq);
_ocf_read_pt_submit(req);
/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].read_reqs.pass_through);
ocf_engine_update_block_stats(req);
env_atomic64_inc(&cache->core_obj[req->core_id].counters->
part_counters[req->part_id].read_reqs.pass_through);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -105,36 +105,36 @@ static const struct ocf_io_if _io_if_pt_resume = {
.write = ocf_read_pt_do,
};
int ocf_read_pt(struct ocf_request *rq)
int ocf_read_pt(struct ocf_request *req)
{
bool use_cache = false;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(rq->io);
ocf_io_start(req->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_pt_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_pt_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
if (rq->info.seq_cutoff && ocf_engine_is_dirty_all(rq)) {
if (req->info.seq_cutoff && ocf_engine_is_dirty_all(req)) {
use_cache = true;
} else {
if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* There are mapped cache line,
* lock request for READ access
*/
lock = ocf_rq_trylock_rd(rq);
lock = ocf_req_trylock_rd(req);
} else {
/* No mapped cache lines, no need to get lock */
lock = OCF_LOCK_ACQUIRED;
@@ -148,32 +148,32 @@ int ocf_read_pt(struct ocf_request *rq)
* There is dirt HIT, and sequential cut off,
* because of this force read data from cache
*/
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_wt)->read(rq);
ocf_req_clear(req);
ocf_get_io_if(ocf_cache_mode_wt)->read(req);
} else {
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {
/* Lock acquired perform read off operations */
ocf_read_pt_do(rq);
ocf_read_pt_do(req);
} else {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
void ocf_engine_push_rq_front_pt(struct ocf_request *rq)
void ocf_engine_push_req_front_pt(struct ocf_request *req)
{
ocf_engine_push_rq_front_if(rq, &_io_if_pt_resume, true);
ocf_engine_push_req_front_if(req, &_io_if_pt_resume, true);
}

View File

@@ -6,10 +6,10 @@
#ifndef ENGINE_OFF_H_
#define ENGINE_OFF_H_
int ocf_read_pt(struct ocf_request *rq);
int ocf_read_pt(struct ocf_request *req);
int ocf_read_pt_do(struct ocf_request *rq);
int ocf_read_pt_do(struct ocf_request *req);
void ocf_engine_push_rq_front_pt(struct ocf_request *rq);
void ocf_engine_push_req_front_pt(struct ocf_request *req);
#endif /* ENGINE_OFF_H_ */

View File

@@ -13,7 +13,7 @@
#include "cache_engine.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
@@ -22,70 +22,70 @@
#define OCF_ENGINE_DEBUG_IO_NAME "rd"
#include "engine_debug.h"
static void _ocf_read_generic_hit_io(struct ocf_request *rq, int error)
static void _ocf_read_generic_hit_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
if (req->error)
inc_fallback_pt_error_counter(req->cache);
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
OCF_DEBUG_RQ(rq, "HIT completion");
if (env_atomic_dec_return(&req->req_remaining) == 0) {
OCF_DEBUG_RQ(req, "HIT completion");
if (rq->error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].
if (req->error) {
env_atomic_inc(&req->cache->core_obj[req->core_id].
counters->cache_errors.read);
ocf_engine_push_rq_front_pt(rq);
ocf_engine_push_req_front_pt(req);
} else {
ocf_rq_unlock(rq);
ocf_req_unlock(req);
/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);
/* Free the request at the last point
* of the completion path
*/
ocf_rq_put(rq);
ocf_req_put(req);
}
}
}
static void _ocf_read_generic_miss_io(struct ocf_request *rq, int error)
static void _ocf_read_generic_miss_io(struct ocf_request *req, int error)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (error)
rq->error = error;
req->error = error;
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
OCF_DEBUG_RQ(rq, "MISS completion");
if (env_atomic_dec_return(&req->req_remaining) == 0) {
OCF_DEBUG_RQ(req, "MISS completion");
if (rq->error) {
if (req->error) {
/*
* --- Do not submit this request to write-back-thread.
* Stop it here ---
*/
rq->complete(rq, rq->error);
req->complete(req, req->error);
rq->info.core_error = 1;
env_atomic_inc(&cache->core_obj[rq->core_id].
req->info.core_error = 1;
env_atomic_inc(&cache->core_obj[req->core_id].
counters->core_errors.read);
ctx_data_free(cache->owner, rq->cp_data);
rq->cp_data = NULL;
ctx_data_free(cache->owner, req->cp_data);
req->cp_data = NULL;
/* Invalidate metadata */
ocf_engine_invalidate(rq);
ocf_engine_invalidate(req);
return;
}
@@ -93,77 +93,77 @@ static void _ocf_read_generic_miss_io(struct ocf_request *rq, int error)
/* Copy pages to copy vec, since this is the one needed
* by the above layer
*/
ctx_data_cpy(cache->owner, rq->cp_data, rq->data, 0, 0,
rq->byte_length);
ctx_data_cpy(cache->owner, req->cp_data, req->data, 0, 0,
req->byte_length);
/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_engine_backfill(rq);
ocf_engine_backfill(req);
}
}
static inline void _ocf_read_generic_submit_hit(struct ocf_request *rq)
static inline void _ocf_read_generic_submit_hit(struct ocf_request *req)
{
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
ocf_engine_io_count(rq), _ocf_read_generic_hit_io);
ocf_submit_cache_reqs(req->cache, req->map, req, OCF_READ,
ocf_engine_io_count(req), _ocf_read_generic_hit_io);
}
static inline void _ocf_read_generic_submit_miss(struct ocf_request *rq)
static inline void _ocf_read_generic_submit_miss(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
int ret;
env_atomic_set(&rq->req_remaining, 1);
env_atomic_set(&req->req_remaining, 1);
rq->cp_data = ctx_data_alloc(cache->owner,
BYTES_TO_PAGES(rq->byte_length));
if (!rq->cp_data)
req->cp_data = ctx_data_alloc(cache->owner,
BYTES_TO_PAGES(req->byte_length));
if (!req->cp_data)
goto err_alloc;
ret = ctx_data_mlock(cache->owner, rq->cp_data);
ret = ctx_data_mlock(cache->owner, req->cp_data);
if (ret)
goto err_alloc;
/* Submit read request to core device. */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_read_generic_miss_io);
return;
err_alloc:
_ocf_read_generic_miss_io(rq, -ENOMEM);
_ocf_read_generic_miss_io(req, -ENOMEM);
}
static int _ocf_read_generic_do(struct ocf_request *rq)
static int _ocf_read_generic_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (ocf_engine_is_miss(rq) && rq->map->rd_locked) {
if (ocf_engine_is_miss(req) && req->map->rd_locked) {
/* Miss can be handled only on write locks.
* Need to switch to PT
*/
OCF_DEBUG_RQ(rq, "Switching to PT");
ocf_read_pt_do(rq);
OCF_DEBUG_RQ(req, "Switching to PT");
ocf_read_pt_do(req);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
if (ocf_engine_is_miss(rq)) {
if (rq->info.dirty_any) {
if (ocf_engine_is_miss(req)) {
if (req->info.dirty_any) {
OCF_METADATA_LOCK_RD();
/* Request is dirty need to clean request */
ocf_engine_clean(rq);
ocf_engine_clean(req);
OCF_METADATA_UNLOCK_RD();
/* We need to clean request before processing, return */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -171,38 +171,38 @@ static int _ocf_read_generic_do(struct ocf_request *rq)
OCF_METADATA_LOCK_RD();
/* Set valid status bits map */
ocf_set_valid_map_info(rq);
ocf_set_valid_map_info(req);
OCF_METADATA_UNLOCK_RD();
}
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);
OCF_METADATA_UNLOCK_WR();
}
OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");
/* Submit IO */
if (ocf_engine_is_hit(rq))
_ocf_read_generic_submit_hit(rq);
if (ocf_engine_is_hit(req))
_ocf_read_generic_submit_hit(req);
else
_ocf_read_generic_submit_miss(rq);
_ocf_read_generic_submit_miss(req);
/* Updata statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
ocf_engine_update_request_stats(req);
ocf_engine_update_block_stats(req);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -212,46 +212,46 @@ static const struct ocf_io_if _io_if_read_generic_resume = {
.write = _ocf_read_generic_do,
};
int ocf_read_generic(struct ocf_request *rq)
int ocf_read_generic(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
ocf_io_start(rq->io);
ocf_io_start(req->io);
if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
/* There are conditions to bypass IO */
ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
ocf_get_io_if(ocf_cache_mode_pt)->read(req);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_read_generic_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_read_generic_resume;
/*- Metadata RD access -----------------------------------------------*/
OCF_METADATA_LOCK_RD();
/* Traverse request to cache if there is hit */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(rq);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* Request is fully mapped, no need to call eviction */
if (ocf_engine_is_hit(rq)) {
if (ocf_engine_is_hit(req)) {
/* There is a hit, lock request for READ access */
lock = ocf_rq_trylock_rd(rq);
lock = ocf_req_trylock_rd(req);
} else {
/* All cache line mapped, but some sectors are not valid
* and cache insert will be performed - lock for
* WRITE is required
*/
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}
}
@@ -269,19 +269,19 @@ int ocf_read_generic(struct ocf_request *rq)
* again. If there are misses need to call eviction. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
ocf_engine_map(req);
if (!rq->info.eviction_error) {
if (ocf_engine_is_hit(rq)) {
if (!req->info.eviction_error) {
if (ocf_engine_is_hit(req)) {
/* After mapping turns out there is hit,
* so lock OCF request for read access
*/
lock = ocf_rq_trylock_rd(rq);
lock = ocf_req_trylock_rd(req);
} else {
/* Miss, new cache lines were mapped,
* need to lock OCF request for write access
*/
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}
}
OCF_METADATA_UNLOCK_WR();
@@ -289,28 +289,28 @@ int ocf_read_generic(struct ocf_request *rq)
/*- END Metadata WR access -----------------------------------*/
}
if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
/* Lock was acquired can perform IO */
_ocf_read_generic_do(rq);
_ocf_read_generic_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
ocf_req_clear(req);
ocf_get_io_if(ocf_cache_mode_pt)->read(req);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_RD_H_
#define ENGINE_RD_H_
int ocf_read_generic(struct ocf_request *rq);
int ocf_read_generic(struct ocf_request *req);
#endif /* ENGINE_RD_H_ */

View File

@@ -7,82 +7,82 @@
#include "engine_wa.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wa"
#include "engine_debug.h"
static void _ocf_read_wa_io(struct ocf_request *rq, int error)
static void _ocf_read_wa_io(struct ocf_request *req, int error)
{
if (error)
rq->error |= error;
req->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
if (req->error) {
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.write);
}
/* Complete request */
rq->complete(rq, rq->error);
req->complete(req, req->error);
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}
int ocf_write_wa(struct ocf_request *rq)
int ocf_write_wa(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
ocf_io_start(rq->io);
ocf_io_start(req->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
if (ocf_engine_is_hit(rq)) {
ocf_rq_clear(rq);
if (ocf_engine_is_hit(req)) {
ocf_req_clear(req);
/* There is HIT, do WT */
ocf_get_io_if(ocf_cache_mode_wt)->write(rq);
ocf_get_io_if(ocf_cache_mode_wt)->write(req);
} else if (ocf_engine_mapped_count(rq)) {
ocf_rq_clear(rq);
} else if (ocf_engine_mapped_count(req)) {
ocf_req_clear(req);
/* Partial MISS, do WI */
ocf_get_io_if(ocf_cache_mode_wi)->write(rq);
ocf_get_io_if(ocf_cache_mode_wi)->write(req);
} else {
/* There is no mapped cache line, write directly into core */
OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");
/* Submit write IO to the core */
env_atomic_set(&rq->req_remaining, 1);
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
env_atomic_set(&req->req_remaining, 1);
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_read_wa_io);
/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
ocf_engine_update_block_stats(req);
env_atomic64_inc(&cache->core_obj[req->core_id].counters->
part_counters[req->part_id].write_reqs.pass_through);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_WA_H_
#define ENGINE_WA_H_
int ocf_write_wa(struct ocf_request *rq);
int ocf_write_wa(struct ocf_request *req);
#endif /* ENGINE_WA_H_ */

View File

@@ -9,7 +9,7 @@
#include "engine_common.h"
#include "engine_wb.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
@@ -23,59 +23,59 @@ static const struct ocf_io_if _io_if_wb_resume = {
.write = ocf_write_wb_do,
};
static void _ocf_write_wb_update_bits(struct ocf_request *rq)
static void _ocf_write_wb_update_bits(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (ocf_engine_is_miss(rq)) {
if (ocf_engine_is_miss(req)) {
OCF_METADATA_LOCK_RD();
/* Update valid status bits */
ocf_set_valid_map_info(rq);
ocf_set_valid_map_info(req);
OCF_METADATA_UNLOCK_RD();
}
if (!ocf_engine_is_dirty_all(rq)) {
if (!ocf_engine_is_dirty_all(req)) {
OCF_METADATA_LOCK_WR();
/* set dirty bits, and mark if metadata flushing is required */
ocf_set_dirty_map_info(rq);
ocf_set_dirty_map_info(req);
OCF_METADATA_UNLOCK_WR();
}
}
static void _ocf_write_wb_io_flush_metadata(struct ocf_request *rq, int error)
static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
{
if (error)
rq->error = error;
req->error = error;
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_rq_put(rq);
ocf_req_put(req);
}
static int ocf_write_wb_do_flush_metadata(struct ocf_request *rq)
static int ocf_write_wb_do_flush_metadata(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */
if (rq->info.flush_metadata) {
OCF_DEBUG_RQ(rq, "Flush metadata");
ocf_metadata_flush_do_asynch(cache, rq,
if (req->info.flush_metadata) {
OCF_DEBUG_RQ(req, "Flush metadata");
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wb_io_flush_metadata);
}
_ocf_write_wb_io_flush_metadata(rq, 0);
_ocf_write_wb_io_flush_metadata(req, 0);
return 0;
}
@@ -85,39 +85,39 @@ static const struct ocf_io_if _io_if_wb_flush_metadata = {
.write = ocf_write_wb_do_flush_metadata,
};
static void _ocf_write_wb_io(struct ocf_request *rq, int error)
static void _ocf_write_wb_io(struct ocf_request *req, int error)
{
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
rq->error |= error;
req->error |= error;
}
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
if (rq->error) {
ocf_engine_error(rq, true, "Failed to write data to cache");
if (req->error) {
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_rq_put(rq);
ocf_req_put(req);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wb_flush_metadata,
ocf_engine_push_req_front_if(req, &_io_if_wb_flush_metadata,
true);
}
}
static inline void _ocf_write_wb_submit(struct ocf_request *rq)
static inline void _ocf_write_wb_submit(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
/*
* 1. Submit data
@@ -125,73 +125,73 @@ static inline void _ocf_write_wb_submit(struct ocf_request *rq)
* 3. Then continue processing request (flush metadata)
*/
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);
OCF_METADATA_UNLOCK_WR();
}
OCF_DEBUG_RQ(rq, "Submit Data");
OCF_DEBUG_RQ(req, "Submit Data");
/* Data IO */
ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
ocf_engine_io_count(rq), _ocf_write_wb_io);
ocf_submit_cache_reqs(cache, req->map, req, OCF_WRITE,
ocf_engine_io_count(req), _ocf_write_wb_io);
}
int ocf_write_wb_do(struct ocf_request *rq)
int ocf_write_wb_do(struct ocf_request *req)
{
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Updata status bits */
_ocf_write_wb_update_bits(rq);
_ocf_write_wb_update_bits(req);
/* Submit IO */
_ocf_write_wb_submit(rq);
_ocf_write_wb_submit(req);
/* Updata statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
ocf_engine_update_request_stats(req);
ocf_engine_update_block_stats(req);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
int ocf_write_wb(struct ocf_request *rq)
int ocf_write_wb(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
ocf_io_start(rq->io);
ocf_io_start(req->io);
/* Not sure if we need this. */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_wb_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_wb_resume;
/* TODO: Handle fits into dirty */
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(rq);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* All cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@@ -203,36 +203,36 @@ int ocf_write_wb(struct ocf_request *rq)
* again. If there are misses need to call eviction. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
ocf_engine_map(req);
if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
/* Lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
ocf_write_wb_do(rq);
ocf_write_wb_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
ocf_req_clear(req);
ocf_get_io_if(ocf_cache_mode_pt)->write(req);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -5,8 +5,8 @@
#ifndef ENGINE_WB_H_
#define ENGINE_WB_H_
int ocf_write_wb(struct ocf_request *rq);
int ocf_write_wb(struct ocf_request *req);
int ocf_write_wb_do(struct ocf_request *rq);
int ocf_write_wb_do(struct ocf_request *req);
#endif /* ENGINE_WI_H_ */

View File

@@ -8,7 +8,7 @@
#include "engine_wi.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
@@ -16,119 +16,119 @@
#define OCF_ENGINE_DEBUG_IO_NAME "wi"
#include "engine_debug.h"
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq);
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req);
static const struct ocf_io_if _io_if_wi_flush_metadata = {
.read = ocf_write_wi_update_and_flush_metadata,
.write = ocf_write_wi_update_and_flush_metadata,
};
static void _ocf_write_wi_io_flush_metadata(struct ocf_request *rq, int error)
static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
{
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
rq->error |= error;
req->error |= error;
}
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
if (req->error)
ocf_engine_error(req, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_rq_put(rq);
ocf_req_put(req);
}
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq)
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */
if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* There are mapped cache line, need to remove them */
OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
ocf_purge_map_info(req);
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
if (rq->info.flush_metadata) {
if (req->info.flush_metadata) {
/* Request was dirty and need to flush metadata */
ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wi_io_flush_metadata);
}
}
_ocf_write_wi_io_flush_metadata(rq, 0);
_ocf_write_wi_io_flush_metadata(req, 0);
return 0;
}
static void _ocf_write_wi_core_io(struct ocf_request *rq, int error)
static void _ocf_write_wi_core_io(struct ocf_request *req, int error)
{
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = error;
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.write);
}
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
if (rq->error) {
ocf_rq_unlock_wr(rq);
if (req->error) {
ocf_req_unlock_wr(req);
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_rq_put(rq);
ocf_req_put(req);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wi_flush_metadata,
ocf_engine_push_req_front_if(req, &_io_if_wi_flush_metadata,
true);
}
}
static int _ocf_write_wi_do(struct ocf_request *rq)
static int _ocf_write_wi_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
env_atomic_set(&req->req_remaining, 1); /* One core IO */
OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");
/* Submit write IO to the core */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_write_wi_core_io);
/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
ocf_engine_update_block_stats(req);
env_atomic64_inc(&cache->core_obj[req->core_id].counters->
part_counters[req->part_id].write_reqs.pass_through);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
static void _ocf_write_wi_on_resume(struct ocf_request *rq)
static void _ocf_write_wi_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(rq, "On resume");
ocf_engine_push_rq_front(rq, true);
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true);
}
static const struct ocf_io_if _io_if_wi_resume = {
@@ -136,30 +136,30 @@ static const struct ocf_io_if _io_if_wi_resume = {
.write = _ocf_write_wi_do,
};
int ocf_write_wi(struct ocf_request *rq)
int ocf_write_wi(struct ocf_request *req)
{
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
OCF_DEBUG_TRACE(rq->cache);
OCF_DEBUG_TRACE(req->cache);
ocf_io_start(rq->io);
ocf_io_start(req->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = _ocf_write_wi_on_resume;
rq->io_if = &_io_if_wi_resume;
req->resume = _ocf_write_wi_on_resume;
req->io_if = &_io_if_wi_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
if (ocf_engine_mapped_count(rq)) {
if (ocf_engine_mapped_count(req)) {
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
} else {
lock = OCF_LOCK_ACQUIRED;
}
@@ -168,19 +168,19 @@ int ocf_write_wi(struct ocf_request *rq)
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {
_ocf_write_wi_do(rq);
_ocf_write_wi_do(req);
} else {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_WI_H_
#define ENGINE_WI_H_
int ocf_write_wi(struct ocf_request *rq);
int ocf_write_wi(struct ocf_request *req);
#endif /* ENGINE_WI_H_ */

View File

@@ -8,7 +8,7 @@
#include "engine_wt.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
@@ -18,141 +18,141 @@
#define OCF_ENGINE_DEBUG_IO_NAME "wt"
#include "engine_debug.h"
static void _ocf_write_wt_io(struct ocf_request *rq)
static void _ocf_write_wt_io(struct ocf_request *req)
{
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
OCF_DEBUG_RQ(req, "Completion");
if (rq->error) {
if (req->error) {
/* An error occured */
/* Complete request */
rq->complete(rq, rq->info.core_error ? rq->error : 0);
req->complete(req, req->info.core_error ? req->error : 0);
ocf_engine_invalidate(rq);
ocf_engine_invalidate(req);
} else {
/* Unlock reqest from WRITE access */
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);
/* Complete request */
rq->complete(rq, rq->info.core_error ? rq->error : 0);
req->complete(req, req->info.core_error ? req->error : 0);
/* Release OCF request */
ocf_rq_put(rq);
ocf_req_put(req);
}
}
static void _ocf_write_wt_cache_io(struct ocf_request *rq, int error)
static void _ocf_write_wt_cache_io(struct ocf_request *req, int error)
{
if (error) {
rq->error = rq->error ?: error;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = req->error ?: error;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
if (req->error)
inc_fallback_pt_error_counter(req->cache);
}
_ocf_write_wt_io(rq);
_ocf_write_wt_io(req);
}
static void _ocf_write_wt_core_io(struct ocf_request *rq, int error)
static void _ocf_write_wt_core_io(struct ocf_request *req, int error)
{
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
req->error = error;
req->info.core_error = 1;
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
core_errors.write);
}
_ocf_write_wt_io(rq);
_ocf_write_wt_io(req);
}
static inline void _ocf_write_wt_submit(struct ocf_request *rq)
static inline void _ocf_write_wt_submit(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Submit IOs */
OCF_DEBUG_RQ(rq, "Submit");
OCF_DEBUG_RQ(req, "Submit");
/* Calculate how many IOs need to be submited */
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); /* Cache IO */
env_atomic_inc(&rq->req_remaining); /* Core device IO */
env_atomic_set(&req->req_remaining, ocf_engine_io_count(req)); /* Cache IO */
env_atomic_inc(&req->req_remaining); /* Core device IO */
if (rq->info.flush_metadata) {
if (req->info.flush_metadata) {
/* Metadata flush IO */
ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_write_wt_cache_io);
}
/* To cache */
ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
ocf_engine_io_count(rq), _ocf_write_wt_cache_io);
ocf_submit_cache_reqs(cache, req->map, req, OCF_WRITE,
ocf_engine_io_count(req), _ocf_write_wt_cache_io);
/* To core */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
ocf_submit_obj_req(&cache->core_obj[req->core_id].obj, req,
_ocf_write_wt_core_io);
}
static void _ocf_write_wt_update_bits(struct ocf_request *rq)
static void _ocf_write_wt_update_bits(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (ocf_engine_is_miss(rq)) {
if (ocf_engine_is_miss(req)) {
OCF_METADATA_LOCK_RD();
/* Update valid status bits */
ocf_set_valid_map_info(rq);
ocf_set_valid_map_info(req);
OCF_METADATA_UNLOCK_RD();
}
if (rq->info.dirty_any) {
if (req->info.dirty_any) {
OCF_METADATA_LOCK_WR();
/* Writes goes to SDD and HDD, need to update status bits from
* dirty to clean
*/
ocf_set_clean_map_info(rq);
ocf_set_clean_map_info(req);
OCF_METADATA_UNLOCK_WR();
}
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
if (req->info.re_part) {
OCF_DEBUG_RQ(req, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(rq);
ocf_part_move(req);
OCF_METADATA_UNLOCK_WR();
}
}
static int _ocf_write_wt_do(struct ocf_request *rq)
static int _ocf_write_wt_do(struct ocf_request *req)
{
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Update status bits */
_ocf_write_wt_update_bits(rq);
_ocf_write_wt_update_bits(req);
/* Submit IO */
_ocf_write_wt_submit(rq);
_ocf_write_wt_submit(req);
/* Updata statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
ocf_engine_update_request_stats(req);
ocf_engine_update_block_stats(req);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -162,30 +162,30 @@ static const struct ocf_io_if _io_if_wt_resume = {
.write = _ocf_write_wt_do,
};
int ocf_write_wt(struct ocf_request *rq)
int ocf_write_wt(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
ocf_io_start(rq->io);
ocf_io_start(req->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_wt_resume;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_wt_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
/* Travers to check if request is mapped fully */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(rq);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* All cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@@ -197,36 +197,36 @@ int ocf_write_wt(struct ocf_request *rq)
* again. If there are misses need to call eviction. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
ocf_engine_map(req);
if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
/* Lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
}
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
if (!rq->info.eviction_error) {
if (!req->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
OCF_DEBUG_RQ(req, "NO LOCK");
} else {
_ocf_write_wt_do(rq);
_ocf_write_wt_do(req);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d\n", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d\n", lock);
req->complete(req, lock);
ocf_req_put(req);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
ocf_req_clear(req);
ocf_get_io_if(ocf_cache_mode_pt)->write(req);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_WT_H_
#define ENGINE_WT_H_
int ocf_write_wt(struct ocf_request *rq);
int ocf_write_wt(struct ocf_request *req);
#endif /* ENGINE_WT_H_ */

View File

@@ -8,7 +8,7 @@
#include "engine_zero.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_req.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
@@ -16,28 +16,28 @@
#define OCF_ENGINE_DEBUG_IO_NAME "zero"
#include "engine_debug.h"
static int ocf_zero_purge(struct ocf_request *rq)
static int ocf_zero_purge(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
if (rq->error) {
ocf_engine_error(rq, true, "Failed to discard data on cache");
if (req->error) {
ocf_engine_error(req, true, "Failed to discard data on cache");
} else {
/* There are mapped cache line, need to remove them */
OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
ocf_purge_map_info(req);
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
ocf_rq_unlock_wr(rq);
ocf_req_unlock_wr(req);
rq->complete(rq, rq->error);
req->complete(req, req->error);
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -47,28 +47,28 @@ static const struct ocf_io_if _io_if_zero_purge = {
.write = ocf_zero_purge,
};
static void _ocf_zero_io_flush_metadata(struct ocf_request *rq, int error)
static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
{
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
env_atomic_inc(&req->cache->core_obj[req->core_id].counters->
cache_errors.write);
rq->error = error;
req->error = error;
}
if (env_atomic_dec_return(&rq->req_remaining))
if (env_atomic_dec_return(&req->req_remaining))
return;
ocf_engine_push_rq_front_if(rq, &_io_if_zero_purge, true);
ocf_engine_push_req_front_if(req, &_io_if_zero_purge, true);
}
static inline void ocf_zero_map_info(struct ocf_request *rq)
static inline void ocf_zero_map_info(struct ocf_request *req)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_map_info *map = rq->map;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
struct ocf_map_info *map = req->map;
struct ocf_cache *cache = req->cache;
uint32_t count = req->core_line_count;
/* Purge range on the basis of map info
*
@@ -86,43 +86,43 @@ static inline void ocf_zero_map_info(struct ocf_request *rq)
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
start_bit = BYTES_TO_SECTORS(req->byte_position)
% ocf_line_sectors(cache);
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1) %
end_bit = BYTES_TO_SECTORS(req->byte_position +
req->byte_length - 1) %
ocf_line_sectors(cache);
}
ocf_metadata_flush_mark(cache, rq, map_idx, INVALID,
ocf_metadata_flush_mark(cache, req, map_idx, INVALID,
start_bit, end_bit);
}
}
static int _ocf_zero_do(struct ocf_request *rq)
static int _ocf_zero_do(struct ocf_request *req)
{
struct ocf_cache *cache = rq->cache;
struct ocf_cache *cache = req->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_req_get(req);
/* Mark cache lines for zeroing/discarding */
ocf_zero_map_info(rq);
ocf_zero_map_info(req);
/* Discard marked cache lines */
env_atomic_set(&rq->req_remaining, 1);
if (rq->info.flush_metadata) {
env_atomic_set(&req->req_remaining, 1);
if (req->info.flush_metadata) {
/* Request was dirty and need to flush metadata */
ocf_metadata_flush_do_asynch(cache, rq,
ocf_metadata_flush_do_asynch(cache, req,
_ocf_zero_io_flush_metadata);
}
_ocf_zero_io_flush_metadata(rq, 0);
_ocf_zero_io_flush_metadata(req, 0);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
ocf_req_put(req);
return 0;
}
@@ -137,30 +137,30 @@ static const struct ocf_io_if _io_if_ocf_zero_do = {
* - Caller has to have metadata write lock
* - Core line has to be mapped
*/
void ocf_engine_zero_line(struct ocf_request *rq)
void ocf_engine_zero_line(struct ocf_request *req)
{
int lock = OCF_LOCK_NOT_ACQUIRED;
ENV_BUG_ON(rq->core_line_count != 1);
ENV_BUG_ON(req->core_line_count != 1);
/* Traverse to check if request is mapped */
ocf_engine_traverse(rq);
ocf_engine_traverse(req);
ENV_BUG_ON(!ocf_engine_is_mapped(rq));
ENV_BUG_ON(!ocf_engine_is_mapped(req));
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_ocf_zero_do;
req->resume = ocf_engine_on_resume;
req->io_if = &_io_if_ocf_zero_do;
/* Some cache line are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
lock = ocf_req_trylock_wr(req);
if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
ocf_engine_push_rq_front_if(rq, &_io_if_ocf_zero_do, true);
ocf_engine_push_req_front_if(req, &_io_if_ocf_zero_do, true);
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);
ocf_req_put(req);
}
}

View File

@@ -6,6 +6,6 @@
#ifndef ENGINE_ZERO_H_
#define ENGINE_ZERO_H_

/* Zero (discard) a single mapped core line; caller must hold the metadata
 * write lock — see the corresponding definition in engine_zero.c. */
void ocf_engine_zero_line(struct ocf_request *req);

#endif /* ENGINE_ZERO_H_ */