Initial commit
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
src/engine/cache_engine.c (new file, 314 lines)
@@ -0,0 +1,314 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_rd.h"
#include "engine_wt.h"
#include "engine_pt.h"
#include "engine_wi.h"
#include "engine_wa.h"
#include "engine_wb.h"
#include "engine_fast.h"
#include "engine_discard.h"
#include "engine_d2c.h"
#include "engine_ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_rq.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"

enum ocf_io_if_type {
	/* Public OCF IO interfaces to be set by user */
	OCF_IO_WT_IF,
	OCF_IO_WB_IF,
	OCF_IO_WA_IF,
	OCF_IO_WI_IF,
	OCF_IO_PT_IF,
	OCF_IO_MAX_IF,

	/* Private OCF interfaces */
	OCF_IO_FAST_IF,
	OCF_IO_DISCARD_IF,
	OCF_IO_D2C_IF,
	OCF_IO_OPS_IF,
	OCF_IO_PRIV_MAX_IF,
};

static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = {
	[OCF_IO_WT_IF] = {
		.read = ocf_read_generic,
		.write = ocf_write_wt,
		.name = "Write Through"
	},
	[OCF_IO_WB_IF] = {
		.read = ocf_read_generic,
		.write = ocf_write_wb,
		.name = "Write Back"
	},
	[OCF_IO_WA_IF] = {
		.read = ocf_read_generic,
		.write = ocf_write_wa,
		.name = "Write Around"
	},
	[OCF_IO_WI_IF] = {
		.read = ocf_read_generic,
		.write = ocf_write_wi,
		.name = "Write Invalidate"
	},
	[OCF_IO_PT_IF] = {
		.read = ocf_read_pt,
		.write = ocf_write_wi,
		.name = "Pass Through",
	},
	[OCF_IO_FAST_IF] = {
		.read = ocf_read_fast,
		.write = ocf_write_fast,
		.name = "Fast",
	},
	[OCF_IO_DISCARD_IF] = {
		.read = ocf_discard,
		.write = ocf_discard,
		.name = "Discard",
	},
	[OCF_IO_D2C_IF] = {
		.read = ocf_io_d2c,
		.write = ocf_io_d2c,
		.name = "Direct to core",
	},
	[OCF_IO_OPS_IF] = {
		.read = ocf_engine_ops,
		.write = ocf_engine_ops,
		.name = "Ops engine",
	},
};

static const struct ocf_io_if *cache_mode_io_if_map[ocf_req_cache_mode_max] = {
	[ocf_req_cache_mode_wt] = &IO_IFS[OCF_IO_WT_IF],
	[ocf_req_cache_mode_wb] = &IO_IFS[OCF_IO_WB_IF],
	[ocf_req_cache_mode_wa] = &IO_IFS[OCF_IO_WA_IF],
	[ocf_req_cache_mode_wi] = &IO_IFS[OCF_IO_WI_IF],
	[ocf_req_cache_mode_pt] = &IO_IFS[OCF_IO_PT_IF],
	[ocf_req_cache_mode_fast] = &IO_IFS[OCF_IO_FAST_IF],
	[ocf_req_cache_mode_d2c] = &IO_IFS[OCF_IO_D2C_IF],
};

const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
{
	if (req_cache_mode == ocf_req_cache_mode_max)
		return NULL;
	return cache_mode_io_if_map[req_cache_mode];
}

struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
		struct ocf_queue *q)
{
	unsigned long lock_flags;
	struct ocf_request *rq;

	OCF_CHECK_NULL(q);

	/* LOCK */
	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);

	if (list_empty(&q->io_list)) {
		/* No items on the list */
		env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
		return NULL;
	}

	/* Get the first request and remove it from the list */
	rq = list_first_entry(&q->io_list, struct ocf_request, list);

	env_atomic_dec(&q->io_no);
	list_del(&rq->list);

	/* UNLOCK */
	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);

	OCF_CHECK_NULL(rq);

	if (ocf_rq_alloc_map(rq)) {
		rq->complete(rq, rq->error);
		return NULL;
	}

	return rq;
}

bool ocf_fallback_pt_is_on(ocf_cache_t cache)
{
	ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);

	return (cache->fallback_pt_error_threshold !=
			OCF_CACHE_FALLBACK_PT_INACTIVE &&
			env_atomic_read(&cache->fallback_pt_error_counter) >=
			cache->fallback_pt_error_threshold);
}

#define SEQ_CUTOFF_FULL_MARGIN \
		(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)

static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
	if (!env_atomic_read(&cache->attached))
		return false;

	return (cache->device->freelist_part->curr_size <=
			SEQ_CUTOFF_FULL_MARGIN);
}

bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
		uint64_t bytes)
{
	ocf_cache_t cache = ocf_core_get_cache(core);

	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);

	switch (policy) {
	case ocf_seq_cutoff_policy_always:
		break;

	case ocf_seq_cutoff_policy_full:
		if (ocf_seq_cutoff_is_on(cache))
			break;
		/* Fall through */

	case ocf_seq_cutoff_policy_never:
		return false;
	default:
		ENV_WARN(true, "Invalid sequential cutoff policy!");
		return false;
	}

	if (dir == core->seq_cutoff.rw &&
			core->seq_cutoff.last == addr &&
			core->seq_cutoff.bytes + bytes >=
			ocf_core_get_seq_cutoff_threshold(core)) {
		return true;
	}

	return false;
}

void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
	/*
	 * If IO is not consecutive or has a different direction,
	 * reset sequential cutoff state.
	 */
	if (req->byte_position != core->seq_cutoff.last ||
			req->rw != core->seq_cutoff.rw) {
		core->seq_cutoff.rw = req->rw;
		core->seq_cutoff.bytes = 0;
	}

	/* Update last accessed position and bytes counter */
	core->seq_cutoff.last = req->byte_position + req->byte_length;
	core->seq_cutoff.bytes += req->byte_length;
}

ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
		ocf_core_t core, struct ocf_io *io)
{
	ocf_cache_mode_t mode;

	if (cache->pt_unaligned_io && !ocf_rq_is_4k(io->addr, io->bytes))
		return ocf_cache_mode_pt;

	mode = ocf_part_get_cache_mode(cache,
			ocf_part_class2id(cache, io->class));
	if (!ocf_cache_mode_is_valid(mode))
		mode = cache->conf_meta->cache_mode;

	if (ocf_seq_cutoff_check(core, io->dir, io->addr, io->bytes))
		mode = ocf_cache_mode_pt;

	if (ocf_fallback_pt_is_on(cache))
		mode = ocf_cache_mode_pt;

	if (mode == ocf_cache_mode_wb &&
			env_atomic_read(&cache->flush_started))
		mode = ocf_cache_mode_wt;

	return mode;
}

int ocf_engine_hndl_rq(struct ocf_request *rq,
		ocf_req_cache_mode_t req_cache_mode)
{
	ocf_cache_t cache = rq->cache;

	OCF_CHECK_NULL(cache);

	rq->io_if = ocf_get_io_if(req_cache_mode);
	if (!rq->io_if)
		return -EINVAL;

	/* Until the OCF engine is fully synchronous, the OCF request
	 * needs to be pushed to the OCF workers
	 */
	ocf_engine_push_rq_back(rq, true);

	return 0;
}

int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
		ocf_req_cache_mode_t req_cache_mode)
{
	const struct ocf_io_if *io_if;

	io_if = ocf_get_io_if(req_cache_mode);
	if (!io_if)
		return -EINVAL;

	switch (rq->rw) {
	case OCF_READ:
		return io_if->read(rq);
	case OCF_WRITE:
		return io_if->write(rq);
	default:
		return OCF_FAST_PATH_NO;
	}
}

static void ocf_engine_hndl_2dc_rq(struct ocf_request *rq)
{
	if (OCF_READ == rq->rw)
		IO_IFS[OCF_IO_D2C_IF].read(rq);
	else if (OCF_WRITE == rq->rw)
		IO_IFS[OCF_IO_D2C_IF].write(rq);
	else
		ENV_BUG();
}

void ocf_engine_hndl_discard_rq(struct ocf_request *rq)
{
	if (rq->d2c) {
		ocf_engine_hndl_2dc_rq(rq);
		return;
	}

	if (OCF_READ == rq->rw)
		IO_IFS[OCF_IO_DISCARD_IF].read(rq);
	else if (OCF_WRITE == rq->rw)
		IO_IFS[OCF_IO_DISCARD_IF].write(rq);
	else
		ENV_BUG();
}

void ocf_engine_hndl_ops_rq(struct ocf_request *rq)
{
	if (rq->d2c)
		rq->io_if = &IO_IFS[OCF_IO_D2C_IF];
	else
		rq->io_if = &IO_IFS[OCF_IO_OPS_IF];

	ocf_engine_push_rq_back(rq, true);
}
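The table-driven dispatch above is the heart of engine selection. A minimal caller sketch (not part of this commit; my_dispatch is a hypothetical name) showing how ocf_get_io_if() is meant to be used:

/* Hypothetical caller sketch: resolve the IO interface for a request's
 * cache mode and dispatch through it, mirroring ocf_engine_hndl_fast_rq()
 * above. */
static int my_dispatch(struct ocf_request *rq, ocf_req_cache_mode_t mode)
{
	const struct ocf_io_if *io_if = ocf_get_io_if(mode);

	if (!io_if)
		return -EINVAL; /* unknown or unmapped cache mode */

	/* Every engine exposes the same read/write entry points */
	return (rq->rw == OCF_READ) ? io_if->read(rq) : io_if->write(rq);
}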
src/engine/cache_engine.h (new file, 82 lines)
@@ -0,0 +1,82 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __CACHE_ENGINE_H_
#define __CACHE_ENGINE_H_

struct ocf_thread_priv;
struct ocf_request;

#define LOOKUP_HIT 5
#define LOOKUP_MISS 6
#define LOOKUP_MAPPED 8

typedef enum {
	/* modes inherited from user API */
	ocf_req_cache_mode_wt = ocf_cache_mode_wt,
	ocf_req_cache_mode_wb = ocf_cache_mode_wb,
	ocf_req_cache_mode_wa = ocf_cache_mode_wa,
	ocf_req_cache_mode_pt = ocf_cache_mode_pt,
	ocf_req_cache_mode_wi = ocf_cache_mode_wi,

	/* internal modes */
	ocf_req_cache_mode_fast,
		/*!< Fast path */
	ocf_req_cache_mode_d2c,
		/*!< Direct to Core - pass through to core without
		touching cacheline metadata */

	ocf_req_cache_mode_max,
} ocf_req_cache_mode_t;

struct ocf_io_if {
	int (*read)(struct ocf_request *req);

	int (*write)(struct ocf_request *req);

	const char *name;
};

ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
		ocf_core_t core, struct ocf_io *io);

const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t cache_mode);

static inline const char *ocf_get_io_iface_name(ocf_cache_mode_t cache_mode)
{
	const struct ocf_io_if *iface = ocf_get_io_if(cache_mode);

	return iface ? iface->name : "Unknown";
}

static inline bool ocf_cache_mode_is_valid(ocf_cache_mode_t mode)
{
	return mode >= ocf_cache_mode_wt && mode < ocf_cache_mode_max;
}

void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);

bool ocf_fallback_pt_is_on(ocf_cache_t cache);

bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
		uint64_t bytes);

struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
		struct ocf_queue *q);

int ocf_engine_hndl_rq(struct ocf_request *rq,
		ocf_req_cache_mode_t req_cache_mode);

#define OCF_FAST_PATH_YES 7
#define OCF_FAST_PATH_NO 13

int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
		ocf_req_cache_mode_t req_cache_mode);

void ocf_engine_hndl_discard_rq(struct ocf_request *rq);

void ocf_engine_hndl_ops_rq(struct ocf_request *rq);

#endif
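A small usage sketch for the inline helpers declared above (assumption: ocf_cache_log() with this signature is available, as used elsewhere in this commit; my_log_mode is a hypothetical name):

static void my_log_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
{
	/* ocf_get_io_iface_name() falls back to "Unknown" for invalid
	 * or internal-only modes, so no validity check is required */
	ocf_cache_log(cache, log_info, "Cache mode: %s\n",
			ocf_get_io_iface_name(mode));
}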
src/engine/engine_bf.c (new file, 105 lines)
@@ -0,0 +1,105 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "engine_bf.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG_IO_NAME "bf"
#include "engine_debug.h"

/* Decrements and checks if queue may be unblocked again */
static inline void backfill_queue_dec_unblock(struct ocf_cache *cache)
{
	env_atomic_dec(&cache->pending_read_misses_list_count);

	if (!env_atomic_read(&cache->pending_read_misses_list_blocked))
		return;

	if (env_atomic_read(&cache->pending_read_misses_list_count)
			< cache->backfill.queue_unblock_size)
		env_atomic_set(&cache->pending_read_misses_list_blocked, 0);
}

static inline void backfill_queue_inc_block(struct ocf_cache *cache)
{
	if (env_atomic_inc_return(&cache->pending_read_misses_list_count)
			>= cache->backfill.max_queue_size)
		env_atomic_set(&cache->pending_read_misses_list_blocked, 1);
}

static void _ocf_backfill_do_io(void *private_data, int error)
{
	struct ocf_request *rq = (struct ocf_request *)private_data;
	struct ocf_cache *cache = rq->cache;

	if (error)
		rq->error = error;

	if (rq->error)
		inc_fallback_pt_error_counter(rq->cache);

	/* Handle callback-caller race to let only one of the two complete the
	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
	 */
	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
		/* We must free the pages we have allocated */
		ctx_data_secure_erase(cache->owner, rq->data);
		ctx_data_munlock(cache->owner, rq->data);
		ctx_data_free(cache->owner, rq->data);
		rq->data = NULL;

		if (rq->error) {
			env_atomic_inc(&cache->core_obj[rq->core_id].
					counters->cache_errors.write);
			ocf_engine_invalidate(rq);
		} else {
			ocf_rq_unlock(rq);

			/* always free the request at the last point
			 * of the completion path
			 */
			ocf_rq_put(rq);
		}
	}
}

static int _ocf_backfill_do(struct ocf_request *rq)
{
	unsigned int reqs_to_issue;

	backfill_queue_dec_unblock(rq->cache);

	reqs_to_issue = ocf_engine_io_count(rq);

	/* There will be #reqs_to_issue completions */
	env_atomic_set(&rq->req_remaining, reqs_to_issue);

	rq->data = rq->cp_data;

	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_WRITE, reqs_to_issue,
			_ocf_backfill_do_io, rq);

	return 0;
}

static const struct ocf_io_if _io_if_backfill = {
	.read = _ocf_backfill_do,
	.write = _ocf_backfill_do,
};

void ocf_engine_backfill(struct ocf_request *rq)
{
	backfill_queue_inc_block(rq->cache);
	ocf_engine_push_rq_front_if(rq, &_io_if_backfill, true);
}
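The two queue helpers above implement a high/low-water-mark hysteresis: block new read-miss backfills at max_queue_size, and keep blocking until completions bring the count back under queue_unblock_size. A standalone model of the same logic (illustrative only, not OCF code):

struct my_backfill_model {
	int count;              /* in-flight read-miss backfills */
	int blocked;            /* 1 while new misses should stall */
	int max_queue_size;     /* high-water mark: block at this count */
	int queue_unblock_size; /* low-water mark: unblock below this */
};

static void my_model_inc(struct my_backfill_model *m)
{
	if (++m->count >= m->max_queue_size)
		m->blocked = 1;
}

static void my_model_dec(struct my_backfill_model *m)
{
	--m->count;
	if (m->blocked && m->count < m->queue_unblock_size)
		m->blocked = 0;
}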
src/engine/engine_bf.h (new file, 11 lines)
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_BF_H_
#define ENGINE_BF_H_

void ocf_engine_backfill(struct ocf_request *rq);

#endif /* ENGINE_BF_H_ */
src/engine/engine_common.c (new file, 621 lines)
@@ -0,0 +1,621 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "engine_common.h"
#define OCF_ENGINE_DEBUG_IO_NAME "common"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cleaner.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"

void ocf_engine_error(struct ocf_request *rq,
		bool stop_cache, const char *msg)
{
	struct ocf_cache *cache = rq->cache;

	if (stop_cache)
		env_bit_clear(ocf_cache_state_running, &cache->cache_state);

	ocf_core_log(&cache->core_obj[rq->core_id], log_err,
			"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
			BYTES_TO_SECTORS(rq->byte_position), rq->byte_length);
}

void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
		struct ocf_map_info *entry, ocf_core_id_t core_id,
		uint64_t core_line)
{
	ocf_cache_line_t line;
	ocf_cache_line_t hash_key;

	hash_key = ocf_metadata_hash_func(cache, core_line, core_id);

	/* Initially assume that we have cache miss.
	 * Hash points to proper bucket.
	 */
	entry->hash_key = hash_key;
	entry->status = LOOKUP_MISS;
	entry->coll_idx = cache->device->collision_table_entries;
	entry->core_line = core_line;

	line = ocf_metadata_get_hash(cache, hash_key);

	while (line != cache->device->collision_table_entries) {
		ocf_core_id_t curr_core_id;
		uint64_t curr_core_line;

		ocf_metadata_get_core_info(cache, line, &curr_core_id,
				&curr_core_line);

		if (core_id == curr_core_id && curr_core_line == core_line) {
			entry->coll_idx = line;
			entry->status = LOOKUP_HIT;
			break;
		}

		line = ocf_metadata_get_collision_next(cache, line);
	}
}

static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
		struct ocf_map_info *entry, ocf_core_id_t core_id)
{
	ocf_core_id_t _core_id;
	uint64_t _core_line;

	if (entry->status == LOOKUP_MISS)
		return 0;

	ENV_BUG_ON(entry->coll_idx >= cache->device->collision_table_entries);

	ocf_metadata_get_core_info(cache, entry->coll_idx, &_core_id,
			&_core_line);

	if (core_id == _core_id && _core_line == entry->core_line)
		return 0;
	else
		return -1;
}

void ocf_engine_update_rq_info(struct ocf_cache *cache,
		struct ocf_request *rq, uint32_t entry)
{
	uint8_t start_sector = 0;
	uint8_t end_sector = ocf_line_end_sector(cache);
	struct ocf_map_info *_entry = &(rq->map[entry]);

	if (entry == 0) {
		start_sector = BYTES_TO_SECTORS(rq->byte_position)
				% ocf_line_sectors(cache);
	}

	if (entry == rq->core_line_count - 1) {
		end_sector = BYTES_TO_SECTORS(rq->byte_position +
				rq->byte_length - 1) % ocf_line_sectors(cache);
	}

	/* Handle lookup status */
	switch (_entry->status) {
	case LOOKUP_HIT:
		if (metadata_test_valid_sec(cache, _entry->coll_idx,
				start_sector, end_sector)) {
			rq->info.hit_no++;
		} else {
			rq->info.invalid_no++;
		}

		/* Check if request is dirty */
		if (metadata_test_dirty(cache, _entry->coll_idx)) {
			rq->info.dirty_any++;

			/* Check if cache line is fully dirty */
			if (metadata_test_dirty_all(cache, _entry->coll_idx))
				rq->info.dirty_all++;
		}

		if (rq->part_id != ocf_metadata_get_partition_id(cache,
				_entry->coll_idx)) {
			/*
			 * Need to move this cache line into other partition
			 */
			_entry->re_part = rq->info.re_part = true;
		}

		break;
	case LOOKUP_MISS:
		rq->info.seq_req = false;
		break;
	case LOOKUP_MAPPED:
		break;
	default:
		ENV_BUG();
		break;
	}

	/* Check if cache hit is sequential */
	if (rq->info.seq_req && entry) {
		if (ocf_metadata_map_lg2phy(cache,
				(rq->map[entry - 1].coll_idx)) + 1 !=
				ocf_metadata_map_lg2phy(cache,
				_entry->coll_idx)) {
			rq->info.seq_req = false;
		}
	}
}

void ocf_engine_traverse(struct ocf_request *rq)
{
	uint32_t i;
	uint64_t core_line;

	struct ocf_cache *cache = rq->cache;
	ocf_core_id_t core_id = rq->core_id;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_rq_clear_info(rq);
	rq->info.seq_req = true;

	for (i = 0, core_line = rq->core_line_first;
			core_line <= rq->core_line_last; core_line++, i++) {

		struct ocf_map_info *entry = &(rq->map[i]);

		ocf_engine_lookup_map_entry(cache, entry, core_id,
				core_line);

		if (entry->status != LOOKUP_HIT) {
			rq->info.seq_req = false;
			/* On a miss, look up the next map entry */
			OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
					entry->core_line);
			continue;
		}

		OCF_DEBUG_PARAM(cache, "Hit, cache line %u, core line = %llu",
				entry->coll_idx, entry->core_line);

		/* Update eviction (LRU) */
		ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);

		ocf_engine_update_rq_info(cache, rq, i);
	}

	OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
			"Yes" : "No");
}

int ocf_engine_check(struct ocf_request *rq)
{
	int result = 0;
	uint32_t i;
	uint64_t core_line;

	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_rq_clear_info(rq);
	rq->info.seq_req = true;

	for (i = 0, core_line = rq->core_line_first;
			core_line <= rq->core_line_last; core_line++, i++) {

		struct ocf_map_info *entry = &(rq->map[i]);

		if (entry->status == LOOKUP_MISS) {
			rq->info.seq_req = false;
			continue;
		}

		if (_ocf_engine_check_map_entry(cache, entry, rq->core_id)) {
			/* Mapping is invalid */
			entry->invalid = true;
			rq->info.seq_req = false;

			OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
					entry->coll_idx);

			result = -1;
		} else {
			entry->invalid = false;

			OCF_DEBUG_PARAM(cache, "Valid, Cache line %u",
					entry->coll_idx);

			ocf_engine_update_rq_info(cache, rq, i);
		}
	}

	OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
			"Yes" : "No");

	return result;
}

static void ocf_engine_map_cache_line(struct ocf_request *rq,
		uint64_t core_line, unsigned int hash_index,
		ocf_cache_line_t *cache_line)
{
	struct ocf_cache *cache = rq->cache;
	ocf_part_id_t part_id = rq->part_id;
	ocf_cleaning_t clean_policy_type;

	if (cache->device->freelist_part->curr_size == 0) {
		rq->info.eviction_error = 1;
		return;
	}

	*cache_line = cache->device->freelist_part->head;

	/* add_to_collision_list changes .next_col and other fields of the
	 * entry, so the last cache line given must be updated before
	 * calling it.
	 */

	ocf_metadata_remove_from_free_list(cache, *cache_line);

	ocf_metadata_add_to_partition(cache, part_id, *cache_line);

	/* Add the block to the corresponding collision list */
	ocf_metadata_add_to_collision(cache, rq->core_id, core_line, hash_index,
			*cache_line);

	ocf_eviction_init_cache_line(cache, *cache_line, part_id);

	/* Update LRU: move this node to the head of the LRU list. */
	ocf_eviction_set_hot_cache_line(cache, *cache_line);

	/* Update dirty cache-block list */
	clean_policy_type = cache->conf_meta->cleaning_policy_type;

	ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max);

	if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL)
		cleaning_policy_ops[clean_policy_type].
				init_cache_block(cache, *cache_line);
}

static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
		struct ocf_request *rq)
{
	uint32_t i;
	struct ocf_map_info *entry;

	for (i = 0; i < rq->core_line_count; i++) {
		entry = &(rq->map[i]);

		switch (entry->status) {
		case LOOKUP_HIT:
		case LOOKUP_MISS:
			break;

		case LOOKUP_MAPPED:
			OCF_DEBUG_RQ(rq, "Canceling cache line %u",
					entry->coll_idx);
			set_cache_line_invalid_no_flush(cache, 0,
					ocf_line_end_sector(cache),
					entry->coll_idx);
			break;

		default:
			ENV_BUG();
			break;
		}
	}
}

void ocf_engine_map(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	uint32_t i;
	struct ocf_map_info *entry;
	uint64_t core_line;
	int status = LOOKUP_MAPPED;
	ocf_core_id_t core_id = rq->core_id;

	if (ocf_engine_unmapped_count(rq))
		status = space_managment_evict_do(cache, rq,
				ocf_engine_unmapped_count(rq));

	if (rq->info.eviction_error)
		return;

	ocf_rq_clear_info(rq);
	rq->info.seq_req = true;

	OCF_DEBUG_TRACE(rq->cache);

	for (i = 0, core_line = rq->core_line_first;
			core_line <= rq->core_line_last; core_line++, i++) {
		entry = &(rq->map[i]);

		ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);

		if (entry->status != LOOKUP_HIT) {
			ocf_engine_map_cache_line(rq, entry->core_line,
					entry->hash_key, &entry->coll_idx);

			if (rq->info.eviction_error) {
				/*
				 * Eviction error (mapping error), need to
				 * clean up, return and do pass through
				 */
				OCF_DEBUG_RQ(rq, "Eviction ERROR when mapping");
				ocf_engine_map_hndl_error(cache, rq);
				break;
			}

			entry->status = status;
		}

		OCF_DEBUG_PARAM(rq->cache,
				"%s, cache line %u, core line = %llu",
				entry->status == LOOKUP_HIT ? "Hit" : "Map",
				entry->coll_idx, entry->core_line);

		ocf_engine_update_rq_info(cache, rq, i);
	}

	OCF_DEBUG_PARAM(rq->cache, "Sequential - %s", rq->info.seq_req ?
			"Yes" : "No");
}

static void _ocf_engine_clean_end(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error) {
		OCF_DEBUG_RQ(rq, "Cleaning ERROR");
		rq->error |= error;

		/* End request and do not continue processing */
		ocf_rq_unlock(rq);

		/* Complete request */
		rq->complete(rq, error);

		/* Release OCF request */
		ocf_rq_put(rq);
	} else {
		rq->info.dirty_any = 0;
		rq->info.dirty_all = 0;
		ocf_engine_push_rq_front(rq, true);
	}
}

static int _ocf_engine_clean_getter(struct ocf_cache *cache,
		void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
	struct ocf_cleaner_attribs *attribs = getter_context;
	struct ocf_request *rq = attribs->cmpl_context;

	for (; attribs->getter_item < rq->core_line_count;
			attribs->getter_item++) {

		struct ocf_map_info *entry = &rq->map[attribs->getter_item];

		if (entry->status != LOOKUP_HIT)
			continue;

		if (!metadata_test_dirty(cache, entry->coll_idx))
			continue;

		/* Line to be cleaned found, go to next item and return */
		*line = entry->coll_idx;
		attribs->getter_item++;
		return 0;
	}

	return -1;
}

void ocf_engine_clean(struct ocf_request *rq)
{
	/* Initialize attributes for cleaner */
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = false,

		.cmpl_context = rq,
		.cmpl_fn = _ocf_engine_clean_end,

		.getter = _ocf_engine_clean_getter,
		.getter_context = &attribs,
		.getter_item = 0,

		.count = rq->info.dirty_any,
		.io_queue = rq->io_queue
	};

	/* Start cleaning */
	ocf_cleaner_fire(rq->cache, &attribs);
}

void ocf_engine_update_block_stats(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	ocf_core_id_t core_id = rq->core_id;
	ocf_part_id_t part_id = rq->part_id;
	struct ocf_counters_block *blocks;

	blocks = &cache->core_obj[core_id].counters->
			part_counters[part_id].blocks;

	if (rq->rw == OCF_READ)
		env_atomic64_add(rq->byte_length, &blocks->read_bytes);
	else if (rq->rw == OCF_WRITE)
		env_atomic64_add(rq->byte_length, &blocks->write_bytes);
	else
		ENV_BUG();
}

void ocf_engine_update_request_stats(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	ocf_core_id_t core_id = rq->core_id;
	ocf_part_id_t part_id = rq->part_id;
	struct ocf_counters_req *reqs;

	switch (rq->rw) {
	case OCF_READ:
		reqs = &cache->core_obj[core_id].counters->
				part_counters[part_id].read_reqs;
		break;
	case OCF_WRITE:
		reqs = &cache->core_obj[core_id].counters->
				part_counters[part_id].write_reqs;
		break;
	default:
		ENV_BUG();
	}

	env_atomic64_inc(&reqs->total);

	if (rq->info.hit_no == 0)
		env_atomic64_inc(&reqs->full_miss);
	else if (rq->info.hit_no < rq->core_line_count)
		env_atomic64_inc(&reqs->partial_miss);
}

void ocf_engine_push_rq_back(struct ocf_request *rq, bool allow_sync)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_queue *q = NULL;
	unsigned long lock_flags;

	INIT_LIST_HEAD(&rq->list);

	ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
	q = &cache->io_queues[rq->io_queue];

	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);

	list_add_tail(&rq->list, &q->io_list);
	env_atomic_inc(&q->io_no);

	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);

	if (!rq->info.internal)
		env_atomic_set(&cache->last_access_ms,
				env_ticks_to_msecs(env_get_tick_count()));

	ctx_queue_kick(cache->owner, q, allow_sync);
}

void ocf_engine_push_rq_front(struct ocf_request *rq, bool allow_sync)
{
	struct ocf_cache *cache = rq->cache;
	struct ocf_queue *q = NULL;
	unsigned long lock_flags;

	INIT_LIST_HEAD(&rq->list);

	ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
	q = &cache->io_queues[rq->io_queue];

	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);

	list_add(&rq->list, &q->io_list);
	env_atomic_inc(&q->io_no);

	env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);

	if (!rq->info.internal)
		env_atomic_set(&cache->last_access_ms,
				env_ticks_to_msecs(env_get_tick_count()));

	ctx_queue_kick(cache->owner, q, allow_sync);
}

void ocf_engine_push_rq_front_if(struct ocf_request *rq,
		const struct ocf_io_if *io_if,
		bool allow_sync)
{
	rq->error = 0; /* Please explain why!!! */
	rq->io_if = io_if;
	ocf_engine_push_rq_front(rq, allow_sync);
}

void inc_fallback_pt_error_counter(ocf_cache_t cache)
{
	ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);

	if (cache->fallback_pt_error_threshold == OCF_CACHE_FALLBACK_PT_INACTIVE)
		return;

	if (env_atomic_inc_return(&cache->fallback_pt_error_counter) ==
			cache->fallback_pt_error_threshold) {
		ocf_cache_log(cache, log_info, "Error threshold reached. "
				"Fallback Pass Through activated\n");
	}
}

static int _ocf_engine_refresh(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	int result;

	OCF_METADATA_LOCK_RD();
	/* Check under metadata RD lock */

	result = ocf_engine_check(rq);

	OCF_METADATA_UNLOCK_RD();

	if (result == 0) {

		/* Refresh successful, can proceed with the original IO
		 * interface */
		rq->io_if = rq->priv;

		rq->resume = NULL;
		rq->priv = NULL;

		if (rq->rw == OCF_READ)
			rq->io_if->read(rq);
		else if (rq->rw == OCF_WRITE)
			rq->io_if->write(rq);
		else
			ENV_BUG();
	} else {
		ENV_WARN(true, OCF_PREFIX_SHORT" Inconsistent request");
		rq->error = -EINVAL;

		/* Complete request */
		rq->complete(rq, rq->error);

		/* Release WRITE lock of request */
		ocf_rq_unlock(rq);

		/* Release OCF request */
		ocf_rq_put(rq);
	}

	return 0;
}

static const struct ocf_io_if _io_if_refresh = {
	.read = _ocf_engine_refresh,
	.write = _ocf_engine_refresh,
};

void ocf_engine_on_resume(struct ocf_request *rq)
{
	ENV_BUG_ON(rq->priv);
	ENV_BUG_ON(ocf_engine_on_resume != rq->resume);
	OCF_CHECK_NULL(rq->io_if);

	/* Exchange IO interface */
	rq->priv = (void *)rq->io_if;

	OCF_DEBUG_RQ(rq, "On resume");

	ocf_engine_push_rq_front_if(rq, &_io_if_refresh, false);
}
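The resume/refresh handshake above is used by every engine that may block on a cache-line lock. A sketch of the calling pattern (modelled on engine_fast.c in this commit; my_engine and my_engine_do are hypothetical names):

static int my_engine_do(struct ocf_request *rq);

static const struct ocf_io_if _io_if_my_engine_resume = {
	.read = my_engine_do,
	.write = my_engine_do,
};

static int my_engine(struct ocf_request *rq)
{
	int lock;

	/* On resume, ocf_engine_on_resume() re-checks the mapping under
	 * the metadata lock and re-enters the engine through rq->io_if */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_my_engine_resume;

	lock = ocf_rq_trylock_rd(rq);
	if (lock == OCF_LOCK_ACQUIRED)
		my_engine_do(rq); /* lock held - proceed immediately */

	return 0;
}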
src/engine/engine_common.h (new file, 223 lines)
@@ -0,0 +1,223 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_COMMON_H_
#define ENGINE_COMMON_H_

#include "../ocf_request.h"

/**
 * @file engine_common.h
 * @brief OCF cache engine common module
 */

/**
 * @brief Signal and handle OCF request error
 *
 * @param rq OCF request
 * @param stop_cache Indicates if OCF cache engine needs to be stopped
 * @param msg Error message to be printed into log
 */
void ocf_engine_error(struct ocf_request *rq, bool stop_cache,
		const char *msg);

/**
 * @brief Check if OCF request is a hit
 *
 * @param rq OCF request
 *
 * @retval true HIT
 * @retval false MISS
 */
static inline bool ocf_engine_is_hit(struct ocf_request *rq)
{
	return rq->info.hit_no == rq->core_line_count;
}

/**
 * @brief Check if OCF request is a miss
 *
 * @param rq OCF request
 *
 * @retval true MISS
 * @retval false HIT
 */
#define ocf_engine_is_miss(rq) (!ocf_engine_is_hit(rq))

/**
 * @brief Check if all cache lines are mapped fully
 *
 * @param rq OCF request
 *
 * @retval true request is mapped fully
 * @retval false request is not mapped fully and eviction might be run in
 * order to complete mapping
 */
static inline bool ocf_engine_is_mapped(struct ocf_request *rq)
{
	return rq->info.hit_no + rq->info.invalid_no == rq->core_line_count;
}

/**
 * @brief Check if all cache lines are dirty
 *
 * @param rq OCF request
 *
 * @retval true request is fully dirty
 * @retval false request is not fully dirty
 */
static inline bool ocf_engine_is_dirty_all(struct ocf_request *rq)
{
	return rq->info.dirty_all == rq->core_line_count;
}

/**
 * @brief Get number of mapped cache lines
 *
 * @param rq OCF request
 *
 * @return Number of mapped cache lines
 */
static inline uint32_t ocf_engine_mapped_count(struct ocf_request *rq)
{
	return rq->info.hit_no + rq->info.invalid_no;
}

/**
 * @brief Get number of unmapped cache lines
 *
 * @param rq OCF request
 *
 * @return Number of unmapped cache lines
 */
static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *rq)
{
	return rq->core_line_count - (rq->info.hit_no + rq->info.invalid_no);
}

/**
 * @brief Get number of IOs to perform cache read or write
 *
 * @param rq OCF request
 *
 * @return Count of cache IOs
 */
static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
{
	return rq->info.seq_req ? 1 : rq->core_line_count;
}

/**
 * @brief Clean request (flush dirty data to the core device)
 *
 * @param rq OCF request
 *
 * @note After successful cleaning:
 *		- Dirty status bits in request info will be cleared
 *		- Request will be pushed front, <B>IO interface needs to be set</B>
 *
 * @note In case of failure:
 *		- unlock request
 *		- complete request to the application
 *		- free request
 */
void ocf_engine_clean(struct ocf_request *rq);

void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
		struct ocf_map_info *entry, ocf_core_id_t core_id,
		uint64_t core_line);

/**
 * @brief Traverse request in order to look up cache lines. If there are
 * misses, eviction needs to be called. This process is called 'mapping'.
 *
 * @note This function CALLS EVICTION
 *
 * @param rq OCF request
 */
void ocf_engine_map(struct ocf_request *rq);

/**
 * @brief Traverse OCF request (lookup cache)
 *
 * @note This function DOES NOT CALL EVICTION. Only lookup in metadata is
 * performed. The main purpose of this function is to check if there is a HIT.
 *
 * @param rq OCF request
 */
void ocf_engine_traverse(struct ocf_request *rq);

/**
 * @brief Check if OCF request mapping is still valid
 *
 * @note If a mapping entry is invalid it will be marked
 *
 * @param rq OCF request
 *
 * @retval 0 - OCF request mapping is valid
 * @return Non zero - OCF request mapping is invalid and re-mapping needs
 * to be called
 */
int ocf_engine_check(struct ocf_request *rq);

/**
 * @brief Update OCF request info
 *
 * @param rq OCF request
 */
void ocf_engine_update_rq_info(struct ocf_cache *cache,
		struct ocf_request *rq, uint32_t entry);

/**
 * @brief Update OCF request block statistics for an exported object
 *
 * @param rq OCF request
 */
void ocf_engine_update_block_stats(struct ocf_request *rq);

/**
 * @brief Update OCF request statistics for an exported object
 * (not applicable to write WI and to read WT)
 *
 * @param rq OCF request
 */
void ocf_engine_update_request_stats(struct ocf_request *rq);

/**
 * @brief Push OCF request to the back of the OCF thread worker queue
 *
 * @param rq OCF request
 * @param allow_sync caller allows the request from the queue to be run
 *		immediately from the push function in the caller context
 */
void ocf_engine_push_rq_back(struct ocf_request *rq,
		bool allow_sync);

/**
 * @brief Push OCF request to the front of the OCF thread worker queue
 *
 * @param rq OCF request
 * @param allow_sync caller allows the request from the queue to be run
 *		immediately from the push function in the caller context
 */
void ocf_engine_push_rq_front(struct ocf_request *rq,
		bool allow_sync);

/**
 * @brief Set the IO interface and push the request to the front of the
 * OCF thread worker queue
 *
 * @param rq OCF request
 * @param io_if IO interface
 * @param allow_sync caller allows the request from the queue to be run
 *		immediately from the push function in the caller context
 */
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
		const struct ocf_io_if *io_if,
		bool allow_sync);

void inc_fallback_pt_error_counter(ocf_cache_t cache);

void ocf_engine_on_resume(struct ocf_request *rq);

#endif /* ENGINE_COMMON_H_ */
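The counters behind the helpers above partition a request: every core line is either a valid hit, an invalid-but-mapped line, or unmapped. An illustrative assertion (not part of the commit; my_check_rq_accounting is hypothetical) making that invariant explicit:

static inline void my_check_rq_accounting(struct ocf_request *rq)
{
	/* mapped (hit_no + invalid_no) plus unmapped lines must cover
	 * the whole request */
	ENV_BUG_ON(ocf_engine_mapped_count(rq) +
			ocf_engine_unmapped_count(rq) != rq->core_line_count);
}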
src/engine/engine_d2c.c (new file, 72 lines)
@@ -0,0 +1,72 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_d2c.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"

#define OCF_ENGINE_DEBUG_IO_NAME "d2c"
#include "engine_debug.h"

static void _ocf_d2c_completion(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	rq->error = error;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error) {
		rq->info.core_error = 1;
		if (rq->rw == OCF_READ) {
			env_atomic_inc(&rq->cache->core_obj[rq->core_id].
					counters->core_errors.read);
		} else {
			env_atomic_inc(&rq->cache->core_obj[rq->core_id].
					counters->core_errors.write);
		}
	}

	/* Complete request */
	rq->complete(rq, rq->error);

	/* Release OCF request */
	ocf_rq_put(rq);
}

int ocf_io_d2c(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw,
			_ocf_d2c_completion, rq);

	ocf_engine_update_block_stats(rq);

	if (rq->rw == OCF_READ) {
		env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].read_reqs.pass_through);
	} else {
		env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].write_reqs.pass_through);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
src/engine/engine_d2c.h (new file, 11 lines)
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_2DC_H_
#define ENGINE_2DC_H_

int ocf_io_d2c(struct ocf_request *rq);

#endif /* ENGINE_2DC_H_ */
src/engine/engine_debug.h (new file, 48 lines)
@@ -0,0 +1,48 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_DEBUG_H_
#define ENGINE_DEBUG_H_

#ifndef OCF_ENGINE_DEBUG
#define OCF_ENGINE_DEBUG 0
#endif

#if 1 == OCF_ENGINE_DEBUG

#ifndef OCF_ENGINE_DEBUG_IO_NAME
#define OCF_ENGINE_DEBUG_IO_NAME "null"
#endif

#define OCF_DEBUG_PREFIX "[Engine][%s] %s "

#define OCF_DEBUG_LOG(cache, format, ...) \
	ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
			format"\n", OCF_ENGINE_DEBUG_IO_NAME, __func__, \
			##__VA_ARGS__)

#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")

#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)

#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
		##__VA_ARGS__)

#define OCF_DEBUG_RQ(rq, format, ...) \
	ocf_cache_log(rq->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
			format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
			OCF_READ == (rq)->rw ? "RD" : "WR", rq->byte_position, \
			rq->byte_length, __func__, ##__VA_ARGS__)

#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define OCF_DEBUG_RQ(rq, format, ...)
#endif

#endif /* ENGINE_DEBUG_H_ */
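Usage follows the pattern already visible in engine_bf.c and engine_discard.c in this commit: define the switch and the per-engine IO name before including this header ("my_engine" is a placeholder):

#define OCF_ENGINE_DEBUG 1		/* compile the logging variants */
#define OCF_ENGINE_DEBUG_IO_NAME "my_engine"
#include "engine_debug.h"

/* OCF_DEBUG_RQ(rq, "step %d", n) then logs a line of the form:
 * [Engine][my_engine][RD, <byte_position>, <byte_length>] <func> - step <n>
 */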
src/engine/engine_discard.c (new file, 248 lines)
@@ -0,0 +1,248 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_discard.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG 0

#define OCF_ENGINE_DEBUG_IO_NAME "discard"
#include "engine_debug.h"

static int _ocf_discard_step_do(struct ocf_request *rq);
static int _ocf_discard_step(struct ocf_request *rq);
static int _ocf_discard_flush_cache(struct ocf_request *rq);
static int _ocf_discard_core(struct ocf_request *rq);

static const struct ocf_io_if _io_if_discard_step = {
	.read = _ocf_discard_step,
	.write = _ocf_discard_step
};

static const struct ocf_io_if _io_if_discard_step_resume = {
	.read = _ocf_discard_step_do,
	.write = _ocf_discard_step_do
};

static const struct ocf_io_if _io_if_discard_flush_cache = {
	.read = _ocf_discard_flush_cache,
	.write = _ocf_discard_flush_cache,
};

static const struct ocf_io_if _io_if_discard_core = {
	.read = _ocf_discard_core,
	.write = _ocf_discard_core
};

static void _ocf_discard_complete_rq(struct ocf_request *rq, int error)
{
	rq->complete(rq, error);

	ocf_rq_put(rq);
}

static void _ocf_discard_core_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	OCF_DEBUG_RQ(rq, "Core DISCARD Completion");

	_ocf_discard_complete_rq(rq, error);
}

static int _ocf_discard_core(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	ocf_submit_obj_discard(&cache->core_obj[rq->core_id].obj, rq,
			_ocf_discard_core_io, rq);

	return 0;
}

static void _ocf_discard_cache_flush_io_cmpl(void *priv, int error)
{
	struct ocf_request *rq = priv;

	if (error) {
		ocf_metadata_error(rq->cache);
		_ocf_discard_complete_rq(rq, error);
		return;
	}

	rq->io_if = &_io_if_discard_core;
	ocf_engine_push_rq_front(rq, true);
}

static int _ocf_discard_flush_cache(struct ocf_request *rq)
{
	ocf_submit_obj_flush(&rq->cache->device->obj,
			_ocf_discard_cache_flush_io_cmpl, rq);

	return 0;
}

static void _ocf_discard_finish_step(struct ocf_request *rq)
{
	rq->discard.handled += BYTES_TO_SECTORS(rq->byte_length);

	if (rq->discard.handled < rq->discard.nr_sects)
		rq->io_if = &_io_if_discard_step;
	else if (rq->cache->device->init_mode != ocf_init_mode_metadata_volatile)
		rq->io_if = &_io_if_discard_flush_cache;
	else
		rq->io_if = &_io_if_discard_core;

	ocf_engine_push_rq_front(rq, true);
}

static void _ocf_discard_step_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	/* Release WRITE lock of request */
	ocf_rq_unlock_wr(rq);

	if (rq->error) {
		ocf_metadata_error(rq->cache);
		_ocf_discard_complete_rq(rq, rq->error);
		return;
	}

	_ocf_discard_finish_step(rq);
}

int _ocf_discard_step_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	if (ocf_engine_mapped_count(rq)) {
		/* There are mapped cache lines, need to remove them */

		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(rq);

		if (rq->info.flush_metadata) {
			/* Request was dirty and needs to flush metadata */
			ocf_metadata_flush_do_asynch(cache, rq,
					_ocf_discard_step_io);
		}

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	OCF_DEBUG_RQ(rq, "Discard");
	_ocf_discard_step_io(rq, 0);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static void _ocf_discard_on_resume(struct ocf_request *rq)
{
	OCF_DEBUG_RQ(rq, "On resume");
	ocf_engine_push_rq_front(rq, true);
}

static int _ocf_discard_step(struct ocf_request *rq)
{
	int lock;
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	rq->byte_position = SECTORS_TO_BYTES(rq->discard.sector +
			rq->discard.handled);
	rq->byte_length = MIN(SECTORS_TO_BYTES(rq->discard.nr_sects -
			rq->discard.handled), MAX_TRIM_RQ_SIZE);
	rq->core_line_first = ocf_bytes_2_lines(cache, rq->byte_position);
	rq->core_line_last =
		ocf_bytes_2_lines(cache, rq->byte_position + rq->byte_length - 1);
	rq->core_line_count = rq->core_line_last - rq->core_line_first + 1;
	rq->io_if = &_io_if_discard_step_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	ENV_BUG_ON(env_memset(rq->map, sizeof(*rq->map) * rq->core_line_count,
			0));

	/* Traverse to check if request is mapped fully */
	ocf_engine_traverse(rq);

	if (ocf_engine_mapped_count(rq)) {
		/* Some cache lines are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
	} else {
		lock = OCF_LOCK_ACQUIRED;
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

	if (lock >= 0) {
		if (OCF_LOCK_ACQUIRED == lock) {
			_ocf_discard_step_do(rq);
		} else {
			/* WR lock was not acquired, need to wait for resume */
			OCF_DEBUG_RQ(rq, "NO LOCK");
		}
	} else {
		OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
		rq->error |= lock;
		_ocf_discard_finish_step(rq);
	}

	env_cond_resched();

	return 0;
}

int ocf_discard(struct ocf_request *rq)
{
	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	if (rq->rw == OCF_READ) {
		rq->complete(rq, -EINVAL);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callback */
	rq->resume = _ocf_discard_on_resume;

	_ocf_discard_step(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
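_ocf_discard_step() above slices a large discard into steps capped by MAX_TRIM_RQ_SIZE. A standalone model of one step's sizing (illustrative only; sector size and the cap are parameters here, not the OCF constants):

static uint64_t my_discard_step_bytes(uint64_t nr_sects, uint64_t handled,
		uint64_t bytes_per_sector, uint64_t max_trim_bytes)
{
	uint64_t remaining = (nr_sects - handled) * bytes_per_sector;

	/* Each step handles at most max_trim_bytes; the stepping ends
	 * when discard.handled reaches nr_sects */
	return remaining < max_trim_bytes ? remaining : max_trim_bytes;
}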
src/engine/engine_discard.h (new file, 11 lines)
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __ENGINE_DISCARD_H__
#define __ENGINE_DISCARD_H__

int ocf_discard(struct ocf_request *rq);

#endif
src/engine/engine_fast.c (new file, 235 lines)
@@ -0,0 +1,235 @@
|
||||
/*
|
||||
* Copyright(c) 2012-2018 Intel Corporation
|
||||
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
*/
|
||||
|
||||
#include "ocf/ocf.h"
|
||||
#include "../ocf_cache_priv.h"
|
||||
#include "engine_fast.h"
|
||||
#include "engine_common.h"
|
||||
#include "engine_pt.h"
|
||||
#include "engine_wb.h"
|
||||
#include "../utils/utils_rq.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../concurrency/ocf_concurrency.h"
|
||||
#include "../metadata/metadata.h"
|
||||
|
||||
#define OCF_ENGINE_DEBUG 0
|
||||
|
||||
#define OCF_ENGINE_DEBUG_IO_NAME "fast"
|
||||
#include "engine_debug.h"
|
||||
|
||||
/* _____ _ ______ _ _____ _ _
|
||||
* | __ \ | | | ____| | | | __ \ | | | |
|
||||
* | |__) |___ __ _ __| | | |__ __ _ ___| |_ | |__) |_ _| |_| |__
|
||||
* | _ // _ \/ _` |/ _` | | __/ _` / __| __| | ___/ _` | __| '_ \
|
||||
* | | \ \ __/ (_| | (_| | | | | (_| \__ \ |_ | | | (_| | |_| | | |
|
||||
* |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
|
||||
*/
|
||||
|
||||
static void _ocf_read_fast_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining)) {
		/* Not all requests finished */
		return;
	}

	OCF_DEBUG_RQ(rq, "HIT completion");

	if (rq->error) {
		OCF_DEBUG_RQ(rq, "ERROR");

		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.read);
		ocf_engine_push_rq_front_pt(rq);
	} else {
		ocf_rq_unlock(rq);

		/* Complete request */
		rq->complete(rq, rq->error);

		/* Free the request at the last point of the completion path */
		ocf_rq_put(rq);
	}
}

static int _ocf_read_fast_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq)) {
		/* It seems that after resume the request is a MISS, do PT */
		OCF_DEBUG_RQ(rq, "Switching to read PT");
		ocf_read_pt_do(rq);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned to the wrong
		 * partition. Need to move them to the new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	/* Submit IO */
	OCF_DEBUG_RQ(rq, "Submit");
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
			ocf_engine_io_count(rq), _ocf_read_fast_io, rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static const struct ocf_io_if _io_if_read_fast_resume = {
	.read = _ocf_read_fast_do,
	.write = _ocf_read_fast_do,
};

int ocf_read_fast(struct ocf_request *rq)
{
	bool hit;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_read_fast_resume;

	/*- Metadata RD access -----------------------------------------------*/

	OCF_METADATA_LOCK_RD();

	/* Traverse request to check if there is a hit */
	ocf_engine_traverse(rq);

	hit = ocf_engine_is_hit(rq);
	if (hit) {
		ocf_io_start(rq->io);
		lock = ocf_rq_trylock_rd(rq);
	}

	OCF_METADATA_UNLOCK_RD();

	if (hit) {
		OCF_DEBUG_RQ(rq, "Fast path success");

		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				/* Lock was acquired, can perform IO */
				_ocf_read_fast_do(rq);
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR");
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		OCF_DEBUG_RQ(rq, "Fast path failure");
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	if (hit)
		return OCF_FAST_PATH_YES;
	else
		return OCF_FAST_PATH_NO;
}

/*
 * Write Fast Path
 */
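/*
 * Fast path for writes: usable as soon as every cache line of the request is
 * already mapped. A full hit is not required, since the data is about to be
 * overwritten anyway. The actual IO is delegated to the write-back engine
 * via ocf_write_wb_do().
 */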
static const struct ocf_io_if _io_if_write_fast_resume = {
	.read = ocf_write_wb_do,
	.write = ocf_write_wb_do,
};

int ocf_write_fast(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_write_fast_resume;

	/*- Metadata RD access -----------------------------------------------*/

	OCF_METADATA_LOCK_RD();

	/* Traverse request to check if there is a hit */
	ocf_engine_traverse(rq);

	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		ocf_io_start(rq->io);
		lock = ocf_rq_trylock_wr(rq);
	}

	OCF_METADATA_UNLOCK_RD();

	if (mapped) {
		if (lock >= 0) {
			OCF_DEBUG_RQ(rq, "Fast path success");

			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				/* Lock was acquired, can perform IO */
				ocf_write_wb_do(rq);
			}
		} else {
			OCF_DEBUG_RQ(rq, "Fast path lock failure");
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		OCF_DEBUG_RQ(rq, "Fast path failure");
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}
12
src/engine/engine_fast.h
Normal file
@@ -0,0 +1,12 @@
|
||||
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_FAST_H_
#define ENGINE_FAST_H_

int ocf_read_fast(struct ocf_request *rq);
int ocf_write_fast(struct ocf_request *rq);

#endif /* ENGINE_FAST_H_ */
74
src/engine/engine_inv.c
Normal file
@@ -0,0 +1,74 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG_IO_NAME "inv"
#include "engine_debug.h"
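/*
 * Invalidation engine: removes the mapping of the request's cache lines from
 * metadata and, on atomic data objects with dirty metadata, flushes the
 * metadata to the cache device before completing. Other engines push
 * requests here as their error-recovery path.
 */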
static void _ocf_invalidate_rq(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error) {
		rq->error = error;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.write);
	}

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error)
		ocf_engine_error(rq, true, "Failed to flush metadata to cache");

	ocf_rq_unlock(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);
}

static int _ocf_invalidate_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	ENV_BUG_ON(env_atomic_read(&rq->req_remaining));

	OCF_METADATA_LOCK_WR();
	ocf_purge_map_info(rq);
	OCF_METADATA_UNLOCK_WR();

	env_atomic_inc(&rq->req_remaining);

	if (ocf_data_obj_is_atomic(&cache->device->obj) &&
			rq->info.flush_metadata) {
		/* Metadata flush IO */
		ocf_metadata_flush_do_asynch(cache, rq, _ocf_invalidate_rq);
	}

	_ocf_invalidate_rq(rq, 0);

	return 0;
}

static const struct ocf_io_if _io_if_invalidate = {
	.read = _ocf_invalidate_do,
	.write = _ocf_invalidate_do,
};

void ocf_engine_invalidate(struct ocf_request *rq)
{
	ocf_engine_push_rq_front_if(rq, &_io_if_invalidate, true);
}
11
src/engine/engine_inv.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_INV_H_
#define ENGINE_INV_H_

void ocf_engine_invalidate(struct ocf_request *rq);

#endif /* ENGINE_INV_H_ */
65
src/engine/engine_ops.c
Normal file
@@ -0,0 +1,65 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "engine_ops.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"

#define OCF_ENGINE_DEBUG_IO_NAME "ops"
#include "engine_debug.h"
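/*
 * Ops engine: mirrors a core operation to both the core device and the
 * cache device. req_remaining is primed to 2, so the shared completion
 * callback completes the original request only after both submissions
 * have finished.
 */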
static void _ocf_engine_ops_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error) {
		/* An error occurred */
		ocf_engine_error(rq, false, "Core operation failure");
	}

	/* Complete requests - both to cache and to core */
	rq->complete(rq, rq->error);

	/* Release OCF request */
	ocf_rq_put(rq);
}

int ocf_engine_ops(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* IO to the core device and to the cache device */
	env_atomic_set(&rq->req_remaining, 2);

	/* Submit operation into core device */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw,
			_ocf_engine_ops_io, rq);

	ocf_submit_cache_reqs(cache, rq->map, rq, rq->rw,
			1, _ocf_engine_ops_io, rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
11
src/engine/engine_ops.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __CACHE_ENGINE_OPS_H_
#define __CACHE_ENGINE_OPS_H_

int ocf_engine_ops(struct ocf_request *rq);

#endif /* __CACHE_ENGINE_OPS_H_ */
181
src/engine/engine_pt.c
Normal file
@@ -0,0 +1,181 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_pt.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG_IO_NAME "pt"
#include "engine_debug.h"
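/*
 * Pass-through engine: reads are served directly from the core device.
 * Dirty cache lines must be cleaned first, and a request that is entirely
 * dirty under sequential cut-off is redirected to a cache (WT) read
 * instead, since the core copy may be stale.
 */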
static void _ocf_read_pt_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error) {
		rq->info.core_error = 1;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				core_errors.read);
	}

	/* Complete request */
	rq->complete(rq, rq->error);

	ocf_rq_unlock_rd(rq);

	/* Release OCF request */
	ocf_rq_put(rq);
}

static inline void _ocf_read_pt_submit(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, 1); /* Core device IO */

	OCF_DEBUG_RQ(rq, "Submit");

	/* Core read */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ,
			_ocf_read_pt_io, rq);
}

int ocf_read_pt_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	if (rq->info.dirty_any) {
		OCF_METADATA_LOCK_RD();
		/* Need to clean, start it */
		ocf_engine_clean(rq);
		OCF_METADATA_UNLOCK_RD();

		/* Do not process now, the request has to be cleaned first */
		ocf_rq_put(rq);

		return 0;
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned to the wrong
		 * partition. Need to move them to the new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	/* Submit read IO to the core */
	_ocf_read_pt_submit(rq);

	/* Update statistics */
	ocf_engine_update_block_stats(rq);
	env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
			part_counters[rq->part_id].read_reqs.pass_through);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static const struct ocf_io_if _io_if_pt_resume = {
	.read = ocf_read_pt_do,
	.write = ocf_read_pt_do,
};

int ocf_read_pt(struct ocf_request *rq)
{
	bool use_cache = false;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_pt_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/

	/* Traverse request to check if there are mapped cache lines */
	ocf_engine_traverse(rq);

	if (rq->info.seq_cutoff && ocf_engine_is_dirty_all(rq)) {
		use_cache = true;
	} else {
		if (ocf_engine_mapped_count(rq)) {
			/* There are mapped cache lines,
			 * lock request for READ access
			 */
			lock = ocf_rq_trylock_rd(rq);
		} else {
			/* No mapped cache lines, no need to get lock */
			lock = OCF_LOCK_ACQUIRED;
		}
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/

	if (use_cache) {
		/*
		 * There is a dirty HIT and sequential cut-off is active,
		 * because of this force reading data from the cache
		 */
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_wt)->read(rq);
	} else {
		if (lock >= 0) {
			if (lock == OCF_LOCK_ACQUIRED) {
				/* Lock acquired, perform pass-through read */
				ocf_read_pt_do(rq);
			} else {
				/* WR lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

void ocf_engine_push_rq_front_pt(struct ocf_request *rq)
{
	ocf_engine_push_rq_front_if(rq, &_io_if_pt_resume, true);
}
15
src/engine/engine_pt.h
Normal file
@@ -0,0 +1,15 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_PT_H_
#define ENGINE_PT_H_

int ocf_read_pt(struct ocf_request *rq);

int ocf_read_pt_do(struct ocf_request *rq);

void ocf_engine_push_rq_front_pt(struct ocf_request *rq);

#endif /* ENGINE_PT_H_ */
319
src/engine/engine_rd.c
Normal file
@@ -0,0 +1,319 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_rd.h"
#include "engine_pt.h"
#include "engine_inv.h"
#include "engine_bf.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../ocf_def_priv.h"

#define OCF_ENGINE_DEBUG_IO_NAME "rd"
#include "engine_debug.h"
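/*
 * Generic read engine: a full hit is read from the cache; a miss is read
 * from the core into a bounce buffer (cp_data), completed to the caller and
 * then backfilled into the cache. A cache-side read error falls back to
 * pass-through, while a core-side error invalidates the freshly mapped
 * lines instead of backfilling them.
 */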
static void _ocf_read_generic_hit_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (rq->error)
		inc_fallback_pt_error_counter(rq->cache);

	/* Handle callback-caller race to let only one of the two complete the
	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
	 */
	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
		OCF_DEBUG_RQ(rq, "HIT completion");

		if (rq->error) {
			env_atomic_inc(&rq->cache->core_obj[rq->core_id].
					counters->cache_errors.read);
			ocf_engine_push_rq_front_pt(rq);
		} else {
			ocf_rq_unlock(rq);

			/* Complete request */
			rq->complete(rq, rq->error);

			/* Free the request at the last point
			 * of the completion path
			 */
			ocf_rq_put(rq);
		}
	}
}

static void _ocf_read_generic_miss_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;
	struct ocf_cache *cache = rq->cache;

	if (error)
		rq->error = error;

	/* Handle callback-caller race to let only one of the two complete the
	 * request. Also, complete original request only if this is the last
	 * sub-request to complete
	 */
	if (env_atomic_dec_return(&rq->req_remaining) == 0) {
		OCF_DEBUG_RQ(rq, "MISS completion");

		if (rq->error) {
			/*
			 * --- Do not submit this request to write-back-thread.
			 * Stop it here ---
			 */
			rq->complete(rq, rq->error);

			rq->info.core_error = 1;
			env_atomic_inc(&cache->core_obj[rq->core_id].
					counters->core_errors.read);

			ctx_data_free(cache->owner, rq->cp_data);
			rq->cp_data = NULL;

			/* Invalidate metadata */
			ocf_engine_invalidate(rq);

			return;
		}

		/* Copy pages to copy vec, since this is the one needed
		 * by the above layer
		 */
		ctx_data_cpy(cache->owner, rq->cp_data, rq->data, 0, 0,
				rq->byte_length);

		/* Complete request */
		rq->complete(rq, rq->error);

		ocf_engine_backfill(rq);
	}
}

static inline void _ocf_read_generic_submit_hit(struct ocf_request *rq)
{
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));

	ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
			ocf_engine_io_count(rq), _ocf_read_generic_hit_io, rq);
}

static inline void _ocf_read_generic_submit_miss(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;
	int ret;

	env_atomic_set(&rq->req_remaining, 1);

	rq->cp_data = ctx_data_alloc(cache->owner,
			BYTES_TO_PAGES(rq->byte_length));
	if (!rq->cp_data)
		goto err_alloc;

	ret = ctx_data_mlock(cache->owner, rq->cp_data);
	if (ret)
		goto err_alloc;

	/* Submit read request to core device. */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ,
			_ocf_read_generic_miss_io, rq);

	return;

err_alloc:
	_ocf_read_generic_miss_io(rq, -ENOMEM);
}

static int _ocf_read_generic_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq) && rq->map->rd_locked) {
		/* Miss can be handled only on write locks.
		 * Need to switch to PT
		 */
		OCF_DEBUG_RQ(rq, "Switching to PT");
		ocf_read_pt_do(rq);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	if (ocf_engine_is_miss(rq)) {
		if (rq->info.dirty_any) {
			OCF_METADATA_LOCK_RD();

			/* Request is dirty, need to clean it first */
			ocf_engine_clean(rq);

			OCF_METADATA_UNLOCK_RD();

			/* The request has to be cleaned before processing, return */
			ocf_rq_put(rq);

			return 0;
		}

		OCF_METADATA_LOCK_RD();

		/* Set valid status bits map */
		ocf_set_valid_map_info(rq);

		OCF_METADATA_UNLOCK_RD();
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned to the wrong
		 * partition. Need to move them to the new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	OCF_DEBUG_RQ(rq, "Submit");

	/* Submit IO */
	if (ocf_engine_is_hit(rq))
		_ocf_read_generic_submit_hit(rq);
	else
		_ocf_read_generic_submit_miss(rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static const struct ocf_io_if _io_if_read_generic_resume = {
	.read = _ocf_read_generic_do,
	.write = _ocf_read_generic_do,
};

int ocf_read_generic(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
		/* There are conditions to bypass IO */
		ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
		return 0;
	}

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_read_generic_resume;

	/*- Metadata RD access -----------------------------------------------*/

	OCF_METADATA_LOCK_RD();

	/* Traverse request to check if there is a hit */
	ocf_engine_traverse(rq);

	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		/* Request is fully mapped, no need to call eviction */
		if (ocf_engine_is_hit(rq)) {
			/* There is a hit, lock request for READ access */
			lock = ocf_rq_trylock_rd(rq);
		} else {
			/* All cache lines mapped, but some sectors are not
			 * valid and cache insert will be performed - lock for
			 * WRITE is required
			 */
			lock = ocf_rq_trylock_wr(rq);
		}
	}

	OCF_METADATA_UNLOCK_RD();

	/*- END Metadata RD access -------------------------------------------*/

	if (!mapped) {

		/*- Metadata WR access ---------------------------------------*/

		OCF_METADATA_LOCK_WR();

		/* Now there is exclusive access for metadata. May traverse once
		 * again. If there are misses need to call eviction. This
		 * process is called 'mapping'.
		 */
		ocf_engine_map(rq);

		if (!rq->info.eviction_error) {
			if (ocf_engine_is_hit(rq)) {
				/* After mapping turns out there is hit,
				 * so lock OCF request for read access
				 */
				lock = ocf_rq_trylock_rd(rq);
			} else {
				/* Miss, new cache lines were mapped,
				 * need to lock OCF request for write access
				 */
				lock = ocf_rq_trylock_wr(rq);
			}
		}
		OCF_METADATA_UNLOCK_WR();

		/*- END Metadata WR access -----------------------------------*/
	}

	if (!rq->info.eviction_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* Lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				/* Lock was acquired, can perform IO */
				_ocf_read_generic_do(rq);
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
11
src/engine/engine_rd.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_RD_H_
#define ENGINE_RD_H_

int ocf_read_generic(struct ocf_request *rq);

#endif /* ENGINE_RD_H_ */
92
src/engine/engine_wa.c
Normal file
@@ -0,0 +1,92 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wa.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"

#define OCF_ENGINE_DEBUG_IO_NAME "wa"
#include "engine_debug.h"
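/*
 * Write-around engine: a full hit is downgraded to write-through (the data
 * is already cached, so it has to stay coherent), a partial mapping is
 * handled as write-invalidate, and a clean miss goes straight to the core
 * device without inserting anything into the cache.
 */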
static void _ocf_read_wa_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error)
		rq->error |= error;

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	if (rq->error) {
		rq->info.core_error = 1;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				core_errors.write);
	}

	/* Complete request */
	rq->complete(rq, rq->error);

	OCF_DEBUG_RQ(rq, "Completion");

	/* Release OCF request */
	ocf_rq_put(rq);
}

int ocf_write_wa(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/

	/* Traverse request to check if there are mapped cache lines */
	ocf_engine_traverse(rq);

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/

	if (ocf_engine_is_hit(rq)) {
		ocf_rq_clear(rq);

		/* There is HIT, do WT */
		ocf_get_io_if(ocf_cache_mode_wt)->write(rq);
	} else if (ocf_engine_mapped_count(rq)) {
		ocf_rq_clear(rq);

		/* Partial MISS, do WI */
		ocf_get_io_if(ocf_cache_mode_wi)->write(rq);
	} else {
		/* There is no mapped cache line, write directly into core */

		OCF_DEBUG_RQ(rq, "Submit");

		/* Submit write IO to the core */
		env_atomic_set(&rq->req_remaining, 1);
		ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
				OCF_WRITE, _ocf_read_wa_io, rq);

		/* Update statistics */
		ocf_engine_update_block_stats(rq);
		env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
				part_counters[rq->part_id].write_reqs.pass_through);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
11
src/engine/engine_wa.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_WA_H_
#define ENGINE_WA_H_

int ocf_write_wa(struct ocf_request *rq);

#endif /* ENGINE_WA_H_ */
242
src/engine/engine_wb.c
Normal file
@@ -0,0 +1,242 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_wb.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG_IO_NAME "wb"
#include "engine_debug.h"
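/*
 * Write-back engine: data is written to the cache only. Newly mapped lines
 * are marked valid, the written sectors are marked dirty and, when required,
 * the metadata is flushed asynchronously after the data IO completes, before
 * the request is finally completed to the caller.
 */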
static const struct ocf_io_if _io_if_wb_resume = {
	.read = ocf_write_wb_do,
	.write = ocf_write_wb_do,
};

static void _ocf_write_wb_update_bits(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq)) {
		OCF_METADATA_LOCK_RD();
		/* Update valid status bits */
		ocf_set_valid_map_info(rq);

		OCF_METADATA_UNLOCK_RD();
	}

	if (!ocf_engine_is_dirty_all(rq)) {
		OCF_METADATA_LOCK_WR();

		/* Set dirty bits, and mark if metadata flushing is required */
		ocf_set_dirty_map_info(rq);

		OCF_METADATA_UNLOCK_WR();
	}
}

static void _ocf_write_wb_io_flush_metadata(void *private_data, int error)
{
	struct ocf_request *rq = (struct ocf_request *) private_data;

	if (error)
		rq->error = error;

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	if (rq->error)
		ocf_engine_error(rq, true, "Failed to write data to cache");

	ocf_rq_unlock_wr(rq);

	rq->complete(rq, rq->error);

	ocf_rq_put(rq);
}

static int ocf_write_wb_do_flush_metadata(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	if (rq->info.flush_metadata) {
		OCF_DEBUG_RQ(rq, "Flush metadata");
		ocf_metadata_flush_do_asynch(cache, rq,
				_ocf_write_wb_io_flush_metadata);
	}

	_ocf_write_wb_io_flush_metadata(rq, 0);

	return 0;
}

static const struct ocf_io_if _io_if_wb_flush_metadata = {
	.read = ocf_write_wb_do_flush_metadata,
	.write = ocf_write_wb_do_flush_metadata,
};

static void _ocf_write_wb_io(void *private_data, int error)
{
	struct ocf_request *rq = (struct ocf_request *) private_data;

	if (error) {
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.write);
		rq->error |= error;
	}

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error) {
		ocf_engine_error(rq, true, "Failed to write data to cache");

		ocf_rq_unlock_wr(rq);

		rq->complete(rq, rq->error);

		ocf_rq_put(rq);
	} else {
		ocf_engine_push_rq_front_if(rq, &_io_if_wb_flush_metadata,
				true);
	}
}

static inline void _ocf_write_wb_submit(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));

	/*
	 * 1. Submit data
	 * 2. Wait for completion of data
	 * 3. Then continue processing request (flush metadata)
	 */

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned to the wrong
		 * partition. Need to move them to the new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	OCF_DEBUG_RQ(rq, "Submit Data");

	/* Data IO */
	ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
			ocf_engine_io_count(rq), _ocf_write_wb_io, rq);
}

int ocf_write_wb_do(struct ocf_request *rq)
{
	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Update status bits */
	_ocf_write_wb_update_bits(rq);

	/* Submit IO */
	_ocf_write_wb_submit(rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

int ocf_write_wb(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	/* Not sure if we need this. */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_wb_resume;

	/* TODO: Handle fits into dirty */

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	/* Traverse to check if the request is fully mapped */
	ocf_engine_traverse(rq);

	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		/* All cache lines are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

	if (!mapped) {
		OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/

		/* Now there is exclusive access for metadata. May traverse once
		 * again. If there are misses need to call eviction. This
		 * process is called 'mapping'.
		 */
		ocf_engine_map(rq);

		if (!rq->info.eviction_error) {
			/* Lock request for WRITE access */
			lock = ocf_rq_trylock_wr(rq);
		}

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	if (!rq->info.eviction_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* WR lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				ocf_write_wb_do(rq);
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
12
src/engine/engine_wb.h
Normal file
@@ -0,0 +1,12 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#ifndef ENGINE_WB_H_
#define ENGINE_WB_H_

int ocf_write_wb(struct ocf_request *rq);

int ocf_write_wb_do(struct ocf_request *rq);

#endif /* ENGINE_WB_H_ */
190
src/engine/engine_wi.c
Normal file
@@ -0,0 +1,190 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wi.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"

#define OCF_ENGINE_DEBUG_IO_NAME "wi"
#include "engine_debug.h"
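/*
 * Write-invalidate engine: the data is written to the core device first;
 * once that IO completes, any cache lines mapped for the request are purged
 * from metadata (with a metadata flush when the lines were dirty), so the
 * cache never keeps stale data for the written range.
 */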
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq);

static const struct ocf_io_if _io_if_wi_flush_metadata = {
	.read = ocf_write_wi_update_and_flush_metadata,
	.write = ocf_write_wi_update_and_flush_metadata,
};

static void _ocf_write_wi_io_flush_metadata(void *private_data, int error)
{
	struct ocf_request *rq = (struct ocf_request *) private_data;

	if (error) {
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.write);
		rq->error |= error;
	}

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	if (rq->error)
		ocf_engine_error(rq, true, "Failed to write data to cache");

	ocf_rq_unlock_wr(rq);

	rq->complete(rq, rq->error);

	ocf_rq_put(rq);
}

static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	if (ocf_engine_mapped_count(rq)) {
		/* There are mapped cache lines, need to remove them */

		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(rq);

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/

		if (rq->info.flush_metadata) {
			/* Request was dirty and needs to flush metadata */
			ocf_metadata_flush_do_asynch(cache, rq,
					_ocf_write_wi_io_flush_metadata);
		}
	}

	_ocf_write_wi_io_flush_metadata(rq, 0);

	return 0;
}

static void _ocf_write_wi_core_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error) {
		rq->error = error;
		rq->info.core_error = 1;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				core_errors.write);
	}

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error) {
		ocf_rq_unlock_wr(rq);

		rq->complete(rq, rq->error);

		ocf_rq_put(rq);
	} else {
		ocf_engine_push_rq_front_if(rq, &_io_if_wi_flush_metadata,
				true);
	}
}

static int _ocf_write_wi_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	env_atomic_set(&rq->req_remaining, 1); /* One core IO */

	OCF_DEBUG_RQ(rq, "Submit");

	/* Submit write IO to the core */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE,
			_ocf_write_wi_core_io, rq);

	/* Update statistics */
	ocf_engine_update_block_stats(rq);
	env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
			part_counters[rq->part_id].write_reqs.pass_through);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static void _ocf_write_wi_on_resume(struct ocf_request *rq)
{
	OCF_DEBUG_RQ(rq, "On resume");
	ocf_engine_push_rq_front(rq, true);
}

static const struct ocf_io_if _io_if_wi_resume = {
	.read = _ocf_write_wi_do,
	.write = _ocf_write_wi_do,
};

int ocf_write_wi(struct ocf_request *rq)
{
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	OCF_DEBUG_TRACE(rq->cache);

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = _ocf_write_wi_on_resume;
	rq->io_if = &_io_if_wi_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	/* Traverse to check if the request is fully mapped */
	ocf_engine_traverse(rq);

	if (ocf_engine_mapped_count(rq)) {
		/* Some cache lines are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
	} else {
		lock = OCF_LOCK_ACQUIRED;
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

	if (lock >= 0) {
		if (lock == OCF_LOCK_ACQUIRED) {
			_ocf_write_wi_do(rq);
		} else {
			/* WR lock was not acquired, need to wait for resume */
			OCF_DEBUG_RQ(rq, "NO LOCK");
		}
	} else {
		OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
		rq->complete(rq, lock);
		ocf_rq_put(rq);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
11
src/engine/engine_wi.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_WI_H_
#define ENGINE_WI_H_

int ocf_write_wi(struct ocf_request *rq);

#endif /* ENGINE_WI_H_ */
236
src/engine/engine_wt.c
Normal file
@@ -0,0 +1,236 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wt.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"

#define OCF_ENGINE_DEBUG_IO_NAME "wt"
#include "engine_debug.h"
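/*
 * Write-through engine: the data is written to the cache and to the core
 * device in parallel; the request completes only after both IOs (and the
 * metadata flush, when one is needed) have finished. On error the mapped
 * lines are invalidated, so the cache does not keep a possibly inconsistent
 * copy.
 */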
static void _ocf_write_wt_io(struct ocf_request *rq)
{
	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	OCF_DEBUG_RQ(rq, "Completion");

	if (rq->error) {
		/* An error occurred */

		/* Complete request */
		rq->complete(rq, rq->info.core_error ? rq->error : 0);

		ocf_engine_invalidate(rq);
	} else {
		/* Unlock request from WRITE access */
		ocf_rq_unlock_wr(rq);

		/* Complete request */
		rq->complete(rq, rq->info.core_error ? rq->error : 0);

		/* Release OCF request */
		ocf_rq_put(rq);
	}
}

static void _ocf_write_wt_cache_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error) {
		rq->error = rq->error ?: error;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.write);

		if (rq->error)
			inc_fallback_pt_error_counter(rq->cache);
	}

	_ocf_write_wt_io(rq);
}

static void _ocf_write_wt_core_io(void *private_data, int error)
{
	struct ocf_request *rq = private_data;

	if (error) {
		rq->error = error;
		rq->info.core_error = 1;
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				core_errors.write);
	}

	_ocf_write_wt_io(rq);
}

static inline void _ocf_write_wt_submit(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Submit IOs */
	OCF_DEBUG_RQ(rq, "Submit");

	/* Calculate how many IOs need to be submitted */
	env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); /* Cache IO */
	env_atomic_inc(&rq->req_remaining); /* Core device IO */

	if (rq->info.flush_metadata) {
		/* Metadata flush IO */

		ocf_metadata_flush_do_asynch(cache, rq,
				_ocf_write_wt_cache_io);
	}

	/* To cache */
	ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
			ocf_engine_io_count(rq), _ocf_write_wt_cache_io, rq);

	/* To core */
	ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE,
			_ocf_write_wt_core_io, rq);
}

static void _ocf_write_wt_update_bits(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (ocf_engine_is_miss(rq)) {
		OCF_METADATA_LOCK_RD();

		/* Update valid status bits */
		ocf_set_valid_map_info(rq);

		OCF_METADATA_UNLOCK_RD();
	}

	if (rq->info.dirty_any) {
		OCF_METADATA_LOCK_WR();

		/* Writes go to SSD and HDD, need to update status bits from
		 * dirty to clean
		 */

		ocf_set_clean_map_info(rq);

		OCF_METADATA_UNLOCK_WR();
	}

	if (rq->info.re_part) {
		OCF_DEBUG_RQ(rq, "Re-Part");

		OCF_METADATA_LOCK_WR();

		/* Probably some cache lines are assigned to the wrong
		 * partition. Need to move them to the new one
		 */
		ocf_part_move(rq);

		OCF_METADATA_UNLOCK_WR();
	}
}

static int _ocf_write_wt_do(struct ocf_request *rq)
{
	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Update status bits */
	_ocf_write_wt_update_bits(rq);

	/* Submit IO */
	_ocf_write_wt_submit(rq);

	/* Update statistics */
	ocf_engine_update_request_stats(rq);
	ocf_engine_update_block_stats(rq);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static const struct ocf_io_if _io_if_wt_resume = {
	.read = _ocf_write_wt_do,
	.write = _ocf_write_wt_do,
};

int ocf_write_wt(struct ocf_request *rq)
{
	bool mapped;
	int lock = OCF_LOCK_NOT_ACQUIRED;
	struct ocf_cache *cache = rq->cache;

	ocf_io_start(rq->io);

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Set resume callbacks */
	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_wt_resume;

	OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/

	/* Traverse to check if the request is fully mapped */
	ocf_engine_traverse(rq);

	mapped = ocf_engine_is_mapped(rq);
	if (mapped) {
		/* All cache lines are mapped, lock request for WRITE access */
		lock = ocf_rq_trylock_wr(rq);
	}

	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/

	if (!mapped) {
		OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/

		/* Now there is exclusive access for metadata. May traverse once
		 * again. If there are misses need to call eviction. This
		 * process is called 'mapping'.
		 */
		ocf_engine_map(rq);

		if (!rq->info.eviction_error) {
			/* Lock request for WRITE access */
			lock = ocf_rq_trylock_wr(rq);
		}

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	if (!rq->info.eviction_error) {
		if (lock >= 0) {
			if (lock != OCF_LOCK_ACQUIRED) {
				/* WR lock was not acquired, need to wait for resume */
				OCF_DEBUG_RQ(rq, "NO LOCK");
			} else {
				_ocf_write_wt_do(rq);
			}
		} else {
			OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
			rq->complete(rq, lock);
			ocf_rq_put(rq);
		}
	} else {
		ocf_rq_clear(rq);
		ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
	}

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}
11
src/engine/engine_wt.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_WT_H_
#define ENGINE_WT_H_

int ocf_write_wt(struct ocf_request *rq);

#endif /* ENGINE_WT_H_ */
168
src/engine/engine_zero.c
Normal file
@@ -0,0 +1,168 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_zero.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"

#define OCF_ENGINE_DEBUG_IO_NAME "zero"
#include "engine_debug.h"
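/*
 * Zeroing/discard engine for a single mapped core line: the affected sectors
 * are marked INVALID in the flush map, the metadata is flushed if it was
 * dirty, and finally the mapping is purged so the line can be reused.
 */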
static int ocf_zero_purge(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	if (rq->error) {
		ocf_engine_error(rq, true, "Failed to discard data on cache");
	} else {
		/* There are mapped cache lines, need to remove them */

		OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/

		/* Remove mapped cache lines from metadata */
		ocf_purge_map_info(rq);

		OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
	}

	ocf_rq_unlock_wr(rq);

	rq->complete(rq, rq->error);

	ocf_rq_put(rq);

	return 0;
}

static const struct ocf_io_if _io_if_zero_purge = {
	.read = ocf_zero_purge,
	.write = ocf_zero_purge,
};

static void _ocf_zero_io_flush_metadata(void *private_data, int error)
{
	struct ocf_request *rq = (struct ocf_request *) private_data;

	if (error) {
		env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
				cache_errors.write);
		rq->error = error;
	}

	if (env_atomic_dec_return(&rq->req_remaining))
		return;

	ocf_engine_push_rq_front_if(rq, &_io_if_zero_purge, true);
}

static inline void ocf_zero_map_info(struct ocf_request *rq)
{
	uint32_t map_idx = 0;
	uint8_t start_bit;
	uint8_t end_bit;
	struct ocf_map_info *map = rq->map;
	struct ocf_cache *cache = rq->cache;
	uint32_t count = rq->core_line_count;

	/* Purge range on the basis of map info
	 *
	 * | 01234567 | 01234567 | ... | 01234567 | 01234567 |
	 * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
	 * |   first  |          Middle           |   last   |
	 */

	for (map_idx = 0; map_idx < count; map_idx++) {
		if (map[map_idx].status == LOOKUP_MISS)
			continue;

		start_bit = 0;
		end_bit = ocf_line_end_sector(cache);

		if (map_idx == 0) {
			/* First */
			start_bit = BYTES_TO_SECTORS(rq->byte_position)
					% ocf_line_sectors(cache);
		}

		if (map_idx == (count - 1)) {
			/* Last */
			end_bit = BYTES_TO_SECTORS(rq->byte_position +
					rq->byte_length - 1) %
					ocf_line_sectors(cache);
		}

		ocf_metadata_flush_mark(cache, rq, map_idx, INVALID,
				start_bit, end_bit);
	}
}

static int _ocf_zero_do(struct ocf_request *rq)
{
	struct ocf_cache *cache = rq->cache;

	/* Get OCF request - increase reference counter */
	ocf_rq_get(rq);

	/* Mark cache lines for zeroing/discarding */
	ocf_zero_map_info(rq);

	/* Discard marked cache lines */
	env_atomic_set(&rq->req_remaining, 1);
	if (rq->info.flush_metadata) {
		/* Request was dirty and needs to flush metadata */
		ocf_metadata_flush_do_asynch(cache, rq,
				_ocf_zero_io_flush_metadata);
	}
	_ocf_zero_io_flush_metadata(rq, 0);

	/* Put OCF request - decrease reference counter */
	ocf_rq_put(rq);

	return 0;
}

static const struct ocf_io_if _io_if_ocf_zero_do = {
	.read = _ocf_zero_do,
	.write = _ocf_zero_do,
};

/**
 * @note
 *	- Caller has to have metadata write lock
 *	- Core line has to be mapped
 */
void ocf_engine_zero_line(struct ocf_request *rq)
{
	int lock = OCF_LOCK_NOT_ACQUIRED;

	ENV_BUG_ON(rq->core_line_count != 1);

	/* Traverse to check if request is mapped */
	ocf_engine_traverse(rq);

	ENV_BUG_ON(!ocf_engine_is_mapped(rq));

	rq->resume = ocf_engine_on_resume;
	rq->io_if = &_io_if_ocf_zero_do;

	/* Some cache lines are mapped, lock request for WRITE access */
	lock = ocf_rq_trylock_wr(rq);

	if (lock >= 0) {
		ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
		ocf_engine_push_rq_front_if(rq, &_io_if_ocf_zero_do, true);
	} else {
		OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
		rq->complete(rq, lock);
		ocf_rq_put(rq);
	}
}
11
src/engine/engine_zero.h
Normal file
@@ -0,0 +1,11 @@
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef ENGINE_ZERO_H_
#define ENGINE_ZERO_H_

void ocf_engine_zero_line(struct ocf_request *rq);

#endif /* ENGINE_ZERO_H_ */