Introduce multi-stream sequential cutoff

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga 2020-03-20 00:10:21 +01:00
parent 332ad1dfbc
commit 93cd0615d3
8 changed files with 189 additions and 77 deletions
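Each core now tracks up to OCF_SEQ_CUTOFF_MAX_STREAMS (256) independent sequential streams in an rb-tree with LRU slot reuse, instead of a single (last, bytes, rw) tuple. A minimal sketch of how the request path drives the new API, using only names introduced by this diff; the wrapper function itself is illustrative and not part of the commit:

/* Illustrative sketch - mirrors the ocf_resolve_effective_cache_mode() and
 * ocf_core_volume_submit_io() hunks below; assumes ocf_seq_cutoff.h and the
 * core/request headers are available. */
static void seq_cutoff_on_submit(ocf_core_t core, struct ocf_request *req)
{
	/* Redirect to pass-through when the request continues a stream that
	 * has already crossed the configured threshold. */
	if (ocf_core_seq_cutoff_check(core, req)) {
		req->cache_mode = ocf_req_cache_mode_pt;
		req->seq_cutoff = 1;
	}

	/* Record the IO so a follow-up request starting at byte_position +
	 * byte_length extends the same stream. */
	ocf_core_seq_cutoff_update(core, req);
}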


@@ -175,6 +175,8 @@ typedef enum {
/*!< Current cache mode of given cache instance */
} ocf_cache_mode_t;
#define OCF_SEQ_CUTOFF_MAX_STREAMS 256
typedef enum {
ocf_seq_cutoff_policy_always = 0,
/*!< Sequential cutoff always on */


@@ -7,6 +7,7 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_seq_cutoff.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_rd.h"
@@ -161,68 +162,6 @@ bool ocf_fallback_pt_is_on(ocf_cache_t cache)
cache->fallback_pt_error_threshold);
}
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN);
}
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
switch (policy) {
case ocf_seq_cutoff_policy_always:
break;
case ocf_seq_cutoff_policy_full:
if (ocf_seq_cutoff_is_on(cache))
break;
return false;
case ocf_seq_cutoff_policy_never:
return false;
default:
ENV_WARN(true, "Invalid sequential cutoff policy!");
return false;
}
if (dir == core->seq_cutoff.rw &&
core->seq_cutoff.last == addr &&
core->seq_cutoff.bytes + bytes >=
ocf_core_get_seq_cutoff_threshold(core)) {
return true;
}
return false;
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
/*
* If IO is not consequent or has another direction,
* reset sequential cutoff state.
*/
if (req->byte_position != core->seq_cutoff.last ||
req->rw != core->seq_cutoff.rw) {
core->seq_cutoff.rw = req->rw;
core->seq_cutoff.bytes = 0;
}
/* Update last accessed position and bytes counter */
core->seq_cutoff.last = req->byte_position + req->byte_length;
core->seq_cutoff.bytes += req->byte_length;
}
void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
ocf_core_t core, struct ocf_request *req)
{
@@ -242,8 +181,7 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
return;
}
if (ocf_seq_cutoff_check(core, req->rw, req->byte_position,
req->byte_length)) {
if (ocf_core_seq_cutoff_check(core, req)) {
req->cache_mode = ocf_req_cache_mode_pt;
req->seq_cutoff = 1;
return;


@@ -64,13 +64,8 @@ static inline bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)
(ocf_cache_mode_t)mode);
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
bool ocf_fallback_pt_is_on(ocf_cache_t cache);
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes);
struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
struct ocf_queue *q);


@@ -406,6 +406,8 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
context->flags.clean_pol_added = true;
}
ocf_core_seq_cutoff_init(core);
/* When adding new core to cache, allocate stat counters */
core->counters =
env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);


@@ -249,7 +249,7 @@ void ocf_core_volume_submit_io(struct ocf_io *io)
ocf_resolve_effective_cache_mode(cache, core, req);
ocf_seq_cutoff_update(core, req);
ocf_core_seq_cutoff_update(core, req);
ocf_core_update_stats(core, io);
@@ -339,7 +339,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
fast = ocf_engine_hndl_fast_req(req);
if (fast != OCF_FAST_PATH_NO) {
ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
ocf_seq_cutoff_update(core, req);
ocf_core_seq_cutoff_update(core, req);
return 0;
}


@@ -10,6 +10,7 @@
#include "ocf_env.h"
#include "ocf_ctx_priv.h"
#include "ocf_volume_priv.h"
#include "ocf_seq_cutoff.h"
#define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) \
ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, ".%s" prefix, \
@@ -69,7 +70,6 @@ struct ocf_core_meta_runtime {
} part_counters[OCF_IO_CLASS_MAX];
};
struct ocf_core {
struct ocf_volume front_volume;
struct ocf_volume volume;
@@ -77,11 +77,7 @@ struct ocf_core {
struct ocf_core_meta_config *conf_meta;
struct ocf_core_meta_runtime *runtime_meta;
struct {
uint64_t last;
uint64_t bytes;
int rw;
} seq_cutoff;
struct ocf_seq_cutoff seq_cutoff;
env_atomic flushed;

src/ocf_seq_cutoff.c (new file, 144 lines added)

@@ -0,0 +1,144 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_seq_cutoff.h"
#include "ocf_cache_priv.h"
#include "ocf_priv.h"
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN);
}
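/* Streams live in a per-core red-black tree ordered by IO direction first
 * and last accessed byte position second, so a request can be matched to
 * the stream it directly continues. */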
static int ocf_seq_cutoff_stream_cmp(struct ocf_rb_node *n1,
struct ocf_rb_node *n2)
{
struct ocf_seq_cutoff_stream *stream1 = container_of(n1,
struct ocf_seq_cutoff_stream, node);
struct ocf_seq_cutoff_stream *stream2 = container_of(n2,
struct ocf_seq_cutoff_stream, node);
if (stream1->rw < stream2->rw)
return -1;
if (stream1->rw > stream2->rw)
return 1;
if (stream1->last < stream2->last)
return -1;
if (stream1->last > stream2->last)
return 1;
return 0;
}
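/* Pre-populate all OCF_SEQ_CUTOFF_MAX_STREAMS slots, linking each one into
 * both the lookup tree and the LRU list used for slot reuse. */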
void ocf_core_seq_cutoff_init(ocf_core_t core)
{
struct ocf_seq_cutoff_stream *stream;
int i;
ocf_core_log(core, log_info, "Sequential cutoff init\n");
env_rwlock_init(&core->seq_cutoff.lock);
ocf_rb_tree_init(&core->seq_cutoff.tree, ocf_seq_cutoff_stream_cmp);
INIT_LIST_HEAD(&core->seq_cutoff.lru);
for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) {
stream = &core->seq_cutoff.streams[i];
stream->last = 0;
stream->bytes = 0;
stream->rw = 0;
ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
list_add_tail(&stream->list, &core->seq_cutoff.lru);
}
}
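/* Under a read lock, look up a stream that ends exactly where this request
 * begins; the request is cut off once the accumulated stream length plus the
 * current request length reaches the configured threshold. */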
bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
{
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core);
ocf_cache_t cache = ocf_core_get_cache(core);
struct ocf_seq_cutoff_stream item = {
.last = req->byte_position, .rw = req->rw
};
struct ocf_seq_cutoff_stream *stream;
struct ocf_rb_node *node;
bool result = false;
switch (policy) {
case ocf_seq_cutoff_policy_always:
break;
case ocf_seq_cutoff_policy_full:
if (ocf_seq_cutoff_is_on(cache))
break;
return false;
case ocf_seq_cutoff_policy_never:
return false;
default:
ENV_WARN(true, "Invalid sequential cutoff policy!");
return false;
}
env_rwlock_read_lock(&core->seq_cutoff.lock);
node = ocf_rb_tree_find(&core->seq_cutoff.tree, &item.node);
if (node) {
stream = container_of(node, struct ocf_seq_cutoff_stream, node);
if (stream->bytes + req->byte_length >= threshold)
result = true;
}
env_rwlock_read_unlock(&core->seq_cutoff.lock);
return result;
}
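/* Track this IO unless the policy is 'never': under a write lock either
 * extend the stream the request continues (re-inserting the node only when
 * its new position would break tree ordering) or recycle the least recently
 * used slot for a new stream. */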
void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
struct ocf_seq_cutoff_stream item = {
.last = req->byte_position, .rw = req->rw
};
struct ocf_seq_cutoff_stream *stream;
struct ocf_rb_node *node;
bool can_update;
if (policy == ocf_seq_cutoff_policy_never)
return;
/* Update last accessed position and bytes counter */
env_rwlock_write_lock(&core->seq_cutoff.lock);
node = ocf_rb_tree_find(&core->seq_cutoff.tree, &item.node);
if (node) {
stream = container_of(node, struct ocf_seq_cutoff_stream, node);
item.last = req->byte_position + req->byte_length;
can_update = ocf_rb_tree_can_update(&core->seq_cutoff.tree,
node, &item.node);
stream->last = req->byte_position + req->byte_length;
stream->bytes += req->byte_length;
if (!can_update) {
ocf_rb_tree_remove(&core->seq_cutoff.tree, node);
ocf_rb_tree_insert(&core->seq_cutoff.tree, node);
}
list_move_tail(&stream->list, &core->seq_cutoff.lru);
} else {
stream = list_first_entry(&core->seq_cutoff.lru,
struct ocf_seq_cutoff_stream, list);
ocf_rb_tree_remove(&core->seq_cutoff.tree, &stream->node);
stream->rw = req->rw;
stream->last = req->byte_position + req->byte_length;
stream->bytes = req->byte_length;
ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
list_move_tail(&stream->list, &core->seq_cutoff.lru);
}
env_rwlock_write_unlock(&core->seq_cutoff.lock);
}

src/ocf_seq_cutoff.h (new file, 35 lines added)

@@ -0,0 +1,35 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_SEQ_CUTOFF_H__
#define __OCF_SEQ_CUTOFF_H__
#include "ocf/ocf.h"
#include "ocf_request.h"
#include "utils/utils_rbtree.h"
struct ocf_seq_cutoff_stream {
uint64_t last;
uint64_t bytes;
uint32_t rw : 1;
struct ocf_rb_node node;
struct list_head list;
};
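/* Per-core cutoff state: a fixed pool of streams, an rb-tree for stream
 * lookup and an LRU list for picking a slot to reuse, guarded by a rwlock. */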
struct ocf_seq_cutoff {
ocf_core_t core;
env_rwlock lock;
struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_MAX_STREAMS];
struct ocf_rb_tree tree;
struct list_head lru;
};
void ocf_core_seq_cutoff_init(ocf_core_t core);
bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req);
void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
#endif /* __OCF_SEQ_CUTOFF_H__ */