Merge pull request #354 from robertbaldyga/multistream-seq-cutoff

Introduce multi-stream sequential cutoff
Robert Baldyga 2020-04-22 15:35:42 +02:00 committed by GitHub
commit 188559416c
15 changed files with 909 additions and 88 deletions

inc/ocf_debug.h (new file, 21 lines)

@@ -0,0 +1,21 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_DEBUG_H__
#define __OCF_DEBUG_H__
struct ocf_dbg_seq_cutoff_status {
struct {
uint64_t last;
uint64_t bytes;
uint32_t rw : 1;
uint32_t active : 1;
} streams[OCF_SEQ_CUTOFF_MAX_STREAMS];
};
void ocf_dbg_get_seq_cutoff_status(ocf_core_t core,
struct ocf_dbg_seq_cutoff_status *status);
#endif /* __OCF_DEBUG_H__ */
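A minimal sketch of how this debug interface might be consumed. Only ocf_dbg_get_seq_cutoff_status(), the status struct and OCF_SEQ_CUTOFF_MAX_STREAMS come from this patch; the dump function and the plain printf logging are illustrative assumptions:

#include <stdio.h>
#include "ocf/ocf.h"
#include "ocf/ocf_debug.h"

/* Sketch: dump every tracked stream of a core */
static void dump_seq_cutoff_status(ocf_core_t core)
{
	struct ocf_dbg_seq_cutoff_status status;
	int i;

	ocf_dbg_get_seq_cutoff_status(core, &status);

	for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) {
		printf("stream %d: last=%llu bytes=%llu rw=%u active=%u\n", i,
				(unsigned long long)status.streams[i].last,
				(unsigned long long)status.streams[i].bytes,
				status.streams[i].rw, status.streams[i].active);
	}
}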


@@ -175,6 +175,8 @@ typedef enum {
/*!< Current cache mode of given cache instance */
} ocf_cache_mode_t;
#define OCF_SEQ_CUTOFF_MAX_STREAMS 256
typedef enum {
ocf_seq_cutoff_policy_always = 0,
/*!< Sequential cutoff always on */


@@ -7,6 +7,7 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_seq_cutoff.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_rd.h"
@@ -161,68 +162,6 @@ bool ocf_fallback_pt_is_on(ocf_cache_t cache)
cache->fallback_pt_error_threshold);
}
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN);
}
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
switch (policy) {
case ocf_seq_cutoff_policy_always:
break;
case ocf_seq_cutoff_policy_full:
if (ocf_seq_cutoff_is_on(cache))
break;
return false;
case ocf_seq_cutoff_policy_never:
return false;
default:
ENV_WARN(true, "Invalid sequential cutoff policy!");
return false;
}
if (dir == core->seq_cutoff.rw &&
core->seq_cutoff.last == addr &&
core->seq_cutoff.bytes + bytes >=
ocf_core_get_seq_cutoff_threshold(core)) {
return true;
}
return false;
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
/*
* If IO is not consequent or has another direction,
* reset sequential cutoff state.
*/
if (req->byte_position != core->seq_cutoff.last ||
req->rw != core->seq_cutoff.rw) {
core->seq_cutoff.rw = req->rw;
core->seq_cutoff.bytes = 0;
}
/* Update last accessed position and bytes counter */
core->seq_cutoff.last = req->byte_position + req->byte_length;
core->seq_cutoff.bytes += req->byte_length;
}
void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
ocf_core_t core, struct ocf_request *req)
{
@@ -242,8 +181,7 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
return;
}
if (ocf_seq_cutoff_check(core, req->rw, req->byte_position,
req->byte_length)) {
if (ocf_core_seq_cutoff_check(core, req)) {
req->cache_mode = ocf_req_cache_mode_pt;
req->seq_cutoff = 1;
return;


@@ -64,13 +64,8 @@ static inline bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)
(ocf_cache_mode_t)mode);
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
bool ocf_fallback_pt_is_on(ocf_cache_t cache);
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes);
struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
struct ocf_queue *q);


@@ -406,6 +406,8 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
context->flags.clean_pol_added = true;
}
ocf_core_seq_cutoff_init(core);
/* When adding new core to cache, allocate stat counters */
core->counters =
env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
@@ -427,8 +429,10 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
core->opened = true;
/* Set default cache parameters for sequential */
core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;
env_atomic_set(&core->conf_meta->seq_cutoff_policy,
ocf_seq_cutoff_policy_default);
env_atomic_set(&core->conf_meta->seq_cutoff_threshold,
cfg->seq_cutoff_threshold);
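	/* Both fields are env_atomic now, so these management-path writes and
	 * the env_atomic_read() calls in ocf_core_get_seq_cutoff_threshold()
	 * and ocf_core_get_seq_cutoff_policy() cannot observe torn values */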
/* Add core sequence number for atomic metadata matching */
core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
@@ -855,7 +859,7 @@ int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
static int _cache_mngt_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
{
uint32_t threshold = *(uint32_t*) cntx;
uint32_t threshold_old = core->conf_meta->seq_cutoff_threshold;
uint32_t threshold_old = ocf_core_get_seq_cutoff_threshold(core);
if (threshold_old == threshold) {
ocf_core_log(core, log_info,
@@ -863,7 +867,8 @@ static int _cache_mngt_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx
"already set\n", threshold);
return 0;
}
core->conf_meta->seq_cutoff_threshold = threshold;
env_atomic_set(&core->conf_meta->seq_cutoff_threshold, threshold);
ocf_core_log(core, log_info, "Changing sequential cutoff "
"threshold from %u to %u bytes successful\n",
@@ -916,7 +921,7 @@ static const char *_cache_mngt_seq_cutoff_policy_get_name(
static int _cache_mngt_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
{
ocf_seq_cutoff_policy policy = *(ocf_seq_cutoff_policy*) cntx;
uint32_t policy_old = core->conf_meta->seq_cutoff_policy;
uint32_t policy_old = ocf_core_get_seq_cutoff_policy(core);
if (policy_old == policy) {
ocf_core_log(core, log_info,
@@ -931,7 +936,7 @@ static int _cache_mngt_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
return -OCF_ERR_INVAL;
}
core->conf_meta->seq_cutoff_policy = policy;
env_atomic_set(&core->conf_meta->seq_cutoff_policy, policy);
ocf_core_log(core, log_info,
"Changing sequential cutoff policy from %s to %s\n",


@@ -106,12 +106,12 @@ int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
{
return core->conf_meta->seq_cutoff_threshold;
return env_atomic_read(&core->conf_meta->seq_cutoff_threshold);
}
ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
{
return core->conf_meta->seq_cutoff_policy;
return env_atomic_read(&core->conf_meta->seq_cutoff_policy);
}
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
@@ -249,7 +249,7 @@ void ocf_core_volume_submit_io(struct ocf_io *io)
ocf_resolve_effective_cache_mode(cache, core, req);
ocf_seq_cutoff_update(core, req);
ocf_core_seq_cutoff_update(core, req);
ocf_core_update_stats(core, io);
@@ -339,7 +339,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
fast = ocf_engine_hndl_fast_req(req);
if (fast != OCF_FAST_PATH_NO) {
ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
ocf_seq_cutoff_update(core, req);
ocf_core_seq_cutoff_update(core, req);
return 0;
}


@@ -10,6 +10,7 @@
#include "ocf_env.h"
#include "ocf_ctx_priv.h"
#include "ocf_volume_priv.h"
#include "ocf_seq_cutoff.h"
#define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) \
ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, ".%s" prefix, \
@@ -38,10 +39,10 @@ struct ocf_core_meta_config {
ocf_seq_no_t seq_no;
/* Sequential cutoff threshold (in bytes) */
uint32_t seq_cutoff_threshold;
env_atomic seq_cutoff_threshold;
/* Sequential cutoff policy */
ocf_seq_cutoff_policy seq_cutoff_policy;
env_atomic seq_cutoff_policy;
/* core object size in bytes */
uint64_t length;
@@ -69,7 +70,6 @@ struct ocf_core_meta_runtime {
} part_counters[OCF_IO_CLASS_MAX];
};
struct ocf_core {
struct ocf_volume front_volume;
struct ocf_volume volume;
@@ -77,11 +77,7 @@ struct ocf_core {
struct ocf_core_meta_config *conf_meta;
struct ocf_core_meta_runtime *runtime_meta;
struct {
uint64_t last;
uint64_t bytes;
int rw;
} seq_cutoff;
struct ocf_seq_cutoff seq_cutoff;
env_atomic flushed;

src/ocf_seq_cutoff.c (new file, 168 lines)

@@ -0,0 +1,168 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_seq_cutoff.h"
#include "ocf_cache_priv.h"
#include "ocf_priv.h"
#include "ocf/ocf_debug.h"
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN);
}
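/*
 * Streams are ordered by (rw, last): an incoming request looks up the stream
 * whose last byte position equals the request's start address in the same
 * IO direction.
 */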
static int ocf_seq_cutoff_stream_cmp(struct ocf_rb_node *n1,
struct ocf_rb_node *n2)
{
struct ocf_seq_cutoff_stream *stream1 = container_of(n1,
struct ocf_seq_cutoff_stream, node);
struct ocf_seq_cutoff_stream *stream2 = container_of(n2,
struct ocf_seq_cutoff_stream, node);
if (stream1->rw < stream2->rw)
return -1;
if (stream1->rw > stream2->rw)
return 1;
if (stream1->last < stream2->last)
return -1;
if (stream1->last > stream2->last)
return 1;
return 0;
}
void ocf_core_seq_cutoff_init(ocf_core_t core)
{
struct ocf_seq_cutoff_stream *stream;
int i;
ocf_core_log(core, log_info, "Sequential cutoff init\n");
env_rwlock_init(&core->seq_cutoff.lock);
ocf_rb_tree_init(&core->seq_cutoff.tree, ocf_seq_cutoff_stream_cmp);
INIT_LIST_HEAD(&core->seq_cutoff.lru);
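	/*
	 * Pre-populate the tree and the LRU list with all stream slots, so the
	 * IO path never allocates: a new stream recycles the least recently
	 * used slot in ocf_core_seq_cutoff_update().
	 */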
for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) {
stream = &core->seq_cutoff.streams[i];
stream->last = 0;
stream->bytes = 0;
stream->rw = 0;
ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
list_add_tail(&stream->list, &core->seq_cutoff.lru);
}
}
void ocf_dbg_get_seq_cutoff_status(ocf_core_t core,
struct ocf_dbg_seq_cutoff_status *status)
{
struct ocf_seq_cutoff_stream *stream;
uint32_t threshold;
int i = 0;
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(status);
threshold = ocf_core_get_seq_cutoff_threshold(core);
env_rwlock_read_lock(&core->seq_cutoff.lock);
list_for_each_entry(stream, &core->seq_cutoff.lru, list) {
status->streams[i].last = stream->last;
status->streams[i].bytes = stream->bytes;
status->streams[i].rw = stream->rw;
status->streams[i].active = (stream->bytes >= threshold);
i++;
}
env_rwlock_read_unlock(&core->seq_cutoff.lock);
}
bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
{
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core);
ocf_cache_t cache = ocf_core_get_cache(core);
struct ocf_seq_cutoff_stream item = {
.last = req->byte_position, .rw = req->rw
};
struct ocf_seq_cutoff_stream *stream;
struct ocf_rb_node *node;
bool result = false;
switch (policy) {
case ocf_seq_cutoff_policy_always:
break;
case ocf_seq_cutoff_policy_full:
if (ocf_seq_cutoff_is_on(cache))
break;
return false;
case ocf_seq_cutoff_policy_never:
return false;
default:
ENV_WARN(true, "Invalid sequential cutoff policy!");
return false;
}
env_rwlock_read_lock(&core->seq_cutoff.lock);
node = ocf_rb_tree_find(&core->seq_cutoff.tree, &item.node);
if (node) {
stream = container_of(node, struct ocf_seq_cutoff_stream, node);
if (stream->bytes + req->byte_length >= threshold)
result = true;
}
env_rwlock_read_unlock(&core->seq_cutoff.lock);
return result;
}
void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
struct ocf_seq_cutoff_stream item = {
.last = req->byte_position, .rw = req->rw
};
struct ocf_seq_cutoff_stream *stream;
struct ocf_rb_node *node;
bool can_update;
if (policy == ocf_seq_cutoff_policy_never)
return;
/* Update last accessed position and bytes counter */
env_rwlock_write_lock(&core->seq_cutoff.lock);
node = ocf_rb_tree_find(&core->seq_cutoff.tree, &item.node);
if (node) {
stream = container_of(node, struct ocf_seq_cutoff_stream, node);
item.last = req->byte_position + req->byte_length;
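		/* item.last holds the stream's would-be new key; if tree order
		 * permits an in-place key update, skip the remove + reinsert */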
can_update = ocf_rb_tree_can_update(&core->seq_cutoff.tree,
node, &item.node);
stream->last = req->byte_position + req->byte_length;
stream->bytes += req->byte_length;
if (!can_update) {
ocf_rb_tree_remove(&core->seq_cutoff.tree, node);
ocf_rb_tree_insert(&core->seq_cutoff.tree, node);
}
list_move_tail(&stream->list, &core->seq_cutoff.lru);
} else {
stream = list_first_entry(&core->seq_cutoff.lru,
struct ocf_seq_cutoff_stream, list);
ocf_rb_tree_remove(&core->seq_cutoff.tree, &stream->node);
stream->rw = req->rw;
stream->last = req->byte_position + req->byte_length;
stream->bytes = req->byte_length;
ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
list_move_tail(&stream->list, &core->seq_cutoff.lru);
}
env_rwlock_write_unlock(&core->seq_cutoff.lock);
}

src/ocf_seq_cutoff.h (new file, 35 lines)

@@ -0,0 +1,35 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_SEQ_CUTOFF_H__
#define __OCF_SEQ_CUTOFF_H__
#include "ocf/ocf.h"
#include "ocf_request.h"
#include "utils/utils_rbtree.h"
struct ocf_seq_cutoff_stream {
uint64_t last;
uint64_t bytes;
uint32_t rw : 1;
struct ocf_rb_node node;
struct list_head list;
};
struct ocf_seq_cutoff {
ocf_core_t core;
env_rwlock lock;
struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_MAX_STREAMS];
struct ocf_rb_tree tree;
struct list_head lru;
};
void ocf_core_seq_cutoff_init(ocf_core_t core);
bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req);
void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
#endif /* __OCF_SEQ_CUTOFF_H__ */
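These entry points are driven from the IO path: check before the cache mode is resolved, update once the request is classified. A condensed sketch of the call pattern, based on the call sites changed in this commit (the wrapper function itself is an assumption; see ocf_resolve_effective_cache_mode() and ocf_core_volume_submit_io() for the real ones):

/* Sketch: how the IO path consults the stream tracker */
static void classify_and_track(ocf_core_t core, struct ocf_request *req)
{
	if (ocf_core_seq_cutoff_check(core, req)) {
		/* Stream crossed the threshold: serve the request in pass-through */
		req->cache_mode = ocf_req_cache_mode_pt;
		req->seq_cutoff = 1;
	}

	/* Record the request so the stream state follows the IO */
	ocf_core_seq_cutoff_update(core, req);
}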

src/utils/utils_rbtree.c (new file, 411 lines)

@@ -0,0 +1,411 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "utils_rbtree.h"
void ocf_rb_tree_init(struct ocf_rb_tree *tree, ocf_rb_tree_node_cmp_cb cmp)
{
tree->root = NULL;
tree->cmp = cmp;
}
static void ocf_rb_tree_update_parent(struct ocf_rb_tree *tree,
struct ocf_rb_node *node, struct ocf_rb_node *old_node,
struct ocf_rb_node *new_node)
{
if (!node->parent)
tree->root = new_node;
else if (old_node == node->parent->left)
node->parent->left = new_node;
else if (old_node == node->parent->right)
node->parent->right = new_node;
}
static void ocf_rb_tree_update_children(struct ocf_rb_node *node)
{
if (node->left)
node->left->parent = node;
if (node->right)
node->right->parent = node;
}
static void ocf_rb_tree_rotate_left(struct ocf_rb_tree *tree,
struct ocf_rb_node *node)
{
struct ocf_rb_node *right = node->right;
node->right = right->left;
if (node->right)
node->right->parent = node;
right->parent = node->parent;
ocf_rb_tree_update_parent(tree, node, node, right);
right->left = node;
node->parent = right;
}
static void ocf_rb_tree_rotate_right(struct ocf_rb_tree *tree,
struct ocf_rb_node *node)
{
struct ocf_rb_node *left = node->left;
node->left = left->right;
if (node->left)
node->left->parent = node;
left->parent = node->parent;
ocf_rb_tree_update_parent(tree, node, node, left);
left->right = node;
node->parent = left;
}
static void ocf_rb_tree_fix_violation(struct ocf_rb_tree *tree,
struct ocf_rb_node *node)
{
struct ocf_rb_node *parent, *grandparent, *uncle;
int tmp;
while (node->red && node->parent && node->parent->red) {
parent = node->parent;
grandparent = parent->parent;
if (!grandparent)
break;
if (parent == grandparent->left) {
/* Parent is left child */
uncle = grandparent->right;
if (uncle && uncle->red) {
/* Uncle is red -> recolor */
grandparent->red = true;
parent->red = false;
uncle->red = false;
node = grandparent; /* Recheck grandparent */
} else if (node == parent->right) {
/* Node is right child -> rot left */
ocf_rb_tree_rotate_left(tree, parent);
node = parent;
parent = node->parent;
} else {
/* Node is left child -> rot right + recolor */
ocf_rb_tree_rotate_right(tree, grandparent);
tmp = parent->red;
parent->red = grandparent->red;
grandparent->red = tmp;
node = parent;
}
} else {
/* Parent is right child */
uncle = grandparent->left;
if (uncle && uncle->red) {
/* Uncle is red -> recolor */
grandparent->red = true;
parent->red = false;
uncle->red = false;
node = grandparent; /* Recheck grandparent */
} else if (node == parent->left) {
/* Node is left child -> rot right */
ocf_rb_tree_rotate_right(tree, parent);
node = parent;
parent = node->parent;
} else {
/* Node is right child -> rot left + recolor */
ocf_rb_tree_rotate_left(tree, grandparent);
tmp = parent->red;
parent->red = grandparent->red;
grandparent->red = tmp;
node = parent;
}
}
}
/* Final recolor */
tree->root->red = false;
}
void ocf_rb_tree_insert(struct ocf_rb_tree *tree, struct ocf_rb_node *node)
{
struct ocf_rb_node *iter, *new_iter;
int cmp;
node->left = NULL;
node->right = NULL;
if (!tree->root) {
node->red = false;
node->parent = NULL;
tree->root = node;
return;
}
for (new_iter = tree->root; new_iter;) {
iter = new_iter;
cmp = tree->cmp(node, iter);
new_iter = (cmp < 0) ? iter->left : iter->right;
}
node->red = true;
node->parent = iter;
if (cmp < 0)
iter->left = node;
else
iter->right = node;
ocf_rb_tree_fix_violation(tree, node);
}
static void ocf_rb_tree_swap(struct ocf_rb_tree *tree,
struct ocf_rb_node *node1, struct ocf_rb_node *node2)
{
struct ocf_rb_node tmp;
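	/*
	 * If the nodes are directly linked, pre-point that link at its owner so
	 * that after the struct copies below it refers to the swap partner
	 * instead of pointing back at the node itself.
	 */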
if (node1->left == node2)
node1->left = node1;
else if (node1->right == node2)
node1->right = node1;
else if (node1->parent == node2)
node1->parent = node1;
if (node2->left == node1)
node2->left = node2;
else if (node2->right == node1)
node2->right = node2;
else if (node2->parent == node1)
node2->parent = node2;
tmp = *node1;
*node1 = *node2;
*node2 = tmp;
ocf_rb_tree_update_parent(tree, node1, node2, node1);
ocf_rb_tree_update_parent(tree, node2, node1, node2);
ocf_rb_tree_update_children(node1);
ocf_rb_tree_update_children(node2);
}
static struct ocf_rb_node *ocf_rb_tree_successor(struct ocf_rb_node *node)
{
struct ocf_rb_node *succ;
if (!node->right)
return NULL;
for (succ = node->right; succ->left;)
succ = succ->left;
return succ;
}
static struct ocf_rb_node *ocf_rb_tree_predecessor(struct ocf_rb_node *node)
{
struct ocf_rb_node *pred;
if (!node->left)
return NULL;
for (pred = node->left; pred->right;)
pred = pred->right;
return pred;
}
static struct ocf_rb_node *ocf_rb_tree_bst_replacement(struct ocf_rb_node *node)
{
if (node->left && node->right)
return ocf_rb_tree_successor(node);
if (node->left)
return node->left;
if (node->right)
return node->right;
return NULL;
}
static struct ocf_rb_node *ocf_rb_tree_sibling(struct ocf_rb_node *node)
{
if (!node->parent)
return NULL;
return (node == node->parent->left) ?
node->parent->right : node->parent->left;
}
void ocf_rb_tree_fix_double_black(struct ocf_rb_tree *tree,
struct ocf_rb_node *node)
{
struct ocf_rb_node *sibling;
while (true) {
if (!node->parent) {
/* Reached root -> end */
break;
}
sibling = ocf_rb_tree_sibling(node);
if (!sibling) {
/* No sibling -> move up */
node = node->parent;
continue;
}
if (sibling->red) {
/* Sibling is red -> recolor, rot and repeat */
node->parent->red = true;
sibling->red = false;
if (sibling == node->parent->left)
ocf_rb_tree_rotate_right(tree, node->parent);
else
ocf_rb_tree_rotate_left(tree, node->parent);
continue;
}
if (sibling->left && sibling->left->red) {
/* Sibling has left red child -> recolor and rot */
if (sibling == node->parent->left) {
sibling->left->red = sibling->red;
sibling->red = node->parent->red;
ocf_rb_tree_rotate_right(tree, node->parent);
} else {
sibling->left->red = node->parent->red;
ocf_rb_tree_rotate_right(tree, sibling);
ocf_rb_tree_rotate_left(tree, node->parent);
}
node->parent->red = false;
break;
} else if (sibling->right && sibling->right->red) {
/* Sibling has right red child -> recolor and rot */
if (sibling == node->parent->left) {
sibling->right->red = node->parent->red;
ocf_rb_tree_rotate_left(tree, sibling);
ocf_rb_tree_rotate_right(tree, node->parent);
} else {
sibling->right->red = sibling->red;
sibling->red = node->parent->red;
ocf_rb_tree_rotate_left(tree, node->parent);
}
node->parent->red = false;
break;
} else {
/* Sibling has both black children */
sibling->red = true;
if (!node->parent->red) {
/* Parent is black -> move up */
node = node->parent;
continue;
}
/* Parent is red -> recolor */
node->parent->red = false;
break;
}
}
}
void ocf_rb_tree_remove(struct ocf_rb_tree *tree, struct ocf_rb_node *node)
{
struct ocf_rb_node *sibling, *rep;
while (true) {
sibling = ocf_rb_tree_sibling(node);
rep = ocf_rb_tree_bst_replacement(node);
if (!rep) {
/* Node has no children -> remove */
if (node == tree->root) {
tree->root = NULL;
} else {
if (!node->red)
ocf_rb_tree_fix_double_black(tree, node);
else if (sibling)
sibling->red = true;
ocf_rb_tree_update_parent(tree, node, node, NULL);
}
break;
}
if (!rep->left && !rep->right) {
/* BST replacement is leaf -> swap and remove */
ocf_rb_tree_swap(tree, node, rep);
if (!node->red)
ocf_rb_tree_fix_double_black(tree, node);
ocf_rb_tree_update_parent(tree, node, node, NULL);
break;
}
/* BST replacement has children -> swap and repeat */
ocf_rb_tree_swap(tree, node, rep);
}
}
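/*
 * Check whether node's key may be replaced with new_node's key in place:
 * descend to node comparing new_node along the way, then verify that the
 * new key still sorts between node's predecessor and successor.
 */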
bool ocf_rb_tree_can_update(struct ocf_rb_tree *tree,
struct ocf_rb_node *node, struct ocf_rb_node *new_node)
{
struct ocf_rb_node *iter = tree->root;
int cmp = 0;
while (iter) {
if (iter == node)
break;
cmp = tree->cmp(new_node, iter);
iter = (cmp < 0) ? iter->left : iter->right;
}
if (!iter)
return false;
cmp = tree->cmp(new_node, iter);
if (cmp < 0) {
iter = ocf_rb_tree_predecessor(iter);
if (!iter)
return true;
cmp = tree->cmp(new_node, iter);
return (cmp > 0);
}
if (cmp > 0) {
iter = ocf_rb_tree_successor(iter);
if (!iter)
return true;
cmp = tree->cmp(new_node, iter);
return (cmp < 0);
}
return true;
}
struct ocf_rb_node *ocf_rb_tree_find(struct ocf_rb_tree *tree,
struct ocf_rb_node *node)
{
struct ocf_rb_node *iter = tree->root;
int cmp = 0;
while (iter) {
cmp = tree->cmp(node, iter);
if (!cmp)
break;
iter = (cmp < 0) ? iter->left : iter->right;
}
return iter;
}

src/utils/utils_rbtree.h (new file, 38 lines)

@@ -0,0 +1,38 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_RBTREE_H__
#define __UTILS_RBTREE_H__
#include "ocf/ocf.h"
struct ocf_rb_node {
bool red;
struct ocf_rb_node *left;
struct ocf_rb_node *right;
struct ocf_rb_node *parent;
};
typedef int (*ocf_rb_tree_node_cmp_cb)(struct ocf_rb_node *n1,
struct ocf_rb_node *n2);
struct ocf_rb_tree {
struct ocf_rb_node *root;
ocf_rb_tree_node_cmp_cb cmp;
};
void ocf_rb_tree_init(struct ocf_rb_tree *tree, ocf_rb_tree_node_cmp_cb cmp);
void ocf_rb_tree_insert(struct ocf_rb_tree *tree, struct ocf_rb_node *node);
void ocf_rb_tree_remove(struct ocf_rb_tree *tree, struct ocf_rb_node *node);
bool ocf_rb_tree_can_update(struct ocf_rb_tree *tree,
struct ocf_rb_node *node, struct ocf_rb_node *new_node);
struct ocf_rb_node *ocf_rb_tree_find(struct ocf_rb_tree *tree,
struct ocf_rb_node *node);
#endif /* __UTILS_RBTREE_H__ */
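The tree is intrusive: users embed struct ocf_rb_node in their own type and recover it with container_of() in the cmp callback, exactly as ocf_seq_cutoff.c does above. A minimal sketch under that pattern (struct example, example_cmp and example_find are illustrative names, and container_of() is assumed to come from the env headers):

#include <stdint.h>
#include "utils/utils_rbtree.h"

struct example {
	uint64_t key;
	struct ocf_rb_node node;
};

static int example_cmp(struct ocf_rb_node *n1, struct ocf_rb_node *n2)
{
	struct example *e1 = container_of(n1, struct example, node);
	struct example *e2 = container_of(n2, struct example, node);

	/* Return <0, 0 or >0, as ocf_rb_tree_node_cmp_cb expects */
	return (e1->key > e2->key) - (e1->key < e2->key);
}

/* Lookup by key: fill a stack item and search with its embedded node */
static struct example *example_find(struct ocf_rb_tree *tree, uint64_t key)
{
	struct example item = { .key = key };
	struct ocf_rb_node *n = ocf_rb_tree_find(tree, &item.node);

	return n ? container_of(n, struct example, node) : NULL;
}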


@@ -287,6 +287,18 @@ class Cache:
if status:
raise OcfError("Error setting cache seq cut off policy", status)
def set_seq_cut_off_threshold(self, threshold: int):
self.write_lock()
status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_threshold_all(
self.cache_handle, threshold
)
self.write_unlock()
if status:
raise OcfError("Error setting cache seq cut off threshold", status)
def configure_device(
self, device, force=False, perform_test=True, cache_line_size=None
):
@@ -574,6 +586,8 @@ lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [c_void_p, c_uint32]
lib.ocf_mngt_cache_cleaning_set_policy.restype = c_int
lib.ocf_mngt_core_set_seq_cutoff_policy_all.argtypes = [c_void_p, c_uint32]
lib.ocf_mngt_core_set_seq_cutoff_policy_all.restype = c_int
lib.ocf_mngt_core_set_seq_cutoff_threshold_all.argtypes = [c_void_p, c_uint32]
lib.ocf_mngt_core_set_seq_cutoff_threshold_all.restype = c_int
lib.ocf_stats_collect_cache.argtypes = [
c_void_p,
c_void_p,


@@ -72,7 +72,7 @@ class OcfCompletion:
except KeyError:
raise KeyError(f"No completion argument {key} specified")
def __init__(self, completion_args: list):
def __init__(self, completion_args: list, context=None):
"""
Provide ctypes arg list, and optionally index of status argument in
completion function which will be extracted (default - last argument).
@@ -83,6 +83,7 @@
self.e = Event()
self.results = OcfCompletion.CompletionResult(completion_args)
self._as_parameter_ = self.callback
self.context = context
@property
def callback(self):


@@ -148,6 +148,61 @@ class Size:
else:
return "{} TiB".format(self.TiB)
def __repr__(self):
return f"Size({self.bytes})"
def __eq__(self, other):
return self.bytes == other.bytes
def __add__(self, other):
return Size(self.bytes + other.bytes)
def __sub__(self, other):
return Size(self.bytes - other.bytes)
def __mul__(self, other):
return Size(self.bytes * int(other))
def __truediv__(self, other):
return Size(self.bytes / int(other))
def __floordiv__(self, other):
return Size(self.bytes // int(other))
def __rmul__(self, other):
return Size(self.bytes * int(other))
def __rtruediv__(self, other):
return Size(int(other) / self.bytes)
def __rfloordiv__(self, other):
return Size(int(other) // self.bytes)
def __iadd__(self, other):
self.bytes += other.bytes
return self
def __isub__(self, other):
self.bytes -= other.bytes
return self
def __imul__(self, other):
self.bytes *= int(other)
return self
def __itruediv__(self, other):
self.bytes /= int(other)
return self
def __ifloordiv__(self, other):
self.bytes //= int(other)
return self
def print_structure(struct, indent=0):
print(struct)


@@ -0,0 +1,142 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from ctypes import c_int
from random import shuffle, choice
from time import sleep
import pytest
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import Volume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
from pyocf.types.shared import OcfCompletion, SeqCutOffPolicy
class Stream:
def __init__(self, last, length, direction):
self.last = last
self.length = length
self.direction = direction
def __repr__(self):
return f"{self.last} {self.length} {self.direction}"
def _io(core, addr, size, direction, context):
comp = OcfCompletion([("error", c_int)], context=context)
data = Data(size)
io = core.new_io(core.cache.get_default_queue(), addr, size, direction, 0, 0)
io.set_data(data)
io.callback = comp.callback
io.submit()
return comp
def io_to_streams(core, streams, io_size):
completions = []
for stream in streams:
completions.append(
_io(core, stream.last, io_size, stream.direction, context=(io_size, stream))
)
for c in completions:
c.wait()
io_size, stream = c.context
stream.last += io_size
stream.length += io_size
assert not c.results["error"], "No IO should fail"
def test_seq_cutoff_max_streams(pyocf_ctx):
"""
Test the number of sequential streams tracked by OCF.
MAX_STREAMS is the maximum number of streams which OCF is able to track.
1. Issue MAX_STREAMS requests (writes or reads) to the cache, each 1 sector shorter than
the seq cutoff threshold
2. Issue MAX_STREAMS-1 requests continuing the streams from 1. to surpass the threshold and
check that cutoff was triggered (requests used the PT engine)
3. Issue a single request to a stream not used in 1. or 2. and check that it was handled by
the cache
4. Issue a single request to the stream least recently used in 1. and 2. and check that it
was handled by the cache. It should no longer be tracked by OCF, because the request in
step 3 overflowed the OCF tracking structure
"""
MAX_STREAMS = 256
TEST_STREAMS = MAX_STREAMS + 1 # Number of streams used by test - one more than OCF can track
core_size = Size.from_MiB(200)
threshold = Size.from_KiB(4)
streams = [
Stream(
last=Size((stream_no * int(core_size) // TEST_STREAMS), sector_aligned=True),
length=Size(0),
direction=choice(list(IoDir)),
)
for stream_no in range(TEST_STREAMS)
] # Generate MAX_STREAMS + 1 non-overlapping streams
# Remove one stream - this is the one we are going to use to overflow OCF tracking structure
# in step 3
non_active_stream = choice(streams)
streams.remove(non_active_stream)
cache = Cache.start_on_device(Volume(Size.from_MiB(200)), cache_mode=CacheMode.WT)
core = Core.using_device(Volume(core_size))
cache.add_core(core)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.ALWAYS)
cache.set_seq_cut_off_threshold(threshold)
# STEP 1
shuffle(streams)
io_size = threshold - Size.from_sector(1)
io_to_streams(core, streams, io_size)
stats = cache.get_stats()
assert (
stats["req"]["serviced"]["value"] == stats["req"]["total"]["value"] == len(streams)
), "All request should be serviced - no cutoff"
old_serviced = len(streams)
# STEP 2
lru_stream = streams[0]
streams.remove(lru_stream)
shuffle(streams)
io_to_streams(core, streams, Size.from_sector(1))
stats = cache.get_stats()
assert (
stats["req"]["serviced"]["value"] == old_serviced
), "Serviced requests stat should not increase - cutoff engaged for all"
assert stats["req"]["wr_pt"]["value"] + stats["req"]["rd_pt"]["value"] == len(
streams
), "All streams should be handled in PT - cutoff engaged for all streams"
# STEP 3
io_to_streams(core, [non_active_stream], Size.from_sector(1))
stats = cache.get_stats()
assert (
stats["req"]["serviced"]["value"] == old_serviced + 1
), "This request should be serviced by cache - no cutoff for inactive stream"
# STEP 4
io_to_streams(core, [lru_stream], Size.from_sector(1))
stats = cache.get_stats()
assert (
stats["req"]["serviced"]["value"] == old_serviced + 2
), "This request should be serviced by cache - lru_stream should be no longer tracked"