Merge pull request #354 from robertbaldyga/multistream-seq-cutoff

Introduce multi-stream sequential cutoff
This commit is contained in:
Robert Baldyga
2020-04-22 15:35:42 +02:00
committed by GitHub
15 changed files with 909 additions and 88 deletions

View File

@@ -7,6 +7,7 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_seq_cutoff.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_rd.h"
@@ -161,68 +162,6 @@ bool ocf_fallback_pt_is_on(ocf_cache_t cache)
cache->fallback_pt_error_threshold);
}
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN);
}
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
switch (policy) {
case ocf_seq_cutoff_policy_always:
break;
case ocf_seq_cutoff_policy_full:
if (ocf_seq_cutoff_is_on(cache))
break;
return false;
case ocf_seq_cutoff_policy_never:
return false;
default:
ENV_WARN(true, "Invalid sequential cutoff policy!");
return false;
}
if (dir == core->seq_cutoff.rw &&
core->seq_cutoff.last == addr &&
core->seq_cutoff.bytes + bytes >=
ocf_core_get_seq_cutoff_threshold(core)) {
return true;
}
return false;
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
/*
* If IO is not consequent or has another direction,
* reset sequential cutoff state.
*/
if (req->byte_position != core->seq_cutoff.last ||
req->rw != core->seq_cutoff.rw) {
core->seq_cutoff.rw = req->rw;
core->seq_cutoff.bytes = 0;
}
/* Update last accessed position and bytes counter */
core->seq_cutoff.last = req->byte_position + req->byte_length;
core->seq_cutoff.bytes += req->byte_length;
}
void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
ocf_core_t core, struct ocf_request *req)
{
@@ -242,8 +181,7 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
return;
}
if (ocf_seq_cutoff_check(core, req->rw, req->byte_position,
req->byte_length)) {
if (ocf_core_seq_cutoff_check(core, req)) {
req->cache_mode = ocf_req_cache_mode_pt;
req->seq_cutoff = 1;
return;

View File

@@ -64,13 +64,8 @@ static inline bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)
(ocf_cache_mode_t)mode);
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
bool ocf_fallback_pt_is_on(ocf_cache_t cache);
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes);
struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
struct ocf_queue *q);

View File

@@ -406,6 +406,8 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
context->flags.clean_pol_added = true;
}
ocf_core_seq_cutoff_init(core);
/* When adding new core to cache, allocate stat counters */
core->counters =
env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
@@ -427,8 +429,10 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
core->opened = true;
/* Set default cache parameters for sequential */
core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;
env_atomic_set(&core->conf_meta->seq_cutoff_policy,
ocf_seq_cutoff_policy_default);
env_atomic_set(&core->conf_meta->seq_cutoff_threshold,
cfg->seq_cutoff_threshold);
/* Add core sequence number for atomic metadata matching */
core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
@@ -855,7 +859,7 @@ int ocf_mngt_core_get_user_metadata(ocf_core_t core, void *data, size_t size)
static int _cache_mngt_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx)
{
uint32_t threshold = *(uint32_t*) cntx;
uint32_t threshold_old = core->conf_meta->seq_cutoff_threshold;
uint32_t threshold_old = ocf_core_get_seq_cutoff_threshold(core);
if (threshold_old == threshold) {
ocf_core_log(core, log_info,
@@ -863,7 +867,8 @@ static int _cache_mngt_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx
"already set\n", threshold);
return 0;
}
core->conf_meta->seq_cutoff_threshold = threshold;
env_atomic_set(&core->conf_meta->seq_cutoff_threshold, threshold);
ocf_core_log(core, log_info, "Changing sequential cutoff "
"threshold from %u to %u bytes successful\n",
@@ -916,7 +921,7 @@ static const char *_cache_mngt_seq_cutoff_policy_get_name(
static int _cache_mngt_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
{
ocf_seq_cutoff_policy policy = *(ocf_seq_cutoff_policy*) cntx;
uint32_t policy_old = core->conf_meta->seq_cutoff_policy;
uint32_t policy_old = ocf_core_get_seq_cutoff_policy(core);
if (policy_old == policy) {
ocf_core_log(core, log_info,
@@ -931,7 +936,7 @@ static int _cache_mngt_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx)
return -OCF_ERR_INVAL;
}
core->conf_meta->seq_cutoff_policy = policy;
env_atomic_set(&core->conf_meta->seq_cutoff_policy, policy);
ocf_core_log(core, log_info,
"Changing sequential cutoff policy from %s to %s\n",

View File

@@ -106,12 +106,12 @@ int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
{
return core->conf_meta->seq_cutoff_threshold;
return env_atomic_read(&core->conf_meta->seq_cutoff_threshold);
}
ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
{
return core->conf_meta->seq_cutoff_policy;
return env_atomic_read(&core->conf_meta->seq_cutoff_policy);
}
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
@@ -249,7 +249,7 @@ void ocf_core_volume_submit_io(struct ocf_io *io)
ocf_resolve_effective_cache_mode(cache, core, req);
ocf_seq_cutoff_update(core, req);
ocf_core_seq_cutoff_update(core, req);
ocf_core_update_stats(core, io);
@@ -339,7 +339,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
fast = ocf_engine_hndl_fast_req(req);
if (fast != OCF_FAST_PATH_NO) {
ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
ocf_seq_cutoff_update(core, req);
ocf_core_seq_cutoff_update(core, req);
return 0;
}

View File

@@ -10,6 +10,7 @@
#include "ocf_env.h"
#include "ocf_ctx_priv.h"
#include "ocf_volume_priv.h"
#include "ocf_seq_cutoff.h"
#define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) \
ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, ".%s" prefix, \
@@ -38,10 +39,10 @@ struct ocf_core_meta_config {
ocf_seq_no_t seq_no;
/* Sequential cutoff threshold (in bytes) */
uint32_t seq_cutoff_threshold;
env_atomic seq_cutoff_threshold;
/* Sequential cutoff policy */
ocf_seq_cutoff_policy seq_cutoff_policy;
env_atomic seq_cutoff_policy;
/* core object size in bytes */
uint64_t length;
@@ -69,7 +70,6 @@ struct ocf_core_meta_runtime {
} part_counters[OCF_IO_CLASS_MAX];
};
struct ocf_core {
struct ocf_volume front_volume;
struct ocf_volume volume;
@@ -77,11 +77,7 @@ struct ocf_core {
struct ocf_core_meta_config *conf_meta;
struct ocf_core_meta_runtime *runtime_meta;
struct {
uint64_t last;
uint64_t bytes;
int rw;
} seq_cutoff;
struct ocf_seq_cutoff seq_cutoff;
env_atomic flushed;

168
src/ocf_seq_cutoff.c Normal file
View File

@@ -0,0 +1,168 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_seq_cutoff.h"
#include "ocf_cache_priv.h"
#include "ocf_priv.h"
#include "ocf/ocf_debug.h"
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
/* Tell whether the "full" cutoff policy should currently be active:
 * only with a cache device attached and the freelist (nearly) exhausted.
 */
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
	uint32_t free_cachelines;

	if (!ocf_cache_is_device_attached(cache))
		return false;

	free_cachelines = ocf_freelist_num_free(cache->freelist);

	return free_cachelines <= SEQ_CUTOFF_FULL_MARGIN;
}
/* Rb-tree comparator for streams: order by IO direction first,
 * then by the last accessed byte position.
 */
static int ocf_seq_cutoff_stream_cmp(struct ocf_rb_node *n1,
		struct ocf_rb_node *n2)
{
	struct ocf_seq_cutoff_stream *s1 = container_of(n1,
			struct ocf_seq_cutoff_stream, node);
	struct ocf_seq_cutoff_stream *s2 = container_of(n2,
			struct ocf_seq_cutoff_stream, node);

	if (s1->rw != s2->rw)
		return (s1->rw < s2->rw) ? -1 : 1;

	if (s1->last != s2->last)
		return (s1->last < s2->last) ? -1 : 1;

	return 0;
}
/* Initialize per-core sequential cutoff state: lock, stream lookup tree
 * and the LRU list used to recycle stream slots.
 */
void ocf_core_seq_cutoff_init(ocf_core_t core)
{
	struct ocf_seq_cutoff_stream *stream;
	int i;

	/* Fix: log message previously misspelled "Seqential" */
	ocf_core_log(core, log_info, "Sequential cutoff init\n");

	env_rwlock_init(&core->seq_cutoff.lock);
	ocf_rb_tree_init(&core->seq_cutoff.tree, ocf_seq_cutoff_stream_cmp);
	INIT_LIST_HEAD(&core->seq_cutoff.lru);

	/*
	 * Pre-populate the fixed stream pool: every stream starts zeroed,
	 * is inserted into the lookup tree and queued on the LRU list so
	 * ocf_core_seq_cutoff_update() can recycle the oldest one.
	 */
	for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) {
		stream = &core->seq_cutoff.streams[i];
		stream->last = 0;
		stream->bytes = 0;
		stream->rw = 0;
		ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
		list_add_tail(&stream->list, &core->seq_cutoff.lru);
	}
}
/* Debug helper: snapshot all sequential cutoff streams (in LRU order)
 * into the caller-provided status structure.
 */
void ocf_dbg_get_seq_cutoff_status(ocf_core_t core,
		struct ocf_dbg_seq_cutoff_status *status)
{
	struct ocf_seq_cutoff_stream *stream;
	uint32_t threshold;
	int idx = 0;

	OCF_CHECK_NULL(core);
	OCF_CHECK_NULL(status);

	threshold = ocf_core_get_seq_cutoff_threshold(core);

	/* Copy each stream under the read lock for a consistent view */
	env_rwlock_read_lock(&core->seq_cutoff.lock);
	list_for_each_entry(stream, &core->seq_cutoff.lru, list) {
		status->streams[idx].last = stream->last;
		status->streams[idx].bytes = stream->bytes;
		status->streams[idx].rw = stream->rw;
		/* A stream is "active" once it reached the cutoff threshold */
		status->streams[idx].active = (stream->bytes >= threshold);
		idx++;
	}
	env_rwlock_read_unlock(&core->seq_cutoff.lock);
}
/* Decide whether the request should bypass the cache due to sequential
 * cutoff: the active policy must allow it and the request must continue
 * a tracked stream that would reach the configured threshold.
 */
bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
{
	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
	uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core);
	ocf_cache_t cache = ocf_core_get_cache(core);
	struct ocf_seq_cutoff_stream lookup = {
		.last = req->byte_position, .rw = req->rw
	};
	struct ocf_seq_cutoff_stream *stream;
	struct ocf_rb_node *node;
	bool cutoff = false;

	/* Policy gate: decide whether cutoff applies at all */
	if (policy == ocf_seq_cutoff_policy_never)
		return false;

	if (policy != ocf_seq_cutoff_policy_always &&
			policy != ocf_seq_cutoff_policy_full) {
		ENV_WARN(true, "Invalid sequential cutoff policy!");
		return false;
	}

	/* "full" only kicks in when the cache is (nearly) out of free lines */
	if (policy == ocf_seq_cutoff_policy_full &&
			!ocf_seq_cutoff_is_on(cache))
		return false;

	/*
	 * Look up a stream whose end position matches this request's start;
	 * cutoff triggers once the stream would reach the threshold.
	 */
	env_rwlock_read_lock(&core->seq_cutoff.lock);
	node = ocf_rb_tree_find(&core->seq_cutoff.tree, &lookup.node);
	if (node) {
		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
		if (stream->bytes + req->byte_length >= threshold)
			cutoff = true;
	}
	env_rwlock_read_unlock(&core->seq_cutoff.lock);

	return cutoff;
}
/* Record the request in the per-core stream tracker: either extend the
 * stream it continues, or recycle the least recently used stream slot.
 */
void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
	struct ocf_seq_cutoff_stream item = {
		.last = req->byte_position, .rw = req->rw
	};
	struct ocf_seq_cutoff_stream *stream;
	struct ocf_rb_node *node;
	bool can_update;

	/* With cutoff disabled there is no point tracking streams */
	if (policy == ocf_seq_cutoff_policy_never)
		return;

	/* Update last accessed position and bytes counter */
	env_rwlock_write_lock(&core->seq_cutoff.lock);
	node = ocf_rb_tree_find(&core->seq_cutoff.tree, &item.node);
	if (node) {
		/* Request continues an existing stream */
		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
		/* The stream's tree key ("last") is about to change; ask the
		 * tree first whether the node may keep its position, since
		 * the answer depends on its current in-order neighbors.
		 */
		item.last = req->byte_position + req->byte_length;
		can_update = ocf_rb_tree_can_update(&core->seq_cutoff.tree,
				node, &item.node);
		stream->last = req->byte_position + req->byte_length;
		stream->bytes += req->byte_length;
		if (!can_update) {
			/* New key violates BST order in place -> reinsert */
			ocf_rb_tree_remove(&core->seq_cutoff.tree, node);
			ocf_rb_tree_insert(&core->seq_cutoff.tree, node);
		}
		list_move_tail(&stream->list, &core->seq_cutoff.lru);
	} else {
		/* No matching stream -> recycle the least recently used one */
		stream = list_first_entry(&core->seq_cutoff.lru,
				struct ocf_seq_cutoff_stream, list);
		ocf_rb_tree_remove(&core->seq_cutoff.tree, &stream->node);
		stream->rw = req->rw;
		stream->last = req->byte_position + req->byte_length;
		stream->bytes = req->byte_length;
		ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
		list_move_tail(&stream->list, &core->seq_cutoff.lru);
	}
	env_rwlock_write_unlock(&core->seq_cutoff.lock);
}

35
src/ocf_seq_cutoff.h Normal file
View File

@@ -0,0 +1,35 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_SEQ_CUTOFF_H__
#define __OCF_SEQ_CUTOFF_H__
#include "ocf/ocf.h"
#include "ocf_request.h"
#include "utils/utils_rbtree.h"
/* A single tracked IO stream. Keyed in the rb-tree by (rw, last). */
struct ocf_seq_cutoff_stream {
	/* Byte position right after the last IO belonging to this stream */
	uint64_t last;
	/* Total bytes accumulated by consecutive IOs of this stream */
	uint64_t bytes;
	/* IO direction (single bit - read or write) of the stream */
	uint32_t rw : 1;
	/* Node in the per-core stream lookup tree */
	struct ocf_rb_node node;
	/* Entry in the per-core LRU list used for stream recycling */
	struct list_head list;
};

/* Per-core multi-stream sequential cutoff state */
struct ocf_seq_cutoff {
	ocf_core_t core;
	/* Protects tree and lru below */
	env_rwlock lock;
	/* Fixed pool of stream descriptors */
	struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_MAX_STREAMS];
	/* Streams indexed by (rw, last) for fast continuation lookup */
	struct ocf_rb_tree tree;
	/* Least recently used stream is recycled first */
	struct list_head lru;
};

void ocf_core_seq_cutoff_init(ocf_core_t core);

bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req);

void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);

#endif /* __OCF_SEQ_CUTOFF_H__ */

411
src/utils/utils_rbtree.c Normal file
View File

@@ -0,0 +1,411 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "utils_rbtree.h"
/* Set up an empty tree that orders nodes with the given comparator */
void ocf_rb_tree_init(struct ocf_rb_tree *tree, ocf_rb_tree_node_cmp_cb cmp)
{
	tree->cmp = cmp;
	tree->root = NULL;
}
/* Make node's parent (or the tree root) point at new_node instead of
 * old_node. The parent link is taken from "node".
 */
static void ocf_rb_tree_update_parent(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node, struct ocf_rb_node *old_node,
		struct ocf_rb_node *new_node)
{
	struct ocf_rb_node *parent = node->parent;

	/* Node sits at the top -> new node becomes the root */
	if (!parent) {
		tree->root = new_node;
		return;
	}

	/* Redirect whichever child link of the parent pointed at old_node */
	if (parent->left == old_node)
		parent->left = new_node;
	else if (parent->right == old_node)
		parent->right = new_node;
}
/* Re-point both children (if present) back at their new parent */
static void ocf_rb_tree_update_children(struct ocf_rb_node *node)
{
	struct ocf_rb_node *child;

	child = node->left;
	if (child)
		child->parent = node;

	child = node->right;
	if (child)
		child->parent = node;
}
/* Left-rotate around node: node's right child takes node's place and
 * node becomes that child's left child. All parent/root links are kept
 * consistent.
 */
static void ocf_rb_tree_rotate_left(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node)
{
	struct ocf_rb_node *right = node->right;

	/* Right child's left subtree moves under node */
	node->right = right->left;
	if (node->right)
		node->right->parent = node;

	/* Right child replaces node at node's parent (or as root) */
	right->parent = node->parent;
	ocf_rb_tree_update_parent(tree, node, node, right);

	right->left = node;
	node->parent = right;
}
/* Right-rotate around node: node's left child takes node's place and
 * node becomes that child's right child. Mirror of rotate_left.
 */
static void ocf_rb_tree_rotate_right(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node)
{
	struct ocf_rb_node *left = node->left;

	/* Left child's right subtree moves under node */
	node->left = left->right;
	if (node->left)
		node->left->parent = node;

	/* Left child replaces node at node's parent (or as root) */
	left->parent = node->parent;
	ocf_rb_tree_update_parent(tree, node, node, left);

	left->right = node;
	node->parent = left;
}
/* Restore red-black properties after inserting a red node: walk up the
 * tree recoloring and rotating while two consecutive red nodes exist.
 */
static void ocf_rb_tree_fix_violation(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node)
{
	struct ocf_rb_node *parent, *grandparent, *uncle;
	int tmp;

	while (node->red && node->parent && node->parent->red) {
		parent = node->parent;
		grandparent = parent->parent;

		if (!grandparent)
			break;

		if (parent == grandparent->left) {
			/* Parent is left child */
			uncle = grandparent->right;
			if (uncle && uncle->red) {
				/* Uncle is red -> recolor */
				grandparent->red = true;
				parent->red = false;
				uncle->red = false;
				node = grandparent; /* Recheck grandparent */
			} else if (node == parent->right) {
				/* Node is right child -> rot left */
				ocf_rb_tree_rotate_left(tree, parent);
				node = parent;
				parent = node->parent;
			} else {
				/* Node is left child -> rot right + recolor */
				ocf_rb_tree_rotate_right(tree, grandparent);
				tmp = parent->red;
				parent->red = grandparent->red;
				grandparent->red = tmp;
				node = parent;
			}
		} else {
			/* Parent is right child */
			uncle = grandparent->left;
			if (uncle && uncle->red) {
				/* Uncle is red -> recolor */
				grandparent->red = true;
				parent->red = false;
				uncle->red = false;
				node = grandparent; /* Recheck grandparent */
			} else if (node == parent->left) {
				/* Node is left child -> rot right */
				ocf_rb_tree_rotate_right(tree, parent);
				node = parent;
				parent = node->parent;
			} else {
				/* Node is right child -> rot left + recolor */
				ocf_rb_tree_rotate_left(tree, grandparent);
				tmp = parent->red;
				parent->red = grandparent->red;
				grandparent->red = tmp;
				node = parent;
			}
		}
	}

	/* Final recolor - the root is always black */
	tree->root->red = false;
}
/* Insert node into the tree and rebalance. Duplicate keys are allowed
 * (cmp == 0 descends to the right, like cmp > 0).
 */
void ocf_rb_tree_insert(struct ocf_rb_tree *tree, struct ocf_rb_node *node)
{
	struct ocf_rb_node *iter, *new_iter;
	int cmp;

	node->left = NULL;
	node->right = NULL;

	/* Empty tree -> node becomes a black root */
	if (!tree->root) {
		node->red = false;
		node->parent = NULL;
		tree->root = node;
		return;
	}

	/* Standard BST descent; iter ends up as the leaf parent-to-be,
	 * and cmp holds the result of the last comparison against it.
	 */
	for (new_iter = tree->root; new_iter;) {
		iter = new_iter;
		cmp = tree->cmp(node, iter);
		new_iter = (cmp < 0) ? iter->left : iter->right;
	}

	/* New nodes start red; fix-up restores red-black invariants */
	node->red = true;
	node->parent = iter;

	if (cmp < 0)
		iter->left = node;
	else
		iter->right = node;

	ocf_rb_tree_fix_violation(tree, node);
}
/* Exchange the positions of node1 and node2 within the tree (including
 * color), keeping all parent/child links consistent. Handles the tricky
 * case where one node is a direct relative of the other.
 */
static void ocf_rb_tree_swap(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node1, struct ocf_rb_node *node2)
{
	struct ocf_rb_node tmp;

	/* If the nodes are adjacent, make each node's link to the other
	 * point to itself, so that after the struct copy below the link
	 * ends up correctly referring to the other node.
	 */
	if (node1->left == node2)
		node1->left = node1;
	else if (node1->right == node2)
		node1->right = node1;
	else if (node1->parent == node2)
		node1->parent = node1;

	if (node2->left == node1)
		node2->left = node2;
	else if (node2->right == node1)
		node2->right = node2;
	else if (node2->parent == node1)
		node2->parent = node2;

	/* Swap the full node contents (links + color) */
	tmp = *node1;
	*node1 = *node2;
	*node2 = tmp;

	/* Re-attach surrounding nodes to the swapped pair */
	ocf_rb_tree_update_parent(tree, node1, node2, node1);
	ocf_rb_tree_update_parent(tree, node2, node1, node2);
	ocf_rb_tree_update_children(node1);
	ocf_rb_tree_update_children(node2);
}
/* In-order successor within the subtree: the leftmost node of the right
 * subtree, or NULL if the node has no right child.
 */
static struct ocf_rb_node *ocf_rb_tree_successor(struct ocf_rb_node *node)
{
	struct ocf_rb_node *succ = node->right;

	if (!succ)
		return NULL;

	while (succ->left)
		succ = succ->left;

	return succ;
}
/* In-order predecessor within the subtree: the rightmost node of the
 * left subtree, or NULL if the node has no left child.
 */
static struct ocf_rb_node *ocf_rb_tree_predecessor(struct ocf_rb_node *node)
{
	struct ocf_rb_node *pred = node->left;

	if (!pred)
		return NULL;

	while (pred->right)
		pred = pred->right;

	return pred;
}
/* Pick the node that would replace this one in a plain BST deletion:
 * the in-order successor when both children exist, otherwise the single
 * child, otherwise NULL for a leaf.
 */
static struct ocf_rb_node *ocf_rb_tree_bst_replacement(struct ocf_rb_node *node)
{
	if (node->left && node->right)
		return ocf_rb_tree_successor(node);

	if (node->left)
		return node->left;

	/* Right child if present, NULL for a leaf */
	return node->right;
}
/* Return the other child of node's parent, or NULL for the root */
static struct ocf_rb_node *ocf_rb_tree_sibling(struct ocf_rb_node *node)
{
	struct ocf_rb_node *parent = node->parent;

	if (!parent)
		return NULL;

	if (parent->left == node)
		return parent->right;

	return parent->left;
}
/* Resolve a "double black" introduced by deleting a black node: walk up
 * the tree recoloring/rotating based on the sibling's color and the
 * colors of the sibling's children, until the extra black is absorbed.
 */
void ocf_rb_tree_fix_double_black(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node)
{
	struct ocf_rb_node *sibling;

	while (true) {
		if (!node->parent) {
			/* Reached root -> end */
			break;
		}

		sibling = ocf_rb_tree_sibling(node);

		if (!sibling) {
			/* No sibling -> move up */
			node = node->parent;
			continue;
		}

		if (sibling->red) {
			/* Sibling is red -> recolor, rot and repeat */
			node->parent->red = true;
			sibling->red = false;
			if (sibling == node->parent->left)
				ocf_rb_tree_rotate_right(tree, node->parent);
			else
				ocf_rb_tree_rotate_left(tree, node->parent);
			continue;
		}

		if (sibling->left && sibling->left->red) {
			/* Sibling has left red child -> recolor and rot */
			if (sibling == node->parent->left) {
				sibling->left->red = sibling->red;
				sibling->red = node->parent->red;
				ocf_rb_tree_rotate_right(tree, node->parent);
			} else {
				sibling->left->red = node->parent->red;
				ocf_rb_tree_rotate_right(tree, sibling);
				ocf_rb_tree_rotate_left(tree, node->parent);
			}
			node->parent->red = false;
			break;
		} else if (sibling->right && sibling->right->red) {
			/* Sibling has right red child -> recolor and rot */
			if (sibling == node->parent->left) {
				sibling->right->red = node->parent->red;
				ocf_rb_tree_rotate_left(tree, sibling);
				ocf_rb_tree_rotate_right(tree, node->parent);
			} else {
				sibling->right->red = sibling->red;
				sibling->red = node->parent->red;
				ocf_rb_tree_rotate_left(tree, node->parent);
			}
			node->parent->red = false;
			break;
		} else {
			/* Sibling has both black children */
			sibling->red = true;
			if (!node->parent->red) {
				/* Parent is black -> move up */
				node = node->parent;
				continue;
			}
			/* Parent is red -> recolor */
			node->parent->red = false;
			break;
		}
	}
}
/* Remove node from the tree and rebalance. The node is repeatedly
 * swapped with its BST replacement until it has at most leaf children,
 * then detached with red-black fix-up as needed.
 */
void ocf_rb_tree_remove(struct ocf_rb_tree *tree, struct ocf_rb_node *node)
{
	struct ocf_rb_node *sibling, *rep;

	while (true) {
		sibling = ocf_rb_tree_sibling(node);
		rep = ocf_rb_tree_bst_replacement(node);
		if (!rep) {
			/* Node has no children -> remove */
			if (node == tree->root) {
				tree->root = NULL;
			} else {
				/* Removing a black leaf creates a double
				 * black that must be fixed up.
				 */
				if (!node->red)
					ocf_rb_tree_fix_double_black(tree, node);
				else if (sibling)
					sibling->red = true;
				ocf_rb_tree_update_parent(tree, node, node, NULL);
			}
			break;
		}

		/* Fix: was "!rep->left & !rep->right" - bitwise AND applied
		 * to logical operands; use the logical operator.
		 */
		if (!rep->left && !rep->right) {
			/* BST replacement is leaf -> swap and remove */
			ocf_rb_tree_swap(tree, node, rep);
			if (!node->red)
				ocf_rb_tree_fix_double_black(tree, node);
			ocf_rb_tree_update_parent(tree, node, node, NULL);
			break;
		}

		/* BST replacement has children -> swap and repeat */
		ocf_rb_tree_swap(tree, node, rep);
	}
}
/* Check whether node's key may be changed to new_node's key without
 * moving the node: true iff searching for the new key still leads to
 * node's position and the new key sorts between node's in-order
 * neighbors, so the BST invariant holds in place.
 */
bool ocf_rb_tree_can_update(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node, struct ocf_rb_node *new_node)
{
	struct ocf_rb_node *iter = tree->root;
	int cmp = 0;

	/* Walk from the root towards node using the NEW key */
	while (iter) {
		if (iter == node)
			break;

		cmp = tree->cmp(new_node, iter);
		iter = (cmp < 0) ? iter->left : iter->right;
	}

	/* Search with the new key diverged before reaching node */
	if (!iter)
		return false;

	cmp = tree->cmp(new_node, iter);

	if (cmp < 0) {
		/* New key is smaller -> must stay above the predecessor */
		iter = ocf_rb_tree_predecessor(iter);
		if (!iter)
			return true;
		cmp = tree->cmp(new_node, iter);
		return (cmp > 0);
	}

	if (cmp > 0) {
		/* New key is greater -> must stay below the successor */
		iter = ocf_rb_tree_successor(iter);
		if (!iter)
			return true;
		cmp = tree->cmp(new_node, iter);
		return (cmp < 0);
	}

	/* New key equals the current key -> position unchanged */
	return true;
}
/* Find a node comparing equal to the given key node; NULL if absent */
struct ocf_rb_node *ocf_rb_tree_find(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node)
{
	struct ocf_rb_node *curr;
	int result;

	/* Standard BST lookup driven by the tree's comparator */
	for (curr = tree->root; curr;) {
		result = tree->cmp(node, curr);
		if (result == 0)
			break;
		curr = (result < 0) ? curr->left : curr->right;
	}

	return curr;
}

38
src/utils/utils_rbtree.h Normal file
View File

@@ -0,0 +1,38 @@
/*
* Copyright(c) 2020 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_RBTREE_H__
#define __UTILS_RBTREE_H__
#include "ocf/ocf.h"
/* Intrusive red-black tree node; embed it in the tracked structure and
 * recover the container with container_of() in the comparator.
 */
struct ocf_rb_node {
	bool red;
	struct ocf_rb_node *left;
	struct ocf_rb_node *right;
	struct ocf_rb_node *parent;
};

/* Node comparator: negative / zero / positive, memcmp()-style */
typedef int (*ocf_rb_tree_node_cmp_cb)(struct ocf_rb_node *n1,
		struct ocf_rb_node *n2);

struct ocf_rb_tree {
	struct ocf_rb_node *root;
	ocf_rb_tree_node_cmp_cb cmp;
};

/* Initialize an empty tree with the given comparator */
void ocf_rb_tree_init(struct ocf_rb_tree *tree, ocf_rb_tree_node_cmp_cb cmp);

/* Insert node and rebalance; duplicate keys are allowed */
void ocf_rb_tree_insert(struct ocf_rb_tree *tree, struct ocf_rb_node *node);

/* Remove node and rebalance */
void ocf_rb_tree_remove(struct ocf_rb_tree *tree, struct ocf_rb_node *node);

/* Check whether node could take new_node's key without being moved */
bool ocf_rb_tree_can_update(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node, struct ocf_rb_node *new_node);

/* Find a node equal to the given key node (NULL if absent) */
struct ocf_rb_node *ocf_rb_tree_find(struct ocf_rb_tree *tree,
		struct ocf_rb_node *node);

#endif /* __UTILS_RBTREE_H__ */