Per-queue multi-stream sequential cutoff

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Author: Robert Baldyga, 2020-12-28 16:46:22 +01:00
parent ac9bd5b094
commit 3ee253cc4e
13 changed files with 354 additions and 60 deletions


@@ -112,6 +112,15 @@ uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core);
  */
 ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core);
 
+/**
+ * @brief Get sequential cutoff stream promotion req count of given core object
+ *
+ * @param[in] core Core object
+ *
+ * @retval Sequential cutoff stream promotion request count
+ */
+uint32_t ocf_core_get_seq_cutoff_promotion_count(ocf_core_t core);
+
 /**
  * @brief Get name of given core object
  *


@@ -12,7 +12,7 @@ struct ocf_dbg_seq_cutoff_status {
 		uint64_t bytes;
 		uint32_t rw : 1;
 		uint32_t active : 1;
-	} streams[OCF_SEQ_CUTOFF_MAX_STREAMS];
+	} streams[OCF_SEQ_CUTOFF_PERCORE_STREAMS];
 };
 
 void ocf_dbg_get_seq_cutoff_status(ocf_core_t core,


@@ -175,9 +175,12 @@ typedef enum {
 	/*!< Current cache mode of given cache instance */
 } ocf_cache_mode_t;
 
-#define OCF_SEQ_CUTOFF_MAX_STREAMS 256
+#define OCF_SEQ_CUTOFF_PERCORE_STREAMS 128
+#define OCF_SEQ_CUTOFF_PERQUEUE_STREAMS 64
 #define OCF_SEQ_CUTOFF_MIN_THRESHOLD 1
 #define OCF_SEQ_CUTOFF_MAX_THRESHOLD 4194181
+#define OCF_SEQ_CUTOFF_MIN_PROMOTION_COUNT 1
+#define OCF_SEQ_CUTOFF_MAX_PROMOTION_COUNT 65535
 
 typedef enum {
 	ocf_seq_cutoff_policy_always = 0,


@@ -42,6 +42,9 @@ struct ocf_mngt_core_config {
 	uint32_t seq_cutoff_threshold;
 		/*!< Sequential cutoff threshold (in bytes) */
 
+	uint32_t seq_cutoff_promotion_count;
+		/*!< Sequential cutoff promotion request count */
+
 	struct {
 		void *data;
 		size_t size;
@@ -61,6 +64,7 @@ static inline void ocf_mngt_core_config_set_default(
 {
 	cfg->try_add = false;
 	cfg->seq_cutoff_threshold = 1024;
+	cfg->seq_cutoff_promotion_count = 8;
 	cfg->user_metadata.data = NULL;
 	cfg->user_metadata.size = 0;
 }
@@ -1010,6 +1014,47 @@ int ocf_mngt_core_set_seq_cutoff_policy_all(ocf_cache_t cache,
 int ocf_mngt_core_get_seq_cutoff_policy(ocf_core_t core,
 		ocf_seq_cutoff_policy *policy);
 
+/**
+ * @brief Set core sequential cutoff promotion request count
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ *            use function ocf_mngt_cache_save().
+ *
+ * @param[in] core Core handle
+ * @param[in] count Promotion request count
+ *
+ * @retval 0 Sequential cutoff promotion request count has been set successfully
+ * @retval Non-zero Error occurred and the request count hasn't been updated
+ */
+int ocf_mngt_core_set_seq_cutoff_promotion_count(ocf_core_t core,
+		uint32_t count);
+
+/**
+ * @brief Set sequential cutoff promotion request count for all cores in cache
+ *
+ * @attention This changes only runtime state. To make changes persistent
+ *            use function ocf_mngt_cache_save().
+ *
+ * @param[in] cache Cache handle
+ * @param[in] count Promotion request count
+ *
+ * @retval 0 Sequential cutoff promotion request count has been set successfully
+ * @retval Non-zero Error occurred and the request count hasn't been updated
+ */
+int ocf_mngt_core_set_seq_cutoff_promotion_count_all(ocf_cache_t cache,
+		uint32_t count);
+
+/**
+ * @brief Get core sequential cutoff promotion request count
+ *
+ * @param[in] core Core handle
+ * @param[out] count Promotion request count
+ *
+ * @retval 0 Sequential cutoff promotion request count has been retrieved successfully
+ * @retval Non-zero Error occurred
+ */
+int ocf_mngt_core_get_seq_cutoff_promotion_count(ocf_core_t core,
+		uint32_t *count);
+
 /**
  * @brief Set cache fallback Pass Through error threshold
  *
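
A minimal usage sketch of the new management calls (illustrative only, not part of this commit: it assumes valid cache and core handles obtained through the usual OCF management flow, and error handling is abbreviated):

/* Illustrative sketch: tune the promotion count for one core,
 * apply it cache-wide, then read it back. */
static int example_tune_promotion_count(ocf_cache_t cache, ocf_core_t core)
{
	uint32_t count;
	int result;

	/* Per-core setting; runtime-only until ocf_mngt_cache_save() is called. */
	result = ocf_mngt_core_set_seq_cutoff_promotion_count(core, 16);
	if (result)
		return result;

	/* Same value for every core in the cache. */
	result = ocf_mngt_core_set_seq_cutoff_promotion_count_all(cache, 16);
	if (result)
		return result;

	/* Read the effective value back. */
	return ocf_mngt_core_get_seq_cutoff_promotion_count(core, &count);
}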


@@ -451,6 +451,8 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
 			ocf_seq_cutoff_policy_default);
 	env_atomic_set(&core->conf_meta->seq_cutoff_threshold,
 			cfg->seq_cutoff_threshold);
+	env_atomic_set(&core->conf_meta->seq_cutoff_promo_count,
+			cfg->seq_cutoff_promotion_count);
 
 	/* Add core sequence number for atomic metadata matching */
 	core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
@@ -1016,3 +1018,62 @@ int ocf_mngt_core_get_seq_cutoff_policy(ocf_core_t core,
 
 	return 0;
 }
+
+static int _cache_mngt_set_core_seq_cutoff_promo_count(ocf_core_t core,
+		void *cntx)
+{
+	uint32_t count = *(uint32_t *)cntx;
+	uint32_t count_old = ocf_core_get_seq_cutoff_promotion_count(core);
+
+	if (count < OCF_SEQ_CUTOFF_MIN_PROMOTION_COUNT ||
+			count > OCF_SEQ_CUTOFF_MAX_PROMOTION_COUNT) {
+		ocf_core_log(core, log_info,
+				"Invalid sequential cutoff promotion count!\n");
+		return -OCF_ERR_INVAL;
+	}
+
+	if (count_old == count) {
+		ocf_core_log(core, log_info,
+				"Sequential cutoff promotion count %u "
+				"is already set\n", count);
+		return 0;
+	}
+
+	env_atomic_set(&core->conf_meta->seq_cutoff_promo_count, count);
+
+	ocf_core_log(core, log_info, "Changing sequential cutoff promotion "
+			"count from %u to %u successful\n",
+			count_old, count);
+
+	return 0;
+}
+
+int ocf_mngt_core_set_seq_cutoff_promotion_count(ocf_core_t core,
+		uint32_t count)
+{
+	OCF_CHECK_NULL(core);
+
+	return _cache_mngt_set_core_seq_cutoff_promo_count(core, &count);
+}
+
+int ocf_mngt_core_set_seq_cutoff_promotion_count_all(ocf_cache_t cache,
+		uint32_t count)
+{
+	OCF_CHECK_NULL(cache);
+
+	return ocf_core_visit(cache,
+			_cache_mngt_set_core_seq_cutoff_promo_count,
+			&count, true);
+}
+
+int ocf_mngt_core_get_seq_cutoff_promotion_count(ocf_core_t core,
+		uint32_t *count)
+{
+	OCF_CHECK_NULL(core);
+	OCF_CHECK_NULL(count);
+
+	*count = ocf_core_get_seq_cutoff_promotion_count(core);
+
+	return 0;
+}


@@ -11,6 +11,7 @@
 #include "utils/utils_part.h"
 #include "ocf_priv.h"
 #include "ocf_cache_priv.h"
+#include "ocf_queue_priv.h"
 #include "utils/utils_stats.h"
 
 ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache)


@@ -114,6 +114,11 @@ ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
 	return env_atomic_read(&core->conf_meta->seq_cutoff_policy);
 }
 
+uint32_t ocf_core_get_seq_cutoff_promotion_count(ocf_core_t core)
+{
+	return env_atomic_read(&core->conf_meta->seq_cutoff_promo_count);
+}
+
 int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
 		bool only_opened)
 {


@@ -44,6 +44,9 @@ struct ocf_core_meta_config {
 	/* Sequential cutoff policy */
 	env_atomic seq_cutoff_policy;
 
+	/* Sequential cutoff stream promotion request count */
+	env_atomic seq_cutoff_promo_count;
+
 	/* core object size in bytes */
 	uint64_t length;
 


@@ -44,6 +44,13 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
 	tmp_queue->cache = cache;
 	tmp_queue->ops = ops;
 
+	result = ocf_queue_seq_cutoff_init(tmp_queue);
+	if (result) {
+		ocf_mngt_cache_put(cache);
+		env_free(tmp_queue);
+		return result;
+	}
+
 	list_add(&tmp_queue->list, &cache->io_queues);
 	*queue = tmp_queue;
 
@@ -65,6 +72,7 @@ void ocf_queue_put(ocf_queue_t queue)
 	if (env_atomic_dec_return(&queue->ref_count) == 0) {
 		list_del(&queue->list);
 		queue->ops->stop(queue);
+		ocf_queue_seq_cutoff_deinit(queue);
 		ocf_mngt_cache_put(queue->cache);
 		env_spinlock_destroy(&queue->io_list_lock);
 		env_free(queue);


@@ -31,6 +31,8 @@ struct ocf_queue {
 
 	const struct ocf_queue_ops *ops;
 
+	struct ocf_seq_cutoff *seq_cutoff;
+
 	void *priv;
 };


@@ -182,6 +182,9 @@ struct ocf_request {
 	uint8_t master_io_req_type : 2;
 		/*!< Core device request context type */
 
+	uint8_t seq_cutoff_core : 1;
+		/*!< Sequential cut off stream promoted to core level */
+
 	uint8_t seq_cutoff : 1;
 		/*!< Sequential cut off set for this request */
 


@@ -5,6 +5,8 @@
 
 #include "ocf_seq_cutoff.h"
 #include "ocf_cache_priv.h"
+#include "ocf_core_priv.h"
+#include "ocf_queue_priv.h"
 #include "ocf_priv.h"
 #include "ocf/ocf_debug.h"
 #include "utils/utils_cache_line.h"
@@ -52,50 +54,86 @@ static struct ocf_rb_node *ocf_seq_cutoff_stream_list_find(
 	struct ocf_rb_node *node;
 
 	node = list_entry(node_list, struct ocf_rb_node, list);
-	max_stream = container_of(node, struct ocf_seq_cutoff_stream, node);
+	stream = container_of(node, struct ocf_seq_cutoff_stream, node);
+	if (stream->valid)
+		max_stream = stream;
 
 	list_for_each_entry(node, node_list, list) {
 		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
+		if (!stream->valid)
+			continue;
+		if (!max_stream)
+			max_stream = stream;
 		if (stream->bytes > max_stream->bytes)
 			max_stream = stream;
 	}
 
-	return &max_stream->node;
+	return max_stream ? &max_stream->node : NULL;
 }
 
-int ocf_core_seq_cutoff_init(ocf_core_t core)
+static void ocf_seq_cutoff_base_init(struct ocf_seq_cutoff *base, int nstreams)
 {
 	struct ocf_seq_cutoff_stream *stream;
 	int i;
 
-	ocf_core_log(core, log_info, "Seqential cutoff init\n");
-
-	core->seq_cutoff = env_vmalloc(sizeof(*core->seq_cutoff));
-	if (!core->seq_cutoff)
-		return -OCF_ERR_NO_MEM;
-
-	env_rwlock_init(&core->seq_cutoff->lock);
-	ocf_rb_tree_init(&core->seq_cutoff->tree, ocf_seq_cutoff_stream_cmp,
+	env_rwlock_init(&base->lock);
+	ocf_rb_tree_init(&base->tree, ocf_seq_cutoff_stream_cmp,
 			ocf_seq_cutoff_stream_list_find);
-	INIT_LIST_HEAD(&core->seq_cutoff->lru);
+	INIT_LIST_HEAD(&base->lru);
 
-	for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) {
-		stream = &core->seq_cutoff->streams[i];
+	for (i = 0; i < nstreams; i++) {
+		stream = &base->streams[i];
 		stream->last = 4096 * i;
 		stream->bytes = 0;
 		stream->rw = 0;
-		ocf_rb_tree_insert(&core->seq_cutoff->tree, &stream->node);
-		list_add_tail(&stream->list, &core->seq_cutoff->lru);
+		stream->valid = false;
+		ocf_rb_tree_insert(&base->tree, &stream->node);
+		list_add_tail(&stream->list, &base->lru);
 	}
+}
+
+void ocf_seq_cutoff_base_deinit(struct ocf_seq_cutoff *base)
+{
+	env_rwlock_destroy(&base->lock);
+}
+
+int ocf_core_seq_cutoff_init(ocf_core_t core)
+{
+	ocf_core_log(core, log_info, "Seqential cutoff init\n");
+
+	core->seq_cutoff = env_vmalloc(sizeof(struct ocf_seq_cutoff_percore));
+	if (!core->seq_cutoff)
+		return -OCF_ERR_NO_MEM;
+
+	ocf_seq_cutoff_base_init(core->seq_cutoff,
+			OCF_SEQ_CUTOFF_PERCORE_STREAMS);
 
 	return 0;
 }
 
 void ocf_core_seq_cutoff_deinit(ocf_core_t core)
 {
-	env_rwlock_destroy(&core->seq_cutoff->lock);
+	ocf_seq_cutoff_base_deinit(core->seq_cutoff);
 	env_vfree(core->seq_cutoff);
 }
 
+int ocf_queue_seq_cutoff_init(ocf_queue_t queue)
+{
+	queue->seq_cutoff = env_vmalloc(sizeof(struct ocf_seq_cutoff_perqueue));
+	if (!queue->seq_cutoff)
+		return -OCF_ERR_NO_MEM;
+
+	ocf_seq_cutoff_base_init(queue->seq_cutoff,
+			OCF_SEQ_CUTOFF_PERQUEUE_STREAMS);
+
+	return 0;
+}
+
+void ocf_queue_seq_cutoff_deinit(ocf_queue_t queue)
+{
+	ocf_seq_cutoff_base_deinit(queue->seq_cutoff);
+	env_vfree(queue->seq_cutoff);
+}
+
 void ocf_dbg_get_seq_cutoff_status(ocf_core_t core,
 		struct ocf_dbg_seq_cutoff_status *status)
 {
@@ -119,17 +157,38 @@ void ocf_dbg_get_seq_cutoff_status(ocf_core_t core,
 	env_rwlock_read_unlock(&core->seq_cutoff->lock);
 }
 
+static bool ocf_core_seq_cutoff_base_check(struct ocf_seq_cutoff *seq_cutoff,
+		uint64_t addr, uint32_t len, int rw, uint32_t threshold,
+		struct ocf_seq_cutoff_stream **out_stream)
+{
+	struct ocf_seq_cutoff_stream item = {
+		.last = addr, .rw = rw
+	};
+	struct ocf_seq_cutoff_stream *stream;
+	struct ocf_rb_node *node;
+	bool result = false;
+
+	node = ocf_rb_tree_find(&seq_cutoff->tree, &item.node);
+	if (node) {
+		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
+		if (stream->bytes + len >= threshold)
+			result = true;
+		if (out_stream)
+			*out_stream = stream;
+	}
+
+	return result;
+}
+
 bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
 {
 	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
 	uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core);
 	ocf_cache_t cache = ocf_core_get_cache(core);
-	struct ocf_seq_cutoff_stream item = {
-		.last = req->byte_position, .rw = req->rw
-	};
-	struct ocf_seq_cutoff_stream *stream;
-	struct ocf_rb_node *node;
-	bool result = false;
+	struct ocf_seq_cutoff_stream *queue_stream = NULL;
+	struct ocf_seq_cutoff_stream *core_stream = NULL;
+	bool result;
 
 	switch (policy) {
 	case ocf_seq_cutoff_policy_always:
@@ -146,55 +205,134 @@ bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
 		return false;
 	}
 
+	result = ocf_core_seq_cutoff_base_check(req->io_queue->seq_cutoff,
+			req->byte_position, req->byte_length, req->rw,
+			threshold, &queue_stream);
+	if (queue_stream)
+		return result;
+
 	env_rwlock_read_lock(&core->seq_cutoff->lock);
-	node = ocf_rb_tree_find(&core->seq_cutoff->tree, &item.node);
-	if (node) {
-		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
-		if (stream->bytes + req->byte_length >= threshold)
-			result = true;
-	}
+	result = ocf_core_seq_cutoff_base_check(core->seq_cutoff,
+			req->byte_position, req->byte_length, req->rw,
+			threshold, &core_stream);
 	env_rwlock_read_unlock(&core->seq_cutoff->lock);
 
+	if (core_stream)
+		req->seq_cutoff_core = true;
+
 	return result;
 }
 
-void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
+static struct ocf_seq_cutoff_stream *ocf_core_seq_cutoff_base_update(
+		struct ocf_seq_cutoff *seq_cutoff,
+		uint64_t addr, uint32_t len, int rw, bool insert)
 {
-	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
 	struct ocf_seq_cutoff_stream item = {
-		.last = req->byte_position, .rw = req->rw
+		.last = addr, .rw = rw
 	};
 	struct ocf_seq_cutoff_stream *stream;
 	struct ocf_rb_node *node;
 	bool can_update;
 
+	node = ocf_rb_tree_find(&seq_cutoff->tree, &item.node);
+	if (node) {
+		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
+		item.last = addr + len;
+		can_update = ocf_rb_tree_can_update(&seq_cutoff->tree,
+				node, &item.node);
+		stream->last = addr + len;
+		stream->bytes += len;
+		stream->req_count++;
+		if (!can_update) {
+			ocf_rb_tree_remove(&seq_cutoff->tree, node);
+			ocf_rb_tree_insert(&seq_cutoff->tree, node);
+		}
+		list_move_tail(&stream->list, &seq_cutoff->lru);
+
+		return stream;
+	}
+
+	if (insert) {
+		stream = list_first_entry(&seq_cutoff->lru,
+				struct ocf_seq_cutoff_stream, list);
+		ocf_rb_tree_remove(&seq_cutoff->tree, &stream->node);
+		stream->rw = rw;
+		stream->last = addr + len;
+		stream->bytes = len;
+		stream->req_count = 1;
+		stream->valid = true;
+		ocf_rb_tree_insert(&seq_cutoff->tree, &stream->node);
+		list_move_tail(&stream->list, &seq_cutoff->lru);
+
+		return stream;
+	}
+
+	return NULL;
+}
+
+static void ocf_core_seq_cutoff_base_promote(
+		struct ocf_seq_cutoff *dst_seq_cutoff,
+		struct ocf_seq_cutoff *src_seq_cutoff,
+		struct ocf_seq_cutoff_stream *src_stream)
+{
+	struct ocf_seq_cutoff_stream *dst_stream;
+
+	dst_stream = list_first_entry(&dst_seq_cutoff->lru,
+			struct ocf_seq_cutoff_stream, list);
+	ocf_rb_tree_remove(&dst_seq_cutoff->tree, &dst_stream->node);
+	dst_stream->rw = src_stream->rw;
+	dst_stream->last = src_stream->last;
+	dst_stream->bytes = src_stream->bytes;
+	dst_stream->req_count = src_stream->req_count;
+	dst_stream->valid = true;
+	ocf_rb_tree_insert(&dst_seq_cutoff->tree, &dst_stream->node);
+	list_move_tail(&dst_stream->list, &dst_seq_cutoff->lru);
+
+	src_stream->valid = false;
+	list_move(&src_stream->list, &src_seq_cutoff->lru);
+}
+
+void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
+{
+	ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
+	uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core);
+	uint32_t promotion_count =
+			ocf_core_get_seq_cutoff_promotion_count(core);
+	struct ocf_seq_cutoff_stream *stream;
+	bool promote = false;
+
 	if (policy == ocf_seq_cutoff_policy_never)
 		return;
 
-	/* Update last accessed position and bytes counter */
-	env_rwlock_write_lock(&core->seq_cutoff->lock);
-	node = ocf_rb_tree_find(&core->seq_cutoff->tree, &item.node);
-	if (node) {
-		stream = container_of(node, struct ocf_seq_cutoff_stream, node);
-		item.last = req->byte_position + req->byte_length;
-		can_update = ocf_rb_tree_can_update(&core->seq_cutoff->tree,
-				node, &item.node);
-		stream->last = req->byte_position + req->byte_length;
-		stream->bytes += req->byte_length;
-		if (!can_update) {
-			ocf_rb_tree_remove(&core->seq_cutoff->tree, node);
-			ocf_rb_tree_insert(&core->seq_cutoff->tree, node);
-		}
-		list_move_tail(&stream->list, &core->seq_cutoff->lru);
-	} else {
-		stream = list_first_entry(&core->seq_cutoff->lru,
-				struct ocf_seq_cutoff_stream, list);
-		ocf_rb_tree_remove(&core->seq_cutoff->tree, &stream->node);
-		stream->rw = req->rw;
-		stream->last = req->byte_position + req->byte_length;
-		stream->bytes = req->byte_length;
-		ocf_rb_tree_insert(&core->seq_cutoff->tree, &stream->node);
-		list_move_tail(&stream->list, &core->seq_cutoff->lru);
+	if (req->byte_length >= threshold)
+		promote = true;
+
+	if (promotion_count == 0)
+		promote = true;
+
+	if (req->seq_cutoff_core || promote) {
+		env_rwlock_write_lock(&core->seq_cutoff->lock);
+		stream = ocf_core_seq_cutoff_base_update(core->seq_cutoff,
+				req->byte_position, req->byte_length, req->rw,
+				promote);
+		env_rwlock_write_unlock(&core->seq_cutoff->lock);
+
+		if (stream)
+			return;
+	}
+
+	stream = ocf_core_seq_cutoff_base_update(req->io_queue->seq_cutoff,
+			req->byte_position, req->byte_length, req->rw, true);
+
+	if (stream->bytes >= threshold)
+		promote = true;
+
+	if (stream->req_count >= promotion_count)
+		promote = true;
+
+	if (promote) {
+		env_rwlock_write_lock(&core->seq_cutoff->lock);
+		ocf_core_seq_cutoff_base_promote(core->seq_cutoff,
+				req->io_queue->seq_cutoff, stream);
+		env_rwlock_write_unlock(&core->seq_cutoff->lock);
 	}
-	env_rwlock_write_unlock(&core->seq_cutoff->lock);
 }
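
The net effect of the rewritten update path: every request is first accounted against a small per-queue stream table with no core-wide locking, and a stream only moves into the shared per-core tree once it has proven itself sequential (its accumulated bytes reach the cutoff threshold, or it has been hit by at least the configured promotion count of requests). A stripped-down model of that decision, outside any OCF structures (all names below are illustrative, not OCF API; locking and the seq_cutoff_core fast path are omitted):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for a per-queue stream entry. */
struct stream_model {
	uint64_t bytes;     /* bytes accumulated by back-to-back hits */
	uint32_t req_count; /* number of requests that hit the stream */
};

/* Rough model of the promotion condition: a single request at or above the
 * threshold, a promotion count of zero, or a queue-level stream that has
 * crossed either limit ends up in the per-core tree. */
static bool should_promote_to_core(const struct stream_model *s,
		uint32_t req_len, uint32_t threshold, uint32_t promotion_count)
{
	if (req_len >= threshold || promotion_count == 0)
		return true;

	return s->bytes >= threshold || s->req_count >= promotion_count;
}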


@@ -14,6 +14,8 @@ struct ocf_seq_cutoff_stream {
 	uint64_t last;
 	uint64_t bytes;
 	uint32_t rw : 1;
+	uint32_t valid : 1;
+	uint32_t req_count : 16;
 	struct ocf_rb_node node;
 	struct list_head list;
 };
@@ -21,15 +23,29 @@ struct ocf_seq_cutoff_stream {
 struct ocf_seq_cutoff {
 	ocf_core_t core;
 	env_rwlock lock;
-	struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_MAX_STREAMS];
 	struct ocf_rb_tree tree;
 	struct list_head lru;
+	struct ocf_seq_cutoff_stream streams[];
+};
+
+struct ocf_seq_cutoff_percore {
+	struct ocf_seq_cutoff base;
+	struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_PERCORE_STREAMS];
+};
+
+struct ocf_seq_cutoff_perqueue {
+	struct ocf_seq_cutoff base;
+	struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_PERQUEUE_STREAMS];
 };
 
 int ocf_core_seq_cutoff_init(ocf_core_t core);
 void ocf_core_seq_cutoff_deinit(ocf_core_t core);
 
+int ocf_queue_seq_cutoff_init(ocf_queue_t queue);
+void ocf_queue_seq_cutoff_deinit(ocf_queue_t queue);
+
 bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req);
 void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
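
The struct split above is the usual flexible-array-member idiom: struct ocf_seq_cutoff carries the bookkeeping and ends in streams[], while the _percore and _perqueue wrappers only exist to give that array a concrete size. That is why the init paths allocate sizeof(struct ocf_seq_cutoff_percore) or sizeof(struct ocf_seq_cutoff_perqueue) and pass the stream count separately. A standalone sketch of the same pattern (generic names, not OCF code):

#include <stdio.h>
#include <stdlib.h>

/* Shared bookkeeping; the flexible array member gets its storage from
 * whichever sized wrapper the object was allocated as. */
struct tracker {
	int nstreams;
	long streams[];
};

struct tracker_small {
	struct tracker base;
	long streams[4];
};

struct tracker_large {
	struct tracker base;
	long streams[16];
};

/* Generic init works on the base type; the caller says how many entries
 * the concrete variant provides. */
static void tracker_init(struct tracker *t, int nstreams)
{
	int i;

	t->nstreams = nstreams;
	for (i = 0; i < nstreams; i++)
		t->streams[i] = 0;
}

int main(void)
{
	/* Allocate by the size of the concrete variant, then hand the base
	 * pointer and the matching stream count to the shared init. */
	struct tracker *t = malloc(sizeof(struct tracker_large));

	if (!t)
		return 1;
	tracker_init(t, 16);
	printf("initialized %d streams\n", t->nstreams);
	free(t);
	return 0;
}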