diff --git a/inc/ocf_core.h b/inc/ocf_core.h index 700a3bb..1dec9ee 100644 --- a/inc/ocf_core.h +++ b/inc/ocf_core.h @@ -112,6 +112,15 @@ uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core); */ ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core); +/** + * @brief Get sequential cutoff stream promotion req count of given core object + * + * @param[in] core Core object + * + * @retval Sequential cutoff stream promotion request count + */ +uint32_t ocf_core_get_seq_cutoff_promotion_count(ocf_core_t core); + /** * @brief Get name of given core object * diff --git a/inc/ocf_debug.h b/inc/ocf_debug.h index 8a2c4c0..1eac42d 100644 --- a/inc/ocf_debug.h +++ b/inc/ocf_debug.h @@ -12,7 +12,7 @@ struct ocf_dbg_seq_cutoff_status { uint64_t bytes; uint32_t rw : 1; uint32_t active : 1; - } streams[OCF_SEQ_CUTOFF_MAX_STREAMS]; + } streams[OCF_SEQ_CUTOFF_PERCORE_STREAMS]; }; void ocf_dbg_get_seq_cutoff_status(ocf_core_t core, diff --git a/inc/ocf_def.h b/inc/ocf_def.h index 5d9c95a..140b1b1 100644 --- a/inc/ocf_def.h +++ b/inc/ocf_def.h @@ -175,9 +175,12 @@ typedef enum { /*!< Current cache mode of given cache instance */ } ocf_cache_mode_t; -#define OCF_SEQ_CUTOFF_MAX_STREAMS 256 +#define OCF_SEQ_CUTOFF_PERCORE_STREAMS 128 +#define OCF_SEQ_CUTOFF_PERQUEUE_STREAMS 64 #define OCF_SEQ_CUTOFF_MIN_THRESHOLD 1 #define OCF_SEQ_CUTOFF_MAX_THRESHOLD 4194181 +#define OCF_SEQ_CUTOFF_MIN_PROMOTION_COUNT 1 +#define OCF_SEQ_CUTOFF_MAX_PROMOTION_COUNT 65535 typedef enum { ocf_seq_cutoff_policy_always = 0, diff --git a/inc/ocf_mngt.h b/inc/ocf_mngt.h index 1e61690..6d8f159 100644 --- a/inc/ocf_mngt.h +++ b/inc/ocf_mngt.h @@ -42,6 +42,9 @@ struct ocf_mngt_core_config { uint32_t seq_cutoff_threshold; /*!< Sequential cutoff threshold (in bytes) */ + uint32_t seq_cutoff_promotion_count; + /*!< Sequential cutoff promotion request count */ + struct { void *data; size_t size; @@ -61,6 +64,7 @@ static inline void ocf_mngt_core_config_set_default( { cfg->try_add = false; 
cfg->seq_cutoff_threshold = 1024; + cfg->seq_cutoff_promotion_count = 8; cfg->user_metadata.data = NULL; cfg->user_metadata.size = 0; } @@ -1010,6 +1014,47 @@ int ocf_mngt_core_set_seq_cutoff_policy_all(ocf_cache_t cache, int ocf_mngt_core_get_seq_cutoff_policy(ocf_core_t core, ocf_seq_cutoff_policy *policy); +/** + * @brief Set core sequential cutoff promotion request count + * + * @attention This changes only runtime state. To make changes persistent + * use function ocf_mngt_cache_save(). + * + * @param[in] core Core handle + * @param[in] thresh promotion request count + * + * @retval 0 Sequential cutoff promotion requets count has been set successfully + * @retval Non-zero Error occured and request count hasn't been updated + */ +int ocf_mngt_core_set_seq_cutoff_promotion_count(ocf_core_t core, + uint32_t count); + +/** + * @brief Set sequential cutoff promotion request count for all cores in cache + * + * @attention This changes only runtime state. To make changes persistent + * use function ocf_mngt_cache_save(). 
+ * + * @param[in] cache Cache handle + * @param[in] count promotion request count + * + * @retval 0 Sequential cutoff promotion request count has been set successfully + * @retval Non-zero Error occurred and request count hasn't been updated + */ +int ocf_mngt_core_set_seq_cutoff_promotion_count_all(ocf_cache_t cache, + uint32_t count); +/** + * @brief Get core sequential cutoff promotion request count + * + * @param[in] core Core handle + * @param[out] count promotion request count + * + * @retval 0 Sequential cutoff promotion request count has been retrieved successfully + * @retval Non-zero Error occurred + */ +int ocf_mngt_core_get_seq_cutoff_promotion_count(ocf_core_t core, + uint32_t *count); + /** + * @brief Set cache fallback Pass Through error threshold + * diff --git a/src/mngt/ocf_mngt_core.c b/src/mngt/ocf_mngt_core.c index 19300b6..e68e1cd 100644 --- a/src/mngt/ocf_mngt_core.c +++ b/src/mngt/ocf_mngt_core.c @@ -451,6 +451,8 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline, ocf_seq_cutoff_policy_default); env_atomic_set(&core->conf_meta->seq_cutoff_threshold, cfg->seq_cutoff_threshold); + env_atomic_set(&core->conf_meta->seq_cutoff_promo_count, + cfg->seq_cutoff_promotion_count); /* Add core sequence number for atomic metadata matching */ core_sequence_no = _ocf_mngt_get_core_seq_no(cache); @@ -1016,3 +1018,62 @@ int ocf_mngt_core_get_seq_cutoff_policy(ocf_core_t core, return 0; } + +static int _cache_mngt_set_core_seq_cutoff_promo_count(ocf_core_t core, + void *cntx) +{ + uint32_t count = *(uint32_t*) cntx; + uint32_t count_old = ocf_core_get_seq_cutoff_promotion_count(core); + + if (count < OCF_SEQ_CUTOFF_MIN_PROMOTION_COUNT || + count > OCF_SEQ_CUTOFF_MAX_PROMOTION_COUNT) { + ocf_core_log(core, log_info, + "Invalid sequential cutoff promotion count!\n"); + return -OCF_ERR_INVAL; + } + + + if (count_old == count) { + ocf_core_log(core, log_info, + "Sequential cutoff promotion count %u " + "bytes is already set\n", count); + return 0; + } + + 
env_atomic_set(&core->conf_meta->seq_cutoff_promo_count, count); + + ocf_core_log(core, log_info, "Changing sequential cutoff promotion" + "count from %u to %u bytes successful\n", + count_old, count); + + return 0; +} + +int ocf_mngt_core_set_seq_cutoff_promotion_count(ocf_core_t core, + uint32_t count) +{ + OCF_CHECK_NULL(core); + + return _cache_mngt_set_core_seq_cutoff_promo_count(core, &count); +} + +int ocf_mngt_core_set_seq_cutoff_promotion_count_all(ocf_cache_t cache, + uint32_t count) +{ + OCF_CHECK_NULL(cache); + + return ocf_core_visit(cache, + _cache_mngt_set_core_seq_cutoff_promo_count, + &count, true); +} + +int ocf_mngt_core_get_seq_cutoff_promotion_count(ocf_core_t core, + uint32_t *count) +{ + OCF_CHECK_NULL(core); + OCF_CHECK_NULL(count); + + *count = ocf_core_get_seq_cutoff_promotion_count(core); + + return 0; +} diff --git a/src/ocf_cache.c b/src/ocf_cache.c index 99cdae1..666cea3 100644 --- a/src/ocf_cache.c +++ b/src/ocf_cache.c @@ -11,6 +11,7 @@ #include "utils/utils_part.h" #include "ocf_priv.h" #include "ocf_cache_priv.h" +#include "ocf_queue_priv.h" #include "utils/utils_stats.h" ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache) diff --git a/src/ocf_core.c b/src/ocf_core.c index b351250..075ebdb 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -114,6 +114,11 @@ ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core) return env_atomic_read(&core->conf_meta->seq_cutoff_policy); } +uint32_t ocf_core_get_seq_cutoff_promotion_count(ocf_core_t core) +{ + return env_atomic_read(&core->conf_meta->seq_cutoff_promo_count); +} + int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx, bool only_opened) { diff --git a/src/ocf_core_priv.h b/src/ocf_core_priv.h index 5018cf9..ad52771 100644 --- a/src/ocf_core_priv.h +++ b/src/ocf_core_priv.h @@ -44,6 +44,9 @@ struct ocf_core_meta_config { /* Sequential cutoff policy */ env_atomic seq_cutoff_policy; + /* Sequential cutoff stream promotion request count */ + 
env_atomic seq_cutoff_promo_count; + /* core object size in bytes */ uint64_t length; diff --git a/src/ocf_queue.c b/src/ocf_queue.c index 60694c9..a754d6e 100644 --- a/src/ocf_queue.c +++ b/src/ocf_queue.c @@ -44,6 +44,13 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue, tmp_queue->cache = cache; tmp_queue->ops = ops; + result = ocf_queue_seq_cutoff_init(tmp_queue); + if (result) { + ocf_mngt_cache_put(cache); + env_free(tmp_queue); + return result; + } + list_add(&tmp_queue->list, &cache->io_queues); *queue = tmp_queue; @@ -65,6 +72,7 @@ void ocf_queue_put(ocf_queue_t queue) if (env_atomic_dec_return(&queue->ref_count) == 0) { list_del(&queue->list); queue->ops->stop(queue); + ocf_queue_seq_cutoff_deinit(queue); ocf_mngt_cache_put(queue->cache); env_spinlock_destroy(&queue->io_list_lock); env_free(queue); diff --git a/src/ocf_queue_priv.h b/src/ocf_queue_priv.h index aca0e4a..8c6282b 100644 --- a/src/ocf_queue_priv.h +++ b/src/ocf_queue_priv.h @@ -31,6 +31,8 @@ struct ocf_queue { const struct ocf_queue_ops *ops; + struct ocf_seq_cutoff *seq_cutoff; + void *priv; }; diff --git a/src/ocf_request.h b/src/ocf_request.h index 93b7067..5f26434 100644 --- a/src/ocf_request.h +++ b/src/ocf_request.h @@ -182,6 +182,9 @@ struct ocf_request { uint8_t master_io_req_type : 2; /*!< Core device request context type */ + uint8_t seq_cutoff_core : 1; + /*!< Sequential cut off stream promoted to core level */ + uint8_t seq_cutoff : 1; /*!< Sequential cut off set for this request */ diff --git a/src/ocf_seq_cutoff.c b/src/ocf_seq_cutoff.c index e55b041..0496de9 100644 --- a/src/ocf_seq_cutoff.c +++ b/src/ocf_seq_cutoff.c @@ -5,6 +5,8 @@ #include "ocf_seq_cutoff.h" #include "ocf_cache_priv.h" +#include "ocf_core_priv.h" +#include "ocf_queue_priv.h" #include "ocf_priv.h" #include "ocf/ocf_debug.h" #include "utils/utils_cache_line.h" @@ -52,50 +54,86 @@ static struct ocf_rb_node *ocf_seq_cutoff_stream_list_find( struct ocf_rb_node *node; node = list_entry(node_list, 
struct ocf_rb_node, list); - max_stream = container_of(node, struct ocf_seq_cutoff_stream, node); + stream = container_of(node, struct ocf_seq_cutoff_stream, node); + if (stream->valid) + max_stream = stream; list_for_each_entry(node, node_list, list) { stream = container_of(node, struct ocf_seq_cutoff_stream, node); + if (!stream->valid) + continue; + if (!max_stream) + max_stream = stream; if (stream->bytes > max_stream->bytes) max_stream = stream; } - return &max_stream->node; + return max_stream ? &max_stream->node : NULL; } -int ocf_core_seq_cutoff_init(ocf_core_t core) +static void ocf_seq_cutoff_base_init(struct ocf_seq_cutoff *base, int nstreams) { struct ocf_seq_cutoff_stream *stream; int i; - ocf_core_log(core, log_info, "Seqential cutoff init\n"); - - core->seq_cutoff = env_vmalloc(sizeof(*core->seq_cutoff)); - if (!core->seq_cutoff) - return -OCF_ERR_NO_MEM; - - env_rwlock_init(&core->seq_cutoff->lock); - ocf_rb_tree_init(&core->seq_cutoff->tree, ocf_seq_cutoff_stream_cmp, + env_rwlock_init(&base->lock); + ocf_rb_tree_init(&base->tree, ocf_seq_cutoff_stream_cmp, ocf_seq_cutoff_stream_list_find); - INIT_LIST_HEAD(&core->seq_cutoff->lru); + INIT_LIST_HEAD(&base->lru); - for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) { - stream = &core->seq_cutoff->streams[i]; + for (i = 0; i < nstreams; i++) { + stream = &base->streams[i]; stream->last = 4096 * i; stream->bytes = 0; stream->rw = 0; - ocf_rb_tree_insert(&core->seq_cutoff->tree, &stream->node); - list_add_tail(&stream->list, &core->seq_cutoff->lru); + stream->valid = false; + ocf_rb_tree_insert(&base->tree, &stream->node); + list_add_tail(&stream->list, &base->lru); } +} + +void ocf_seq_cutoff_base_deinit(struct ocf_seq_cutoff *base) +{ + env_rwlock_destroy(&base->lock); +} + +int ocf_core_seq_cutoff_init(ocf_core_t core) +{ + ocf_core_log(core, log_info, "Seqential cutoff init\n"); + + core->seq_cutoff = env_vmalloc(sizeof(struct ocf_seq_cutoff_percore)); + if (!core->seq_cutoff) + return 
-OCF_ERR_NO_MEM; + + ocf_seq_cutoff_base_init(core->seq_cutoff, + OCF_SEQ_CUTOFF_PERCORE_STREAMS); return 0; } void ocf_core_seq_cutoff_deinit(ocf_core_t core) { - env_rwlock_destroy(&core->seq_cutoff->lock); + ocf_seq_cutoff_base_deinit(core->seq_cutoff); env_vfree(core->seq_cutoff); } +int ocf_queue_seq_cutoff_init(ocf_queue_t queue) +{ + queue->seq_cutoff = env_vmalloc(sizeof(struct ocf_seq_cutoff_perqueue)); + if (!queue->seq_cutoff) + return -OCF_ERR_NO_MEM; + + ocf_seq_cutoff_base_init(queue->seq_cutoff, + OCF_SEQ_CUTOFF_PERQUEUE_STREAMS); + + return 0; +} + +void ocf_queue_seq_cutoff_deinit(ocf_queue_t queue) +{ + ocf_seq_cutoff_base_deinit(queue->seq_cutoff); + env_vfree(queue->seq_cutoff); +} + void ocf_dbg_get_seq_cutoff_status(ocf_core_t core, struct ocf_dbg_seq_cutoff_status *status) { @@ -119,17 +157,38 @@ void ocf_dbg_get_seq_cutoff_status(ocf_core_t core, env_rwlock_read_unlock(&core->seq_cutoff->lock); } +static bool ocf_core_seq_cutoff_base_check(struct ocf_seq_cutoff *seq_cutoff, + uint64_t addr, uint32_t len, int rw, uint32_t threshold, + struct ocf_seq_cutoff_stream **out_stream) +{ + struct ocf_seq_cutoff_stream item = { + .last = addr, .rw = rw + }; + struct ocf_seq_cutoff_stream *stream; + struct ocf_rb_node *node; + bool result = false; + + node = ocf_rb_tree_find(&seq_cutoff->tree, &item.node); + if (node) { + stream = container_of(node, struct ocf_seq_cutoff_stream, node); + if (stream->bytes + len >= threshold) + result = true; + + if (out_stream) + *out_stream = stream; + } + + return result; +} + bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req) { ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core); uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core); ocf_cache_t cache = ocf_core_get_cache(core); - struct ocf_seq_cutoff_stream item = { - .last = req->byte_position, .rw = req->rw - }; - struct ocf_seq_cutoff_stream *stream; - struct ocf_rb_node *node; - bool result = false; + struct 
ocf_seq_cutoff_stream *queue_stream = NULL; + struct ocf_seq_cutoff_stream *core_stream = NULL; + bool result; switch (policy) { case ocf_seq_cutoff_policy_always: @@ -146,55 +205,134 @@ bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req) return false; } + result = ocf_core_seq_cutoff_base_check(req->io_queue->seq_cutoff, + req->byte_position, req->byte_length, req->rw, + threshold, &queue_stream); + if (queue_stream) + return result; + env_rwlock_read_lock(&core->seq_cutoff->lock); - node = ocf_rb_tree_find(&core->seq_cutoff->tree, &item.node); - if (node) { - stream = container_of(node, struct ocf_seq_cutoff_stream, node); - if (stream->bytes + req->byte_length >= threshold) - result = true; - } + result = ocf_core_seq_cutoff_base_check(core->seq_cutoff, + req->byte_position, req->byte_length, req->rw, + threshold, &core_stream); env_rwlock_read_unlock(&core->seq_cutoff->lock); + if (core_stream) + req->seq_cutoff_core = true; + return result; } -void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req) +static struct ocf_seq_cutoff_stream *ocf_core_seq_cutoff_base_update( + struct ocf_seq_cutoff *seq_cutoff, + uint64_t addr, uint32_t len, int rw, bool insert) { - ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core); struct ocf_seq_cutoff_stream item = { - .last = req->byte_position, .rw = req->rw + .last = addr, .rw = rw }; struct ocf_seq_cutoff_stream *stream; struct ocf_rb_node *node; bool can_update; + node = ocf_rb_tree_find(&seq_cutoff->tree, &item.node); + if (node) { + stream = container_of(node, struct ocf_seq_cutoff_stream, node); + item.last = addr + len; + can_update = ocf_rb_tree_can_update(&seq_cutoff->tree, + node, &item.node); + stream->last = addr + len; + stream->bytes += len; + stream->req_count++; + if (!can_update) { + ocf_rb_tree_remove(&seq_cutoff->tree, node); + ocf_rb_tree_insert(&seq_cutoff->tree, node); + } + list_move_tail(&stream->list, &seq_cutoff->lru); + + return stream; + } + + 
if (insert) { + stream = list_first_entry(&seq_cutoff->lru, + struct ocf_seq_cutoff_stream, list); + ocf_rb_tree_remove(&seq_cutoff->tree, &stream->node); + stream->rw = rw; + stream->last = addr + len; + stream->bytes = len; + stream->req_count = 1; + stream->valid = true; + ocf_rb_tree_insert(&seq_cutoff->tree, &stream->node); + list_move_tail(&stream->list, &seq_cutoff->lru); + + return stream; + } + + return NULL; +} + +static void ocf_core_seq_cutoff_base_promote( + struct ocf_seq_cutoff *dst_seq_cutoff, + struct ocf_seq_cutoff *src_seq_cutoff, + struct ocf_seq_cutoff_stream *src_stream) +{ + struct ocf_seq_cutoff_stream *dst_stream; + + dst_stream = list_first_entry(&dst_seq_cutoff->lru, + struct ocf_seq_cutoff_stream, list); + ocf_rb_tree_remove(&dst_seq_cutoff->tree, &dst_stream->node); + dst_stream->rw = src_stream->rw; + dst_stream->last = src_stream->last; + dst_stream->bytes = src_stream->bytes; + dst_stream->req_count = src_stream->req_count; + dst_stream->valid = true; + ocf_rb_tree_insert(&dst_seq_cutoff->tree, &dst_stream->node); + list_move_tail(&dst_stream->list, &dst_seq_cutoff->lru); + src_stream->valid = false; + list_move(&src_stream->list, &src_seq_cutoff->lru); +} + +void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req) +{ + ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core); + uint32_t threshold = ocf_core_get_seq_cutoff_threshold(core); + uint32_t promotion_count = + ocf_core_get_seq_cutoff_promotion_count(core); + struct ocf_seq_cutoff_stream *stream; + bool promote = false; + if (policy == ocf_seq_cutoff_policy_never) return; - /* Update last accessed position and bytes counter */ - env_rwlock_write_lock(&core->seq_cutoff->lock); - node = ocf_rb_tree_find(&core->seq_cutoff->tree, &item.node); - if (node) { - stream = container_of(node, struct ocf_seq_cutoff_stream, node); - item.last = req->byte_position + req->byte_length; - can_update = ocf_rb_tree_can_update(&core->seq_cutoff->tree, - node, 
&item.node); - stream->last = req->byte_position + req->byte_length; - stream->bytes += req->byte_length; - if (!can_update) { - ocf_rb_tree_remove(&core->seq_cutoff->tree, node); - ocf_rb_tree_insert(&core->seq_cutoff->tree, node); - } - list_move_tail(&stream->list, &core->seq_cutoff->lru); - } else { - stream = list_first_entry(&core->seq_cutoff->lru, - struct ocf_seq_cutoff_stream, list); - ocf_rb_tree_remove(&core->seq_cutoff->tree, &stream->node); - stream->rw = req->rw; - stream->last = req->byte_position + req->byte_length; - stream->bytes = req->byte_length; - ocf_rb_tree_insert(&core->seq_cutoff->tree, &stream->node); - list_move_tail(&stream->list, &core->seq_cutoff->lru); + if (req->byte_length >= threshold) + promote = true; + + if (promotion_count == 0) + promote = true; + + if (req->seq_cutoff_core || promote) { + env_rwlock_write_lock(&core->seq_cutoff->lock); + stream = ocf_core_seq_cutoff_base_update(core->seq_cutoff, + req->byte_position, req->byte_length, req->rw, + promote); + env_rwlock_write_unlock(&core->seq_cutoff->lock); + + if (stream) + return; + } + + stream = ocf_core_seq_cutoff_base_update(req->io_queue->seq_cutoff, + req->byte_position, req->byte_length, req->rw, true); + + if (stream->bytes >= threshold) + promote = true; + + if (stream->req_count >= promotion_count) + promote = true; + + if (promote) { + env_rwlock_write_lock(&core->seq_cutoff->lock); + ocf_core_seq_cutoff_base_promote(core->seq_cutoff, + req->io_queue->seq_cutoff, stream); + env_rwlock_write_unlock(&core->seq_cutoff->lock); } - env_rwlock_write_unlock(&core->seq_cutoff->lock); } diff --git a/src/ocf_seq_cutoff.h b/src/ocf_seq_cutoff.h index acf1330..fce56af 100644 --- a/src/ocf_seq_cutoff.h +++ b/src/ocf_seq_cutoff.h @@ -14,6 +14,8 @@ struct ocf_seq_cutoff_stream { uint64_t last; uint64_t bytes; uint32_t rw : 1; + uint32_t valid : 1; + uint32_t req_count : 16; struct ocf_rb_node node; struct list_head list; }; @@ -21,15 +23,29 @@ struct ocf_seq_cutoff_stream { 
struct ocf_seq_cutoff { ocf_core_t core; env_rwlock lock; - struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_MAX_STREAMS]; struct ocf_rb_tree tree; struct list_head lru; + struct ocf_seq_cutoff_stream streams[]; +}; + +struct ocf_seq_cutoff_percore { + struct ocf_seq_cutoff base; + struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_PERCORE_STREAMS]; +}; + +struct ocf_seq_cutoff_perqueue { + struct ocf_seq_cutoff base; + struct ocf_seq_cutoff_stream streams[OCF_SEQ_CUTOFF_PERQUEUE_STREAMS]; }; int ocf_core_seq_cutoff_init(ocf_core_t core); void ocf_core_seq_cutoff_deinit(ocf_core_t core); +int ocf_queue_seq_cutoff_init(ocf_queue_t queue); + +void ocf_queue_seq_cutoff_deinit(ocf_queue_t queue); + bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req); void ocf_core_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);