Merge pull request #800 from robertbaldyga/redesign-queue-api

Redesign queue API
Robert Baldyga 2024-08-02 14:43:52 +02:00 committed by GitHub
commit 5b2f26decf
26 changed files with 211 additions and 216 deletions
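In short: the three engine-level push helpers (ocf_engine_push_req_back/front/front_cb) are collapsed into a queue-level pair, ocf_queue_push_req() and ocf_queue_push_req_cb(), and the head-vs-tail placement plus the allow_sync bool are replaced by a flags bitmask. A minimal sketch of the correspondence, with flag names taken from the diff below (handler is a placeholder, not a symbol from this commit):

/* old: priority encoded in the function name, sync-allowance in a bool */
ocf_engine_push_req_back(req, true);
ocf_engine_push_req_front(req, true);
ocf_engine_push_req_front_cb(req, handler, false);

/* new: both encoded in one flags argument */
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC);
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
ocf_queue_push_req_cb(req, handler, OCF_QUEUE_PRIO_HIGH);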

View File

@@ -138,7 +138,7 @@ const char *ocf_get_io_iface_name(ocf_req_cache_mode_t cache_mode)
return cache_mode_io_if_map[cache_mode]->name;
}
-static ocf_engine_cb ocf_io_if_type_to_engine_cb(
+static ocf_req_cb ocf_io_if_type_to_engine_cb(
enum ocf_io_if_type io_if_type, int rw)
{
if (unlikely(io_if_type == OCF_IO_MAX_IF ||
@@ -149,7 +149,7 @@ static ocf_engine_cb ocf_io_if_type_to_engine_cb(
return IO_IFS[io_if_type].cbs[rw];
}
-static ocf_engine_cb ocf_cache_mode_to_engine_cb(
+static ocf_req_cb ocf_cache_mode_to_engine_cb(
ocf_req_cache_mode_t req_cache_mode, int rw)
{
if (req_cache_mode == ocf_req_cache_mode_max)
@@ -158,37 +158,6 @@ static ocf_engine_cb ocf_cache_mode_to_engine_cb(
return cache_mode_io_if_map[req_cache_mode]->cbs[rw];
}
-struct ocf_request *ocf_engine_pop_req(ocf_queue_t q)
-{
-unsigned long lock_flags = 0;
-struct ocf_request *req;
-OCF_CHECK_NULL(q);
-/* LOCK */
-env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
-if (list_empty(&q->io_list)) {
-/* No items on the list */
-env_spinlock_unlock_irqrestore(&q->io_list_lock,
-lock_flags);
-return NULL;
-}
-/* Get the first request and remove it from the list */
-req = list_first_entry(&q->io_list, struct ocf_request, list);
-env_atomic_dec(&q->io_no);
-list_del(&req->list);
-/* UNLOCK */
-env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
-OCF_CHECK_NULL(req);
-return req;
-}
bool ocf_fallback_pt_is_on(ocf_cache_t cache)
{
ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
@@ -264,14 +233,14 @@ int ocf_engine_hndl_req(struct ocf_request *req)
* to into OCF workers
*/
-ocf_engine_push_req_back(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC);
return 0;
}
int ocf_engine_hndl_fast_req(struct ocf_request *req)
{
-ocf_engine_cb engine_cb;
+ocf_req_cb engine_cb;
int ret;
engine_cb = ocf_cache_mode_to_engine_cb(req->cache_mode, req->rw);
@@ -313,7 +282,7 @@ void ocf_engine_hndl_ops_req(struct ocf_request *req)
ocf_io_if_type_to_engine_cb(OCF_IO_D2C_IF, req->rw) :
ocf_io_if_type_to_engine_cb(OCF_IO_OPS_IF, req->rw);
-ocf_engine_push_req_back(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC);
}
bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)

View File

@@ -7,42 +7,22 @@
#ifndef __CACHE_ENGINE_H_
#define __CACHE_ENGINE_H_
#include "../ocf_request.h"
struct ocf_thread_priv;
struct ocf_request;
#define LOOKUP_HIT 5
#define LOOKUP_MISS 6
#define LOOKUP_REMAPPED 8
-typedef enum {
-/* modes inherited from user API */
-ocf_req_cache_mode_wt = ocf_cache_mode_wt,
-ocf_req_cache_mode_wb = ocf_cache_mode_wb,
-ocf_req_cache_mode_wa = ocf_cache_mode_wa,
-ocf_req_cache_mode_pt = ocf_cache_mode_pt,
-ocf_req_cache_mode_wi = ocf_cache_mode_wi,
-ocf_req_cache_mode_wo = ocf_cache_mode_wo,
-/* internal modes */
-ocf_req_cache_mode_fast,
-/*!< Fast path */
-ocf_req_cache_mode_d2c,
-/*!< Direct to Core - pass through to core without
-touching cacheline metadata */
-ocf_req_cache_mode_max,
-} ocf_req_cache_mode_t;
static inline ocf_req_cache_mode_t ocf_cache_mode_to_req_cache_mode(
ocf_cache_mode_t mode)
{
return (ocf_req_cache_mode_t)mode;
}
-typedef int (*ocf_engine_cb)(struct ocf_request *req);
struct ocf_io_if {
-ocf_engine_cb cbs[2]; /* READ and WRITE */
+ocf_req_cb cbs[2]; /* READ and WRITE */
const char *name;
};
@@ -56,8 +36,6 @@ bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode);
bool ocf_fallback_pt_is_on(ocf_cache_t cache);
-struct ocf_request *ocf_engine_pop_req(struct ocf_queue *q);
int ocf_engine_hndl_req(struct ocf_request *req);
#define OCF_FAST_PATH_YES 7
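Both ocf_req_cache_mode_t and the engine callback typedef leave this header and reappear in ocf_request.h later in the diff (the typedef renamed to ocf_req_cb). One way to read the move, sketched as the include direction it enables (my inference, not stated in the commit):

/* before: ocf_request.h -> engine/cache_engine.h  (for ocf_req_cache_mode_t, ocf_engine_cb)
 * after:  engine/cache_engine.h -> ocf_request.h  (for ocf_req_cache_mode_t, ocf_req_cb)
 * so queue and request code can name the callback type without the engine header. */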

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -94,5 +95,6 @@ static int _ocf_backfill_do(struct ocf_request *req)
void ocf_engine_backfill(struct ocf_request *req)
{
backfill_queue_inc_block(req->cache);
-ocf_engine_push_req_front_cb(req, _ocf_backfill_do, true);
+ocf_queue_push_req_cb(req, _ocf_backfill_do,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
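Per the definition of ocf_queue_push_req_cb() added in the queue source later in this diff, the new backfill call above is shorthand for setting the handler and pushing:

/* equivalent expansion, per ocf_queue_push_req_cb() below */
req->error = 0;
req->engine_handler = _ocf_backfill_do;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);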

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
-* Copyright(c) 2024 Huawei Technologies Co., Ltd.
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -381,7 +381,8 @@ static void _ocf_engine_clean_end(void *private_data, int error)
} else {
req->info.dirty_any = 0;
req->info.dirty_all = 0;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
}
@@ -584,75 +585,6 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
req->info.hit_no, req->core_line_count);
}
-void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
-{
-ocf_cache_t cache = req->cache;
-ocf_queue_t q = NULL;
-unsigned long lock_flags = 0;
-INIT_LIST_HEAD(&req->list);
-ENV_BUG_ON(!req->io_queue);
-q = req->io_queue;
-if (!req->info.internal) {
-env_atomic_set(&cache->last_access_ms,
-env_ticks_to_msecs(env_get_tick_count()));
-}
-env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
-list_add_tail(&req->list, &q->io_list);
-env_atomic_inc(&q->io_no);
-env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
-/* NOTE: do not dereference @req past this line, it might
-* be picked up by concurrent io thread and deallocated
-* at this point */
-ocf_queue_kick(q, allow_sync);
-}
-void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
-{
-ocf_cache_t cache = req->cache;
-ocf_queue_t q = NULL;
-unsigned long lock_flags = 0;
-ENV_BUG_ON(!req->io_queue);
-INIT_LIST_HEAD(&req->list);
-q = req->io_queue;
-if (!req->info.internal) {
-env_atomic_set(&cache->last_access_ms,
-env_ticks_to_msecs(env_get_tick_count()));
-}
-env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
-list_add(&req->list, &q->io_list);
-env_atomic_inc(&q->io_no);
-env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
-/* NOTE: do not dereference @req past this line, it might
-* be picked up by concurrent io thread and deallocated
-* at this point */
-ocf_queue_kick(q, allow_sync);
-}
-void ocf_engine_push_req_front_cb(struct ocf_request *req,
-ocf_engine_cb engine_cb,
-bool allow_sync)
-{
-req->error = 0; /* Please explain why!!! */
-req->engine_handler = engine_cb;
-ocf_engine_push_req_front(req, allow_sync);
-}
void inc_fallback_pt_error_counter(ocf_cache_t cache)
{
ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
@@ -712,5 +644,5 @@ void ocf_engine_on_resume(struct ocf_request *req)
OCF_DEBUG_RQ(req, "On resume");
-ocf_engine_push_req_front_cb(req, _ocf_engine_refresh, false);
+ocf_queue_push_req_cb(req, _ocf_engine_refresh, OCF_QUEUE_PRIO_HIGH);
}
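Worth noting what this removal changes semantically: the old helpers shared one list, with list position encoding priority; the new scheme (see the queue source below) keeps two FIFO lists and lets the flag select between them. A sketch of the difference:

/* old: one list, position encodes priority
 *   ocf_engine_push_req_back  -> list_add_tail(&req->list, &q->io_list)
 *   ocf_engine_push_req_front -> list_add(&req->list, &q->io_list)
 * new: two lists, both pushed at the tail
 *   without OCF_QUEUE_PRIO_HIGH -> list_add_tail(&req->list, &q->io_list_low)
 *   with    OCF_QUEUE_PRIO_HIGH -> list_add_tail(&req->list, &q->io_list_high)
 * one way to read the net effect: former "front" requests no longer reverse
 * order among themselves; high-priority requests are now FIFO relative to
 * each other. */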

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -280,38 +281,6 @@ void ocf_engine_update_block_stats(struct ocf_request *req);
*/
void ocf_engine_update_request_stats(struct ocf_request *req);
-/**
-* @brief Push front OCF request to the OCF thread worker queue
-*
-* @param req OCF request
-* @param allow_sync caller allows for request from queue to be ran immediately
-from push function in caller context
-*/
-void ocf_engine_push_req_back(struct ocf_request *req,
-bool allow_sync);
-/**
-* @brief Push back OCF request to the OCF thread worker queue
-*
-* @param req OCF request
-* @param allow_sync caller allows for request from queue to be ran immediately
-from push function in caller context
-*/
-void ocf_engine_push_req_front(struct ocf_request *req,
-bool allow_sync);
-/**
-* @brief Set interface and push from request to the OCF thread worker queue
-*
-* @param req OCF request
-* @param engine_cb IO engine handler callback
-* @param allow_sync caller allows for request from queue to be ran immediately
-from push function in caller context
-*/
-void ocf_engine_push_req_front_cb(struct ocf_request *req,
-ocf_engine_cb engine_cb,
-bool allow_sync);
void inc_fallback_pt_error_counter(ocf_cache_t cache);
void ocf_engine_on_resume(struct ocf_request *req);

View File

@@ -74,7 +74,7 @@ static void _ocf_discard_cache_flush_complete(struct ocf_io *io, int error)
}
req->engine_handler = _ocf_discard_core;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
ocf_io_put(io);
}
@@ -111,7 +111,7 @@ static void _ocf_discard_finish_step(struct ocf_request *req)
else
req->engine_handler = _ocf_discard_core;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static void _ocf_discard_step_complete(struct ocf_request *req, int error)
@@ -182,7 +182,7 @@ static int _ocf_discard_step_do(struct ocf_request *req)
static void _ocf_discard_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(req, "On resume");
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static int _ocf_discard_step(struct ocf_request *req)

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -45,7 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
if (req->error) {
OCF_DEBUG_RQ(req, "ERROR");
-ocf_engine_push_req_front_pt(req);
+ocf_queue_push_req_pt(req);
} else {
ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -62,5 +63,6 @@ static int _ocf_invalidate_do(struct ocf_request *req)
void ocf_engine_invalidate(struct ocf_request *req)
{
-ocf_engine_push_req_front_cb(req, _ocf_invalidate_do, true);
+ocf_queue_push_req_cb(req, _ocf_invalidate_do,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
-* Copyright(c) 2023 Huawei Technologies
+* Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
@@ -165,8 +165,9 @@ int ocf_read_pt(struct ocf_request *req)
return 0;
}
-void ocf_engine_push_req_front_pt(struct ocf_request *req)
+void ocf_queue_push_req_pt(struct ocf_request *req)
{
-ocf_engine_push_req_front_cb(req, ocf_read_pt_do, true);
+ocf_queue_push_req_cb(req, ocf_read_pt_do,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +11,6 @@ int ocf_read_pt(struct ocf_request *req);
int ocf_read_pt_do(struct ocf_request *req);
-void ocf_engine_push_req_front_pt(struct ocf_request *req);
+void ocf_queue_push_req_pt(struct ocf_request *req);
#endif /* ENGINE_OFF_H_ */

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -41,7 +42,7 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "HIT completion");
if (req->error) {
-ocf_engine_push_req_front_pt(req);
+ocf_queue_push_req_pt(req);
} else {
ocf_req_unlock(c, req);

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -102,8 +103,8 @@ static void _ocf_write_wb_complete(struct ocf_request *req, int error)
ocf_engine_invalidate(req);
} else {
-ocf_engine_push_req_front_cb(req,
-ocf_write_wb_do_flush_metadata, true);
+ocf_queue_push_req_cb(req, ocf_write_wb_do_flush_metadata,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
}

View File

@@ -56,8 +56,8 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
if (!req->error && !req->wi_second_pass && ocf_engine_is_miss(req)) {
/* need another pass */
-ocf_engine_push_req_front_cb(req, _ocf_write_wi_next_pass,
-true);
+ocf_queue_push_req_cb(req, _ocf_write_wi_next_pass,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
return;
}
@@ -123,8 +123,9 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
ocf_req_put(req);
} else {
-ocf_engine_push_req_front_cb(req,
-ocf_write_wi_update_and_flush_metadata, true);
+ocf_queue_push_req_cb(req,
+ocf_write_wi_update_and_flush_metadata,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
}
@@ -155,7 +156,7 @@ static int _ocf_write_wi_core_write(struct ocf_request *req)
static void _ocf_write_wi_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(req, "On resume");
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
int ocf_write_wi(struct ocf_request *req)

View File

@@ -171,7 +171,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
}
req->engine_handler = ocf_read_wo_cache_do;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static int ocf_read_wo_do(struct ocf_request *req)

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -112,8 +113,8 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
if (req->info.dirty_any) {
/* Some of the request's cachelines changed its state to clean */
-ocf_engine_push_req_front_cb(req,
-ocf_write_wt_do_flush_metadata, true);
+ocf_queue_push_req_cb(req, ocf_write_wt_do_flush_metadata,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
} else {
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -50,7 +51,8 @@ static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
if (env_atomic_dec_return(&req->req_remaining))
return;
-ocf_engine_push_req_front_cb(req, ocf_zero_purge, true);
+ocf_queue_push_req_cb(req, ocf_zero_purge,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static inline void ocf_zero_map_info(struct ocf_request *req)
@@ -148,7 +150,8 @@ void ocf_engine_zero_line(struct ocf_request *req)
if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
-ocf_engine_push_req_front_cb(req, _ocf_zero_do, true);
+ocf_queue_push_req_cb(req, _ocf_zero_do,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
} else {
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);

View File

@@ -93,7 +93,8 @@ static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
context->curr_offset += context->curr_count;
if (context->count > 0)
-ocf_engine_push_req_front(context->req, true);
+ocf_queue_push_req(context->req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
else
metadata_io_read_i_atomic_complete(context, 0);
}
@@ -181,7 +182,8 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
context->compl_hndl = compl_hndl;
context->priv = priv;
-ocf_engine_push_req_front(context->req, true);
+ocf_queue_push_req(context->req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
return 0;
}
@@ -269,7 +271,7 @@ static void metadata_io_req_finalize(struct metadata_io_request *m_req)
static void metadata_io_page_lock_acquired(struct ocf_request *req)
{
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static int metadata_io_restart_req(struct ocf_request *req)
@@ -401,7 +403,8 @@ void metadata_io_req_complete(struct metadata_io_request *m_req)
}
m_req->req.engine_handler = metadata_io_restart_req;
-ocf_engine_push_req_front(&m_req->req, true);
+ocf_queue_push_req(&m_req->req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
/*

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -70,7 +71,7 @@ static int passive_io_resume(struct ocf_request *req)
static void passive_io_page_lock_acquired(struct ocf_request *req)
{
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,

View File

@@ -389,7 +389,8 @@ static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
}
context->req->engine_handler = raw_dynamic_load_all_update;
-ocf_engine_push_req_front(context->req, true);
+ocf_queue_push_req(context->req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static int raw_dynamic_load_all_read(struct ocf_request *req)
@@ -455,7 +456,8 @@ static int raw_dynamic_load_all_update(struct ocf_request *req)
}
context->req->engine_handler = raw_dynamic_load_all_read;
-ocf_engine_push_req_front(context->req, true);
+ocf_queue_push_req(context->req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
return 0;
}
@@ -501,7 +503,8 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
context->req->priv = context;
context->req->engine_handler = raw_dynamic_load_all_read;
-ocf_engine_push_req_front(context->req, true);
+ocf_queue_push_req(context->req,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
return;
err_req:

View File

@@ -402,7 +402,7 @@ static void _ocf_mngt_flush_portion_end(void *private_data, int error)
return;
}
-ocf_engine_push_req_back(fc->req, false);
+ocf_queue_push_req(fc->req, 0);
}
@@ -452,7 +452,7 @@ static void _ocf_mngt_flush_container(
fc->ticks1 = 0;
fc->ticks2 = UINT_MAX;
-ocf_engine_push_req_back(fc->req, true);
+ocf_queue_push_req(fc->req, OCF_QUEUE_ALLOW_SYNC);
return;
finish:

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
@@ -39,7 +40,8 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
return result;
}
-INIT_LIST_HEAD(&tmp_queue->io_list);
+INIT_LIST_HEAD(&tmp_queue->io_list_high);
+INIT_LIST_HEAD(&tmp_queue->io_list_low);
env_atomic_set(&tmp_queue->ref_count, 1);
tmp_queue->cache = cache;
tmp_queue->ops = ops;
@@ -88,13 +90,48 @@ void ocf_io_handle(struct ocf_io *io, void *opaque)
req->engine_handler(req);
}
+static struct ocf_request *ocf_queue_pop_req(ocf_queue_t q)
+{
+unsigned long lock_flags = 0;
+struct ocf_request *req;
+struct list_head *io_list;
+OCF_CHECK_NULL(q);
+/* LOCK */
+env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+if (!list_empty(&q->io_list_high)) {
+io_list = &q->io_list_high;
+} else if (!list_empty(&q->io_list_low)) {
+io_list = &q->io_list_low;
+} else { /* No items on the list */
+env_spinlock_unlock_irqrestore(&q->io_list_lock,
+lock_flags);
+return NULL;
+}
+/* Get the first request and remove it from the list */
+req = list_first_entry(io_list, struct ocf_request, list);
+env_atomic_dec(&q->io_no);
+list_del(&req->list);
+/* UNLOCK */
+env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+OCF_CHECK_NULL(req);
+return req;
+}
void ocf_queue_run_single(ocf_queue_t q)
{
struct ocf_request *io_req = NULL;
OCF_CHECK_NULL(q);
-io_req = ocf_engine_pop_req(q);
+io_req = ocf_queue_pop_req(q);
if (!io_req)
return;
@@ -141,3 +178,43 @@ ocf_cache_t ocf_queue_get_cache(ocf_queue_t q)
OCF_CHECK_NULL(q);
return q->cache;
}
+void ocf_queue_push_req(struct ocf_request *req, uint flags)
+{
+ocf_cache_t cache = req->cache;
+ocf_queue_t q = NULL;
+unsigned long lock_flags = 0;
+struct list_head *io_list;
+INIT_LIST_HEAD(&req->list);
+ENV_BUG_ON(!req->io_queue);
+q = req->io_queue;
+if (!req->info.internal) {
+env_atomic_set(&cache->last_access_ms,
+env_ticks_to_msecs(env_get_tick_count()));
+}
+env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+io_list = (flags & OCF_QUEUE_PRIO_HIGH) ? &q->io_list_high : &q->io_list_low;
+list_add_tail(&req->list, io_list);
+env_atomic_inc(&q->io_no);
+env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+/* NOTE: do not dereference @req past this line, it might
+* be picked up by concurrent io thread and deallocated
+* at this point */
+ocf_queue_kick(q, (bool)(flags & OCF_QUEUE_ALLOW_SYNC));
+}
+void ocf_queue_push_req_cb(struct ocf_request *req,
+ocf_req_cb req_cb, uint flags)
+{
+req->error = 0; /* Please explain why!!! */
+req->engine_handler = req_cb;
+ocf_queue_push_req(req, flags);
+}
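A worked example of the pop order the two-list scheme implies (request names are illustrative):

/* assume A and B are pushed with OCF_QUEUE_PRIO_HIGH, X and Y without:
 *   push X, push A, push Y, push B
 *   ocf_queue_pop_req() -> A, then B, then X, then Y
 * the high list always drains first and each list is FIFO; nothing here
 * ages low-priority requests, so a sustained stream of high-priority
 * pushes can keep the low list waiting. */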

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
+* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,13 +8,19 @@
#define OCF_QUEUE_PRIV_H_
#include "ocf_env.h"
+#include "ocf_request.h"
+/* ocf_queue_push_req flags */
+#define OCF_QUEUE_ALLOW_SYNC 0x01 /* Request can run immediately in caller context */
+#define OCF_QUEUE_PRIO_HIGH 0x02 /* Push to the high priority queue */
struct ocf_queue {
ocf_cache_t cache;
void *priv;
-struct list_head io_list;
+struct list_head io_list_high;
+struct list_head io_list_low;
/* per-queue free running global metadata lock index */
unsigned lock_idx;
@@ -46,4 +53,22 @@ static inline void ocf_queue_kick(ocf_queue_t queue, bool allow_sync)
queue->ops->kick(queue);
}
+/**
+* @brief Push OCF request to the OCF thread worker queue
+*
+* @param req OCF request
+* @param flags See ocf_queue_push_req flags above
+*/
+void ocf_queue_push_req(struct ocf_request *req, uint flags);
+/**
+* @brief Set interface and push from request to the OCF thread worker queue
+*
+* @param req OCF request
+* @param req_cb IO engine handler callback
+* @param flags See ocf_queue_push_req flags above
+*/
+void ocf_queue_push_req_cb(struct ocf_request *req,
+ocf_req_cb req_cb, uint flags);
#endif
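All four flag combinations appear somewhere in this commit. Spelled out as a caller would write them (req stands for any request already bound to a queue):

ocf_queue_push_req(req, 0);                    /* low priority, always defer to the queue thread */
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC); /* low priority, may run in caller context */
ocf_queue_push_req(req, OCF_QUEUE_PRIO_HIGH);  /* high priority, always defer */
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH); /* high priority, may run inline */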

View File

@@ -9,9 +9,27 @@
#include "ocf_env.h"
#include "ocf_io_priv.h"
-#include "engine/cache_engine.h"
#include "metadata/metadata_structs.h"
+typedef enum {
+/* modes inherited from user API */
+ocf_req_cache_mode_wt = ocf_cache_mode_wt,
+ocf_req_cache_mode_wb = ocf_cache_mode_wb,
+ocf_req_cache_mode_wa = ocf_cache_mode_wa,
+ocf_req_cache_mode_pt = ocf_cache_mode_pt,
+ocf_req_cache_mode_wi = ocf_cache_mode_wi,
+ocf_req_cache_mode_wo = ocf_cache_mode_wo,
+/* internal modes */
+ocf_req_cache_mode_fast,
+/*!< Fast path */
+ocf_req_cache_mode_d2c,
+/*!< Direct to Core - pass through to core without
+touching cacheline metadata */
+ocf_req_cache_mode_max,
+} ocf_req_cache_mode_t;
struct ocf_req_allocator;
struct ocf_req_info {
@@ -94,6 +112,12 @@ struct ocf_req_discard_info {
/*!< Number of processed sector during discard operation */
};
+/**
+* @brief OCF IO engine handler callback
+*/
+struct ocf_request;
+typedef int (*ocf_req_cb)(struct ocf_request *req);
/**
* @brief OCF IO request
*/
@@ -129,7 +153,7 @@ struct ocf_request {
ocf_core_t core;
/*!< Handle to core instance */
-ocf_engine_cb engine_handler;
+ocf_req_cb engine_handler;
/*!< IO engine handler */
void *priv;
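With ocf_req_cb now defined alongside struct ocf_request, both call patterns used across this diff look like the following sketch (my_step_do is illustrative, not part of the commit):

static int my_step_do(struct ocf_request *req)
{
    /* process one engine step; 0 means the step completed */
    return 0;
}

/* pattern 1: set the handler explicitly, then push (as in the discard engine hunks above) */
req->engine_handler = my_step_do;
ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);

/* pattern 2: let the helper set it (this also clears req->error) */
ocf_queue_push_req_cb(req, my_step_do, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);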

View File

@@ -217,8 +217,8 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req)
if (master->complete_queue) {
ocf_req_get(master);
-ocf_engine_push_req_front_cb(master,
-_ocf_cleaner_complete, true);
+ocf_queue_push_req_cb(master, _ocf_cleaner_complete,
+OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
} else {
/* Only master contains completion function and priv */
cmpl = master->master_io_req;
@@ -232,7 +232,7 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req)
static void _ocf_cleaner_on_resume(struct ocf_request *req)
{
OCF_DEBUG_TRACE(req->cache);
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
/*
@@ -336,7 +336,7 @@ static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
OCF_DEBUG_MSG(req->cache, "Metadata flush finished");
req->engine_handler = _ocf_cleaner_fire_flush_cache;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static int _ocf_cleaner_update_metadata(struct ocf_request *req)
@@ -415,7 +415,7 @@ static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
* All core writes done, switch to post cleaning activities
*/
req->engine_handler = _ocf_cleaner_update_metadata;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
@@ -487,7 +487,7 @@ static void _ocf_cleaner_core_io_end(struct ocf_request *req)
* Move processing to thread, where IO will be (and can be) submitted
*/
req->engine_handler = _ocf_cleaner_fire_flush_cores;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
}
static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
@@ -645,7 +645,7 @@ static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
* Move processing to thread, where IO will be (and can be) submitted
*/
req->engine_handler = _ocf_cleaner_fire_core;
-ocf_engine_push_req_front(req, true);
+ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
OCF_DEBUG_MSG(req->cache, "Cache reads finished");
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
-* Copyright(c) 2023 Huawei Technologies
+* Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -142,5 +142,5 @@ void ocf_parallelize_run(ocf_parallelize_t parallelize)
int i;
for (i = 0; i < parallelize->shards_cnt; i++)
-ocf_engine_push_req_front(parallelize->reqs[i], false);
+ocf_queue_push_req(parallelize->reqs[i], OCF_QUEUE_PRIO_HIGH);
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2019-2022 Intel Corporation
-* Copyright(c) 2023 Huawei Technologies
+* Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -47,7 +47,7 @@ static int _ocf_pipeline_run_step(struct ocf_request *req)
if (step->pred(pipeline, pipeline->priv, &step->arg)) {
step->hndl(pipeline, pipeline->priv, &step->arg);
return 0;
}
}
continue;
case ocf_pipeline_step_foreach:
arg = &step->args[pipeline->next_arg++];
@@ -126,12 +126,12 @@ void *ocf_pipeline_get_priv(ocf_pipeline_t pipeline)
void ocf_pipeline_next(ocf_pipeline_t pipeline)
{
-ocf_engine_push_req_front(pipeline->req, false);
+ocf_queue_push_req(pipeline->req, OCF_QUEUE_PRIO_HIGH);
}
void ocf_pipeline_finish(ocf_pipeline_t pipeline, int error)
{
pipeline->finish = true;
pipeline->error = error;
-ocf_engine_push_req_front(pipeline->req, false);
+ocf_queue_push_req(pipeline->req, OCF_QUEUE_PRIO_HIGH);
}
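Taken together, the call-site conversions across this commit follow one consistent mapping from the old helpers to the new flags:

/* ocf_engine_push_req_front(req, true)     -> ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH)
 * ocf_engine_push_req_front(req, false)    -> ocf_queue_push_req(req, OCF_QUEUE_PRIO_HIGH)   (pipeline, parallelize)
 * ocf_engine_push_req_back(req, true)      -> ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC)  (engine entry, flush)
 * ocf_engine_push_req_back(req, false)     -> ocf_queue_push_req(req, 0)                     (flush portion end)
 * ocf_engine_push_req_front_cb(req, cb, s) -> ocf_queue_push_req_cb(req, cb, <flags as above>) */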