Move and rename ocf_engine_push_req_* from engine_common to ocf_queue_push_req_* in ocf_queue

Signed-off-by: Ian Levine <ian.levine@huawei.com>
Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
This commit is contained in:
Ian Levine 2023-10-12 15:00:06 +03:00 committed by Robert Baldyga
parent de32a9649a
commit 038126e9ab
24 changed files with 179 additions and 146 deletions

View File

@ -264,7 +264,7 @@ int ocf_engine_hndl_req(struct ocf_request *req)
* to into OCF workers * to into OCF workers
*/ */
ocf_engine_push_req_back(req, true); ocf_queue_push_req_back(req, true);
return 0; return 0;
} }
@ -313,7 +313,7 @@ void ocf_engine_hndl_ops_req(struct ocf_request *req)
ocf_io_if_type_to_engine_cb(OCF_IO_D2C_IF, req->rw) : ocf_io_if_type_to_engine_cb(OCF_IO_D2C_IF, req->rw) :
ocf_io_if_type_to_engine_cb(OCF_IO_OPS_IF, req->rw); ocf_io_if_type_to_engine_cb(OCF_IO_OPS_IF, req->rw);
ocf_engine_push_req_back(req, true); ocf_queue_push_req_back(req, true);
} }
bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode) bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -94,5 +95,5 @@ static int _ocf_backfill_do(struct ocf_request *req)
void ocf_engine_backfill(struct ocf_request *req) void ocf_engine_backfill(struct ocf_request *req)
{ {
backfill_queue_inc_block(req->cache); backfill_queue_inc_block(req->cache);
ocf_engine_push_req_front_cb(req, _ocf_backfill_do, true); ocf_queue_push_req_front_cb(req, _ocf_backfill_do, true);
} }

View File

@ -381,7 +381,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
} else { } else {
req->info.dirty_any = 0; req->info.dirty_any = 0;
req->info.dirty_all = 0; req->info.dirty_all = 0;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
} }
@ -584,75 +584,6 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
req->info.hit_no, req->core_line_count); req->info.hit_no, req->core_line_count);
} }
/*
 * Append @req to the tail of its assigned I/O queue and kick the queue.
 *
 * @param req OCF request; req->io_queue must already be set (BUG otherwise)
 * @param allow_sync if true, the queue kick is allowed to process the
 * request synchronously in the caller's context
 */
void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
/* (Re)initialize the list node before linking it into the queue list */
INIT_LIST_HEAD(&req->list);
ENV_BUG_ON(!req->io_queue);
q = req->io_queue;
/* Record timestamp of last non-internal request on this cache */
if (!req->info.internal) {
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
}
/* Publish the request to the queue under the IRQ-safe list lock */
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add_tail(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
/* NOTE: do not dereference @req past this line, it might
 * be picked up by concurrent io thread and deallocated
 * at this point */
ocf_queue_kick(q, allow_sync);
}
/*
 * Prepend @req to the head of its assigned I/O queue and kick the queue.
 * Identical to ocf_engine_push_req_back() except for list_add() vs
 * list_add_tail(), i.e. front-of-queue vs back-of-queue placement.
 *
 * @param req OCF request; req->io_queue must already be set (BUG otherwise)
 * @param allow_sync if true, the queue kick is allowed to process the
 * request synchronously in the caller's context
 */
void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
ENV_BUG_ON(!req->io_queue);
/* (Re)initialize the list node before linking it into the queue list */
INIT_LIST_HEAD(&req->list);
q = req->io_queue;
/* Record timestamp of last non-internal request on this cache */
if (!req->info.internal) {
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
}
/* Publish the request to the queue under the IRQ-safe list lock */
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
/* NOTE: do not dereference @req past this line, it might
 * be picked up by concurrent io thread and deallocated
 * at this point */
ocf_queue_kick(q, allow_sync);
}
/*
 * Install @req_cb as the request's engine handler, then push the request
 * to the front of its I/O queue.
 *
 * @param req OCF request
 * @param req_cb engine handler callback invoked when the request is run
 * @param allow_sync if true, the push may run the request synchronously
 * in the caller's context
 */
void ocf_engine_push_req_front_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync)
{
req->error = 0; /* NOTE(review): clears any stale error before re-queuing;
 * presumably so the new handler starts from a clean
 * state — TODO confirm this is intentional */
req->engine_handler = req_cb;
ocf_engine_push_req_front(req, allow_sync);
}
void inc_fallback_pt_error_counter(ocf_cache_t cache) void inc_fallback_pt_error_counter(ocf_cache_t cache)
{ {
ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0); ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
@ -712,5 +643,5 @@ void ocf_engine_on_resume(struct ocf_request *req)
OCF_DEBUG_RQ(req, "On resume"); OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front_cb(req, _ocf_engine_refresh, false); ocf_queue_push_req_front_cb(req, _ocf_engine_refresh, false);
} }

View File

@ -281,38 +281,6 @@ void ocf_engine_update_block_stats(struct ocf_request *req);
*/ */
void ocf_engine_update_request_stats(struct ocf_request *req); void ocf_engine_update_request_stats(struct ocf_request *req);
/**
* @brief Push front OCF request to the OCF thread worker queue
*
* @param req OCF request
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_req_back(struct ocf_request *req,
bool allow_sync);
/**
* @brief Push back OCF request to the OCF thread worker queue
*
* @param req OCF request
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_req_front(struct ocf_request *req,
bool allow_sync);
/**
* @brief Set interface and push from request to the OCF thread worker queue
*
* @param req OCF request
* @param engine_cb IO engine handler callback
* @param allow_sync caller allows for request from queue to be ran immediately
from push function in caller context
*/
void ocf_engine_push_req_front_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync);
void inc_fallback_pt_error_counter(ocf_cache_t cache); void inc_fallback_pt_error_counter(ocf_cache_t cache);
void ocf_engine_on_resume(struct ocf_request *req); void ocf_engine_on_resume(struct ocf_request *req);

View File

@ -74,7 +74,7 @@ static void _ocf_discard_cache_flush_complete(struct ocf_io *io, int error)
} }
req->engine_handler = _ocf_discard_core; req->engine_handler = _ocf_discard_core;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
ocf_io_put(io); ocf_io_put(io);
} }
@ -111,7 +111,7 @@ static void _ocf_discard_finish_step(struct ocf_request *req)
else else
req->engine_handler = _ocf_discard_core; req->engine_handler = _ocf_discard_core;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static void _ocf_discard_step_complete(struct ocf_request *req, int error) static void _ocf_discard_step_complete(struct ocf_request *req, int error)
@ -182,7 +182,7 @@ static int _ocf_discard_step_do(struct ocf_request *req)
static void _ocf_discard_on_resume(struct ocf_request *req) static void _ocf_discard_on_resume(struct ocf_request *req)
{ {
OCF_DEBUG_RQ(req, "On resume"); OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static int _ocf_discard_step(struct ocf_request *req) static int _ocf_discard_step(struct ocf_request *req)

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -45,7 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
if (req->error) { if (req->error) {
OCF_DEBUG_RQ(req, "ERROR"); OCF_DEBUG_RQ(req, "ERROR");
ocf_engine_push_req_front_pt(req); ocf_queue_push_req_front_pt(req);
} else { } else {
ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req); ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -62,5 +63,5 @@ static int _ocf_invalidate_do(struct ocf_request *req)
void ocf_engine_invalidate(struct ocf_request *req) void ocf_engine_invalidate(struct ocf_request *req)
{ {
ocf_engine_push_req_front_cb(req, _ocf_invalidate_do, true); ocf_queue_push_req_front_cb(req, _ocf_invalidate_do, true);
} }

View File

@ -1,6 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2023 Huawei Technologies * Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
#include "ocf/ocf.h" #include "ocf/ocf.h"
@ -165,8 +165,8 @@ int ocf_read_pt(struct ocf_request *req)
return 0; return 0;
} }
void ocf_engine_push_req_front_pt(struct ocf_request *req) void ocf_queue_push_req_front_pt(struct ocf_request *req)
{ {
ocf_engine_push_req_front_cb(req, ocf_read_pt_do, true); ocf_queue_push_req_front_cb(req, ocf_read_pt_do, true);
} }

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2021 Intel Corporation * Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -10,6 +11,6 @@ int ocf_read_pt(struct ocf_request *req);
int ocf_read_pt_do(struct ocf_request *req); int ocf_read_pt_do(struct ocf_request *req);
void ocf_engine_push_req_front_pt(struct ocf_request *req); void ocf_queue_push_req_front_pt(struct ocf_request *req);
#endif /* ENGINE_OFF_H_ */ #endif /* ENGINE_OFF_H_ */

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -41,7 +42,7 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "HIT completion"); OCF_DEBUG_RQ(req, "HIT completion");
if (req->error) { if (req->error) {
ocf_engine_push_req_front_pt(req); ocf_queue_push_req_front_pt(req);
} else { } else {
ocf_req_unlock(c, req); ocf_req_unlock(c, req);

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -102,7 +103,7 @@ static void _ocf_write_wb_complete(struct ocf_request *req, int error)
ocf_engine_invalidate(req); ocf_engine_invalidate(req);
} else { } else {
ocf_engine_push_req_front_cb(req, ocf_queue_push_req_front_cb(req,
ocf_write_wb_do_flush_metadata, true); ocf_write_wb_do_flush_metadata, true);
} }
} }

View File

@ -56,7 +56,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
if (!req->error && !req->wi_second_pass && ocf_engine_is_miss(req)) { if (!req->error && !req->wi_second_pass && ocf_engine_is_miss(req)) {
/* need another pass */ /* need another pass */
ocf_engine_push_req_front_cb(req, _ocf_write_wi_next_pass, ocf_queue_push_req_front_cb(req, _ocf_write_wi_next_pass,
true); true);
return; return;
} }
@ -123,7 +123,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
ocf_req_put(req); ocf_req_put(req);
} else { } else {
ocf_engine_push_req_front_cb(req, ocf_queue_push_req_front_cb(req,
ocf_write_wi_update_and_flush_metadata, true); ocf_write_wi_update_and_flush_metadata, true);
} }
} }
@ -155,7 +155,7 @@ static int _ocf_write_wi_core_write(struct ocf_request *req)
static void _ocf_write_wi_on_resume(struct ocf_request *req) static void _ocf_write_wi_on_resume(struct ocf_request *req)
{ {
OCF_DEBUG_RQ(req, "On resume"); OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
int ocf_write_wi(struct ocf_request *req) int ocf_write_wi(struct ocf_request *req)

View File

@ -171,7 +171,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
} }
req->engine_handler = ocf_read_wo_cache_do; req->engine_handler = ocf_read_wo_cache_do;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static int ocf_read_wo_do(struct ocf_request *req) static int ocf_read_wo_do(struct ocf_request *req)

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -112,7 +113,7 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
if (req->info.dirty_any) { if (req->info.dirty_any) {
/* Some of the request's cachelines changed its state to clean */ /* Some of the request's cachelines changed its state to clean */
ocf_engine_push_req_front_cb(req, ocf_queue_push_req_front_cb(req,
ocf_write_wt_do_flush_metadata, true); ocf_write_wt_do_flush_metadata, true);
} else { } else {
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req); ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -50,7 +51,7 @@ static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
if (env_atomic_dec_return(&req->req_remaining)) if (env_atomic_dec_return(&req->req_remaining))
return; return;
ocf_engine_push_req_front_cb(req, ocf_zero_purge, true); ocf_queue_push_req_front_cb(req, ocf_zero_purge, true);
} }
static inline void ocf_zero_map_info(struct ocf_request *req) static inline void ocf_zero_map_info(struct ocf_request *req)
@ -148,7 +149,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
if (lock >= 0) { if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED); ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
ocf_engine_push_req_front_cb(req, _ocf_zero_do, true); ocf_queue_push_req_front_cb(req, _ocf_zero_do, true);
} else { } else {
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock); OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock); req->complete(req, lock);

View File

@ -93,7 +93,7 @@ static void metadata_io_read_i_atomic_step_end(struct ocf_io *io, int error)
context->curr_offset += context->curr_count; context->curr_offset += context->curr_count;
if (context->count > 0) if (context->count > 0)
ocf_engine_push_req_front(context->req, true); ocf_queue_push_req_front(context->req, true);
else else
metadata_io_read_i_atomic_complete(context, 0); metadata_io_read_i_atomic_complete(context, 0);
} }
@ -181,7 +181,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
context->compl_hndl = compl_hndl; context->compl_hndl = compl_hndl;
context->priv = priv; context->priv = priv;
ocf_engine_push_req_front(context->req, true); ocf_queue_push_req_front(context->req, true);
return 0; return 0;
} }
@ -269,7 +269,7 @@ static void metadata_io_req_finalize(struct metadata_io_request *m_req)
static void metadata_io_page_lock_acquired(struct ocf_request *req) static void metadata_io_page_lock_acquired(struct ocf_request *req)
{ {
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static int metadata_io_restart_req(struct ocf_request *req) static int metadata_io_restart_req(struct ocf_request *req)
@ -401,7 +401,7 @@ void metadata_io_req_complete(struct metadata_io_request *m_req)
} }
m_req->req.engine_handler = metadata_io_restart_req; m_req->req.engine_handler = metadata_io_restart_req;
ocf_engine_push_req_front(&m_req->req, true); ocf_queue_push_req_front(&m_req->req, true);
} }
/* /*

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -70,7 +71,7 @@ static int passive_io_resume(struct ocf_request *req)
static void passive_io_page_lock_acquired(struct ocf_request *req) static void passive_io_page_lock_acquired(struct ocf_request *req)
{ {
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io, int ocf_metadata_passive_update(ocf_cache_t cache, struct ocf_io *io,

View File

@ -389,7 +389,7 @@ static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
} }
context->req->engine_handler = raw_dynamic_load_all_update; context->req->engine_handler = raw_dynamic_load_all_update;
ocf_engine_push_req_front(context->req, true); ocf_queue_push_req_front(context->req, true);
} }
static int raw_dynamic_load_all_read(struct ocf_request *req) static int raw_dynamic_load_all_read(struct ocf_request *req)
@ -455,7 +455,7 @@ static int raw_dynamic_load_all_update(struct ocf_request *req)
} }
context->req->engine_handler = raw_dynamic_load_all_read; context->req->engine_handler = raw_dynamic_load_all_read;
ocf_engine_push_req_front(context->req, true); ocf_queue_push_req_front(context->req, true);
return 0; return 0;
} }
@ -501,7 +501,7 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
context->req->priv = context; context->req->priv = context;
context->req->engine_handler = raw_dynamic_load_all_read; context->req->engine_handler = raw_dynamic_load_all_read;
ocf_engine_push_req_front(context->req, true); ocf_queue_push_req_front(context->req, true);
return; return;
err_req: err_req:

View File

@ -402,7 +402,7 @@ static void _ocf_mngt_flush_portion_end(void *private_data, int error)
return; return;
} }
ocf_engine_push_req_back(fc->req, false); ocf_queue_push_req_back(fc->req, false);
} }
@ -452,7 +452,7 @@ static void _ocf_mngt_flush_container(
fc->ticks1 = 0; fc->ticks1 = 0;
fc->ticks2 = UINT_MAX; fc->ticks2 = UINT_MAX;
ocf_engine_push_req_back(fc->req, true); ocf_queue_push_req_back(fc->req, true);
return; return;
finish: finish:

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
#include "ocf/ocf.h" #include "ocf/ocf.h"
@ -141,3 +142,81 @@ ocf_cache_t ocf_queue_get_cache(ocf_queue_t q)
OCF_CHECK_NULL(q); OCF_CHECK_NULL(q);
return q->cache; return q->cache;
} }
/*
 * Append @req to the tail of its assigned I/O queue and kick the queue.
 *
 * @param req OCF request; req->io_queue must already be set (BUG otherwise)
 * @param allow_sync if true, the queue kick is allowed to process the
 * request synchronously in the caller's context
 */
void ocf_queue_push_req_back(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
/* (Re)initialize the list node before linking it into the queue list */
INIT_LIST_HEAD(&req->list);
ENV_BUG_ON(!req->io_queue);
q = req->io_queue;
/* Record timestamp of last non-internal request on this cache */
if (!req->info.internal) {
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
}
/* Publish the request to the queue under the IRQ-safe list lock */
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add_tail(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
/* NOTE: do not dereference @req past this line, it might
 * be picked up by concurrent io thread and deallocated
 * at this point */
ocf_queue_kick(q, allow_sync);
}
/*
 * Prepend @req to the head of its assigned I/O queue and kick the queue.
 * Identical to ocf_queue_push_req_back() except for list_add() vs
 * list_add_tail(), i.e. front-of-queue vs back-of-queue placement.
 *
 * @param req OCF request; req->io_queue must already be set (BUG otherwise)
 * @param allow_sync if true, the queue kick is allowed to process the
 * request synchronously in the caller's context
 */
void ocf_queue_push_req_front(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
ENV_BUG_ON(!req->io_queue);
/* (Re)initialize the list node before linking it into the queue list */
INIT_LIST_HEAD(&req->list);
q = req->io_queue;
/* Record timestamp of last non-internal request on this cache */
if (!req->info.internal) {
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
}
/* Publish the request to the queue under the IRQ-safe list lock */
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
/* NOTE: do not dereference @req past this line, it might
 * be picked up by concurrent io thread and deallocated
 * at this point */
ocf_queue_kick(q, allow_sync);
}
/*
 * Install @req_cb as the request's engine handler, then push the request
 * to the front of its I/O queue.
 *
 * @param req OCF request
 * @param req_cb engine handler callback invoked when the request is run
 * @param allow_sync if true, the push may run the request synchronously
 * in the caller's context
 */
void ocf_queue_push_req_front_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync)
{
req->error = 0; /* NOTE(review): clears any stale error before re-queuing;
 * presumably so the new handler starts from a clean
 * state — TODO confirm this is intentional */
req->engine_handler = req_cb;
ocf_queue_push_req_front(req, allow_sync);
}
/*
 * Install @req_cb as the request's engine handler, then push the request
 * to the back of its I/O queue.
 *
 * @param req OCF request
 * @param req_cb engine handler callback invoked when the request is run
 * @param allow_sync if true, the push may run the request synchronously
 * in the caller's context
 */
void ocf_queue_push_req_back_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync)
{
req->error = 0; /* NOTE(review): clears any stale error before re-queuing;
 * presumably so the new handler starts from a clean
 * state — TODO confirm this is intentional */
req->engine_handler = req_cb;
ocf_queue_push_req_back(req, allow_sync);
}

View File

@ -1,5 +1,6 @@
/* /*
* Copyright(c) 2012-2021 Intel Corporation * Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -7,6 +8,7 @@
#define OCF_QUEUE_PRIV_H_ #define OCF_QUEUE_PRIV_H_
#include "ocf_env.h" #include "ocf_env.h"
#include "ocf_request.h"
struct ocf_queue { struct ocf_queue {
ocf_cache_t cache; ocf_cache_t cache;
@ -46,4 +48,48 @@ static inline void ocf_queue_kick(ocf_queue_t queue, bool allow_sync)
queue->ops->kick(queue); queue->ops->kick(queue);
} }
/**
* @brief Push back OCF request to the OCF thread worker queue
*
* @param req OCF request
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_queue_push_req_back(struct ocf_request *req,
bool allow_sync);
/**
* @brief Push front OCF request to the OCF thread worker queue
*
* @param req OCF request
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_queue_push_req_front(struct ocf_request *req,
bool allow_sync);
/**
* @brief Set engine handler and push the request to the front of the OCF
* thread worker queue
*
* @param req OCF request
* @param req_cb IO engine handler callback
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_queue_push_req_front_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync);
/**
* @brief Set engine handler and push the request to the back of the OCF
* thread worker queue
*
* @param req OCF request
* @param req_cb IO engine handler callback
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_queue_push_req_back_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync);
#endif #endif

View File

@ -217,7 +217,7 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req)
if (master->complete_queue) { if (master->complete_queue) {
ocf_req_get(master); ocf_req_get(master);
ocf_engine_push_req_front_cb(master, ocf_queue_push_req_front_cb(master,
_ocf_cleaner_complete, true); _ocf_cleaner_complete, true);
} else { } else {
/* Only master contains completion function and priv */ /* Only master contains completion function and priv */
@ -232,7 +232,7 @@ static void _ocf_cleaner_complete_req(struct ocf_request *req)
static void _ocf_cleaner_on_resume(struct ocf_request *req) static void _ocf_cleaner_on_resume(struct ocf_request *req)
{ {
OCF_DEBUG_TRACE(req->cache); OCF_DEBUG_TRACE(req->cache);
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
/* /*
@ -336,7 +336,7 @@ static void _ocf_cleaner_metadata_io_end(struct ocf_request *req, int error)
OCF_DEBUG_MSG(req->cache, "Metadata flush finished"); OCF_DEBUG_MSG(req->cache, "Metadata flush finished");
req->engine_handler = _ocf_cleaner_fire_flush_cache; req->engine_handler = _ocf_cleaner_fire_flush_cache;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static int _ocf_cleaner_update_metadata(struct ocf_request *req) static int _ocf_cleaner_update_metadata(struct ocf_request *req)
@ -415,7 +415,7 @@ static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
* All core writes done, switch to post cleaning activities * All core writes done, switch to post cleaning activities
*/ */
req->engine_handler = _ocf_cleaner_update_metadata; req->engine_handler = _ocf_cleaner_update_metadata;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error) static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
@ -487,7 +487,7 @@ static void _ocf_cleaner_core_io_end(struct ocf_request *req)
* Move processing to thread, where IO will be (and can be) submitted * Move processing to thread, where IO will be (and can be) submitted
*/ */
req->engine_handler = _ocf_cleaner_fire_flush_cores; req->engine_handler = _ocf_cleaner_fire_flush_cores;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
} }
static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error) static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
@ -645,7 +645,7 @@ static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
* Move processing to thread, where IO will be (and can be) submitted * Move processing to thread, where IO will be (and can be) submitted
*/ */
req->engine_handler = _ocf_cleaner_fire_core; req->engine_handler = _ocf_cleaner_fire_core;
ocf_engine_push_req_front(req, true); ocf_queue_push_req_front(req, true);
OCF_DEBUG_MSG(req->cache, "Cache reads finished"); OCF_DEBUG_MSG(req->cache, "Cache reads finished");
} }

View File

@ -1,6 +1,6 @@
/* /*
* Copyright(c) 2012-2022 Intel Corporation * Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2023 Huawei Technologies * Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -142,5 +142,5 @@ void ocf_parallelize_run(ocf_parallelize_t parallelize)
int i; int i;
for (i = 0; i < parallelize->shards_cnt; i++) for (i = 0; i < parallelize->shards_cnt; i++)
ocf_engine_push_req_front(parallelize->reqs[i], false); ocf_queue_push_req_front(parallelize->reqs[i], false);
} }

View File

@ -1,6 +1,6 @@
/* /*
* Copyright(c) 2019-2022 Intel Corporation * Copyright(c) 2019-2022 Intel Corporation
* Copyright(c) 2023 Huawei Technologies * Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -126,12 +126,12 @@ void *ocf_pipeline_get_priv(ocf_pipeline_t pipeline)
void ocf_pipeline_next(ocf_pipeline_t pipeline) void ocf_pipeline_next(ocf_pipeline_t pipeline)
{ {
ocf_engine_push_req_front(pipeline->req, false); ocf_queue_push_req_front(pipeline->req, false);
} }
void ocf_pipeline_finish(ocf_pipeline_t pipeline, int error) void ocf_pipeline_finish(ocf_pipeline_t pipeline, int error)
{ {
pipeline->finish = true; pipeline->finish = true;
pipeline->error = error; pipeline->error = error;
ocf_engine_push_req_front(pipeline->req, false); ocf_queue_push_req_front(pipeline->req, false);
} }