Move and rename ocf_engine_push_req_* from engine_common to ocf_queue_push_req_* in ocf_queue

Signed-off-by: Ian Levine <ian.levine@huawei.com>
Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
This commit is contained in:
Ian Levine
2023-10-12 15:00:06 +03:00
committed by Robert Baldyga
parent de32a9649a
commit 038126e9ab
24 changed files with 179 additions and 146 deletions

View File

@@ -264,7 +264,7 @@ int ocf_engine_hndl_req(struct ocf_request *req)
* into OCF workers
*/
ocf_engine_push_req_back(req, true);
ocf_queue_push_req_back(req, true);
return 0;
}
@@ -313,7 +313,7 @@ void ocf_engine_hndl_ops_req(struct ocf_request *req)
ocf_io_if_type_to_engine_cb(OCF_IO_D2C_IF, req->rw) :
ocf_io_if_type_to_engine_cb(OCF_IO_OPS_IF, req->rw);
ocf_engine_push_req_back(req, true);
ocf_queue_push_req_back(req, true);
}
bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -94,5 +95,5 @@ static int _ocf_backfill_do(struct ocf_request *req)
void ocf_engine_backfill(struct ocf_request *req)
{
backfill_queue_inc_block(req->cache);
ocf_engine_push_req_front_cb(req, _ocf_backfill_do, true);
ocf_queue_push_req_front_cb(req, _ocf_backfill_do, true);
}

View File

@@ -381,7 +381,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
} else {
req->info.dirty_any = 0;
req->info.dirty_all = 0;
ocf_engine_push_req_front(req, true);
ocf_queue_push_req_front(req, true);
}
}
@@ -584,75 +584,6 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
req->info.hit_no, req->core_line_count);
}
void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
INIT_LIST_HEAD(&req->list);
ENV_BUG_ON(!req->io_queue);
q = req->io_queue;
if (!req->info.internal) {
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
}
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add_tail(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
/* NOTE: do not dereference @req past this line, it might
* be picked up by concurrent io thread and deallocated
* at this point */
ocf_queue_kick(q, allow_sync);
}
void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
ENV_BUG_ON(!req->io_queue);
INIT_LIST_HEAD(&req->list);
q = req->io_queue;
if (!req->info.internal) {
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
}
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add(&req->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
/* NOTE: do not dereference @req past this line, it might
* be picked up by concurrent io thread and deallocated
* at this point */
ocf_queue_kick(q, allow_sync);
}
void ocf_engine_push_req_front_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync)
{
req->error = 0; /* Please explain why!!! */
req->engine_handler = req_cb;
ocf_engine_push_req_front(req, allow_sync);
}
void inc_fallback_pt_error_counter(ocf_cache_t cache)
{
ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
@@ -712,5 +643,5 @@ void ocf_engine_on_resume(struct ocf_request *req)
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front_cb(req, _ocf_engine_refresh, false);
ocf_queue_push_req_front_cb(req, _ocf_engine_refresh, false);
}

View File

@@ -281,38 +281,6 @@ void ocf_engine_update_block_stats(struct ocf_request *req);
*/
void ocf_engine_update_request_stats(struct ocf_request *req);
/**
* @brief Push back OCF request to the OCF thread worker queue
*
* @param req OCF request
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_engine_push_req_back(struct ocf_request *req,
bool allow_sync);
/**
* @brief Push front OCF request to the OCF thread worker queue
*
* @param req OCF request
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_engine_push_req_front(struct ocf_request *req,
bool allow_sync);
/**
* @brief Set handler callback and push front request to the OCF thread worker queue
*
* @param req OCF request
* @param req_cb IO engine handler callback
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_engine_push_req_front_cb(struct ocf_request *req,
ocf_req_cb req_cb,
bool allow_sync);
void inc_fallback_pt_error_counter(ocf_cache_t cache);
void ocf_engine_on_resume(struct ocf_request *req);

View File

@@ -74,7 +74,7 @@ static void _ocf_discard_cache_flush_complete(struct ocf_io *io, int error)
}
req->engine_handler = _ocf_discard_core;
ocf_engine_push_req_front(req, true);
ocf_queue_push_req_front(req, true);
ocf_io_put(io);
}
@@ -111,7 +111,7 @@ static void _ocf_discard_finish_step(struct ocf_request *req)
else
req->engine_handler = _ocf_discard_core;
ocf_engine_push_req_front(req, true);
ocf_queue_push_req_front(req, true);
}
static void _ocf_discard_step_complete(struct ocf_request *req, int error)
@@ -182,7 +182,7 @@ static int _ocf_discard_step_do(struct ocf_request *req)
static void _ocf_discard_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true);
ocf_queue_push_req_front(req, true);
}
static int _ocf_discard_step(struct ocf_request *req)

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -45,7 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
if (req->error) {
OCF_DEBUG_RQ(req, "ERROR");
ocf_engine_push_req_front_pt(req);
ocf_queue_push_req_front_pt(req);
} else {
ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -62,5 +63,5 @@ static int _ocf_invalidate_do(struct ocf_request *req)
void ocf_engine_invalidate(struct ocf_request *req)
{
ocf_engine_push_req_front_cb(req, _ocf_invalidate_do, true);
ocf_queue_push_req_front_cb(req, _ocf_invalidate_do, true);
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2023 Huawei Technologies
* Copyright(c) 2023-2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "ocf/ocf.h"
@@ -165,8 +165,8 @@ int ocf_read_pt(struct ocf_request *req)
return 0;
}
void ocf_engine_push_req_front_pt(struct ocf_request *req)
void ocf_queue_push_req_front_pt(struct ocf_request *req)
{
ocf_engine_push_req_front_cb(req, ocf_read_pt_do, true);
ocf_queue_push_req_front_cb(req, ocf_read_pt_do, true);
}

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +11,6 @@ int ocf_read_pt(struct ocf_request *req);
int ocf_read_pt_do(struct ocf_request *req);
void ocf_engine_push_req_front_pt(struct ocf_request *req);
void ocf_queue_push_req_front_pt(struct ocf_request *req);
#endif /* ENGINE_OFF_H_ */

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -41,7 +42,7 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
OCF_DEBUG_RQ(req, "HIT completion");
if (req->error) {
ocf_engine_push_req_front_pt(req);
ocf_queue_push_req_front_pt(req);
} else {
ocf_req_unlock(c, req);

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -102,7 +103,7 @@ static void _ocf_write_wb_complete(struct ocf_request *req, int error)
ocf_engine_invalidate(req);
} else {
ocf_engine_push_req_front_cb(req,
ocf_queue_push_req_front_cb(req,
ocf_write_wb_do_flush_metadata, true);
}
}

View File

@@ -56,7 +56,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
if (!req->error && !req->wi_second_pass && ocf_engine_is_miss(req)) {
/* need another pass */
ocf_engine_push_req_front_cb(req, _ocf_write_wi_next_pass,
ocf_queue_push_req_front_cb(req, _ocf_write_wi_next_pass,
true);
return;
}
@@ -123,7 +123,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
ocf_req_put(req);
} else {
ocf_engine_push_req_front_cb(req,
ocf_queue_push_req_front_cb(req,
ocf_write_wi_update_and_flush_metadata, true);
}
}
@@ -155,7 +155,7 @@ static int _ocf_write_wi_core_write(struct ocf_request *req)
static void _ocf_write_wi_on_resume(struct ocf_request *req)
{
OCF_DEBUG_RQ(req, "On resume");
ocf_engine_push_req_front(req, true);
ocf_queue_push_req_front(req, true);
}
int ocf_write_wi(struct ocf_request *req)

View File

@@ -171,7 +171,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
}
req->engine_handler = ocf_read_wo_cache_do;
ocf_engine_push_req_front(req, true);
ocf_queue_push_req_front(req, true);
}
static int ocf_read_wo_do(struct ocf_request *req)

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -112,7 +113,7 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
if (req->info.dirty_any) {
/* Some of the request's cachelines changed its state to clean */
ocf_engine_push_req_front_cb(req,
ocf_queue_push_req_front_cb(req,
ocf_write_wt_do_flush_metadata, true);
} else {
ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -50,7 +51,7 @@ static void _ocf_zero_io_flush_metadata(struct ocf_request *req, int error)
if (env_atomic_dec_return(&req->req_remaining))
return;
ocf_engine_push_req_front_cb(req, ocf_zero_purge, true);
ocf_queue_push_req_front_cb(req, ocf_zero_purge, true);
}
static inline void ocf_zero_map_info(struct ocf_request *req)
@@ -148,7 +149,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
ocf_engine_push_req_front_cb(req, _ocf_zero_do, true);
ocf_queue_push_req_front_cb(req, _ocf_zero_do, true);
} else {
OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
req->complete(req, lock);