Allocate requests for management path separately

The management path does not benefit much from mpools, as the number of
requests it allocates is very small. It is also less restrictive (the
mngt_queue does not have single-CPU affinity), so taking the management
path off the mpool allows additional restrictions to be put on the mpool,
leading to improved I/O performance.
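
For illustration, a minimal sketch of how the two allocation paths are
meant to be used after this change. The caller function below is
hypothetical; the types and the ocf_req_new_mngt()/ocf_req_new()/
ocf_req_put() calls are the ones this patch introduces or keeps:

	#include "ocf_request.h"

	/* Sketch: allocate one management request and one I/O request. */
	static int example_alloc(ocf_cache_t cache, ocf_queue_t io_queue,
			ocf_core_t core, uint64_t addr, uint32_t bytes)
	{
		struct ocf_request *mngt_req, *io_req;

		/* Management path: plain env_zalloc(), no mpool, no map. */
		mngt_req = ocf_req_new_mngt(cache->mngt_queue);
		if (!mngt_req)
			return -OCF_ERR_NO_MEM;

		/* I/O path: mpool-backed; ocf_req_new() now BUGs on the
		 * mngt_queue. */
		io_req = ocf_req_new(io_queue, core, addr, bytes, OCF_READ);
		if (!io_req) {
			ocf_req_put(mngt_req);
			return -OCF_ERR_NO_MEM;
		}

		ocf_req_put(io_req);
		ocf_req_put(mngt_req);
		return 0;
	}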

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
commit 460cd461d3
parent 6cd5a27ea9
Author: Robert Baldyga
Date:   2023-09-25 15:07:30 +02:00
Committed by: Michal Mielewczyk

 9 files changed, 138 insertions(+), 30 deletions(-)


@@ -493,7 +493,7 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
 		goto err_zpage;
 	}
 
-	context->req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+	context->req = ocf_req_new_mngt(cache->mngt_queue);
 	if (!context->req) {
 		result = -OCF_ERR_NO_MEM;
 		goto err_req;


@@ -432,7 +432,7 @@ static void _ocf_mngt_flush_container(
 	fc->end = end;
 	fc->context = context;
 
-	req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+	req = ocf_req_new_mngt(cache->mngt_queue);
 	if (!req) {
 		error = OCF_ERR_NO_MEM;
 		goto finish;


@@ -154,7 +154,7 @@ static uint64_t _calc_dirty_for(uint64_t dirty_since)
 	return dirty_since ? (current_time - dirty_since) : 0;
 }
 
-static inline struct ocf_request *ocf_io_to_req(struct ocf_io *io)
+struct ocf_request *ocf_io_to_req(struct ocf_io *io)
 {
 	struct ocf_io_internal *ioi;


@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
@@ -105,4 +106,6 @@ ocf_core_id_t ocf_core_get_id(ocf_core_t core);
 
 int ocf_core_volume_type_init(ocf_ctx_t ctx);
 
+struct ocf_request *ocf_io_to_req(struct ocf_io *io);
+
 #endif /* __OCF_CORE_PRIV_H__ */


@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
@@ -7,6 +8,7 @@
 #include "ocf_request.h"
 #include "ocf_cache_priv.h"
 #include "concurrency/ocf_metadata_concurrency.h"
+#include "engine/engine_common.h"
 #include "utils/utils_cache_line.h"
 
 #define OCF_UTILS_RQ_DEBUG 0
@@ -34,9 +36,8 @@ enum ocf_req_size {
 	ocf_req_size_128,
 };
 
-static inline size_t ocf_req_sizeof_map(struct ocf_request *req)
+static inline size_t ocf_req_sizeof_map(uint32_t lines)
 {
-	uint32_t lines = req->core_line_count;
 	size_t size = (lines * sizeof(struct ocf_map_info));
 
 	ENV_BUG_ON(lines == 0);
@@ -80,6 +81,88 @@ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx)
 	ocf_ctx->resources.req = NULL;
 }
 
+static inline void ocf_req_init(struct ocf_request *req, ocf_queue_t queue,
+		ocf_core_t core, uint64_t addr, uint32_t bytes, int rw)
+{
+	req->io_queue = queue;
+	req->core = core;
+	req->cache = queue->cache;
+
+	env_atomic_set(&req->ref_count, 1);
+
+	req->byte_position = addr;
+	req->byte_length = bytes;
+	req->rw = rw;
+}
+
+struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue)
+{
+	struct ocf_request *req;
+
+	req = env_zalloc(sizeof(*req), ENV_MEM_NORMAL);
+	if (unlikely(!req))
+		return NULL;
+
+	ocf_queue_get(queue);
+
+	ocf_req_init(req, queue, NULL, 0, 0, 0);
+
+	req->is_mngt = true;
+
+	return req;
+}
+
+struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count)
+{
+	ocf_cache_t cache = queue->cache;
+	struct ocf_request *req;
+	bool map_allocated = true, is_mngt = false;
+
+	if (!ocf_refcnt_inc(&cache->refcnt.metadata))
+		return NULL;
+
+	if (unlikely(queue == cache->mngt_queue)) {
+		req = env_zalloc(sizeof(*req) + ocf_req_sizeof_map(count) +
+				ocf_req_sizeof_alock_status(count),
+				ENV_MEM_NORMAL);
+		is_mngt = true;
+	} else {
+		req = env_mpool_new(cache->owner->resources.req, count);
+		if (!req) {
+			map_allocated = false;
+			req = env_mpool_new(cache->owner->resources.req, 1);
+		}
+	}
+
+	if (!req) {
+		ocf_refcnt_dec(&cache->refcnt.metadata);
+		return NULL;
+	}
+	req->is_mngt = is_mngt;
+
+	ocf_queue_get(queue);
+
+	ocf_req_init(req, queue, NULL, 0, 0, OCF_READ);
+
+	if (map_allocated) {
+		req->map = req->__map;
+		req->alock_status = (uint8_t*)&req->__map[count];
+		req->alloc_core_line_count = count;
+	} else {
+		req->alloc_core_line_count = 1;
+	}
+
+	req->core_line_count = count;
+	req->lock_idx = ocf_metadata_concurrency_next_idx(queue);
+	req->cleaner = true;
+
+	if (ocf_req_alloc_map(req)) {
+		ocf_req_put(req);
+		req = NULL;
+	}
+
+	return req;
+}
+
 struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
 		uint64_t addr, uint32_t bytes, int rw)
 {
@@ -88,6 +171,8 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
 	struct ocf_request *req;
 	bool map_allocated = true;
 
+	ENV_BUG_ON(queue == cache->mngt_queue);
+
 	if (likely(bytes)) {
 		core_line_first = ocf_bytes_2_lines(cache, addr);
 		core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
@@ -115,32 +200,24 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
 		req->alloc_core_line_count = 1;
 	}
 
 	OCF_DEBUG_TRACE(cache);
 
 	ocf_queue_get(queue);
 
-	req->io_queue = queue;
-	req->core = core;
-	req->cache = cache;
+	ocf_req_init(req, queue, core, addr, bytes, rw);
 
-	req->d2c = (queue != cache->mngt_queue) && !ocf_refcnt_inc(
-			&cache->refcnt.metadata);
-
-	env_atomic_set(&req->ref_count, 1);
-	req->byte_position = addr;
-	req->byte_length = bytes;
+	req->d2c = !ocf_refcnt_inc(&cache->refcnt.metadata);
 
 	req->core_line_first = core_line_first;
 	req->core_line_last = core_line_last;
 	req->core_line_count = core_line_count;
 
-	req->rw = rw;
-	req->part_id = PARTITION_DEFAULT;
-
 	req->discard.sector = BYTES_TO_SECTORS(addr);
 	req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
 	req->discard.handled = 0;
 
+	req->part_id = PARTITION_DEFAULT;
+
 	req->lock_idx = ocf_metadata_concurrency_next_idx(queue);
 
 	return req;
@@ -148,10 +225,12 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
 
 int ocf_req_alloc_map(struct ocf_request *req)
 {
+	uint32_t lines = req->core_line_count;
+
 	if (req->map)
 		return 0;
 
-	req->map = env_zalloc(ocf_req_sizeof_map(req) +
+	req->map = env_zalloc(ocf_req_sizeof_map(lines) +
 			ocf_req_sizeof_alock_status(req->core_line_count),
 			ENV_MEM_NOIO);
 	if (!req->map) {
@@ -159,7 +238,7 @@ int ocf_req_alloc_map(struct ocf_request *req)
 		return -OCF_ERR_NO_MEM;
 	}
 
-	req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(req)];
+	req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(lines)];
 
 	return 0;
 }
@@ -229,14 +308,17 @@ void ocf_req_put(struct ocf_request *req)
 
 	OCF_DEBUG_TRACE(req->cache);
 
-	if (!req->d2c && req->io_queue != req->cache->mngt_queue)
+	if ((!req->d2c && !req->is_mngt) || req->cleaner)
 		ocf_refcnt_dec(&req->cache->refcnt.metadata);
 
-	if (req->map != req->__map)
-		env_free(req->map);
-
-	env_mpool_del(req->cache->owner->resources.req, req,
-			req->alloc_core_line_count);
+	if (unlikely(req->is_mngt)) {
+		env_free(req);
+	} else {
+		if (req->map != req->__map)
+			env_free(req->map);
+		env_mpool_del(req->cache->owner->resources.req, req,
+				req->alloc_core_line_count);
+	}
 
 	ocf_queue_put(queue);
 }


@@ -198,6 +198,9 @@ struct ocf_request {
 	uint8_t d2c : 1;
 	/**!< request affects metadata cachelines (is not direct-to-core) */
 
+	uint8_t cleaner : 1;
+	/**!< request allocated by cleaner */
+
 	uint8_t dirty : 1;
 	/**!< indicates that request produces dirty data */
 
@@ -228,6 +231,9 @@ struct ocf_request {
 	uint8_t is_deferred : 1;
 	/* !< request handling was deferred and eventually resumed */
 
+	uint8_t is_mngt : 1;
+	/* !< It's a management path request */
+
 	ocf_req_cache_mode_t cache_mode;
 
 	uint64_t timestamp;
@@ -275,6 +281,25 @@ int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx);
  */
 void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx);
 
+/**
+ * @brief Allocate new OCF request for the management path
+ *
+ * @param queue - I/O queue handle
+ *
+ * @return new OCF request
+ */
+struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue);
+
+/**
+ * @brief Allocate new OCF request for cleaner
+ *
+ * @param queue - I/O queue handle
+ * @param count - Number of map entries
+ *
+ * @return new OCF request
+ */
+struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count);
+
 /**
  * @brief Allocate new OCF request
  *


@@ -40,8 +40,7 @@
 static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
 		uint32_t count, const struct ocf_cleaner_attribs *attribs)
 {
-	struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL,
-			0, count * ocf_line_size(cache), OCF_READ);
+	struct ocf_request *req = ocf_req_new_cleaner(attribs->io_queue, count);
 	int ret;
 
 	if (!req)


@@ -105,8 +105,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 	} else {
 		queue = cache->mngt_queue;
 	}
-	tmp_parallelize->reqs[i] = ocf_req_new(queue,
-			NULL, 0, 0, 0);
+	tmp_parallelize->reqs[i] = ocf_req_new_mngt(queue);
 	if (!tmp_parallelize->reqs[i]) {
 		result = -OCF_ERR_NO_MEM;
 		goto err_reqs;


@@ -87,7 +87,7 @@ int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache,
 		tmp_pipeline->priv = (void *)priv;
 	}
 
-	req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+	req = ocf_req_new_mngt(cache->mngt_queue);
 	if (!req) {
 		env_vfree(tmp_pipeline);
 		return -OCF_ERR_NO_MEM;