Eliminate queue -> cache mapping

Eliminate the need to resolve the cache based on the queue. This allows
sharing a queue between cache instances. The queue still holds a pointer
to the cache that owns it, but no management or I/O path relies on the
queue -> cache mapping.

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
Robert Baldyga authored 2024-02-19 22:25:57 +01:00; committed by Michal Mielewczyk
parent 460cd461d3, commit 8b93b699c3
11 changed files with 98 additions and 32 deletions
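
The shape of the change is easiest to see in the request-constructor calls
below: allocators that previously derived the cache from the queue now take
the cache handle explicitly. For example, from the hunks in this diff:

    /* before: the constructor resolved the cache via queue->cache */
    req = ocf_req_new_mngt(cache->mngt_queue);

    /* after: the cache is passed explicitly; the queue no longer has to
     * identify its owning cache on the allocation path */
    req = ocf_req_new_mngt(cache, cache->mngt_queue);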


@@ -153,12 +153,12 @@ void *ocf_queue_get_priv(ocf_queue_t q);
 uint32_t ocf_queue_pending_io(ocf_queue_t q);
 
 /**
- * @brief Get cache instance to which I/O queue belongs
+ * @brief Return if queue is management queue
  *
- * @param[in] q I/O queue
+ * @param[in] queue - queue object
  *
- * @retval Cache instance
+ * @retval true - if management queue, otherwise false
  */
-ocf_cache_t ocf_queue_get_cache(ocf_queue_t q);
+bool ocf_queue_is_mngt(ocf_queue_t queue);
 
 #endif
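
A hypothetical caller sketch (not part of this diff) for the new predicate,
assuming an I/O-path check that must reject the management queue:

    /* hypothetical: refuse to issue regular I/O on the management queue */
    if (ocf_queue_is_mngt(queue))
            return -OCF_ERR_INVAL;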


@@ -156,7 +156,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv,
        if (!context)
                return -OCF_ERR_NO_MEM;
 
-       context->req = ocf_req_new(queue, NULL, 0, 0, 0);
+       context->req = ocf_req_new_mngt(cache, queue);
        if (!context->req) {
                env_vfree(context);
                return -OCF_ERR_NO_MEM;


@@ -493,7 +493,7 @@ void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
                goto err_zpage;
        }
 
-       context->req = ocf_req_new_mngt(cache->mngt_queue);
+       context->req = ocf_req_new_mngt(cache, cache->mngt_queue);
        if (!context->req) {
                result = -OCF_ERR_NO_MEM;
                goto err_req;


@@ -432,7 +432,7 @@ static void _ocf_mngt_flush_container(
        fc->end = end;
        fc->context = context;
 
-       req = ocf_req_new_mngt(cache->mngt_queue);
+       req = ocf_req_new_mngt(cache, cache->mngt_queue);
        if (!req) {
                error = OCF_ERR_NO_MEM;
                goto finish;


@@ -489,9 +489,10 @@ static void *ocf_core_io_allocator_new(ocf_io_allocator_t allocator,
                ocf_volume_t volume, ocf_queue_t queue,
                uint64_t addr, uint32_t bytes, uint32_t dir)
 {
+       ocf_core_t core = ocf_volume_to_core(volume);
        struct ocf_request *req;
 
-       req = ocf_req_new(queue, NULL, addr, bytes, dir);
+       req = ocf_req_new(queue, core, addr, bytes, dir);
        if (!req)
                return NULL;
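
This pairs with the ocf_req_new() change further down in this diff: with a
valid core passed in, the constructor can resolve the cache from the core
instead of from the queue:

    /* from ocf_req_new() below: cache derived from the core object */
    ocf_cache_t cache = ocf_core_get_cache(core);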


@@ -126,6 +126,11 @@ int ocf_queue_create_mngt(ocf_cache_t cache, ocf_queue_t *queue,
        return 0;
 }
 
+bool ocf_queue_is_mngt(ocf_queue_t queue)
+{
+       return queue == queue->cache->mngt_queue;
+}
+
 void ocf_queue_get(ocf_queue_t queue)
 {
        OCF_CHECK_NULL(queue);
@@ -144,7 +149,7 @@ void ocf_queue_put(ocf_queue_t queue)
                return;
 
        queue->ops->stop(queue);
-       if (queue != queue->cache->mngt_queue) {
+       if (!ocf_queue_is_mngt(queue)) {
                env_spinlock_lock_irqsave(&cache->io_queues_lock, flags);
                list_del(&queue->list);
                env_spinlock_unlock_irqrestore(&cache->io_queues_lock, flags);
@@ -247,12 +252,6 @@ uint32_t ocf_queue_pending_io(ocf_queue_t q)
        return env_atomic_read(&q->io_no);
 }
 
-ocf_cache_t ocf_queue_get_cache(ocf_queue_t q)
-{
-       OCF_CHECK_NULL(q);
-       return q->cache;
-}
-
 void ocf_queue_push_req(struct ocf_request *req, uint flags)
 {
        ocf_cache_t cache = req->cache;
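
With ocf_queue_get_cache() removed, remaining code is expected to carry the
cache handle explicitly; on the request path the owning cache is already
recorded in the request at allocation time. A hypothetical before/after
sketch:

    /* before (removed API): resolve the cache from the queue */
    ocf_cache_t cache = ocf_queue_get_cache(req->io_queue);

    /* after: use the cache pointer stored in the request itself */
    ocf_cache_t cache = req->cache;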


@@ -81,13 +81,14 @@ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx)
        ocf_ctx->resources.req = NULL;
 }
 
-static inline void ocf_req_init(struct ocf_request *req, ocf_queue_t queue,
-               ocf_core_t core, uint64_t addr, uint32_t bytes, int rw)
+static inline void ocf_req_init(struct ocf_request *req, ocf_cache_t cache,
+               ocf_queue_t queue, ocf_core_t core,
+               uint64_t addr, uint32_t bytes, int rw)
 {
        req->io_queue = queue;
        req->core = core;
-       req->cache = queue->cache;
+       req->cache = cache;
 
        env_atomic_set(&req->ref_count, 1);
@@ -96,7 +97,7 @@ static inline void ocf_req_init(struct ocf_request *req, ocf_queue_t queue,
        req->rw = rw;
 }
 
-struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue)
+struct ocf_request *ocf_req_new_mngt(ocf_cache_t cache, ocf_queue_t queue)
 {
        struct ocf_request *req;
@@ -106,23 +107,23 @@ struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue)
        ocf_queue_get(queue);
 
-       ocf_req_init(req, queue, NULL, 0, 0, 0);
+       ocf_req_init(req, cache, queue, NULL, 0, 0, 0);
        req->is_mngt = true;
 
        return req;
 }
 
-struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count)
+struct ocf_request *ocf_req_new_cleaner(ocf_cache_t cache, ocf_queue_t queue,
+               uint32_t count)
 {
-       ocf_cache_t cache = queue->cache;
        struct ocf_request *req;
        bool map_allocated = true, is_mngt = false;
 
        if (!ocf_refcnt_inc(&cache->refcnt.metadata))
                return NULL;
 
-       if (unlikely(queue == cache->mngt_queue)) {
+       if (unlikely(ocf_queue_is_mngt(queue))) {
                req = env_zalloc(sizeof(*req) + ocf_req_sizeof_map(count) +
                                ocf_req_sizeof_alock_status(count),
                                ENV_MEM_NORMAL);
@@ -143,7 +144,7 @@ struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count)
        ocf_queue_get(queue);
 
-       ocf_req_init(req, queue, NULL, 0, 0, OCF_READ);
+       ocf_req_init(req, cache, queue, NULL, 0, 0, OCF_READ);
 
        if (map_allocated) {
                req->map = req->__map;
@@ -167,11 +168,11 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
                uint64_t addr, uint32_t bytes, int rw)
 {
        uint64_t core_line_first, core_line_last, core_line_count;
-       ocf_cache_t cache = queue->cache;
+       ocf_cache_t cache = ocf_core_get_cache(core);
        struct ocf_request *req;
        bool map_allocated = true;
 
-       ENV_BUG_ON(queue == cache->mngt_queue);
+       ENV_BUG_ON(ocf_queue_is_mngt(queue));
 
        if (likely(bytes)) {
                core_line_first = ocf_bytes_2_lines(cache, addr);
@@ -204,7 +205,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
        ocf_queue_get(queue);
 
-       ocf_req_init(req, queue, core, addr, bytes, rw);
+       ocf_req_init(req, cache, queue, core, addr, bytes, rw);
 
        req->d2c = !ocf_refcnt_inc(&cache->refcnt.metadata);
@@ -223,6 +224,55 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
        return req;
 }
 
+struct ocf_request *ocf_req_new_cache(ocf_cache_t cache, ocf_queue_t queue,
+               uint64_t addr, uint32_t bytes, int rw)
+{
+       uint64_t core_line_first, core_line_last, core_line_count;
+       struct ocf_request *req;
+       bool map_allocated = true;
+
+       ENV_BUG_ON(ocf_queue_is_mngt(queue));
+
+       if (!ocf_refcnt_inc(&cache->refcnt.metadata))
+               return NULL;
+
+       ocf_queue_get(queue);
+
+       if (likely(bytes)) {
+               core_line_first = ocf_bytes_2_lines(cache, addr);
+               core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
+               core_line_count = core_line_last - core_line_first + 1;
+       } else {
+               core_line_count = 1;
+       }
+
+       req = env_mpool_new(cache->owner->resources.req, core_line_count);
+       if (!req) {
+               map_allocated = false;
+               req = env_mpool_new(cache->owner->resources.req, 1);
+       }
+
+       if (unlikely(!req)) {
+               ocf_refcnt_dec(&cache->refcnt.metadata);
+               ocf_queue_put(queue);
+               return NULL;
+       }
+
+       if (map_allocated) {
+               req->map = req->__map;
+               req->alock_status = (uint8_t *)&req->__map[core_line_count];
+               req->alloc_core_line_count = core_line_count;
+       } else {
+               req->alloc_core_line_count = 1;
+       }
+
+       ocf_req_init(req, cache, queue, NULL, addr, bytes, rw);
+
+       req->lock_idx = ocf_metadata_concurrency_next_idx(queue);
+
+       return req;
+}
+
 int ocf_req_alloc_map(struct ocf_request *req)
 {
        uint32_t lines = req->core_line_count;
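
A minimal usage sketch for the new constructor (hypothetical caller; addr
and bytes are placeholders), assuming cache-internal I/O that is not
attributed to any core, so req->core stays NULL:

    struct ocf_request *req;

    req = ocf_req_new_cache(cache, queue, addr, bytes, OCF_WRITE);
    if (!req)
            return -OCF_ERR_NO_MEM;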


@@ -288,7 +288,7 @@ void ocf_req_allocator_deinit(struct ocf_ctx *ocf_ctx);
  *
  * @return new OCF request
  */
-struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue);
+struct ocf_request *ocf_req_new_mngt(ocf_cache_t cache, ocf_queue_t queue);
 
 /**
  * @brief Allocate new OCF request for cleaner
@@ -298,7 +298,8 @@ struct ocf_request *ocf_req_new_mngt(ocf_queue_t queue);
  *
  * @return new OCF request
  */
-struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count);
+struct ocf_request *ocf_req_new_cleaner(ocf_cache_t cache, ocf_queue_t queue,
+               uint32_t count);
 
 /**
  * @brief Allocate new OCF request
@@ -314,6 +315,20 @@ struct ocf_request *ocf_req_new_cleaner(ocf_queue_t queue, uint32_t count);
 struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
                uint64_t addr, uint32_t bytes, int rw);
 
+/**
+ * @brief Allocate new OCF request for cache IO
+ *
+ * @param cache - OCF cache instance
+ * @param queue - I/O queue handle
+ * @param addr - LBA of request
+ * @param bytes - number of bytes of request
+ * @param rw - Read or Write
+ *
+ * @return new OCF request
+ */
+struct ocf_request *ocf_req_new_cache(ocf_cache_t cache, ocf_queue_t queue,
+               uint64_t addr, uint32_t bytes, int rw);
+
 /**
  * @brief Allocate OCF request map
  *


@ -40,9 +40,10 @@
static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
uint32_t count, const struct ocf_cleaner_attribs *attribs)
{
struct ocf_request *req = ocf_req_new_cleaner(attribs->io_queue, count);
struct ocf_request *req;
int ret;
req = ocf_req_new_cleaner(cache, attribs->io_queue, count);
if (!req)
return NULL;


@@ -105,7 +105,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize,
                } else {
                        queue = cache->mngt_queue;
                }
-               tmp_parallelize->reqs[i] = ocf_req_new_mngt(queue);
+               tmp_parallelize->reqs[i] = ocf_req_new_mngt(cache, queue);
                if (!tmp_parallelize->reqs[i]) {
                        result = -OCF_ERR_NO_MEM;
                        goto err_reqs;


@@ -87,7 +87,7 @@ int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache,
                tmp_pipeline->priv = (void *)priv;
        }
 
-       req = ocf_req_new_mngt(cache->mngt_queue);
+       req = ocf_req_new_mngt(cache, cache->mngt_queue);
        if (!req) {
                env_vfree(tmp_pipeline);
                return -OCF_ERR_NO_MEM;