Avoid adding mngt_queue to io_queues list

Previously, every created queue was added to the io_queues list, which
caused mngt_queue to be used in ocf_parallelize. Change the mngt_queue
creation API so that mngt_queue is not added to the list and does not
have unnecessary functionality initialized.

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
This commit is contained in:
Robert Baldyga 2023-05-17 19:30:36 +02:00 committed by Michal Mielewczyk
parent 1d903f4038
commit 8db93260ae
8 changed files with 101 additions and 61 deletions

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2019-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -129,23 +130,19 @@ int initialize_cache(ocf_ctx_t ctx, ocf_cache_t *cache)
/*
* Create management queue. It will be used for performing various
* asynchronous management operations, such as attaching cache volume
* or adding core object.
* or adding core object. This has to be done before any other
* management operation. Management queue is treated specially,
* and it may not be used for submitting IO requests. It also will not
* be put on the cache stop - we have to put it manually at the end.
*/
ret = ocf_queue_create(*cache, &cache_priv->mngt_queue, &queue_ops);
ret = ocf_queue_create_mngt(*cache, &cache_priv->mngt_queue,
&queue_ops);
if (ret) {
ocf_mngt_cache_stop(*cache, simple_complete, &context);
sem_wait(&context.sem);
goto err_priv;
}
/*
* Assign management queue to cache. This has to be done before any
* other management operation. Management queue is treated specially,
* and it may not be used for submitting IO requests. It also will not
* be put on the cache stop - we have to put it manually at the end.
*/
ocf_mngt_cache_set_mngt_queue(*cache, cache_priv->mngt_queue);
/* Create queue which will be used for IO submission. */
ret = ocf_queue_create(*cache, &cache_priv->io_queue, &queue_ops);
if (ret)

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -325,17 +326,6 @@ static inline void ocf_mngt_cache_config_set_default(
int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
struct ocf_mngt_cache_config *cfg, void *priv);
/**
* @brief Set queue to be used during management operations
*
* @param[in] cache Cache object
* @param[in] queue Queue object
*
* @retval 0 Success
* @retval Non-zero Error occurred
*/
int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue);
/**
* @brief Completion callback of cache stop operation
*

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -59,6 +60,18 @@ struct ocf_queue_ops {
int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
const struct ocf_queue_ops *ops);
/**
* @brief Allocate the management queue and assign it to the cache
*
* @param[in] cache Handle to cache instance
* @param[out] queue Handle to created queue
* @param[in] ops Queue operations
*
* @return Zero on success, otherwise error code
*/
int ocf_queue_create_mngt(ocf_cache_t cache, ocf_queue_t *queue,
const struct ocf_queue_ops *ops);
/**
* @brief Increase reference counter in queue
*

View File

@ -3039,20 +3039,6 @@ int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
return result;
}
/*
 * Assign a previously created queue as the cache's management queue.
 * (This function is removed by this commit in favor of
 * ocf_queue_create_mngt(), which creates and assigns in one step.)
 * Fails if a management queue is already set.
 */
int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue)
{
OCF_CHECK_NULL(cache);
OCF_CHECK_NULL(queue);
/* Only one management queue may be assigned per cache */
if (cache->mngt_queue)
return -OCF_ERR_INVAL;
/* The cache keeps its own reference to the queue */
ocf_queue_get(queue);
cache->mngt_queue = queue;
return 0;
}
static void _ocf_mngt_cache_attach_complete(ocf_cache_t cache, void *priv1,
void *priv2, int error)
{

View File

@ -14,6 +14,35 @@
#include "engine/cache_engine.h"
#include "ocf_def_priv.h"
/*
 * Allocate a queue object and initialize the state common to IO and
 * management queues: the high/low priority IO lists, their spinlock,
 * the IO counter, and the reference count (starting at 1 for the
 * creator).
 *
 * @param[in] cache Cache instance the queue belongs to
 * @param[out] queue Set to the newly allocated queue on success
 * @param[in] ops Queue operations (callbacks) for the new queue
 *
 * @return Zero on success, -OCF_ERR_NO_MEM on allocation failure, or
 *         the error code returned by env_spinlock_init()
 */
int _ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
const struct ocf_queue_ops *ops)
{
ocf_queue_t tmp_queue;
int result;
tmp_queue = env_zalloc(sizeof(*tmp_queue), ENV_MEM_NORMAL);
if (!tmp_queue) {
return -OCF_ERR_NO_MEM;
}
env_atomic_set(&tmp_queue->io_no, 0);
result = env_spinlock_init(&tmp_queue->io_list_lock);
if (result) {
/* Spinlock init failed - release the allocation and propagate */
env_free(tmp_queue);
return result;
}
INIT_LIST_HEAD(&tmp_queue->io_list_high);
INIT_LIST_HEAD(&tmp_queue->io_list_low);
/* Creator holds the initial reference; dropped via ocf_queue_put() */
env_atomic_set(&tmp_queue->ref_count, 1);
tmp_queue->cache = cache;
tmp_queue->ops = ops;
*queue = tmp_queue;
return 0;
}
int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
const struct ocf_queue_ops *ops)
{
@ -26,26 +55,12 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
if (result)
return result;
tmp_queue = env_zalloc(sizeof(*tmp_queue), ENV_MEM_NORMAL);
if (!tmp_queue) {
ocf_mngt_cache_put(cache);
return -OCF_ERR_NO_MEM;
}
env_atomic_set(&tmp_queue->io_no, 0);
result = env_spinlock_init(&tmp_queue->io_list_lock);
result = _ocf_queue_create(cache, &tmp_queue, ops);
if (result) {
ocf_mngt_cache_put(cache);
env_free(tmp_queue);
return result;
}
INIT_LIST_HEAD(&tmp_queue->io_list_high);
INIT_LIST_HEAD(&tmp_queue->io_list_low);
env_atomic_set(&tmp_queue->ref_count, 1);
tmp_queue->cache = cache;
tmp_queue->ops = ops;
result = ocf_queue_seq_cutoff_init(tmp_queue);
if (result) {
ocf_mngt_cache_put(cache);
@ -60,6 +75,34 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
return 0;
}
/*
 * Create the cache's management queue and assign it to the cache in a
 * single step. Only one management queue may exist per cache; returns
 * -OCF_ERR_INVAL if one is already assigned. The queue pins the cache
 * via ocf_mngt_cache_get() for its lifetime. Unlike ocf_queue_create(),
 * no sequential cutoff state is initialized here, since the management
 * queue is never used for submitting IO requests.
 */
int ocf_queue_create_mngt(ocf_cache_t cache, ocf_queue_t *queue,
const struct ocf_queue_ops *ops)
{
ocf_queue_t tmp_queue;
int result;
OCF_CHECK_NULL(cache);
if (cache->mngt_queue)
return -OCF_ERR_INVAL;
/* Take a cache reference on behalf of the new queue */
result = ocf_mngt_cache_get(cache);
if (result)
return result;
result = _ocf_queue_create(cache, &tmp_queue, ops);
if (result) {
/* Creation failed - drop the cache reference taken above */
ocf_mngt_cache_put(cache);
return result;
}
cache->mngt_queue = tmp_queue;
*queue = tmp_queue;
return 0;
}
void ocf_queue_get(ocf_queue_t queue)
{
OCF_CHECK_NULL(queue);
@ -72,9 +115,11 @@ void ocf_queue_put(ocf_queue_t queue)
OCF_CHECK_NULL(queue);
if (env_atomic_dec_return(&queue->ref_count) == 0) {
list_del(&queue->list);
queue->ops->stop(queue);
if (queue != queue->cache->mngt_queue) {
list_del(&queue->list);
ocf_queue_seq_cutoff_deinit(queue);
}
ocf_mngt_cache_put(queue->cache);
env_spinlock_destroy(&queue->io_list_lock);
env_free(queue);

View File

@ -1,8 +1,9 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
from ctypes import c_void_p, cdll
from ctypes import c_int, c_void_p, cdll
import inspect
import os
@ -22,6 +23,9 @@ class OcfLib:
lib.ocf_core_get_front_volume.restype = c_void_p
lib.ocf_core_get_front_volume.argtypes = [c_void_p]
lib.ocf_queue_create_mngt.restype = c_int
lib.ocf_queue_create_mngt.argtypes = [c_void_p, c_void_p, c_void_p]
cls.__lib__ = lib
return cls.__lib__

View File

@ -277,10 +277,9 @@ class Cache:
raise OcfError("Creating cache instance failed", status)
if init_mngmt_queue:
self.mngt_queue = Queue(self, "mgmt-{}".format(self.get_name()))
status = self.owner.lib.ocf_mngt_cache_set_mngt_queue(self, self.mngt_queue)
if status:
raise OcfError("Error setting management queue", status)
self.mngt_queue = Queue(
self, "mgmt-{}".format(self.get_name()), mngt=True
)
if init_default_io_queue:
self.io_queues = [Queue(self, "default-io-{}".format(self.get_name()))]

View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
@ -42,12 +43,17 @@ def io_queue_run(*, queue: Queue, kick: Condition, stop: Event, sem: Semaphore):
class Queue:
_instances_ = weakref.WeakValueDictionary()
def __init__(self, cache, name):
def __init__(self, cache, name, mngt=False):
self.ops = QueueOps(kick=type(self)._kick, stop=type(self)._stop)
self.name = name
self.handle = c_void_p()
if mngt:
status = OcfLib.getInstance().ocf_queue_create_mngt(
cache.cache_handle, byref(self.handle), byref(self.ops)
)
else:
status = OcfLib.getInstance().ocf_queue_create(
cache.cache_handle, byref(self.handle), byref(self.ops)
)