Use management queue for parallelized management operations
When IO queues are used for parallelized management operations (e.g. changing the cleaning policy), a deadlock may occur: the global metadata lock can interfere with taking a request from an IO queue, since both might run on the same thread. As a workaround, using a dedicated management queue for such operations eliminates this problem. Signed-off-by: Rafal Stefanowski <rafal.stefanowski@huawei.com>
This commit is contained in:
@@ -59,7 +59,8 @@ static int _ocf_parallelize_hndl(struct ocf_request *req)

 int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 		ocf_cache_t cache, unsigned shards_cnt, uint32_t priv_size,
 		ocf_parallelize_handle_t handle,
-		ocf_parallelize_finish_t finish)
+		ocf_parallelize_finish_t finish,
+		bool use_mngt_queue)
 {
 	ocf_parallelize_t tmp_parallelize;
 	struct list_head *iter;
@@ -97,7 +98,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize,

 	iter = cache->io_queues.next;
 	for (i = 0; i < shards_cnt; i++) {
-		if (queue_count > 0) {
+		if (queue_count > 0 && !use_mngt_queue) {
 			queue = list_entry(iter, struct ocf_queue, list);
 			iter = iter->next;
 			if (iter == &cache->io_queues)
@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -19,7 +20,8 @@ typedef void (*ocf_parallelize_finish_t)(ocf_parallelize_t parallelize,

 int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 		ocf_cache_t cache, unsigned shards_cnt, uint32_t priv_size,
 		ocf_parallelize_handle_t handle,
-		ocf_parallelize_finish_t finish);
+		ocf_parallelize_finish_t finish,
+		bool use_mngt_queue);

 void ocf_parallelize_destroy(ocf_parallelize_t parallelize);
|
Reference in New Issue
Block a user