Use management queue for parallelized management operations

When IO queues are used for parallelized management operations,
e.g. changing the cleaning policy, a deadlock may occur because the
global metadata lock can interfere with taking a request from an IO
queue when both happen to run on the same thread. As a workaround,
using a dedicated management queue for such operations eliminates
this problem.

Signed-off-by: Rafal Stefanowski <rafal.stefanowski@huawei.com>
This commit is contained in:
Rafal Stefanowski 2024-09-23 12:13:33 +02:00
parent d8994e886e
commit 97ee3af8f7
7 changed files with 15 additions and 9 deletions

View File

@@ -435,7 +435,8 @@ void cleaning_policy_acp_populate(ocf_cache_t cache,
 	result = ocf_parallelize_create(&parallelize, cache,
 			OCF_ACP_POPULATE_SHARDS_CNT, sizeof(*context),
-			ocf_acp_populate_handle, ocf_acp_populate_finish);
+			ocf_acp_populate_handle, ocf_acp_populate_finish,
+			true);
 	if (result) {
 		cmpl(priv, result);
 		return;

View File

@@ -547,7 +547,8 @@ void cleaning_policy_alru_populate(ocf_cache_t cache,
 	result = ocf_parallelize_create(&parallelize, cache,
 			OCF_ALRU_POPULATE_SHARDS_CNT, sizeof(*context),
-			ocf_alru_populate_handle, ocf_alru_populate_finish);
+			ocf_alru_populate_handle, ocf_alru_populate_finish,
+			true);
 	if (result) {
 		cmpl(priv, result);
 		return;

View File

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -882,7 +883,7 @@ void ocf_metadata_init_collision(ocf_pipeline_t pipeline, void *priv,
 	result = ocf_parallelize_create(&parallelize, cache,
 			ocf_cache_get_queue_count(cache), sizeof(*context),
 			ocf_metadata_init_collision_handle,
-			ocf_metadata_init_finish);
+			ocf_metadata_init_finish, false);
 	if (result)
 		OCF_PL_FINISH_RET(pipeline, result);
@@ -934,7 +935,7 @@ void ocf_metadata_init_hash_table(ocf_pipeline_t pipeline, void *priv,
 	result = ocf_parallelize_create(&parallelize, cache,
 			ocf_cache_get_queue_count(cache), sizeof(*context),
 			ocf_metadata_init_hash_table_handle,
-			ocf_metadata_init_finish);
+			ocf_metadata_init_finish, false);
 	if (result)
 		OCF_PL_FINISH_RET(pipeline, result);

View File

@@ -672,7 +672,7 @@ static void ocf_mngt_rebuild_metadata(ocf_cache_t cache,
 	result = ocf_parallelize_create(&parallelize, cache,
 			OCF_MNGT_REBUILD_METADATA_SHARDS_CNT,
 			sizeof(*context), ocf_mngt_rebuild_metadata_handle,
-			ocf_mngt_rebuild_metadata_finish);
+			ocf_mngt_rebuild_metadata_finish, false);
 	if (result) {
 		cmpl(priv, result);
 		return;

View File

@@ -930,7 +930,7 @@ void ocf_lru_populate(ocf_cache_t cache,
 	result = ocf_parallelize_create(&parallelize, cache, OCF_NUM_LRU_LISTS,
 			sizeof(*context), ocf_lru_populate_handle,
-			ocf_lru_populate_finish);
+			ocf_lru_populate_finish, false);
 	if (result) {
 		cmpl(priv, result);
 		return;

View File

@@ -59,7 +59,8 @@ static int _ocf_parallelize_hndl(struct ocf_request *req)
 int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 		ocf_cache_t cache, unsigned shards_cnt, uint32_t priv_size,
 		ocf_parallelize_handle_t handle,
-		ocf_parallelize_finish_t finish)
+		ocf_parallelize_finish_t finish,
+		bool use_mngt_queue)
 {
 	ocf_parallelize_t tmp_parallelize;
 	struct list_head *iter;
@@ -97,7 +98,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 	iter = cache->io_queues.next;
 	for (i = 0; i < shards_cnt; i++) {
-		if (queue_count > 0) {
+		if (queue_count > 0 && !use_mngt_queue) {
 			queue = list_entry(iter, struct ocf_queue, list);
 			iter = iter->next;
 			if (iter == &cache->io_queues)

View File

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -19,7 +20,8 @@ typedef void (*ocf_parallelize_finish_t)(ocf_parallelize_t parallelize,
 int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 		ocf_cache_t cache, unsigned shards_cnt, uint32_t priv_size,
 		ocf_parallelize_handle_t handle,
-		ocf_parallelize_finish_t finish);
+		ocf_parallelize_finish_t finish,
+		bool use_mngt_queue);
 void ocf_parallelize_destroy(ocf_parallelize_t parallelize);