diff --git a/modules/cas_cache/threads.c b/modules/cas_cache/threads.c
index 6f3b7d9..7951f9b 100644
--- a/modules/cas_cache/threads.c
+++ b/modules/cas_cache/threads.c
@@ -1,6 +1,6 @@
 /*
 * Copyright(c) 2012-2022 Intel Corporation
-* Copyright(c) 2024 Huawei Technologies
+* Copyright(c) 2024-2025 Huawei Technologies
 * SPDX-License-Identifier: BSD-3-Clause
 */
 
@@ -73,6 +73,7 @@ static int _cas_cleaner_thread(void *data)
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
 	struct cas_thread_info *info;
 	uint32_t ms;
+	ocf_queue_t queue;
 
 	BUG_ON(!c);
 
@@ -94,7 +95,10 @@ static int _cas_cleaner_thread(void *data)
 		atomic_set(&info->kicked, 0);
 		init_completion(&info->sync_compl);
 
-		ocf_cleaner_run(c, cache_priv->io_queues[smp_processor_id()]);
+		get_cpu();
+		queue = cache_priv->io_queues[smp_processor_id()];
+		put_cpu();
+		ocf_cleaner_run(c, queue);
 		wait_for_completion(&info->sync_compl);
 
 		/*
diff --git a/modules/cas_cache/utils/utils_rpool.c b/modules/cas_cache/utils/utils_rpool.c
index a7e058f..d957048 100644
--- a/modules/cas_cache/utils/utils_rpool.c
+++ b/modules/cas_cache/utils/utils_rpool.c
@@ -1,6 +1,6 @@
 /*
 * Copyright(c) 2012-2022 Intel Corporation
-* Copyright(c) 2024 Huawei Technologies
+* Copyright(c) 2024-2025 Huawei Technologies
 * SPDX-License-Identifier: BSD-3-Clause
 */
 
@@ -208,9 +208,13 @@ void *cas_rpool_try_get(struct cas_reserve_pool *rpool_master, int *cpu)
 
 	CAS_DEBUG_TRACE();
 
+	get_cpu();
+
 	*cpu = smp_processor_id();
 	current_rpool = &rpool_master->rpools[*cpu];
 
+	put_cpu();
+
 	spin_lock_irqsave(&current_rpool->lock, flags);
 
 	if (!list_empty(&current_rpool->list)) {
diff --git a/modules/cas_cache/volume/vol_block_dev_top.c b/modules/cas_cache/volume/vol_block_dev_top.c
index 5be0f3c..d4d84b3 100644
--- a/modules/cas_cache/volume/vol_block_dev_top.c
+++ b/modules/cas_cache/volume/vol_block_dev_top.c
@@ -248,12 +248,16 @@ static int blkdev_handle_data_single(struct bd_object *bvol, struct bio *bio,
 {
 	ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
-	ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
+	ocf_queue_t queue;
 	ocf_io_t io;
 	struct blk_data *data;
 	uint64_t flags = CAS_BIO_OP_FLAGS(bio);
 	int ret;
 
+	get_cpu();
+	queue = cache_priv->io_queues[smp_processor_id()];
+	put_cpu();
+
 	data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
 	if (!data) {
 		CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
@@ -363,9 +367,13 @@ static void blkdev_handle_discard(struct bd_object *bvol, struct bio *bio)
 {
 	ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
-	ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
+	ocf_queue_t queue;
 	ocf_io_t io;
 
+	get_cpu();
+	queue = cache_priv->io_queues[smp_processor_id()];
+	put_cpu();
+
 	io = ocf_volume_new_io(bvol->front_volume, queue,
 			CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
 			CAS_BIO_BISIZE(bio), OCF_WRITE, 0, 0);
@@ -411,9 +419,13 @@ static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio)
 {
 	ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
-	ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
+	ocf_queue_t queue;
 	ocf_io_t io;
 
+	get_cpu();
+	queue = cache_priv->io_queues[smp_processor_id()];
+	put_cpu();
+
 	io = ocf_volume_new_io(bvol->front_volume, queue, 0, 0, OCF_WRITE, 0,
 			CAS_SET_FLUSH(0));
 	if (!io) {
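
For reference, below is a minimal sketch of the pattern the patch applies at each call site (it is not part of the diff, and the wrapper function name is hypothetical). get_cpu() disables preemption, so the CPU number returned by smp_processor_id() cannot change while the per-CPU queue pointer is read, which avoids "using smp_processor_id() in preemptible code" warnings under CONFIG_DEBUG_PREEMPT. Only the array lookup needs preemption disabled; the queue pointer stays usable after put_cpu(), so the subsequent work runs with preemption enabled again.

/* Hypothetical helper, not part of the patch; get_cpu(), put_cpu(),
 * smp_processor_id() and cache_priv->io_queues are taken from the diff above. */
static ocf_queue_t cas_get_current_cpu_queue(struct cache_priv *cache_priv)
{
	ocf_queue_t queue;

	get_cpu();	/* disable preemption: the CPU id is stable from here */
	queue = cache_priv->io_queues[smp_processor_id()];
	put_cpu();	/* re-enable preemption: only the pointer is kept */

	return queue;
}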