Disable preemption when accessing current cpu id

Open CAS doesn't currently support kernels with involuntary preemption
anyway, and once that support is added we can get rid of this workaround.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
commit b1f61580fc
parent debbfcc0d1
Author: Michal Mielewczyk
Date: 2025-03-25 14:34:22 +01:00

3 changed files with 26 additions and 6 deletions
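
For reviewers unfamiliar with the idiom: with CONFIG_DEBUG_PREEMPT enabled, calling smp_processor_id() from preemptible context triggers a "BUG: using smp_processor_id() in preemptible code" warning, because the task can migrate to another CPU right after the id is read. get_cpu() disables preemption and returns the current CPU id (so a separate smp_processor_id() call between get_cpu() and put_cpu() is equivalent), and put_cpu() re-enables preemption. A minimal sketch of the pattern this commit applies, with per_cpu_resource as a hypothetical stand-in for cache_priv->io_queues:

#include <linux/smp.h>		/* get_cpu(), put_cpu(), smp_processor_id() */
#include <linux/threads.h>	/* NR_CPUS */

/* Hypothetical stand-in for cache_priv->io_queues. */
static void *per_cpu_resource[NR_CPUS];

static void *pick_resource_for_current_cpu(void)
{
	int cpu;
	void *res;

	cpu = get_cpu();	/* disables preemption, returns current CPU id */
	res = per_cpu_resource[cpu];
	put_cpu();		/* re-enables preemption */

	/*
	 * From here on the task may migrate, so 'res' may belong to a
	 * different CPU by the time it is used; the commit relies on the
	 * per-CPU split being a locality optimization, not a correctness
	 * requirement.
	 */
	return res;
}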


@@ -1,6 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -73,6 +73,7 @@ static int _cas_cleaner_thread(void *data)
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
 	struct cas_thread_info *info;
 	uint32_t ms;
+	ocf_queue_t queue;

 	BUG_ON(!c);
@@ -94,7 +95,10 @@ static int _cas_cleaner_thread(void *data)
 		atomic_set(&info->kicked, 0);
 		init_completion(&info->sync_compl);
-		ocf_cleaner_run(c, cache_priv->io_queues[smp_processor_id()]);
+		get_cpu();
+		queue = cache_priv->io_queues[smp_processor_id()];
+		put_cpu();
+		ocf_cleaner_run(c, queue);
 		wait_for_completion(&info->sync_compl);

 		/*
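
Note the shape of the cleaner-thread change above: only the io_queues[] lookup sits between get_cpu() and put_cpu(), and ocf_cleaner_run() is called afterwards. The ordering matters because sleeping is not allowed while preemption is disabled, and this thread immediately goes on to block in wait_for_completion(). A condensed sketch of that shape, where queues[] and run_work() are hypothetical stand-ins for cache_priv->io_queues and ocf_cleaner_run():

#include <linux/smp.h>

extern void *queues[];			/* stand-in for cache_priv->io_queues */
extern void run_work(void *queue);	/* stand-in for ocf_cleaner_run() */

static void kick_work_on_local_queue(void)
{
	void *queue;
	int cpu;

	cpu = get_cpu();	/* pin only for the per-CPU array lookup */
	queue = queues[cpu];
	put_cpu();

	/* Preemption is enabled again; blocking is allowed from here on. */
	run_work(queue);
}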


@@ -1,6 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -208,9 +208,13 @@ void *cas_rpool_try_get(struct cas_reserve_pool *rpool_master, int *cpu)
 	CAS_DEBUG_TRACE();

+	get_cpu();
 	*cpu = smp_processor_id();
 	current_rpool = &rpool_master->rpools[*cpu];
+	put_cpu();

 	spin_lock_irqsave(&current_rpool->lock, flags);
 	if (!list_empty(&current_rpool->list)) {
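
In cas_rpool_try_get() the CPU id is additionally handed back to the caller through *cpu, and the chosen pool is then protected by its own spinlock. Migration after put_cpu() is therefore harmless: the worst case is operating on another CPU's pool, which the per-pool lock keeps correct at the cost of locality. A condensed sketch of that structure, where struct per_cpu_pool and pools[] are hypothetical mirrors of the rpool types:

#include <linux/list.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/* Hypothetical mirror of a per-CPU reserve pool: a lock-protected list. */
struct per_cpu_pool {
	spinlock_t lock;
	struct list_head list;
};

extern struct per_cpu_pool pools[];

static struct list_head *pool_try_get(int *cpu)
{
	struct per_cpu_pool *pool;
	struct list_head *item = NULL;
	unsigned long flags;

	*cpu = get_cpu();	/* pin just long enough to pick a pool */
	pool = &pools[*cpu];
	put_cpu();

	/* Even if we migrated, the per-pool lock makes this correct. */
	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->list)) {
		item = pool->list.next;
		list_del(item);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return item;
}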


@@ -248,12 +248,16 @@ static int blkdev_handle_data_single(struct bd_object *bvol, struct bio *bio,
 {
 	ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
-	ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
+	ocf_queue_t queue;
 	ocf_io_t io;
 	struct blk_data *data;
 	uint64_t flags = CAS_BIO_OP_FLAGS(bio);
 	int ret;

+	get_cpu();
+	queue = cache_priv->io_queues[smp_processor_id()];
+	put_cpu();
+
 	data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
 	if (!data) {
 		CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
@@ -363,9 +367,13 @@ static void blkdev_handle_discard(struct bd_object *bvol, struct bio *bio)
 {
 	ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
-	ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
+	ocf_queue_t queue;
 	ocf_io_t io;

+	get_cpu();
+	queue = cache_priv->io_queues[smp_processor_id()];
+	put_cpu();
+
 	io = ocf_volume_new_io(bvol->front_volume, queue,
 			CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
 			CAS_BIO_BISIZE(bio), OCF_WRITE, 0, 0);
@@ -411,9 +419,13 @@ static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio)
 {
 	ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
 	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
-	ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
+	ocf_queue_t queue;
 	ocf_io_t io;

+	get_cpu();
+	queue = cache_priv->io_queues[smp_processor_id()];
+	put_cpu();
+
 	io = ocf_volume_new_io(bvol->front_volume, queue, 0, 0, OCF_WRITE, 0,
 			CAS_SET_FLUSH(0));
 	if (!io) {
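
The same three lines now appear at four call sites (the cleaner thread and the three blkdev_handle_* paths), so if the workaround stays for a while it could be hidden behind a small helper. The cas_get_current_queue() below is a hypothetical refactor, not part of this commit; it assumes the cas_cache headers that define struct cache_priv and ocf_queue_t:

#include <linux/smp.h>

/*
 * Hypothetical helper, not part of this commit: resolve the I/O queue
 * bound to whatever CPU the caller happens to run on, without tripping
 * the preemption check inside smp_processor_id().
 */
static inline ocf_queue_t cas_get_current_queue(struct cache_priv *cache_priv)
{
	ocf_queue_t queue;

	queue = cache_priv->io_queues[get_cpu()];
	put_cpu();

	return queue;
}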