Set NOMERGES flag for cache volume

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
Signed-off-by: Rafal Stefanowski <rafal.stefanowski@huawei.com>
Michal Mielewczyk 2023-10-09 12:43:39 +02:00 committed by Rafal Stefanowski
parent 20276bd9c1
commit 33721e7c92
3 changed files with 42 additions and 0 deletions
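
In short: the commit makes the cache device's request queue refuse request merging whenever that queue advertises a virtual boundary mask (as NVMe queues typically do). A minimal sketch of that condition follows; it mirrors the helper added for the first kernel-API case below and is not taken verbatim from the patch. queue_flag_set() is the older API that case targets; kernels that removed it provide blk_queue_flag_set() with the same arguments.

#include <linux/blkdev.h>

/*
 * Sketch only: if the queue has a virt boundary constraint (non-zero
 * queue_virt_boundary()), stop the block layer from merging requests
 * on it by setting QUEUE_FLAG_NOMERGES.
 */
static inline void set_no_merges_if_virt_boundary(struct request_queue *q)
{
	if (queue_virt_boundary(q))
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
}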


@@ -1,6 +1,7 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -45,6 +46,11 @@ apply() {
	exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
	exp_q->limits.max_segments = core_q->limits.max_segments;
	exp_q->limits.max_write_same_sectors = 0;
}"
	add_function "
static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
{
	if (queue_virt_boundary(cache_q))
		queue_flag_set(QUEUE_FLAG_NOMERGES, cache_q);
}" ;;
@@ -59,6 +65,11 @@ apply() {
	exp_q->limits.max_segments = core_q->limits.max_segments;
	exp_q->limits.max_write_same_sectors = 0;
	exp_q->limits.max_write_zeroes_sectors = 0;
}"
	add_function "
static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
{
}" ;;
	"3")
	add_function "
@@ -70,6 +81,11 @@ apply() {
	exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
	exp_q->limits.max_segments = core_q->limits.max_segments;
	exp_q->limits.max_write_zeroes_sectors = 0;
}"
	add_function "
static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
{
}" ;;
	"4")
	add_function "
@@ -81,6 +97,11 @@ apply() {
	exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
	exp_q->limits.max_segments = core_q->limits.max_segments;
	exp_q->limits.max_write_same_sectors = 0;
}"
	add_function "
static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
{
}" ;;


@@ -2202,6 +2202,24 @@ out_bdev:
	return result;
}

static void volume_set_no_merges_flag_helper(ocf_cache_t cache)
{
	struct request_queue *cache_q;
	struct bd_object *bvol;
	struct block_device *bd;
	ocf_volume_t volume;

	volume = ocf_cache_get_volume(cache);
	if (!volume)
		return;

	bvol = bd_object(volume);
	bd = cas_disk_get_blkdev(bvol->dsk);
	cache_q = bd->bd_disk->queue;

	cas_cache_set_no_merges_flag(cache_q);
}

static int _cache_start_finalize(ocf_cache_t cache, int init_mode,
		bool activate)
{
@@ -2219,6 +2237,8 @@ static int _cache_start_finalize(ocf_cache_t cache, int init_mode,
			return result;
		}
		ctx->cls_inited = true;

		volume_set_no_merges_flag_helper(cache);
	}

	if (activate)


@@ -470,6 +470,7 @@ static int blkdev_cache_set_geometry(struct cas_disk *dsk, void *private)
	set_capacity(cas_exp_obj_get_gendisk(dsk), sectors);
	cas_copy_queue_limits(exp_q, cache_q, cache_q);
	cas_cache_set_no_merges_flag(cache_q);
	blk_stack_limits(&exp_q->limits, &cache_q->limits, 0);
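
For completeness, whether the flag took effect can be read back with blk_queue_nomerges(); the helper below is illustrative only and is not part of this commit. The same state is also visible at runtime through the queue's nomerges sysfs attribute.

#include <linux/blkdev.h>

/*
 * Illustrative check, not part of the patch: true once QUEUE_FLAG_NOMERGES
 * has been set on the given queue, e.g. after cas_cache_set_no_merges_flag()
 * ran on the cache device's queue.
 */
static bool cache_queue_rejects_merges(struct request_queue *q)
{
	return blk_queue_nomerges(q);
}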