Merge pull request #1549 from robertbaldyga/kernel-6.11

Support kernel 6.13
Authored by Robert Baldyga on 2025-02-28 16:26:19 +01:00; committed by GitHub.
13 changed files with 344 additions and 90 deletions

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -29,8 +29,8 @@
trace_printk(format, ##__VA_ARGS__)
#else
- #define CAS_CLS_DEBUG_MSG(format, ...)
- #define CAS_CLS_DEBUG_TRACE(format, ...)
+ #define CAS_CLS_DEBUG_MSG(format, ...) ({})
+ #define CAS_CLS_DEBUG_TRACE(format, ...) ({})
#endif
/* Done condition test - always accepts and stops evaluation */
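The empty fallback bodies become GNU statement expressions. One plausible motivation (an assumption, not stated in the commit): with an empty expansion, a call used as the sole body of an if collapses to a bare semicolon, which newer compilers flag, while ({}) still leaves a well-formed no-op. A minimal userspace sketch, not taken from the CAS sources:

/* Minimal sketch in plain GNU C: empty macro body vs. ({}) body. */
#define DEBUG_MSG_EMPTY(format, ...)        /* expands to nothing */
#define DEBUG_MSG_STMT(format, ...) ({})    /* expands to a no-op statement expression */

int main(void)
{
	int verbose = 0;

	if (verbose)
		DEBUG_MSG_EMPTY("rule matched\n");  /* becomes "if (verbose);" - gcc -Wextra flags it (-Wempty-body) */

	if (verbose)
		DEBUG_MSG_STMT("rule matched\n");   /* becomes "if (verbose) ({});" - still a valid, warning-free statement */

	return 0;
}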

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <linux/module.h>
@@ -417,6 +417,7 @@ int cas_exp_obj_create(struct cas_disk *dsk, const char *dev_name,
struct cas_exp_obj *exp_obj;
struct request_queue *queue;
struct gendisk *gd;
+ cas_queue_limits_t queue_limits;
int result = 0;
BUG_ON(!owner);
@@ -465,7 +466,15 @@ int cas_exp_obj_create(struct cas_disk *dsk, const char *dev_name,
goto error_init_tag_set;
}
- result = cas_alloc_mq_disk(&gd, &queue, &exp_obj->tag_set);
+ if (exp_obj->ops->set_queue_limits) {
+ result = exp_obj->ops->set_queue_limits(dsk, priv,
+ &queue_limits);
+ if (result)
+ goto error_set_queue_limits;
+ }
+ result = cas_alloc_mq_disk(&gd, &queue, &exp_obj->tag_set,
+ &queue_limits);
if (result) {
goto error_alloc_mq_disk;
}
@@ -521,6 +530,7 @@ error_exp_obj_set_dev_t:
cas_cleanup_mq_disk(gd);
exp_obj->gd = NULL;
error_alloc_mq_disk:
+ error_set_queue_limits:
blk_mq_free_tag_set(&exp_obj->tag_set);
error_init_tag_set:
module_put(owner);
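The restructuring above follows a block-layer API change: since roughly Linux 6.9, blk_mq_alloc_disk() takes a struct queue_limits pointer, so the exported object's limits have to be known before the gendisk is allocated, hence the new set_queue_limits callback consulted ahead of cas_alloc_mq_disk(). A hedged sketch of how the cas_alloc_mq_disk()/cas_queue_limits_t compatibility shim could look; the names come from the diff, while the bodies and the version check are assumptions (Open CAS normally derives this from configure-time tests rather than kernel version numbers):

#include <linux/version.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 9, 0)
/* Limits are a first-class argument of blk_mq_alloc_disk(). */
typedef struct queue_limits cas_queue_limits_t;

static inline int cas_alloc_mq_disk(struct gendisk **gd,
		struct request_queue **queue,
		struct blk_mq_tag_set *tag_set, cas_queue_limits_t *lim)
{
	*gd = blk_mq_alloc_disk(tag_set, lim, NULL);
	if (IS_ERR(*gd))
		return PTR_ERR(*gd);
	*queue = (*gd)->queue;
	return 0;
}
#else
/* Older kernels: allocate first, tune the queue afterwards, so the
 * "limits" can be reduced to whatever the callbacks need to record. */
typedef struct { unsigned int features; } cas_queue_limits_t;

static inline int cas_alloc_mq_disk(struct gendisk **gd,
		struct request_queue **queue,
		struct blk_mq_tag_set *tag_set, cas_queue_limits_t *lim)
{
	/* lim is applied later on this path */
	*gd = blk_mq_alloc_disk(tag_set, NULL);
	if (IS_ERR(*gd))
		return PTR_ERR(*gd);
	*queue = (*gd)->queue;
	return 0;
}
#endif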

View File

@@ -1,11 +1,12 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __CASDISK_EXP_OBJ_H__
#define __CASDISK_EXP_OBJ_H__
#include "linux_kernel_version.h"
#include <linux/fs.h>
struct cas_disk;
@@ -17,6 +18,12 @@ struct cas_exp_obj_ops {
*/
int (*set_geometry)(struct cas_disk *dsk, void *private);
+ /**
+ * @brief Set queue limits of exported object (top) block device.
+ */
+ int (*set_queue_limits)(struct cas_disk *dsk, void *private,
+ cas_queue_limits_t *lim);
/**
* @brief submit_bio of exported object (top) block device.
*

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -2405,7 +2405,8 @@ static int cache_mngt_check_bdev(struct ocf_mngt_cache_device_config *cfg,
printk(KERN_WARNING "New cache device block properties "
"differ from the previous one.\n");
}
- if (tmp_limits.misaligned) {
+ if (cas_queue_limits_is_misaligned(&tmp_limits)) {
reattach_properties_diff = true;
printk(KERN_WARNING "New cache device block interval "
"doesn't line up with the previous one.\n");
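The direct tmp_limits.misaligned access stops compiling on recent kernels, where the misaligned bitfield of struct queue_limits was folded into the limits' flags word as BLK_FLAG_MISALIGNED. A plausible shape for the cas_queue_limits_is_misaligned() wrapper, assuming a version check (the 6.11 boundary is approximate) instead of the configure-time detection the project actually uses:

#include <linux/version.h>
#include <linux/blkdev.h>

static inline bool cas_queue_limits_is_misaligned(struct queue_limits *lim)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 11, 0)
	/* Misalignment is reported as a stacking flag. */
	return lim->flags & BLK_FLAG_MISALIGNED;
#else
	/* Older kernels expose it as a dedicated bitfield. */
	return lim->misaligned;
#endif
}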

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies
+ * Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -40,7 +40,6 @@
#include <linux/mm.h>
#include <linux/blk-mq.h>
#include <linux/ktime.h>
#include "exp_obj.h"
#include "generated_defines.h"

View File

@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
- * Copyright(c) 2024 Huawei Technologies Co., Ltd.
+ * Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -63,13 +63,14 @@ static void blkdev_set_discard_properties(ocf_cache_t cache,
CAS_SET_DISCARD_ZEROES_DATA(exp_q->limits, 0);
if (core_q && cas_has_discard_support(core_bd)) {
- blk_queue_max_discard_sectors(exp_q, core_q->limits.max_discard_sectors);
+ cas_queue_max_discard_sectors(exp_q,
+ core_q->limits.max_discard_sectors);
exp_q->limits.discard_alignment =
bdev_discard_alignment(core_bd);
exp_q->limits.discard_granularity =
core_q->limits.discard_granularity;
} else {
- blk_queue_max_discard_sectors(exp_q,
+ cas_queue_max_discard_sectors(exp_q,
min((uint64_t)core_sectors, (uint64_t)UINT_MAX));
exp_q->limits.discard_granularity = ocf_cache_get_line_size(cache);
exp_q->limits.discard_alignment = 0;
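blk_queue_max_discard_sectors() is another setter that recent kernels dropped in favour of filling struct queue_limits directly, hence the cas_queue_max_discard_sectors() wrapper. One way the legacy-path wrapper could be written (the name is from the diff, the body is an assumption about what the old helper used to do):

static inline void cas_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	/* Cap both the hardware and the effective discard limit,
	 * mirroring the former blk_queue_max_discard_sectors(). */
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}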
@@ -129,7 +130,37 @@ static int blkdev_core_set_geometry(struct cas_disk *dsk, void *private)
blkdev_set_discard_properties(cache, exp_q, core_bd, sectors);
- exp_q->queue_flags |= (1 << QUEUE_FLAG_NONROT);
+ cas_queue_set_nonrot(exp_q);
return 0;
}
+ static int blkdev_core_set_queue_limits(struct cas_disk *dsk, void *private,
+ cas_queue_limits_t *lim)
+ {
+ ocf_core_t core = private;
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ ocf_volume_t core_vol = ocf_core_get_volume(core);
+ struct bd_object *bd_core_vol;
+ struct request_queue *core_q;
+ bool flush, fua;
+ struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
+ bd_core_vol = bd_object(core_vol);
+ core_q = cas_disk_get_queue(bd_core_vol->dsk);
+ flush = (CAS_CHECK_QUEUE_FLUSH(core_q) ||
+ cache_priv->device_properties.flush);
+ fua = (CAS_CHECK_QUEUE_FUA(core_q) ||
+ cache_priv->device_properties.fua);
+ memset(lim, 0, sizeof(cas_queue_limits_t));
+ if (flush)
+ CAS_SET_QUEUE_LIMIT(lim, CAS_BLK_FEAT_WRITE_CACHE);
+ if (fua)
+ CAS_SET_QUEUE_LIMIT(lim, CAS_BLK_FEAT_FUA);
+ return 0;
+ }
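Note that the new callback never touches a live request queue; it only records, in the limits structure later handed to cas_alloc_mq_disk(), whether the exported device should advertise a volatile write cache and FUA. On kernels where these became queue features (roughly 6.11 onward) the compat macros used here could reduce to the sketch below; the macro names are from the diff, the expansions are assumptions, and older kernels would instead keep calling blk_queue_write_cache() after allocation:

#include <linux/version.h>
#include <linux/blkdev.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 11, 0)
#define CAS_BLK_FEAT_WRITE_CACHE	BLK_FEAT_WRITE_CACHE
#define CAS_BLK_FEAT_FUA		BLK_FEAT_FUA

/* Does the bottom queue advertise a volatile write cache / FUA? */
#define CAS_CHECK_QUEUE_FLUSH(q)	((q)->limits.features & BLK_FEAT_WRITE_CACHE)
#define CAS_CHECK_QUEUE_FUA(q)		((q)->limits.features & BLK_FEAT_FUA)

/* Record a feature bit in the limits passed to disk allocation. */
#define CAS_SET_QUEUE_LIMIT(lim, flag)	((lim)->features |= (flag))
#endif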
@@ -428,6 +459,7 @@ static void blkdev_core_submit_bio(struct cas_disk *dsk,
static struct cas_exp_obj_ops kcas_core_exp_obj_ops = {
.set_geometry = blkdev_core_set_geometry,
+ .set_queue_limits = blkdev_core_set_queue_limits,
.submit_bio = blkdev_core_submit_bio,
};
@@ -470,6 +502,37 @@ static int blkdev_cache_set_geometry(struct cas_disk *dsk, void *private)
return 0;
}
+ static int blkdev_cache_set_queue_limits(struct cas_disk *dsk, void *private,
+ cas_queue_limits_t *lim)
+ {
+ ocf_cache_t cache;
+ ocf_volume_t volume;
+ struct bd_object *bvol;
+ struct request_queue *cache_q;
+ struct block_device *bd;
+ BUG_ON(!private);
+ cache = private;
+ volume = ocf_cache_get_volume(cache);
+ bvol = bd_object(volume);
+ bd = cas_disk_get_blkdev(bvol->dsk);
+ BUG_ON(!bd);
+ cache_q = bd->bd_disk->queue;
+ memset(lim, 0, sizeof(cas_queue_limits_t));
+ if (CAS_CHECK_QUEUE_FLUSH(cache_q))
+ CAS_SET_QUEUE_LIMIT(lim, CAS_BLK_FEAT_WRITE_CACHE);
+ if (CAS_CHECK_QUEUE_FUA(cache_q))
+ CAS_SET_QUEUE_LIMIT(lim, CAS_BLK_FEAT_FUA);
+ return 0;
+ }
static void blkdev_cache_submit_bio(struct cas_disk *dsk,
struct bio *bio, void *private)
{
@@ -485,6 +548,7 @@ static void blkdev_cache_submit_bio(struct cas_disk *dsk,
static struct cas_exp_obj_ops kcas_cache_exp_obj_ops = {
.set_geometry = blkdev_cache_set_geometry,
+ .set_queue_limits = blkdev_cache_set_queue_limits,
.submit_bio = blkdev_cache_submit_bio,
};