Compare commits

12 Commits

Katarzyna Treder
f73a209371
Merge pull request #1644 from katlapinka/kasiat/fuzzy-start-device-fix
Make test_fuzzy_start_cache_device use only required disks
2025-04-14 08:12:07 +02:00
Katarzyna Treder
56ded4c7fd Make test_fuzzy_start_cache_device use only required disks
Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
2025-04-14 08:11:29 +02:00
Katarzyna Treder
3a5df70abe
Merge pull request #1643 from katlapinka/kasiat/di-unplug-fix
Fix data integrity unplug test to work with fio newer than 3.30
2025-04-14 08:10:57 +02:00
Katarzyna Treder
289355b83a Fix data integrity unplug test to work with fio newer than 3.30
Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
2025-04-14 08:10:18 +02:00
Robert Baldyga
99af7ee9b5
Merge pull request #1642 from robertbaldyga/xfs-ioclass-fix
Fix io classification for XFS
2025-04-10 09:02:18 +02:00
Katarzyna Treder
b239bdb624
Merge pull request #1594 from katlapinka/kasiat/promotion-tests
Add tests for promotion policy
2025-04-09 13:12:01 +02:00
Katarzyna Treder
e189584557 Add tests for promotion policy
Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
2025-04-09 13:11:37 +02:00
Robert Baldyga
3c19caae1e
Merge pull request #1646 from mmichal10/configure-preempt
configure: add preemption_model_*() functions
2025-04-09 11:20:05 +02:00
Michal Mielewczyk
f46de38db0 configure: add preemption_model_*() functions
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
2025-04-09 10:49:31 +02:00
Robert Baldyga
73cd065bfb
Merge pull request #1645 from jfckm/fix-linguist
fix: github-linguist still detects test directory
2025-04-08 13:59:45 +02:00
Jan Musial
46a486a442 fix: github-linguist still detects test directory
Signed-off-by: Jan Musial <jan.musial@huawei.com>
2025-04-08 13:14:36 +02:00
Robert Baldyga
ceb208eb78 Fix io classification for XFS
Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
2025-04-04 19:46:12 +02:00
8 changed files with 390 additions and 18 deletions

.gitattributes

@@ -1 +1 @@
-test/* linguist-detectable=false
+test/** -linguist-detectable
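
The fix changes two things at once: "test/**" matches the test tree recursively (in gitattributes, "test/*" only matches direct children of test/), and "-linguist-detectable" unsets the attribute rather than assigning it a value. A quick way to verify the attribute takes effect, sketched in Python and assuming a checkout of this repository (the sample path below is hypothetical):

import subprocess

# Ask git which linguist attribute applies to a nested test file.
out = subprocess.run(
    ["git", "check-attr", "linguist-detectable", "--", "test/functional/foo.py"],
    capture_output=True, text=True, check=True,
).stdout

# With "test/** -linguist-detectable" this prints "... linguist-detectable: unset";
# with the old "test/*" pattern, nested files came back "unspecified", so
# github-linguist kept counting them.
print(out)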

[new file: configure check script for page_folio() / cas_page_mapping()]

@@ -0,0 +1,45 @@
#!/bin/bash
#
# Copyright(c) 2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

. $(dirname $3)/conf_framework.sh

check() {
    cur_name=$(basename $2)
    config_file_path=$1

    if compile_module $cur_name "page_folio((struct page *)NULL);" "linux/page-flags.h"
    then
        echo $cur_name "1" >> $config_file_path
    else
        echo $cur_name "2" >> $config_file_path
    fi
}

apply() {
    case "$1" in
    "1")
        add_function "
    static inline struct address_space *cas_page_mapping(struct page *page)
    {
        struct folio *folio = page_folio(page);

        return folio->mapping;
    }" ;;
    "2")
        add_function "
    static inline struct address_space *cas_page_mapping(struct page *page)
    {
        if (PageCompound(page))
            return NULL;

        return page->mapping;
    }" ;;
    *)
        exit 1
    esac
}

conf_run $@

[new file: configure check script for preempt_model_*()]

@@ -0,0 +1,52 @@
#!/bin/bash
#
# Copyright(c) 2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

. $(dirname $3)/conf_framework.sh

check() {
    cur_name=$(basename $2)
    config_file_path=$1

    if compile_module $cur_name "preempt_model_voluntary();" "linux/preempt.h" &&
            compile_module $cur_name "preempt_model_none();" "linux/preempt.h"
    then
        echo $cur_name "1" >> $config_file_path
    else
        echo $cur_name "2" >> $config_file_path
    fi
}

apply() {
    case "$1" in
    "1")
        add_function "
    static inline int cas_preempt_model_voluntary(void)
    {
        return preempt_model_voluntary();
    }"
        add_function "
    static inline int cas_preempt_model_none(void)
    {
        return preempt_model_none();
    }" ;;
    "2")
        add_function "
    static inline int cas_preempt_model_voluntary(void)
    {
        return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
    }"
        add_function "
    static inline int cas_preempt_model_none(void)
    {
        return IS_ENABLED(CONFIG_PREEMPT_NONE);
    }" ;;
    *)
        exit 1
    esac
}

conf_run $@
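
Both scripts follow the same two-phase contract: at build time check() test-compiles a small snippet against the target kernel headers and appends a "name variant" line to a config file, and apply() later emits the static-inline shim matching the recorded variant. A minimal sketch of reading such a record back, in Python purely for illustration (the real dispatch happens in conf_framework.sh, and the script names below are hypothetical, since this diff does not show the file names):

# Parse the "<script name> <variant>" lines that check() appends via echo.
variants = {}
config_lines = [
    "page_mapping.conf 1",        # hypothetical names; the actual script
    "preemption_model.conf 2",    # file names are not shown in this diff
]
for line in config_lines:
    name, variant = line.rsplit(maxsplit=1)
    variants[name] = variant      # apply() is dispatched on this value ("1" or "2")

print(variants)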

[modified file: IO classifier (C source)]

@@ -53,7 +53,7 @@ static cas_cls_eval_t _cas_cls_metadata_test(struct cas_classifier *cls,
     if (PageAnon(io->page))
         return cas_cls_eval_no;

-    if (PageSlab(io->page) || PageCompound(io->page)) {
+    if (PageSlab(io->page)) {
         /* A filesystem issues IO on pages that does not belongs
          * to the file page cache. It means that it is a
          * part of metadata
@@ -61,7 +61,7 @@ static cas_cls_eval_t _cas_cls_metadata_test(struct cas_classifier *cls,
         return cas_cls_eval_yes;
     }

-    if (!io->page->mapping) {
+    if (!cas_page_mapping(io->page)) {
         /* XFS case, page are allocated internally and do not
          * have references into inode
          */
@@ -1229,6 +1229,7 @@ static void _cas_cls_get_bio_context(struct bio *bio,
         struct cas_cls_io *ctx)
 {
     struct page *page = NULL;
+    struct address_space *mapping;

     if (!bio)
         return;
@@ -1246,13 +1247,14 @@ static void _cas_cls_get_bio_context(struct bio *bio,
     if (PageAnon(page))
         return;

-    if (PageSlab(page) || PageCompound(page))
+    if (PageSlab(page))
         return;

-    if (!page->mapping)
+    mapping = cas_page_mapping(page);
+    if (!mapping)
         return;

-    ctx->inode = page->mapping->host;
+    ctx->inode = mapping->host;

     return;
 }

[modified file: involuntary preemption check (C source)]

@@ -65,9 +65,6 @@ static inline uint32_t involuntary_preemption_enabled(void)
 }

 #ifdef CONFIG_PREEMPT_DYNAMIC
-    /* preempt_model_none() or preempt_model_voluntary() are not defined if
-     * the kernel has been compiled without PREEMPT_DYNAMIC
-     */
     printk(KERN_WARNING OCF_PREFIX_SHORT
         "The kernel has been compiled with preemption configurable\n"
         "at boot time (PREEMPT_DYNAMIC=y). Open CAS doesn't support\n"
@@ -75,7 +72,7 @@ static inline uint32_t involuntary_preemption_enabled(void)
         "\"preempt=\" to \"none\" or \"voluntary\" in the kernel"
         " command line\n");

-    if (!preempt_model_none() && !preempt_model_voluntary()) {
+    if (!cas_preempt_model_none() && !cas_preempt_model_voluntary()) {
         printk(KERN_ERR OCF_PREFIX_SHORT
             "The kernel has been booted with involuntary "
             "preemption enabled.\nFailed to load Open CAS kernel "

[new file: promotion policy nhit tests (Python)]

@@ -0,0 +1,263 @@
#
# Copyright(c) 2024-2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

import math
import random

import pytest

from api.cas import casadm
from api.cas.cache_config import SeqCutOffPolicy, CleaningPolicy, PromotionPolicy, \
    PromotionParametersNhit, CacheMode
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.udev import Udev
from type_def.size import Size, Unit

@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_promotion_policy_nhit_threshold():
"""
title: Functional test for promotion policy nhit - threshold
description: |
Test checking if data is cached only after number of hits to given cache line
accordingly to specified promotion nhit threshold.
pass_criteria:
- Promotion policy and hit parameters are set properly
- Data is cached only after number of hits to given cache line specified by threshold param
- Data is written in pass-through before number of hits to given cache line specified by
threshold param
- After meeting specified number of hits to given cache line, writes to other cache lines
are handled in pass-through
"""
random_thresholds = random.sample(range(2, 1000), 10)
additional_writes_count = 10
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(value=5, unit=Unit.GibiByte)])
core_device.create_partitions([Size(value=10, unit=Unit.GibiByte)])
cache_part = cache_device.partitions[0]
core_parts = core_device.partitions[0]
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, cache_mode=CacheMode.WB)
core = cache.add_core(core_parts)
with TestRun.step("Disable sequential cut-off and cleaning"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.set_cleaning_policy(CleaningPolicy.nop)
cache.reset_counters()
with TestRun.step("Check if statistics of writes to cache and writes to core are zeros"):
check_statistics(
cache,
expected_writes_to_cache=Size.zero(),
expected_writes_to_core=Size.zero()
)
with TestRun.step("Set nhit promotion policy"):
cache.set_promotion_policy(PromotionPolicy.nhit)
for iteration, threshold in enumerate(
TestRun.iteration(
random_thresholds,
"Set and validate nhit promotion policy threshold"
)
):
with TestRun.step(f"Set threshold to {threshold} and trigger to 0%"):
cache.set_params_nhit(
PromotionParametersNhit(
threshold=threshold,
trigger=0
)
)
with TestRun.step("Purge cache"):
cache.purge_cache()
with TestRun.step("Reset counters"):
cache.reset_counters()
with TestRun.step(
"Run dd and check if number of writes to cache and writes to core increase "
"accordingly to nhit parameters"
):
# dd_seek is counted as below to use different part of the cache in each iteration
dd_seek = int(
cache.size.get_value(Unit.Blocks4096) // len(random_thresholds) * iteration
)
for count in range(1, threshold + additional_writes_count):
Dd().input("/dev/random") \
.output(core.path) \
.oflag("direct") \
.block_size(Size(1, Unit.Blocks4096)) \
.count(1) \
.seek(dd_seek) \
.run()
if count < threshold:
expected_writes_to_cache = Size.zero()
expected_writes_to_core = Size(count, Unit.Blocks4096)
else:
expected_writes_to_cache = Size(count - threshold + 1, Unit.Blocks4096)
expected_writes_to_core = Size(threshold - 1, Unit.Blocks4096)
check_statistics(cache, expected_writes_to_cache, expected_writes_to_core)
with TestRun.step("Write to other cache line and check if it was handled in pass-through"):
Dd().input("/dev/random") \
.output(core.path) \
.oflag("direct") \
.block_size(Size(1, Unit.Blocks4096)) \
.count(1) \
.seek(int(dd_seek + Unit.Blocks4096.value)) \
.run()
expected_writes_to_core = expected_writes_to_core + Size(1, Unit.Blocks4096)
check_statistics(cache, expected_writes_to_cache, expected_writes_to_core)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_promotion_policy_nhit_trigger():
"""
title: Functional test for promotion policy nhit - trigger
description: |
Test checking if data is cached accordingly to nhit threshold parameter only after reaching
cache occupancy specified by nhit trigger value
pass_criteria:
- Promotion policy and hit parameters are set properly
- Data is cached accordingly to nhit threshold parameter only after reaching
cache occupancy specified by nhit trigger value
- Data is cached without nhit policy before reaching the trigger
"""
random_triggers = random.sample(range(0, 100), 10)
threshold = 2
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(value=50, unit=Unit.MebiByte)])
core_device.create_partitions([Size(value=100, unit=Unit.MebiByte)])
cache_part = cache_device.partitions[0]
core_parts = core_device.partitions[0]
with TestRun.step("Disable udev"):
Udev.disable()
for trigger in TestRun.iteration(
random_triggers,
"Validate nhit promotion policy trigger"
):
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, cache_mode=CacheMode.WB, force=True)
core = cache.add_core(core_parts)
with TestRun.step("Disable sequential cut-off and cleaning"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.set_cleaning_policy(CleaningPolicy.nop)
with TestRun.step("Purge cache"):
cache.purge_cache()
with TestRun.step("Reset counters"):
cache.reset_counters()
with TestRun.step("Check if statistics of writes to cache and writes to core are zeros"):
check_statistics(
cache,
expected_writes_to_cache=Size.zero(),
expected_writes_to_core=Size.zero()
)
with TestRun.step("Set nhit promotion policy"):
cache.set_promotion_policy(PromotionPolicy.nhit)
with TestRun.step(f"Set threshold to {threshold} and trigger to {trigger}%"):
cache.set_params_nhit(
PromotionParametersNhit(
threshold=threshold,
trigger=trigger
)
)
with TestRun.step(f"Run dd to fill {trigger}% of cache size with data"):
blocks_count = math.ceil(cache.size.get_value(Unit.Blocks4096) * trigger / 100)
Dd().input("/dev/random") \
.output(core.path) \
.oflag("direct") \
.block_size(Size(1, Unit.Blocks4096)) \
.count(blocks_count) \
.seek(0) \
.run()
with TestRun.step("Check if all written data was cached"):
check_statistics(
cache,
expected_writes_to_cache=Size(blocks_count, Unit.Blocks4096),
expected_writes_to_core=Size.zero()
)
with TestRun.step("Write to free cached volume sectors"):
free_seek = (blocks_count + 1)
pt_blocks_count = int(cache.size.get_value(Unit.Blocks4096) - blocks_count)
Dd().input("/dev/random") \
.output(core.path) \
.oflag("direct") \
.block_size(Size(1, Unit.Blocks4096)) \
.count(pt_blocks_count) \
.seek(free_seek) \
.run()
with TestRun.step("Check if recently written data was written in pass-through"):
check_statistics(
cache,
expected_writes_to_cache=Size(blocks_count, Unit.Blocks4096),
expected_writes_to_core=Size(pt_blocks_count, Unit.Blocks4096)
)
with TestRun.step("Write to recently written sectors one more time"):
Dd().input("/dev/random") \
.output(core.path) \
.oflag("direct") \
.block_size(Size(1, Unit.Blocks4096)) \
.count(pt_blocks_count) \
.seek(free_seek) \
.run()
with TestRun.step("Check if recently written data was cached"):
check_statistics(
cache,
expected_writes_to_cache=Size(blocks_count + pt_blocks_count, Unit.Blocks4096),
expected_writes_to_core=Size(pt_blocks_count, Unit.Blocks4096)
)
with TestRun.step("Stop cache"):
cache.stop(no_data_flush=True)
def check_statistics(cache, expected_writes_to_cache, expected_writes_to_core):
    cache_stats = cache.get_statistics()
    writes_to_cache = cache_stats.block_stats.cache.writes
    writes_to_core = cache_stats.block_stats.core.writes

    if writes_to_cache != expected_writes_to_cache:
        TestRun.LOGGER.error(
            f"Number of writes to cache should be "
            f"{expected_writes_to_cache.get_value(Unit.Blocks4096)} "
            f"but it is {writes_to_cache.get_value(Unit.Blocks4096)}")
    if writes_to_core != expected_writes_to_core:
        TestRun.LOGGER.error(
            f"Number of writes to core should be "
            f"{expected_writes_to_core.get_value(Unit.Blocks4096)} "
            f"but it is {writes_to_core.get_value(Unit.Blocks4096)}")

[modified file: data integrity unplug test (Python)]

@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -230,7 +230,7 @@ def gen_log(seqno_max):
             .set_param("write_iolog", f"{io_log_path}_{i}")
     fio.run()

-    r = re.compile(r"\S+\s+(read|write)\s+(\d+)\s+(\d+)")
+    r = re.compile(r"\S+\s+\S+\s+write\s+(\d+)\s+(\d+)")
     for j in range(num_jobs):
         log = f"{io_log_path}_{j}"
         nr = 0
@@ -238,7 +238,7 @@ def gen_log(seqno_max):
             m = r.match(line)
             if m:
                 if nr > max_log_seqno:
-                    block = int(m.group(2)) // block_size.value - j * job_workset_blocks
+                    block = int(m.group(1)) // block_size.value - j * job_workset_blocks
                     g_io_log[j][block] += [nr]
                 nr += 1
                 if nr > seqno_max + 1:
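
The regex change tracks a change in fio's write_iolog output: fio newer than 3.30 emits the log with an extra leading field before the file name, so the action moves one column to the right and the byte offset becomes group(1) instead of group(2). A self-contained illustration with assumed sample lines (the device path and values are made up, and the exact new-format layout is an assumption inferred from the commit message and the new regex):

import re

old_line = "/dev/cas1-1 write 4096 4096"        # fio <= 3.30: file action offset length
new_line = "125 /dev/cas1-1 write 4096 4096"    # fio > 3.30: extra leading field

r_old = re.compile(r"\S+\s+(read|write)\s+(\d+)\s+(\d+)")  # offset was group(2)
r_new = re.compile(r"\S+\s+\S+\s+write\s+(\d+)\s+(\d+)")   # offset is now group(1)

assert r_old.match(old_line).group(2) == "4096"
assert r_new.match(new_line).group(1) == "4096"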

[modified file: fuzzy start cache device test (Python)]

@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -16,7 +16,7 @@ from api.cas.cache_config import (
 )
 from api.cas.cli import start_cmd
 from core.test_run import TestRun
-from storage_devices.disk import DiskType, DiskTypeSet
+from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.peach_fuzzer.peach_fuzzer import PeachFuzzer
 from type_def.size import Unit, Size
 from tests.security.fuzzy.kernel.common.common import (
@@ -26,6 +26,7 @@ from tests.security.fuzzy.kernel.common.common import (

 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("other", DiskTypeLowerThan("cache"))
 @pytest.mark.parametrizex("cache_mode", CacheMode)
 @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 @pytest.mark.parametrizex("unaligned_io", UnalignedIo)
@@ -44,7 +45,8 @@ def test_fuzzy_start_cache_device(cache_mode, cache_line_size, unaligned_io, use
     cache_id = 1

     with TestRun.step("Create partitions on all devices"):
-        for disk in TestRun.dut.disks:
+        available_disks = [TestRun.disks["cache"], TestRun.disks["other"]]
+        for disk in available_disks:
             disk.create_partitions([Size(400, Unit.MebiByte)])

     with TestRun.step("Start and stop cache"):
@@ -61,12 +63,18 @@ def test_fuzzy_start_cache_device(cache_mode, cache_line_size, unaligned_io, use
         cache.stop()

     with TestRun.step("Prepare PeachFuzzer"):
-        disks_paths = [disk.path for disk in TestRun.dut.disks]
-        partitions_paths = [disk.partitions[0].path for disk in TestRun.dut.disks]
+        disks_paths = [disk.path for disk in available_disks]
+        partitions_paths = [disk.partitions[0].path for disk in available_disks]
         valid_values = disks_paths + partitions_paths
         # fuzz only partitions to speed up test
         fuzz_config = get_device_fuzz_config(partitions_paths)
+        # forbidden values are created to prevent starting cache on other disks connected to DUT
+        forbidden_values = [
+            disk.path for disk in TestRun.dut.disks if disk.path not in valid_values
+        ]
         valid_values = [path.encode("ascii") for path in valid_values]
+        forbidden_values = [path.encode("ascii") for path in forbidden_values]
         PeachFuzzer.generate_config(fuzz_config)
         base_cmd = start_cmd(
             cache_dev="{param}",
@@ -83,6 +91,11 @@ def test_fuzzy_start_cache_device(cache_mode, cache_line_size, unaligned_io, use
             enumerate(commands), f"Run command {TestRun.usr.fuzzy_iter_count} times"
         ):
             with TestRun.step(f"Iteration {index + 1}"):
+                if cmd.param in forbidden_values:
+                    TestRun.LOGGER.warning(
+                        f"Iteration skipped due to the forbidden param value {cmd.param}."
+                    )
+                    continue
                 output = run_cmd_and_validate(
                     cmd=cmd,
                     value_name="Device path",