Compare commits

12 commits: eee15d9ca4...f73a209371

| SHA1 |
|---|
| f73a209371 |
| 56ded4c7fd |
| 3a5df70abe |
| 289355b83a |
| 99af7ee9b5 |
| b239bdb624 |
| e189584557 |
| 3c19caae1e |
| f46de38db0 |
| 73cd065bfb |
| 46a486a442 |
| ceb208eb78 |
.gitattributes (vendored, 2 changed lines)

```diff
@@ -1 +1 @@
-test/* linguist-detectable=false
+test/** -linguist-detectable
```
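Both forms exclude the test tree from linguist's language statistics; the new line uses the attribute-unset syntax (`-linguist-detectable`) rather than assigning `false`, and `test/**` matches the whole tree recursively instead of only the directory's immediate children.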
configure.d/1_page_mapping.conf (new file, 45 lines)

```bash
#!/bin/bash
#
# Copyright(c) 2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

. $(dirname $3)/conf_framework.sh

check() {
    cur_name=$(basename $2)
    config_file_path=$1
    if compile_module $cur_name "page_folio((struct page *)NULL);" "linux/page-flags.h"
    then
        echo $cur_name "1" >> $config_file_path
    else
        echo $cur_name "2" >> $config_file_path
    fi
}

apply() {
    case "$1" in
    "1")
        add_function "
    static inline struct address_space *cas_page_mapping(struct page *page)
    {
        struct folio *folio = page_folio(page);

        return folio->mapping;
    }" ;;

    "2")
        add_function "
    static inline struct address_space *cas_page_mapping(struct page *page)
    {
        if (PageCompound(page))
            return NULL;

        return page->mapping;
    }" ;;
    *)
        exit 1
    esac
}

conf_run $@
```
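This is the usual configure.d probe pattern: check() test-compiles a one-line snippet against the target kernel (here page_folio() from linux/page-flags.h) and records a variant number, and apply() emits the matching cas_page_mapping() compatibility wrapper — folio-based where folios exist, otherwise a page->mapping fallback that treats compound pages as unmapped.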
configure.d/1_preempt_model.conf (new file, 52 lines)

```bash
#!/bin/bash
#
# Copyright(c) 2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

. $(dirname $3)/conf_framework.sh

check() {
    cur_name=$(basename $2)
    config_file_path=$1
    if compile_module $cur_name "preempt_model_voluntary();" "linux/preempt.h" &&
        compile_module $cur_name "preempt_model_none();" "linux/preempt.h"
    then
        echo $cur_name "1" >> $config_file_path
    else
        echo $cur_name "2" >> $config_file_path
    fi
}

apply() {
    case "$1" in
    "1")
        add_function "
    static inline int cas_preempt_model_voluntary(void)
    {
        return preempt_model_voluntary();
    }"
        add_function "
    static inline int cas_preempt_model_none(void)
    {
        return preempt_model_none();
    }" ;;

    "2")
        add_function "
    static inline int cas_preempt_model_voluntary(void)
    {
        return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
    }"
        add_function "
    static inline int cas_preempt_model_none(void)
    {
        return IS_ENABLED(CONFIG_PREEMPT_NONE);
    }" ;;

    *)
        exit 1
    esac
}

conf_run $@
```
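Same pattern here: the probe only selects variant 1 when both preempt_model_voluntary() and preempt_model_none() compile, so the generated wrappers can fall back to the compile-time IS_ENABLED() checks on kernels that lack the runtime helpers.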
```diff
@@ -53,7 +53,7 @@ static cas_cls_eval_t _cas_cls_metadata_test(struct cas_classifier *cls,
 	if (PageAnon(io->page))
 		return cas_cls_eval_no;
 
-	if (PageSlab(io->page) || PageCompound(io->page)) {
+	if (PageSlab(io->page)) {
 		/* A filesystem issues IO on pages that does not belongs
 		 * to the file page cache. It means that it is a
 		 * part of metadata
@@ -61,7 +61,7 @@ static cas_cls_eval_t _cas_cls_metadata_test(struct cas_classifier *cls,
 		return cas_cls_eval_yes;
 	}
 
-	if (!io->page->mapping) {
+	if (!cas_page_mapping(io->page)) {
 		/* XFS case, page are allocated internally and do not
 		 * have references into inode
 		 */
@@ -1229,6 +1229,7 @@ static void _cas_cls_get_bio_context(struct bio *bio,
 		struct cas_cls_io *ctx)
 {
 	struct page *page = NULL;
+	struct address_space *mapping;
 
 	if (!bio)
 		return;
@@ -1246,13 +1247,14 @@ static void _cas_cls_get_bio_context(struct bio *bio,
 	if (PageAnon(page))
 		return;
 
-	if (PageSlab(page) || PageCompound(page))
+	if (PageSlab(page))
 		return;
 
-	if (!page->mapping)
+	mapping = cas_page_mapping(page);
+	if (!mapping)
 		return;
 
-	ctx->inode = page->mapping->host;
+	ctx->inode = mapping->host;
 
 	return;
 }
```
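The classifier changes line up with the new helper: the PageCompound() bail-outs go away and every direct page->mapping access is funneled through cas_page_mapping(), so on folio-enabled kernels the mapping of a compound (large-folio) page-cache page is resolved via page_folio() instead of the page being skipped outright.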
```diff
@@ -65,9 +65,6 @@ static inline uint32_t involuntary_preemption_enabled(void)
 }
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-	/* preempt_model_none() or preempt_model_voluntary() are not defined if
-	 * the kernel has been compiled without PREEMPT_DYNAMIC
-	 */
 	printk(KERN_WARNING OCF_PREFIX_SHORT
 		"The kernel has been compiled with preemption configurable\n"
 		"at boot time (PREEMPT_DYNAMIC=y). Open CAS doesn't support\n"
@@ -75,7 +72,7 @@ static inline uint32_t involuntary_preemption_enabled(void)
 		"\"preempt=\" to \"none\" or \"voluntary\" in the kernel"
 		" command line\n");
 
-	if (!preempt_model_none() && !preempt_model_voluntary()) {
+	if (!cas_preempt_model_none() && !cas_preempt_model_voluntary()) {
 		printk(KERN_ERR OCF_PREFIX_SHORT
 			"The kernel has been booted with involuntary "
 			"preemption enabled.\nFailed to load Open CAS kernel "
```
test/functional/tests/cache_ops/test_promotion.py (new file, 263 lines)

```python
#
# Copyright(c) 2024-2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

import math
import random
import pytest

from api.cas import casadm
from api.cas.cache_config import SeqCutOffPolicy, CleaningPolicy, PromotionPolicy, \
    PromotionParametersNhit, CacheMode
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.udev import Udev
from type_def.size import Size, Unit


@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_promotion_policy_nhit_threshold():
    """
    title: Functional test for promotion policy nhit - threshold
    description: |
        Test checking if data is cached only after number of hits to given cache line
        accordingly to specified promotion nhit threshold.
    pass_criteria:
      - Promotion policy and hit parameters are set properly
      - Data is cached only after number of hits to given cache line specified by threshold param
      - Data is written in pass-through before number of hits to given cache line specified by
        threshold param
      - After meeting specified number of hits to given cache line, writes to other cache lines
        are handled in pass-through
    """
    random_thresholds = random.sample(range(2, 1000), 10)
    additional_writes_count = 10

    with TestRun.step("Prepare cache and core devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(value=5, unit=Unit.GibiByte)])
        core_device.create_partitions([Size(value=10, unit=Unit.GibiByte)])

        cache_part = cache_device.partitions[0]
        core_parts = core_device.partitions[0]

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step("Start cache and add core"):
        cache = casadm.start_cache(cache_part, cache_mode=CacheMode.WB)
        core = cache.add_core(core_parts)

    with TestRun.step("Disable sequential cut-off and cleaning"):
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.reset_counters()

    with TestRun.step("Check if statistics of writes to cache and writes to core are zeros"):
        check_statistics(
            cache,
            expected_writes_to_cache=Size.zero(),
            expected_writes_to_core=Size.zero()
        )

    with TestRun.step("Set nhit promotion policy"):
        cache.set_promotion_policy(PromotionPolicy.nhit)

    for iteration, threshold in enumerate(
        TestRun.iteration(
            random_thresholds,
            "Set and validate nhit promotion policy threshold"
        )
    ):
        with TestRun.step(f"Set threshold to {threshold} and trigger to 0%"):
            cache.set_params_nhit(
                PromotionParametersNhit(
                    threshold=threshold,
                    trigger=0
                )
            )

        with TestRun.step("Purge cache"):
            cache.purge_cache()

        with TestRun.step("Reset counters"):
            cache.reset_counters()

        with TestRun.step(
            "Run dd and check if number of writes to cache and writes to core increase "
            "accordingly to nhit parameters"
        ):
            # dd_seek is counted as below to use different part of the cache in each iteration
            dd_seek = int(
                cache.size.get_value(Unit.Blocks4096) // len(random_thresholds) * iteration
            )

            for count in range(1, threshold + additional_writes_count):
                Dd().input("/dev/random") \
                    .output(core.path) \
                    .oflag("direct") \
                    .block_size(Size(1, Unit.Blocks4096)) \
                    .count(1) \
                    .seek(dd_seek) \
                    .run()
                if count < threshold:
                    expected_writes_to_cache = Size.zero()
                    expected_writes_to_core = Size(count, Unit.Blocks4096)
                else:
                    expected_writes_to_cache = Size(count - threshold + 1, Unit.Blocks4096)
                    expected_writes_to_core = Size(threshold - 1, Unit.Blocks4096)
                check_statistics(cache, expected_writes_to_cache, expected_writes_to_core)

        with TestRun.step("Write to other cache line and check if it was handled in pass-through"):
            Dd().input("/dev/random") \
                .output(core.path) \
                .oflag("direct") \
                .block_size(Size(1, Unit.Blocks4096)) \
                .count(1) \
                .seek(int(dd_seek + Unit.Blocks4096.value)) \
                .run()
            expected_writes_to_core = expected_writes_to_core + Size(1, Unit.Blocks4096)
            check_statistics(cache, expected_writes_to_cache, expected_writes_to_core)


@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_promotion_policy_nhit_trigger():
    """
    title: Functional test for promotion policy nhit - trigger
    description: |
        Test checking if data is cached accordingly to nhit threshold parameter only after reaching
        cache occupancy specified by nhit trigger value
    pass_criteria:
      - Promotion policy and hit parameters are set properly
      - Data is cached accordingly to nhit threshold parameter only after reaching
        cache occupancy specified by nhit trigger value
      - Data is cached without nhit policy before reaching the trigger
    """
    random_triggers = random.sample(range(0, 100), 10)
    threshold = 2

    with TestRun.step("Prepare cache and core devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(value=50, unit=Unit.MebiByte)])
        core_device.create_partitions([Size(value=100, unit=Unit.MebiByte)])

        cache_part = cache_device.partitions[0]
        core_parts = core_device.partitions[0]

    with TestRun.step("Disable udev"):
        Udev.disable()

    for trigger in TestRun.iteration(
        random_triggers,
        "Validate nhit promotion policy trigger"
    ):
        with TestRun.step("Start cache and add core"):
            cache = casadm.start_cache(cache_part, cache_mode=CacheMode.WB, force=True)
            core = cache.add_core(core_parts)

        with TestRun.step("Disable sequential cut-off and cleaning"):
            cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
            cache.set_cleaning_policy(CleaningPolicy.nop)

        with TestRun.step("Purge cache"):
            cache.purge_cache()

        with TestRun.step("Reset counters"):
            cache.reset_counters()

        with TestRun.step("Check if statistics of writes to cache and writes to core are zeros"):
            check_statistics(
                cache,
                expected_writes_to_cache=Size.zero(),
                expected_writes_to_core=Size.zero()
            )

        with TestRun.step("Set nhit promotion policy"):
            cache.set_promotion_policy(PromotionPolicy.nhit)

        with TestRun.step(f"Set threshold to {threshold} and trigger to {trigger}%"):
            cache.set_params_nhit(
                PromotionParametersNhit(
                    threshold=threshold,
                    trigger=trigger
                )
            )

        with TestRun.step(f"Run dd to fill {trigger}% of cache size with data"):
            blocks_count = math.ceil(cache.size.get_value(Unit.Blocks4096) * trigger / 100)
            Dd().input("/dev/random") \
                .output(core.path) \
                .oflag("direct") \
                .block_size(Size(1, Unit.Blocks4096)) \
                .count(blocks_count) \
                .seek(0) \
                .run()

        with TestRun.step("Check if all written data was cached"):
            check_statistics(
                cache,
                expected_writes_to_cache=Size(blocks_count, Unit.Blocks4096),
                expected_writes_to_core=Size.zero()
            )

        with TestRun.step("Write to free cached volume sectors"):
            free_seek = (blocks_count + 1)
            pt_blocks_count = int(cache.size.get_value(Unit.Blocks4096) - blocks_count)
            Dd().input("/dev/random") \
                .output(core.path) \
                .oflag("direct") \
                .block_size(Size(1, Unit.Blocks4096)) \
                .count(pt_blocks_count) \
                .seek(free_seek) \
                .run()

        with TestRun.step("Check if recently written data was written in pass-through"):
            check_statistics(
                cache,
                expected_writes_to_cache=Size(blocks_count, Unit.Blocks4096),
                expected_writes_to_core=Size(pt_blocks_count, Unit.Blocks4096)
            )

        with TestRun.step("Write to recently written sectors one more time"):
            Dd().input("/dev/random") \
                .output(core.path) \
                .oflag("direct") \
                .block_size(Size(1, Unit.Blocks4096)) \
                .count(pt_blocks_count) \
                .seek(free_seek) \
                .run()

        with TestRun.step("Check if recently written data was cached"):
            check_statistics(
                cache,
                expected_writes_to_cache=Size(blocks_count + pt_blocks_count, Unit.Blocks4096),
                expected_writes_to_core=Size(pt_blocks_count, Unit.Blocks4096)
            )

        with TestRun.step("Stop cache"):
            cache.stop(no_data_flush=True)


def check_statistics(cache, expected_writes_to_cache, expected_writes_to_core):
    cache_stats = cache.get_statistics()
    writes_to_cache = cache_stats.block_stats.cache.writes
    writes_to_core = cache_stats.block_stats.core.writes

    if writes_to_cache != expected_writes_to_cache:
        TestRun.LOGGER.error(
            f"Number of writes to cache should be "
            f"{expected_writes_to_cache.get_value(Unit.Blocks4096)} "
            f"but it is {writes_to_cache.get_value(Unit.Blocks4096)}")
    if writes_to_core != expected_writes_to_core:
        TestRun.LOGGER.error(
            f"Number of writes to core should be: "
            f"{expected_writes_to_core.get_value(Unit.Blocks4096)} "
            f"but it is {writes_to_core.get_value(Unit.Blocks4096)}")
```
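As a reading aid, here is a minimal standalone sketch of the accounting the threshold assertions above encode, under the assumed nhit semantics with trigger=0: a cache line is promoted on its threshold-th write, and every earlier write is passed through to the core.

```python
# Sketch only; `threshold` is an illustrative value, and the promotion
# semantics (promote on the threshold-th hit, pass-through before that)
# are the assumption the test's expected counters encode.
threshold = 5

for count in range(1, threshold + 3):  # repeated writes to one 4 KiB cache line
    if count < threshold:
        writes_to_cache = 0            # below the hit threshold: nothing promoted
        writes_to_core = count         # all writes served in pass-through
    else:
        writes_to_cache = count - threshold + 1  # cached from the threshold-th write on
        writes_to_core = threshold - 1           # pass-through count stops growing
    print(f"write #{count}: cache blocks={writes_to_cache}, core blocks={writes_to_core}")
```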
```diff
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
@@ -230,7 +230,7 @@ def gen_log(seqno_max):
             .set_param("write_iolog", f"{io_log_path}_{i}")
     fio.run()
 
-    r = re.compile(r"\S+\s+(read|write)\s+(\d+)\s+(\d+)")
+    r = re.compile(r"\S+\s+\S+\s+write\s+(\d+)\s+(\d+)")
     for j in range(num_jobs):
         log = f"{io_log_path}_{j}"
         nr = 0
@@ -238,7 +238,7 @@ def gen_log(seqno_max):
             m = r.match(line)
             if m:
                 if nr > max_log_seqno:
-                    block = int(m.group(2)) // block_size.value - j * job_workset_blocks
+                    block = int(m.group(1)) // block_size.value - j * job_workset_blocks
                     g_io_log[j][block] += [nr]
                 nr += 1
                 if nr > seqno_max + 1:
```
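The regex change shifts the byte offset from group(2) to group(1): the new pattern expects an extra leading field before the file name and matches only write entries, which suggests an iolog format that prefixes each entry with a timestamp. A hedged demo follows (the sample line and its field layout are assumptions for illustration, not taken from the diff):

```python
import re

# Assumed iolog entry layout "<timestamp> <file> <action> <offset> <length>";
# the sample values are made up for illustration.
line = "125 /dev/vdb write 1052672 4096"

r = re.compile(r"\S+\s+\S+\s+write\s+(\d+)\s+(\d+)")
m = r.match(line)
if m:
    offset, length = int(m.group(1)), int(m.group(2))
    print(offset // 4096)  # -> 257: the 4 KiB block index used by gen_log()
```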
```diff
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
@@ -16,7 +16,7 @@ from api.cas.cache_config import (
 )
 from api.cas.cli import start_cmd
 from core.test_run import TestRun
-from storage_devices.disk import DiskType, DiskTypeSet
+from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.peach_fuzzer.peach_fuzzer import PeachFuzzer
 from type_def.size import Unit, Size
 from tests.security.fuzzy.kernel.common.common import (
@@ -26,6 +26,7 @@ from tests.security.fuzzy.kernel.common.common import (
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("other", DiskTypeLowerThan("cache"))
 @pytest.mark.parametrizex("cache_mode", CacheMode)
 @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 @pytest.mark.parametrizex("unaligned_io", UnalignedIo)
@@ -44,7 +45,8 @@ def test_fuzzy_start_cache_device(cache_mode, cache_line_size, unaligned_io, use
     cache_id = 1
 
     with TestRun.step("Create partitions on all devices"):
-        for disk in TestRun.dut.disks:
+        available_disks = [TestRun.disks["cache"], TestRun.disks["other"]]
+        for disk in available_disks:
             disk.create_partitions([Size(400, Unit.MebiByte)])
 
     with TestRun.step("Start and stop cache"):
@@ -61,12 +63,18 @@ def test_fuzzy_start_cache_device(cache_mode, cache_line_size, unaligned_io, use
         cache.stop()
 
     with TestRun.step("Prepare PeachFuzzer"):
-        disks_paths = [disk.path for disk in TestRun.dut.disks]
-        partitions_paths = [disk.partitions[0].path for disk in TestRun.dut.disks]
+        disks_paths = [disk.path for disk in available_disks]
+        partitions_paths = [disk.partitions[0].path for disk in available_disks]
         valid_values = disks_paths + partitions_paths
         # fuzz only partitions to speed up test
         fuzz_config = get_device_fuzz_config(partitions_paths)
+        # forbidden values are created to prevent starting cache on other disks connected to DUT
+        forbidden_values = [
+            disk.path for disk in TestRun.dut.disks if disk.path not in valid_values
+        ]
         valid_values = [path.encode("ascii") for path in valid_values]
+        forbidden_values = [path.encode("ascii") for path in forbidden_values]
 
         PeachFuzzer.generate_config(fuzz_config)
         base_cmd = start_cmd(
             cache_dev="{param}",
@@ -83,6 +91,11 @@ def test_fuzzy_start_cache_device(cache_mode, cache_line_size, unaligned_io, use
         enumerate(commands), f"Run command {TestRun.usr.fuzzy_iter_count} times"
     ):
         with TestRun.step(f"Iteration {index + 1}"):
+            if cmd.param in forbidden_values:
+                TestRun.LOGGER.warning(
+                    f"Iteration skipped due to the forbidden param value {cmd.param}."
+                )
+                continue
             output = run_cmd_and_validate(
                 cmd=cmd,
                 value_name="Device path",
```