Merge pull request #1618 from katlapinka/kasiat/refactor-tests-description

Cleanup tests descriptions, prepare steps and values naming PART-1
Katarzyna Treder 2025-03-10 14:22:03 +01:00 committed by GitHub
commit 4d23c5f586
19 changed files with 609 additions and 537 deletions

View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -28,11 +28,10 @@ block_sizes = [1, 2, 4, 5, 8, 16, 32, 64, 128]
@pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.nand]))
def test_support_different_io_size(cache_mode):
"""
title: OpenCAS supports different IO sizes
description: |
OpenCAS supports IO of size in rage from 512b to 128K
title: Support for different I/O sizes
description: Verify support for I/O of size in range from 512B to 128KiB
pass_criteria:
- No IO errors
- No I/O errors
"""
with TestRun.step("Prepare cache and core devices"):
@ -47,7 +46,7 @@ def test_support_different_io_size(cache_mode):
)
core = cache.add_core(core_disk.partitions[0])
with TestRun.step("Load the default ioclass config file"):
with TestRun.step("Load the default io class config file"):
cache.load_io_class(opencas_ioclass_conf_path)
with TestRun.step("Create a filesystem on the core device and mount it"):

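As a hedged illustration of the hunk above (not part of the diff): the test sweeps I/O sizes from 512B to 128KiB, which could be driven with the framework's Dd wrapper roughly like this. The target path, request count, and the exact size list are assumptions derived from the description.

```python
from core.test_run import TestRun
from test_tools.dd import Dd
from type_def.size import Size, Unit

# 512B, then 1KiB..128KiB in powers of two, per the description above
sizes = [Size(512, Unit.Byte)] + [Size(2 ** i, Unit.KibiByte) for i in range(8)]

def run_io_for_each_size(target_path: str):
    for bs in sizes:
        dd = (
            Dd()
            .input("/dev/zero")
            .output(target_path)   # e.g. the exported object path
            .block_size(bs)
            .count(16)             # arbitrary request count
            .oflag("direct")       # bypass page cache so each size hits the device
        )
        output = dd.run()
        if output.exit_code != 0:
            TestRun.fail(f"I/O error for block size {bs}")
```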
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -30,20 +30,20 @@ mountpoint = "/mnt"
@pytest.mark.CI
def test_cas_version():
"""
title: Test for CAS version
title: Test for version number
description:
Check if CAS print version cmd returns consistent version with version file
Check if the version printed by the cmd is consistent with the version file
pass criteria:
- casadm version command succeeds
- versions from cmd and file in /var/lib/opencas/cas_version are consistent
- Version command succeeds
- Versions from cmd and file in /var/lib/opencas/cas_version are consistent
"""
with TestRun.step("Read cas version using casadm cmd"):
with TestRun.step("Read version using casadm cmd"):
output = casadm.print_version(output_format=OutputFormat.csv)
cmd_version = output.stdout
cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]]
with TestRun.step(f"Read cas version from {version_file_path} location"):
with TestRun.step(f"Read version from {version_file_path} location"):
file_read = read_file(version_file_path).split("\n")
file_cas_version = next(
(line.split("=")[1] for line in file_read if "CAS_VERSION=" in line)
@ -51,25 +51,24 @@ def test_cas_version():
with TestRun.step("Compare cmd and file versions"):
if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions):
TestRun.LOGGER.error(f"Cmd and file versions doesn`t match")
TestRun.LOGGER.error(f"Cmd and file versions doesn't match")
@pytest.mark.CI
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_negative_start_cache():
"""
title: Test start cache negative on cache device
title: Negative test for starting cache
description:
Check for negative cache start scenarios
Check starting cache using the same device or cache ID twice
pass criteria:
- Cache start succeeds
- Fails to start cache on the same device with another id
- Fails to start cache on another partition with the same id
- Starting cache on the same device with another ID fails
- Starting cache on another partition with the same ID fails
"""
with TestRun.step("Prepare cache device"):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 2)
cache_dev_1 = cache_dev.partitions[0]

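For context, a minimal sketch of the first negative scenario named in the pass criteria above (starting a second cache on the same device with another ID), assuming the casadm wrapper raises on a failing command as it does elsewhere in this suite:

```python
from api.cas import casadm
from core.test_run import TestRun

cache = casadm.start_cache(cache_dev_1, force=True)
try:
    # Same backing device, different cache ID - expected to be rejected
    casadm.start_cache(cache_dev_1, cache_id=2)
    TestRun.fail("Second cache started on the same device, but it should fail")
except Exception:
    TestRun.LOGGER.info("Starting cache on the same device failed as expected")
```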
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -65,10 +65,10 @@ def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy):
cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
set_cleaning_policy_params(cache, cleaning_policy)
with TestRun.step("Check for running CAS cleaner"):
with TestRun.step("Check for running cleaner process"):
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
if output.exit_code != 0:
TestRun.fail("CAS cleaner process is not running!")
TestRun.fail("Cleaner process is not running!")
with TestRun.step(f"Add {cores_count} cores to the cache"):
cores = [cache.add_core(partition) for partition in core_dev.partitions]
@ -133,10 +133,10 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
set_cleaning_policy_params(cache, cleaning_policy)
with TestRun.step("Check for running CAS cleaner"):
with TestRun.step("Check for running cleaner process"):
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
if output.exit_code != 0:
TestRun.fail("CAS cleaner process is not running!")
TestRun.fail("Cleaner process is not running!")
with TestRun.step(f"Add {cores_count} cores to the cache"):
cores = [cache.add_core(partition) for partition in core_dev.partitions]
@ -193,12 +193,12 @@ def set_cleaning_policy_params(cache, cleaning_policy):
if current_acp_params.wake_up_time != acp_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_acp_params.wake_up_time}, "
f"Wake up time is {current_acp_params.wake_up_time}, "
f"should be {acp_params.wake_up_time}\n"
)
if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
failed_params += (
f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
f"Flush max buffers is {current_acp_params.flush_max_buffers}, "
f"should be {acp_params.flush_max_buffers}\n"
)
TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
@ -215,22 +215,22 @@ def set_cleaning_policy_params(cache, cleaning_policy):
failed_params = ""
if current_alru_params.wake_up_time != alru_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_alru_params.wake_up_time}, "
f"Wake up time is {current_alru_params.wake_up_time}, "
f"should be {alru_params.wake_up_time}\n"
)
if current_alru_params.staleness_time != alru_params.staleness_time:
failed_params += (
f"Staleness Time is {current_alru_params.staleness_time}, "
f"Staleness time is {current_alru_params.staleness_time}, "
f"should be {alru_params.staleness_time}\n"
)
if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
failed_params += (
f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
f"Flush max buffers is {current_alru_params.flush_max_buffers}, "
f"should be {alru_params.flush_max_buffers}\n"
)
if current_alru_params.activity_threshold != alru_params.activity_threshold:
failed_params += (
f"Activity Threshold is {current_alru_params.activity_threshold}, "
f"Activity threshold is {current_alru_params.activity_threshold}, "
f"should be {alru_params.activity_threshold}\n"
)
TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
@ -245,9 +245,9 @@ def check_cleaning_policy_operation(
case CleaningPolicy.alru:
if core_writes_before_wait_for_cleaning != Size.zero():
TestRun.LOGGER.error(
"CAS cleaner started to clean dirty data right after IO! "
"Cleaner process started to clean dirty data right after I/O! "
"According to ALRU parameters set in this test cleaner should "
"wait 10 seconds after IO before cleaning dirty data"
"wait 10 seconds after I/O before cleaning dirty data"
)
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
TestRun.LOGGER.error(
@ -266,9 +266,9 @@ def check_cleaning_policy_operation(
case CleaningPolicy.acp:
if core_writes_before_wait_for_cleaning == Size.zero():
TestRun.LOGGER.error(
"CAS cleaner did not start cleaning dirty data right after IO! "
"Cleaner process did not start cleaning dirty data right after I/O! "
"According to ACP policy cleaner should start "
"cleaning dirty data right after IO"
"cleaning dirty data right after I/O"
)
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
TestRun.LOGGER.error(

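The ALRU/ACP expectations above reduce to a timing check on writes reaching the core device. A hedged sketch follows; the statistics attribute path mirrors the block_stats.cache.writes counter used elsewhere in this file and should be treated as illustrative:

```python
import time

def cleaning_start_pattern(core):
    # Dirty data being flushed shows up as writes to the core device
    writes_right_after_io = core.get_statistics().block_stats.core.writes
    time.sleep(15)  # past the 10 s ALRU wake-up time configured in this test
    writes_after_wait = core.get_statistics().block_stats.core.writes
    # ALRU: expect no core writes right after I/O, some after the wait.
    # ACP: expect core writes to begin right after I/O.
    return writes_right_after_io, writes_after_wait
```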
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -153,7 +153,7 @@ def test_concurrent_caches_flush(cache_mode: CacheMode):
"""
title: Flush multiple caches simultaneously.
description: |
CAS should successfully flush multiple caches if there is already other flush in progress.
Check flushing of multiple caches when another flush is already in progress.
pass_criteria:
- No system crash.
- Flush for each cache should finish successfully.

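A minimal sketch of driving the concurrent flushes described above, assuming each cache wrapper exposes a blocking flush_cache() method (the method name is an assumption):

```python
from concurrent.futures import ThreadPoolExecutor

def flush_concurrently(caches):
    with ThreadPoolExecutor(max_workers=len(caches)) as pool:
        futures = [pool.submit(cache.flush_cache) for cache in caches]
        for future in futures:
            future.result()  # propagate any flush failure
```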
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -46,7 +46,7 @@ def test_cache_stop_and_load(cache_mode):
"""
title: Test for stopping and loading cache back with dynamic cache mode switching.
description: |
Validate the ability of the CAS to switch cache modes at runtime and
Validate the ability to switch cache modes at runtime and
check if all of them are working properly after switching and
after stopping and reloading cache back.
Check also other parameters consistency after reload.
@ -138,10 +138,8 @@ def test_cache_stop_and_load(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
"""
title: Test for dynamic cache mode switching during IO.
description: |
Validate the ability of CAS to switch cache modes
during working IO on CAS device.
title: Test for dynamic cache mode switching during I/O.
description: Validate the ability to switch cache modes during I/O on exported object.
pass_criteria:
- Cache mode is switched without errors.
"""
@ -182,7 +180,7 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mo
):
cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)
with TestRun.step(f"Check if cache mode has switched properly during IO"):
with TestRun.step("Check if cache mode has switched properly during I/O"):
cache_mode_after_switch = cache.get_cache_mode()
if cache_mode_after_switch != cache_mode_2:
TestRun.fail(
@ -229,7 +227,7 @@ def run_io_and_verify(cache, core, io_mode):
):
TestRun.fail(
"Write-Back cache mode is not working properly! "
"There should be some writes to CAS device and none to the core"
"There should be some writes to exported object and none to the core"
)
case CacheMode.PT:
if (

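The switching step shown above is the heart of this test. A hedged sketch of the sequence, where fio, cache, and cache_mode_2 come from the test's earlier steps and run_in_background() is an assumed asynchronous launcher:

```python
from core.test_run import TestRun

fio.run_in_background()  # keep I/O running against the exported object

cache.set_cache_mode(cache_mode=cache_mode_2, flush=True)

if cache.get_cache_mode() != cache_mode_2:
    TestRun.fail("Cache mode did not switch properly during I/O")
```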
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -18,11 +18,11 @@ def test_remove_multilevel_core():
"""
title: Test of the ability to remove a core used in a multilevel cache.
description: |
Negative test if OpenCAS does not allow to remove a core when the related exported object
Negative test for removing a core when the related exported object
is used as a core device for another cache instance.
pass_criteria:
- No system crash.
- OpenCAS does not allow removing a core used in a multilevel cache instance.
- Removing a core used in a multilevel cache instance is forbidden.
"""
with TestRun.step("Prepare cache and core devices"):

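A sketch of the multilevel topology this negative test relies on; remove_core() and the partition names are assumptions, while the remaining calls follow those visible in this diff:

```python
from api.cas import casadm
from core.test_run import TestRun

cache1 = casadm.start_cache(cache_part_1, force=True)
core1 = cache1.add_core(core_part)     # core1 is an exported object

cache2 = casadm.start_cache(cache_part_2, force=True)
cache2.add_core(core1)                 # reuse the exported object as a core

try:
    cache1.remove_core(core1.core_id)  # method name assumed
    TestRun.fail("Removing a core used by a multilevel cache should fail")
except Exception:
    TestRun.LOGGER.info("Core removal was blocked as expected")
```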
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -57,7 +57,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step(f"Start cache in Write-Back"):
with TestRun.step(f"Start cache in Write-Back cache mode"):
cache_disk = TestRun.disks["cache"]
core_disk = TestRun.disks["core"]
cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
@ -105,7 +105,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
with TestRun.step(
"Write random number of 4k block requests to each stream and check if all "
"writes were sent in pass-through mode"
"writes were sent in pass-through"
):
core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk])
random.shuffle(offsets)
@ -170,7 +170,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
with TestRun.step("Reset core statistics counters"):
core.reset_counters()
with TestRun.step("Run FIO on core device"):
with TestRun.step("Run fio on core device"):
stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
sequential_streams = streams_seq_rand[0]
random_streams = streams_seq_rand[1]
@ -216,12 +216,14 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
- No system crash
"""
with TestRun.step(f"Disable udev"):
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Create filesystem on core device"):
with TestRun.step("Prepare cache and core devices"):
cache_disk = TestRun.disks["cache"]
core_disk = TestRun.disks["core"]
with TestRun.step("Create filesystem on core device"):
core_disk.create_filesystem(filesystem)
with TestRun.step("Start cache and add core"):
@ -231,7 +233,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
with TestRun.step("Mount core"):
core.mount(mount_point=mount_point)
with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB"):
with TestRun.step("Set sequential cutoff policy to always and threshold to 20MiB"):
core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))
@ -279,7 +281,7 @@ def run_dd(target_path, count, seek):
TestRun.LOGGER.info(f"dd command:\n{dd}")
output = dd.run()
if output.exit_code != 0:
raise CmdException("Error during IO", output)
raise CmdException("Error during I/O", output)
def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache):

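For reference, run_dd() from the hunk above takes a target path, a block count, and an offset, so a single sequential stream can be produced as below. The block size is defined inside the helper and the device path here is hypothetical:

```python
# One sequential stream: 1024 blocks written at offset 0 of the exported object
run_dd("/dev/cas1-1", count=1024, seek=0)
```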
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -40,15 +40,14 @@ class VerifyType(Enum):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
"""
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
title: Functional sequential cutoff test with multiple cores
description: |
Testing if amount of data written to cache after sequential writes for different
sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4
cores and random IO against the last core, is correct.
Test checking if data is cached properly with sequential cutoff "always" policy
when sequential and random I/O is running to multiple cores.
pass_criteria:
- Amount of written blocks to cache is less or equal than amount set
with sequential cut-off threshold for three first cores.
- Amount of written blocks to cache is equal to io size run against last core.
with sequential cutoff threshold for three first cores.
- Amount of written blocks to cache is equal to I/O size run against last core.
"""
with TestRun.step("Prepare cache and core devices"):
@ -76,7 +75,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
)
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
with TestRun.step("Set sequential cut-off parameters for all cores"):
with TestRun.step("Set sequential cutoff parameters for all cores"):
writes_before_list = []
fio_additional_size = Size(10, Unit.Blocks4096)
thresholds_list = [
@ -96,7 +95,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold)
with TestRun.step("Prepare sequential IO against first three cores"):
with TestRun.step("Prepare sequential I/O against first three cores"):
block_size = Size(4, Unit.KibiByte)
fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)
@ -107,7 +106,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
fio_job.target(core.path)
writes_before_list.append(core.get_statistics().block_stats.cache.writes)
with TestRun.step("Prepare random IO against the last core"):
with TestRun.step("Prepare random I/O against the last core"):
fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
fio_job.size(io_sizes_list[-1])
fio_job.read_write(io_type_last)
@ -117,7 +116,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
with TestRun.step("Run fio against all cores"):
fio.run()
with TestRun.step("Verify writes to cache count after IO"):
with TestRun.step("Verify writes to cache count after I/O"):
margins = [
min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
@ -159,17 +158,16 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cache_line_size):
def test_seq_cutoff_multi_core_cpu_pinned(cache_mode, io_type, io_type_last, cache_line_size):
"""
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
title: Functional sequential cutoff test with multiple cores and CPU-pinned I/O
description: |
Testing if amount of data written to cache after sequential writes for different
sequential cut-off thresholds on each core, while running sequential IO, pinned,
on 3 out of 4 cores and random IO against the last core, is correct.
Test checking if data is cached properly with sequential cutoff "always" policy
when sequential and random CPU-pinned I/O is running to multiple cores.
pass_criteria:
- Amount of written blocks to cache is less or equal than amount set
with sequential cut-off threshold for three first cores.
- Amount of written blocks to cache is equal to io size run against last core.
with sequential cutoff threshold for three first cores.
- Amount of written blocks to cache is equal to I/O size run against last core.
"""
with TestRun.step("Partition cache and core devices"):
@ -198,7 +196,7 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
)
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
with TestRun.step(f"Set sequential cut-off parameters for all cores"):
with TestRun.step("Set sequential cutoff parameters for all cores"):
writes_before_list = []
fio_additional_size = Size(10, Unit.Blocks4096)
thresholds_list = [
@ -218,7 +216,9 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold)
with TestRun.step("Prepare sequential IO against first three cores"):
with TestRun.step(
"Prepare sequential I/O against first three cores and random I/O against the last one"
):
fio = (
Fio()
.create_command()
@ -244,10 +244,10 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
fio_job.target(core_list[-1].path)
writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)
with TestRun.step("Running IO against all cores"):
with TestRun.step("Running I/O against all cores"):
fio.run()
with TestRun.step("Verifying writes to cache count after IO"):
with TestRun.step("Verifying writes to cache count after I/O"):
for core, writes, threshold, io_size in zip(
core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
):
@ -282,16 +282,14 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
"""
title: Sequential cut-off tests for writes and reads for 'never', 'always' and 'full' policies
title: Functional test for sequential cutoff threshold parameter
description: |
Testing if amount of data written to cache after sequential writes and reads for different
sequential cut-off policies with cache configured with different cache line size
is valid for sequential cut-off threshold parameter, assuming that cache occupancy
doesn't reach 100% during test.
Check if data is cached properly according to sequential cutoff policy and
threshold parameter.
pass_criteria:
- Amount of written blocks to cache is less or equal than amount set
with sequential cut-off parameter in case of 'always' policy.
- Amount of written blocks to cache is at least equal io size in case of 'never' and 'full'
- Amount of blocks written to cache is less than or equal to amount set
with sequential cutoff parameter in case of 'always' policy.
- Amount of blocks written to cache is at least equal to I/O size in case of 'never' and 'full'
policy.
"""
@ -326,13 +324,13 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
)
io_size = (threshold + fio_additional_size).align_down(0x1000)
with TestRun.step(f"Setting cache sequential cut off policy mode to {policy}"):
with TestRun.step(f"Setting cache sequential cutoff policy mode to {policy}"):
cache.set_seq_cutoff_policy(policy)
with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
cache.set_seq_cutoff_threshold(threshold)
with TestRun.step("Prepare sequential IO against core"):
with TestRun.step("Prepare sequential I/O against core"):
sync()
writes_before = core.get_statistics().block_stats.cache.writes
fio = (
@ -364,16 +362,15 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
"""
title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
title: Functional test for sequential cutoff threshold parameter and 'full' policy
description: |
Testing if amount of data written to cache after sequential io against fully occupied
cache for 'full' sequential cut-off policy with cache configured with different cache
line sizes is valid for sequential cut-off threshold parameter.
Check if data is cached properly according to sequential cutoff 'full' policy and given
threshold parameter.
pass_criteria:
- Amount of written blocks to cache is big enough to fill cache when 'never' sequential
cut-off policy is set
cutoff policy is set
- Amount of written blocks to cache is less or equal than amount set
with sequential cut-off parameter in case of 'full' policy.
with sequential cutoff parameter in case of 'full' policy.
"""
with TestRun.step("Partition cache and core devices"):
@ -407,10 +404,10 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
)
io_size = (threshold + fio_additional_size).align_down(0x1000)
with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.never}"):
with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.never}"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
with TestRun.step("Prepare sequential IO against core"):
with TestRun.step("Prepare sequential I/O against core"):
sync()
fio = (
Fio()
@ -432,13 +429,13 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
f"Cache occupancy is too small: {occupancy_percentage}, expected at least 95%"
)
with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.full}"):
with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.full}"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)
with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
cache.set_seq_cutoff_threshold(threshold)
with TestRun.step(f"Running sequential IO ({io_dir})"):
with TestRun.step(f"Running sequential I/O ({io_dir})"):
sync()
writes_before = core.get_statistics().block_stats.cache.writes
fio = (

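All four sequential cutoff tests above share the same per-core setup; condensed, it is just the following, with core_list coming from the test steps and the threshold value being illustrative:

```python
from api.cas.cache_config import SeqCutOffPolicy
from type_def.size import Size, Unit

for core in core_list:
    core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
    core.set_seq_cutoff_threshold(Size(1, Unit.MebiByte))
```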
View File

@ -1,13 +1,13 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode
from api.cas.cache_config import CacheMode, CacheModeTrait
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.udev import Udev
@ -20,19 +20,17 @@ dd_count = 100
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WA, CacheMode.WB])
@pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertRead))
@pytest.mark.CI()
def test_ci_read(cache_mode):
"""
title: Verification test for write mode: write around
description: Verify if write mode: write around, works as expected and cache only reads
and does not cache write
title: Verification test for caching reads in various cache modes
description: Check if reads are properly cached in various cache modes
pass criteria:
- writes are not cached
- reads are cached
- Reads are cached
"""
with TestRun.step("Prepare partitions"):
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
@ -45,7 +43,7 @@ def test_ci_read(cache_mode):
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step(f"Start cache with cache_mode={cache_mode}"):
with TestRun.step(f"Start cache in {cache_mode} cache mode"):
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
cache_mode=cache_mode)
casadm.add_core(cache, core_device)
@ -99,7 +97,14 @@ def test_ci_read(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.CI()
def test_ci_write_around_write():
with TestRun.step("Prepare partitions"):
"""
title: Verification test for writes in Write-Around cache mode
description: Validate I/O statistics after writing to exported object in Write-Around cache mode
pass criteria:
- Writes are not cached
- After inserting writes to core, data is read from core and not from cache
"""
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
@ -112,7 +117,7 @@ def test_ci_write_around_write():
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start CAS Linux in Write Around mode"):
with TestRun.step("Start cache in Write-Around mode"):
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
cache_mode=CacheMode.WA)
casadm.add_core(cache, core_device)
@ -183,14 +188,14 @@ def test_ci_write_around_write():
else:
TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta_1} != 0")
with TestRun.step("Verify that reads propagated to core"):
with TestRun.step("Verify that data was read from core"):
read_core_delta_2 = read_core_2 - read_core_1
if read_core_delta_2 == data_write:
TestRun.LOGGER.info(f"Reads from core: {read_core_delta_2} == {data_write}")
else:
TestRun.LOGGER.error(f"Reads from core: {read_core_delta_2} != {data_write}")
with TestRun.step("Verify that reads did not occur on cache"):
with TestRun.step("Verify that data was not read from cache"):
read_cache_delta_2 = read_cache_2 - read_cache_1
if read_cache_delta_2.value == 0:
TestRun.LOGGER.info(f"Reads from cache: {read_cache_delta_2} == 0")
@ -203,7 +208,15 @@ def test_ci_write_around_write():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.CI()
def test_ci_write_through_write():
with TestRun.step("Prepare partitions"):
"""
title: Verification test for Write-Through cache mode
description: |
Validate if reads and writes are cached properly for cache in Write-Through mode
pass criteria:
- Writes are inserted to cache and core
- Reads are not cached
"""
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
@ -216,7 +229,7 @@ def test_ci_write_through_write():
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start CAS Linux in Write Through mode"):
with TestRun.step("Start cache in Write-Through mode"):
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
cache_mode=CacheMode.WT)
casadm.add_core(cache, core_device)

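These CI tests all follow the same statistics-delta pattern visible in the hunks above; a condensed sketch, with cache coming from the test steps:

```python
from core.test_run import TestRun

stats_before = cache.get_statistics()
# ... run dd against the exported object here ...
stats_after = cache.get_statistics()

write_cache_delta = (
    stats_after.block_stats.cache.writes - stats_before.block_stats.cache.writes
)
if write_cache_delta.value != 0:  # e.g. Write-Around must not cache writes
    TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta} != 0")
```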
View File

@ -25,51 +25,51 @@ from test_tools.memory import disable_memory_affecting_functions, get_mem_free,
@pytest.mark.os_dependent
def test_insufficient_memory_for_cas_module():
"""
title: Negative test for the ability of CAS to load the kernel module with insufficient memory.
title: Load CAS kernel module with insufficient memory
description: |
Check that the CAS kernel module wont be loaded if enough memory is not available
Negative test for the ability to load the CAS kernel module with insufficient memory.
pass_criteria:
- CAS module cannot be loaded with not enough memory.
- Loading CAS with not enough memory returns error.
- CAS kernel module cannot be loaded when there is not enough memory.
- Loading CAS kernel module with not enough memory returns an error.
"""
with TestRun.step("Disable caching and memory over-committing"):
disable_memory_affecting_functions()
drop_caches()
with TestRun.step("Measure memory usage without OpenCAS module"):
with TestRun.step("Measure memory usage without CAS kernel module"):
if is_kernel_module_loaded(CasModule.cache.value):
unload_kernel_module(CasModule.cache.value)
available_mem_before_cas = get_mem_free()
with TestRun.step("Load CAS module"):
with TestRun.step("Load CAS kernel module"):
load_kernel_module(CasModule.cache.value)
with TestRun.step("Measure memory usage with CAS module"):
with TestRun.step("Measure memory usage with CAS kernel module"):
available_mem_with_cas = get_mem_free()
memory_used_by_cas = available_mem_before_cas - available_mem_with_cas
TestRun.LOGGER.info(
f"OpenCAS module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
f"CAS kernel module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
)
with TestRun.step("Unload CAS module"):
with TestRun.step("Unload CAS kernel module"):
unload_kernel_module(CasModule.cache.value)
with TestRun.step("Allocate memory, leaving not enough memory for CAS module"):
memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4))
allocate_memory(memory_to_leave)
TestRun.LOGGER.info(
f"Memory left for OpenCAS module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
f"Memory left for CAS kernel module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
)
with TestRun.step(
"Try to load OpenCAS module and check if correct error message is printed on failure"
"Try to load CAS kernel module and check if correct error message is printed on failure"
):
output = load_kernel_module(CasModule.cache.value)
if output.stderr and output.exit_code != 0:
TestRun.LOGGER.info(f"Cannot load OpenCAS module as expected.\n{output.stderr}")
TestRun.LOGGER.info(f"Cannot load CAS kernel module as expected.\n{output.stderr}")
else:
TestRun.LOGGER.error("Loading OpenCAS module successfully finished, but should fail.")
TestRun.LOGGER.error("Loading CAS kernel module successfully finished, but should fail.")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@ -118,3 +118,4 @@ def test_attach_cache_min_ram():
with TestRun.step("Unlock RAM memory"):
unmount_ramfs()

View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -23,14 +23,14 @@ from test_tools.udev import Udev
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policy():
"""
Title: test_cleaning_policy
Title: Basic test for cleaning policy
description: |
The test is to see if dirty data will be removed from the Cache after changing the
cleaning policy from NOP to one that expects a flush.
Verify cleaning behaviour after changing cleaning policy from NOP
to one that expects a flush.
pass_criteria:
- Cache is successfully populated with dirty data
- Cleaning policy is changed successfully
- There is no dirty data after the policy change
- Cache is successfully populated with dirty data
- Cleaning policy is changed successfully
- There is no dirty data after the policy change
"""
wait_time = 60

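A hedged sketch of the NOP-to-flushing-policy check described above; get_dirty_blocks() and the CleaningPolicy import path are assumptions based on the rest of this suite, and cache comes from the test's prepare steps:

```python
import time

from api.cas.cache_config import CleaningPolicy
from core.test_run import TestRun

cache.set_cleaning_policy(cleaning_policy=CleaningPolicy.nop)
# ... populate the cache with dirty data here (Write-Back mode) ...
cache.set_cleaning_policy(cleaning_policy=CleaningPolicy.alru)

deadline = time.time() + 60  # wait_time = 60 in the hunk above
while time.time() < deadline:
    if cache.get_dirty_blocks().value == 0:  # method name assumed
        break
    time.sleep(1)
else:
    TestRun.fail("Dirty data remains after changing the cleaning policy")
```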
View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -14,7 +15,7 @@ def test_cli_help_spelling():
title: Spelling test for 'help' command
description: Validates spelling of 'help' in CLI
pass criteria:
- no spelling mistakes are found
- No spelling mistakes are found
"""
cas_dictionary = os.path.join(TestRun.usr.repo_dir, "test", "functional", "resources")

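The spelling test reduces to checking every word of the help output against a dictionary file; a self-contained sketch of that core check (the word-splitting rule is illustrative):

```python
import re

def find_unknown_words(help_text: str, dictionary: set) -> list:
    words = re.findall(r"[A-Za-z']+", help_text.lower())
    return [word for word in words if word not in dictionary]
```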
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -20,12 +20,11 @@ from test_tools.dd import Dd
@pytest.mark.parametrize("purge_target", ["cache", "core"])
def test_purge(purge_target):
"""
title: Call purge without and with `--script` switch
description: |
Check if purge is called only when `--script` switch is used.
title: Basic test for purge command
description: Check purge command behaviour with and without '--script' flag
pass_criteria:
- casadm returns an error when `--script` is missing
- cache is wiped when purge command is used properly
- Error returned when '--script' is missing
- Cache is wiped when purge command is used properly
"""
with TestRun.step("Prepare devices"):
cache_device = TestRun.disks["cache"]
@ -41,7 +40,7 @@ def test_purge(purge_target):
cache = casadm.start_cache(cache_device, force=True)
core = casadm.add_core(cache, core_device)
with TestRun.step("Trigger IO to prepared cache instance"):
with TestRun.step("Trigger I/O to prepared cache instance"):
dd = (
Dd()
.input("/dev/zero")
@ -79,8 +78,3 @@ def test_purge(purge_target):
if cache.get_statistics().usage_stats.occupancy.get_value() != 0:
TestRun.fail(f"{cache.get_statistics().usage_stats.occupancy.get_value()}")
TestRun.fail(f"Purge {purge_target} should invalidate all cache lines!")
with TestRun.step(
f"Stop cache"
):
casadm.stop_all_caches()

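The contract verified above: purge must be rejected without '--script' and accepted with it. A hedged raw-CLI sketch; the flag spelling follows the test's description and should be verified against casadm --help:

```python
from core.test_run import TestRun

output = TestRun.executor.run("casadm --purge-cache --cache-id 1")
if output.exit_code == 0:
    TestRun.fail("purge-cache without --script should return an error")

TestRun.executor.run_expect_success("casadm --script --purge-cache --cache-id 1")
```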
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# Copyright(c) 2024-2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
@ -44,8 +44,8 @@ def test_standby_neg_cli_params():
"""
title: Verifying parameters for starting a standby cache instance
description: |
Try executing the standby init command with required arguments missing or
disallowed arguments present.
Try executing the standby init command with required arguments missing or
disallowed arguments present.
pass_criteria:
- The execution is unsuccessful for all improper argument combinations
- A proper error message is displayed for unsuccessful executions
@ -120,11 +120,12 @@ def test_activate_neg_cli_params():
-The execution is unsuccessful for all improper argument combinations
-A proper error message is displayed for unsuccessful executions
"""
cache_id = 1
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache_id = 1
with TestRun.step("Init standby cache"):
cache_dev = Device(cache_device.path)
@ -201,6 +202,8 @@ def test_standby_neg_cli_management():
- The execution is successful for allowed management commands
- A proper error message is displayed for unsuccessful executions
"""
cache_id = 1
with TestRun.step("Prepare the device for the cache."):
device = TestRun.disks["cache"]
device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
@ -208,7 +211,6 @@ def test_standby_neg_cli_management():
core_device = device.partitions[1]
with TestRun.step("Prepare the standby instance"):
cache_id = 1
cache = casadm.standby_init(
cache_dev=cache_device, cache_id=cache_id,
cache_line_size=CacheLineSize.LINE_32KiB, force=True
@ -272,19 +274,19 @@ def test_start_neg_cli_flags():
"""
title: Blocking standby start command with mutually exclusive flags
description: |
Try executing the standby start command with different combinations of mutually
exclusive flags.
Try executing the standby start command with different combinations of mutually
exclusive flags.
pass_criteria:
- The command execution is unsuccessful for commands with mutually exclusive flags
- A proper error message is displayed
"""
cache_id = 1
cache_line_size = 32
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache_id = 1
cache_line_size = 32
with TestRun.step("Try to start standby cache with mutually exclusive parameters"):
init_required_params = f' --cache-device {cache_device.path}' \
@ -327,19 +329,19 @@ def test_activate_without_detach():
"""
title: Activate cache without detach command.
description: |
Try activate passive cache without detach command before activation.
Try to activate passive cache without detach command before activation.
pass_criteria:
- The activation is not possible
- The cache remains in Standby state after unsuccessful activation
- The cache exported object is present after an unsuccessful activation
"""
cache_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
with TestRun.step("Prepare the device for the cache."):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(500, Unit.MebiByte)])
cache_dev = cache_dev.partitions[0]
cache_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
with TestRun.step("Start cache instance."):
cache = casadm.start_cache(cache_dev=cache_dev, cache_id=cache_id)
@ -390,15 +392,18 @@ def test_activate_without_detach():
@pytest.mark.require_disk("standby_cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_activate_neg_cache_line_size():
"""
title: Blocking cache with mismatching cache line size activation.
description: |
Try restoring cache operations from a replicated cache that was initialized
with different cache line size than the original cache.
pass_criteria:
- The activation is cancelled
- The cache remains in Standby detached state after an unsuccessful activation
- A proper error message is displayed
title: Blocking cache with mismatching cache line size activation.
description: |
Try restoring cache operations from a replicated cache that was initialized
with different cache line size than the original cache.
pass_criteria:
- The activation is cancelled
- The cache remains in Standby detached state after an unsuccessful activation
- A proper error message is displayed
"""
cache_id = 1
active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
cache_exp_obj_name = f"cas-cache-{cache_id}"
with TestRun.step("Prepare cache devices"):
active_cache_dev = TestRun.disks["active_cache"]
@ -407,73 +412,69 @@ def test_activate_neg_cache_line_size():
standby_cache_dev = TestRun.disks["standby_cache"]
standby_cache_dev.create_partitions([Size(500, Unit.MebiByte)])
standby_cache_dev = standby_cache_dev.partitions[0]
cache_id = 1
active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
cache_exp_obj_name = f"cas-cache-{cache_id}"
with TestRun.step("Start active cache instance."):
active_cache = casadm.start_cache(cache_dev=active_cache_dev, cache_id=cache_id,
cache_line_size=active_cls)
with TestRun.step("Start active cache instance."):
active_cache = casadm.start_cache(cache_dev=active_cache_dev, cache_id=cache_id,
cache_line_size=active_cls)
with TestRun.step("Create dump file with cache metadata"):
with TestRun.step("Get metadata size"):
dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
md_size = dmesg.get_metadata_size_on_device(dmesg_out)
with TestRun.step("Get metadata size"):
dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
md_size = dmesg.get_metadata_size_on_device(dmesg_out)
with TestRun.step("Dump the metadata of the cache"):
dump_file_path = "/tmp/test_activate_corrupted.dump"
md_dump = File(dump_file_path)
md_dump.remove(force=True, ignore_errors=True)
dd_count = int(md_size / Size(1, Unit.MebiByte)) + 1
(
Dd().input(active_cache_dev.path)
.output(md_dump.full_path)
.block_size(Size(1, Unit.MebiByte))
.count(dd_count)
.run()
)
md_dump.refresh_item()
with TestRun.step("Dump the metadata of the cache"):
dump_file_path = "/tmp/test_activate_corrupted.dump"
md_dump = File(dump_file_path)
md_dump.remove(force=True, ignore_errors=True)
dd_count = int(md_size / Size(1, Unit.MebiByte)) + 1
(
Dd().input(active_cache_dev.path)
.output(md_dump.full_path)
.block_size(Size(1, Unit.MebiByte))
.count(dd_count)
.run()
)
md_dump.refresh_item()
with TestRun.step("Stop cache instance."):
active_cache.stop()
with TestRun.step("Stop cache instance."):
active_cache.stop()
with TestRun.step("Start standby cache instance."):
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
cache_line_size=standby_cls,
force=True)
with TestRun.step("Start standby cache instance."):
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
cache_line_size=standby_cls,
force=True)
with TestRun.step("Verify if the cache exported object appeared in the system"):
output = TestRun.executor.run_expect_success(
f"ls -la /dev/ | grep {cache_exp_obj_name}"
)
if output.stdout[0] != "b":
TestRun.fail("The cache exported object is not a block device")
with TestRun.step("Verify if the cache exported object appeared in the system"):
output = TestRun.executor.run_expect_success(
f"ls -la /dev/ | grep {cache_exp_obj_name}"
)
if output.stdout[0] != "b":
TestRun.fail("The cache exported object is not a block device")
with TestRun.step("Detach standby cache instance"):
standby_cache.standby_detach()
with TestRun.step("Detach standby cache instance"):
standby_cache.standby_detach()
with TestRun.step(f"Copy changed metadata to the standby instance"):
Dd().input(md_dump.full_path).output(standby_cache_dev.path).run()
sync()
with TestRun.step(f"Copy changed metadata to the standby instance"):
Dd().input(md_dump.full_path).output(standby_cache_dev.path).run()
sync()
with TestRun.step("Try to activate cache instance"):
with pytest.raises(CmdException) as cmdExc:
output = standby_cache.standby_activate(standby_cache_dev)
if not check_stderr_msg(output, cache_line_size_mismatch):
TestRun.LOGGER.error(
f'Expected error message in format '
f'"{cache_line_size_mismatch[0]}"'
f'Got "{output.stderr}" instead.'
)
assert "Failed to activate standby cache." in str(cmdExc.value)
with TestRun.step("Verify if cache is in standby detached state after failed activation"):
cache_status = standby_cache.get_status()
if cache_status != CacheStatus.standby_detached:
with TestRun.step("Try to activate cache instance"):
with pytest.raises(CmdException) as cmdExc:
output = standby_cache.standby_activate(standby_cache_dev)
if not check_stderr_msg(output, cache_line_size_mismatch):
TestRun.LOGGER.error(
f'Expected Cache state: "{CacheStatus.standby.value}" '
f'Got "{cache_status.value}" instead.'
f'Expected error message in format '
f'"{cache_line_size_mismatch[0]}"'
f'Got "{output.stderr}" instead.'
)
assert "Failed to activate standby cache." in str(cmdExc.value)
with TestRun.step("Verify if cache is in standby detached state after failed activation"):
cache_status = standby_cache.get_status()
if cache_status != CacheStatus.standby_detached:
TestRun.LOGGER.error(
f'Expected Cache state: "{CacheStatus.standby.value}" '
f'Got "{cache_status.value}" instead.'
)
@pytest.mark.CI
@ -489,17 +490,18 @@ def test_standby_init_with_preexisting_metadata():
- initialize cache without force flag fails and informative error message is printed
- initialize cache with force flag succeeds and passive instance is present in system
"""
cache_line_size = CacheLineSize.LINE_32KiB
cache_id = 1
with TestRun.step("Prepare device for cache"):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(200, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cls = CacheLineSize.LINE_32KiB
cache_id = 1
with TestRun.step("Start standby cache instance"):
cache = casadm.standby_init(
cache_dev=cache_device,
cache_line_size=cls,
cache_line_size=cache_line_size,
cache_id=cache_id,
force=True,
)
@ -512,7 +514,7 @@ def test_standby_init_with_preexisting_metadata():
standby_init_cmd(
cache_dev=cache_device.path,
cache_id=str(cache_id),
cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)),
cache_line_size=str(int(cache_line_size.value.value / Unit.KibiByte.value)),
)
)
if not check_stderr_msg(output, start_cache_with_existing_metadata):
@ -524,7 +526,7 @@ def test_standby_init_with_preexisting_metadata():
with TestRun.step("Try initialize cache with force flag"):
casadm.standby_init(
cache_dev=cache_device,
cache_line_size=cls,
cache_line_size=cache_line_size,
cache_id=cache_id,
force=True,
)
@ -549,12 +551,13 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
- initialize cache without force flag fails and informative error message is printed
- initialize cache with force flag succeeds and passive instance is present in system
"""
cache_line_size = CacheLineSize.LINE_32KiB
cache_id = 1
with TestRun.step("Prepare device for cache"):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(200, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cls = CacheLineSize.LINE_32KiB
cache_id = 1
with TestRun.step("Create filesystem on cache device partition"):
cache_device.create_filesystem(filesystem)
@ -564,7 +567,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
standby_init_cmd(
cache_dev=cache_device.path,
cache_id=str(cache_id),
cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)),
cache_line_size=str(int(cache_line_size.value.value / Unit.KibiByte.value)),
)
)
if not check_stderr_msg(output, standby_init_with_existing_filesystem):
@ -576,7 +579,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
with TestRun.step("Try initialize cache with force flag"):
casadm.standby_init(
cache_dev=cache_device,
cache_line_size=cls,
cache_line_size=cache_line_size,
cache_id=cache_id,
force=True,
)
@ -593,13 +596,18 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
@pytest.mark.require_disk("core", DiskTypeLowerThan("caches"))
def test_standby_activate_with_corepool():
"""
title: Activate standby cache instance with corepool
title: Activate standby cache instance with core pool
description: |
Activation of standby cache with core taken from core pool
pass_criteria:
- During activate metadata on the device match with metadata in DRAM
- Core is in active state after activate
- During activation, metadata on the device matches metadata in DRAM
- Core is in active state after activation
"""
cache_id = 1
core_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
cache_line_size = CacheLineSize.LINE_16KiB
with TestRun.step("Prepare cache and core devices"):
caches_dev = TestRun.disks["caches"]
caches_dev.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
@ -609,13 +617,8 @@ def test_standby_activate_with_corepool():
core_dev.create_partitions([Size(200, Unit.MebiByte)])
core_dev = core_dev.partitions[0]
cache_id = 1
core_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
cls = CacheLineSize.LINE_16KiB
with TestRun.step("Start regular cache instance"):
cache = casadm.start_cache(cache_dev=active_cache_dev, cache_line_size=cls,
cache = casadm.start_cache(cache_dev=active_cache_dev, cache_line_size=cache_line_size,
cache_id=cache_id)
with TestRun.step("Add core to regular cache instance"):
@ -629,7 +632,7 @@ def test_standby_activate_with_corepool():
with TestRun.step("Start standby cache instance."):
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
cache_line_size=cls,
cache_line_size=cache_line_size,
force=True)
with TestRun.step(f"Copy changed metadata to the standby instance"):
@ -652,12 +655,12 @@ def test_standby_activate_with_corepool():
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_standby_start_stop(cache_line_size):
"""
title: Start and stop a standby cache instance.
description: Test if cache can be started in standby state and stopped without activation.
pass_criteria:
- A cache exported object appears after starting a cache in standby state
- The data written to the cache exported object committed on the underlying cache device
- The cache exported object disappears after stopping the standby cache instance
title: Start and stop a standby cache instance.
description: Test if cache can be started in standby state and stopped without activation.
pass_criteria:
- A cache exported object appears after starting a cache in standby state
- The data written to the cache exported object committed on the underlying cache device
- The cache exported object disappears after stopping the standby cache instance
"""
with TestRun.step("Prepare a cache device"):
cache_size = Size(500, Unit.MebiByte)

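The standby scenarios in this file share one lifecycle, built from the calls visible in the hunks above; device and ID values here are illustrative:

```python
from api.cas import casadm
from api.cas.cache_config import CacheLineSize

standby_cache = casadm.standby_init(
    cache_dev=standby_cache_dev,
    cache_id=1,
    cache_line_size=CacheLineSize.LINE_32KiB,
    force=True,
)
standby_cache.standby_detach()                     # detach before replacing metadata
standby_cache.standby_activate(standby_cache_dev)  # promote standby to active
```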
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -21,12 +21,12 @@ CORE_ID_RANGE = (0, 4095)
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_start_stop_default_id(shortcut):
"""
title: Test for starting a cache with a default ID - short and long command
description: |
Start a new cache with a default ID and then stop this cache.
pass_criteria:
- The cache has successfully started with default ID
- The cache has successfully stopped
title: Test for starting a cache with a default ID - short and long command
description: |
Start a new cache with a default ID and then stop this cache.
pass_criteria:
- The cache has successfully started with default ID
- The cache has successfully stopped
"""
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks['cache']
@ -62,12 +62,12 @@ def test_cli_start_stop_default_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_start_stop_custom_id(shortcut):
"""
title: Test for starting a cache with a custom ID - short and long command
description: |
Start a new cache with a random ID (from allowed pool) and then stop this cache.
pass_criteria:
- The cache has successfully started with a custom ID
- The cache has successfully stopped
title: Test for starting a cache with a custom ID - short and long command
description: |
Start a new cache with a random ID (from allowed pool) and then stop this cache.
pass_criteria:
- The cache has successfully started with a custom ID
- The cache has successfully stopped
"""
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks['cache']
@ -106,13 +106,13 @@ def test_cli_start_stop_custom_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_add_remove_default_id(shortcut):
"""
title: Test for adding and removing a core with a default ID - short and long command
description: |
Start a new cache and add a core to it without passing a core ID as an argument
and then remove this core from the cache.
pass_criteria:
- The core is added to the cache with a default ID
- The core is successfully removed from the cache
title: Test for adding and removing a core with a default ID - short and long command
description: |
Start a new cache and add a core to it without passing a core ID as an argument
and then remove this core from the cache.
pass_criteria:
- The core is added to the cache with a default ID
- The core is successfully removed from the cache
"""
with TestRun.step("Prepare the devices."):
cache_disk = TestRun.disks['cache']
@ -157,13 +157,13 @@ def test_cli_add_remove_default_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_add_remove_custom_id(shortcut):
"""
title: Test for adding and removing a core with a custom ID - short and long command
description: |
Start a new cache and add a core to it with passing a random core ID
(from allowed pool) as an argument and then remove this core from the cache.
pass_criteria:
- The core is added to the cache with a default ID
- The core is successfully removed from the cache
title: Test for adding and removing a core with a custom ID - short and long command
description: |
Start a new cache and add a core to it with passing a random core ID
(from allowed pool) as an argument and then remove this core from the cache.
pass_criteria:
- The core is added to the cache with a default ID
- The core is successfully removed from the cache
"""
with TestRun.step("Prepare the devices."):
cache_disk = TestRun.disks['cache']
@ -209,13 +209,13 @@ def test_cli_add_remove_custom_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_load_and_force(shortcut):
"""
title: Test if it is possible to use start command with 'load' and 'force' flag at once
description: |
Try to start cache with 'load' and 'force' options at the same time
and check if it is not possible to do
pass_criteria:
- Start cache command with both 'force' and 'load' options should fail
- Proper message should be received
title: Test if it is possible to use start command with 'load' and 'force' flag at once
description: |
Try to start cache with 'load' and 'force' options at the same time
and check if it is not possible to do
pass_criteria:
- Start cache command with both 'force' and 'load' options should fail
- Proper message should be received
"""
with TestRun.step("Prepare cache."):
cache_device = TestRun.disks['cache']

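For the 'load'/'force' conflict described above, the raw command boils down to one line; a hedged sketch using casadm's long options (option spelling is an assumption, check casadm --help):

```python
from core.test_run import TestRun

output = TestRun.executor.run(
    f"casadm --start-cache --cache-device {cache_device.path} --load --force"
)
if output.exit_code == 0:
    TestRun.fail("Start cache with both --load and --force should fail")
```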
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -24,13 +24,16 @@ from test_tools.udev import Udev
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policy():
"""
Title: test manual casadm flush
description: | The test is to see if dirty data will be removed from the Cache
or Core after using the casadm command with the corresponding parameter.
title: Test for manual cache and core flushing
description: |
The test is to see if dirty data will be removed from the cache
or core after using the casadm command with the corresponding parameter.
pass_criteria:
- Cache and core are filled with dirty data.
- After cache and core flush dirty data are cleared.
"""
cache_id = 1
with TestRun.step("Prepare devices."):
cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
@ -39,7 +42,8 @@ def test_cleaning_policy():
core_disk = TestRun.disks["core"]
core_disk.create_partitions([Size(1, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
cache_id = 1
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start cache and set cleaning policy to NOP"):

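A minimal sketch of the manual flushes this test verifies; flush_cache(), flush_core(), and get_dirty_blocks() are assumed wrapper methods, with cache and core coming from the test steps:

```python
from core.test_run import TestRun

cache.flush_cache()  # flush all dirty data held by the cache
if cache.get_dirty_blocks().value != 0:
    TestRun.fail("Dirty data remains after cache flush")

core.flush_core()    # flush dirty data belonging to a single core
if core.get_dirty_blocks().value != 0:
    TestRun.fail("Dirty data remains after core flush")
```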
View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -22,26 +22,37 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_default_params():
"""
title: Default sequential cut-off threshold & policy test
title: Default sequential cutoff threshold & policy test
description: Test if proper default threshold and policy is set after cache start
pass_criteria:
- "Full" shall be default sequential cut-off policy
- There shall be default 1MiB (1024kiB) value for sequential cut-off threshold
- "Full" shall be default sequential cutoff policy
- There shall be a default 1MiB (1024KiB) value for the sequential cutoff threshold
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
with TestRun.step("Getting sequential cut-off parameters"):
params = cores[0].get_seq_cut_off_parameters()
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
with TestRun.step("Check if proper sequential cut off policy is set as a default"):
cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)
with TestRun.step("Getting sequential cutoff parameters"):
params = core.get_seq_cut_off_parameters()
with TestRun.step("Check if proper sequential cutoff policy is set as a default"):
if params.policy != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong sequential cut off policy set: {params.policy} "
TestRun.fail(f"Wrong sequential cutoff policy set: {params.policy} "
f"should be {SeqCutOffPolicy.DEFAULT}")
with TestRun.step("Check if proper sequential cut off threshold is set as a default"):
with TestRun.step("Check if proper sequential cutoff threshold is set as a default"):
if params.threshold != SEQ_CUT_OFF_THRESHOLD_DEFAULT:
TestRun.fail(f"Wrong sequential cut off threshold set: {params.threshold} "
TestRun.fail(f"Wrong sequential cutoff threshold set: {params.threshold} "
f"should be {SEQ_CUT_OFF_THRESHOLD_DEFAULT}")
@ -50,32 +61,41 @@ def test_seq_cutoff_default_params():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_core(policy):
"""
title: Sequential cut-off policy set/get test for core
title: Sequential cutoff policy set/get test for core
description: |
Test if CAS is setting proper sequential cut-off policy for core and
returns previously set value
Verify if it is possible to set and get a sequential cutoff policy per core
pass_criteria:
- Sequential cut-off policy obtained from get-param command for the first core must be
- Sequential cutoff policy obtained from get-param command for the first core must be
the same as the one used in set-param command
- Sequential cut-off policy obtained from get-param command for the second core must be
- Sequential cutoff policy obtained from get-param command for the second core must be
proper default value
"""
with TestRun.step("Test prepare (start cache and add 2 cores)"):
cache, cores = prepare(cores_count=2)
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
with TestRun.step(f"Setting core sequential cut off policy mode to {policy}"):
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * 2)
cache_part = cache_device.partitions[0]
with TestRun.step("Start cache and add cores"):
cache = casadm.start_cache(cache_part, force=True)
cores = [cache.add_core(core_dev=part) for part in core_device.partitions]
with TestRun.step(f"Setting core sequential cutoff policy mode to {policy}"):
cores[0].set_seq_cutoff_policy(policy)
with TestRun.step("Check if proper sequential cut off policy was set for the first core"):
with TestRun.step("Check if proper sequential cutoff policy was set for the first core"):
if cores[0].get_seq_cut_off_policy() != policy:
TestRun.fail(f"Wrong sequential cut off policy set: "
TestRun.fail(f"Wrong sequential cutoff policy set: "
f"{cores[0].get_seq_cut_off_policy()} "
f"should be {policy}")
with TestRun.step("Check if proper default sequential cut off policy was set for the "
with TestRun.step("Check if proper default sequential cutoff policy was set for the "
"second core"):
if cores[1].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong default sequential cut off policy: "
TestRun.fail(f"Wrong default sequential cutoff policy: "
f"{cores[1].get_seq_cut_off_policy()} "
f"should be {SeqCutOffPolicy.DEFAULT}")
@ -85,24 +105,33 @@ def test_seq_cutoff_set_get_policy_core(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_cache(policy):
"""
title: Sequential cut-off policy set/get test for cache
title: Sequential cutoff policy set/get test for cache
description: |
Test if CAS is setting proper sequential cut-off policy for whole cache and
returns previously set value
Verify if it is possible to set and get a sequential cutoff policy for the whole cache
pass_criteria:
- Sequential cut-off policy obtained from get-param command for each of 3 cores must be the
- Sequential cutoff policy obtained from get-param command for each of 3 cores must be the
same as the one used in set-param command for cache
"""
with TestRun.step("Test prepare (start cache and add 3 cores)"):
cache, cores = prepare(cores_count=3)
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
with TestRun.step(f"Setting sequential cut off policy mode {policy} for cache"):
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * 3)
cache_part = cache_device.partitions[0]
with TestRun.step("Start cache and add cores"):
cache = casadm.start_cache(cache_part, force=True)
cores = [cache.add_core(core_dev=part) for part in core_device.partitions]
with TestRun.step(f"Setting sequential cutoff policy mode {policy} for cache"):
cache.set_seq_cutoff_policy(policy)
for i in TestRun.iteration(range(0, len(cores)), "Verifying if proper policy was set"):
with TestRun.step(f"Check if proper sequential cut off policy was set for core"):
with TestRun.step(f"Check if proper sequential cutoff policy was set for core"):
if cores[i].get_seq_cut_off_policy() != policy:
TestRun.fail(f"Wrong core sequential cut off policy: "
TestRun.fail(f"Wrong core sequential cutoff policy: "
f"{cores[i].get_seq_cut_off_policy()} "
f"should be {policy}")
@ -111,23 +140,35 @@ def test_seq_cutoff_set_get_policy_cache(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_policy_load():
"""
title: Sequential cut-off policy set/get test with cache load between
title: Sequential cutoff policy set/get test with cache load between
description: |
Set each possible policy for different core, stop cache, test if after cache load
sequential cut-off policy value previously set is being loaded correctly for each core.
Set each possible policy for different core, stop cache, test if after cache load
sequential cutoff policy value previously set is being loaded correctly for each core.
pass_criteria:
- Sequential cut-off policy obtained from get-param command after cache load
- Sequential cutoff policy obtained from get-param command after cache load
must be the same as the one used in set-param command before cache stop
- Sequential cut-off policy loaded for the last core should be the default one
"""
with TestRun.step(f"Test prepare (start cache and add {len(SeqCutOffPolicy) + 1} cores)"):
# Create as many cores as many possible policies including default one
cache, cores = prepare(cores_count=len(SeqCutOffPolicy) + 1)
policies = [policy for policy in SeqCutOffPolicy]
- Sequential cutoff policy loaded for the last core should be the default one
"""
policies = [policy for policy in SeqCutOffPolicy]
for i, core in TestRun.iteration(enumerate(cores[:-1]), "Set all possible policies "
"except the default one"):
with TestRun.step(f"Setting cache sequential cut off policy mode to "
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * (len(SeqCutOffPolicy) + 1))
cache_part = cache_device.partitions[0]
with TestRun.step("Start cache and add cores"):
cache = casadm.start_cache(cache_part, force=True)
cores = [cache.add_core(core_dev=part) for part in core_device.partitions]
for i, core in TestRun.iteration(
enumerate(cores[:-1]),
"Set all possible policies except the default one"
):
with TestRun.step(f"Setting cache sequential cutoff policy mode to "
f"{policies[i]}"):
cores[i].set_seq_cutoff_policy(policies[i])
@ -140,18 +181,21 @@ def test_seq_cutoff_policy_load():
with TestRun.step("Getting cores from loaded cache"):
cores = loaded_cache.get_core_devices()
for i, core in TestRun.iteration(enumerate(cores[:-1]), "Check if proper policies have "
"been loaded"):
with TestRun.step(f"Check if proper sequential cut off policy was loaded"):
for i, core in TestRun.iteration(
enumerate(cores[:-1]),
"Check if proper policies have been loaded"
):
with TestRun.step(f"Check if proper sequential cutoff policy was loaded"):
if cores[i].get_seq_cut_off_policy() != policies[i]:
TestRun.fail(f"Wrong sequential cut off policy loaded: "
TestRun.fail(f"Wrong sequential cutoff policy loaded: "
f"{cores[i].get_seq_cut_off_policy()} "
f"should be {policies[i]}")
with TestRun.step(f"Check if proper (default) sequential cut off policy was loaded for "
f"last core"):
with TestRun.step(
"Check if proper (default) sequential cutoff policy was loaded for last core"
):
if cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong sequential cut off policy loaded: "
TestRun.fail(f"Wrong sequential cutoff policy loaded: "
f"{cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy()} "
f"should be {SeqCutOffPolicy.DEFAULT}")
@ -163,29 +207,41 @@ def test_seq_cutoff_policy_load():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_invalid_threshold(threshold):
"""
title: Invalid sequential cut-off threshold test
description: Test if CAS is allowing setting invalid sequential cut-off threshold
title: Invalid sequential cutoff threshold test
description: Validate setting invalid sequential cutoff threshold
pass_criteria:
- Setting invalid sequential cut-off threshold should be blocked
- Setting invalid sequential cutoff threshold should be blocked
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte)
_threshold = Size(threshold, Unit.KibiByte)
with TestRun.step(f"Setting cache sequential cut off threshold to out of range value: "
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)
with TestRun.step(f"Setting cache sequential cutoff threshold to out of range value: "
f"{_threshold}"):
command = set_param_cutoff_cmd(
cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
cache_id=str(cache.cache_id), core_id=str(core.core_id),
threshold=str(int(_threshold.get_value(Unit.KiloByte))))
output = TestRun.executor.run_expect_fail(command)
if "Invalid sequential cutoff threshold, must be in the range 1-4194181"\
not in output.stderr:
TestRun.fail("Command succeeded (should fail)!")
with TestRun.step(f"Setting cache sequential cut off threshold "
with TestRun.step(f"Setting cache sequential cutoff threshold "
f"to value passed as a float"):
command = set_param_cutoff_cmd(
cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
cache_id=str(cache.cache_id), core_id=str(core.core_id),
threshold=str(_threshold.get_value(Unit.KiloByte)))
output = TestRun.executor.run_expect_fail(command)
if "Invalid sequential cutoff threshold, must be a correct unsigned decimal integer"\
@ -199,26 +255,36 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_threshold(threshold):
"""
title: Sequential cut-off threshold set/get test
description: |
Test if CAS is setting proper sequential cut-off threshold and returns
previously set value
title: Sequential cutoff threshold set/get test
description: Verify setting and getting value of sequential cutoff threshold
pass_criteria:
- Sequential cut-off threshold obtained from get-param command must be the same as
- Sequential cutoff threshold obtained from get-param command must be the same as
the one used in set-param command
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte)
_threshold = Size(threshold, Unit.KibiByte)
with TestRun.step(f"Setting cache sequential cut off threshold to "
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)
with TestRun.step(f"Setting cache sequential cutoff threshold to "
f"{_threshold}"):
cores[0].set_seq_cutoff_threshold(_threshold)
core.set_seq_cutoff_threshold(_threshold)
with TestRun.step("Check if proper sequential cut off threshold was set"):
if cores[0].get_seq_cut_off_threshold() != _threshold:
TestRun.fail(f"Wrong sequential cut off threshold set: "
f"{cores[0].get_seq_cut_off_threshold()} "
with TestRun.step("Check if proper sequential cutoff threshold was set"):
if core.get_seq_cut_off_threshold() != _threshold:
TestRun.fail(f"Wrong sequential cutoff threshold set: "
f"{core.get_seq_cut_off_threshold()} "
f"should be {_threshold}")
@ -228,22 +294,31 @@ def test_seq_cutoff_set_get_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_threshold_load(threshold):
"""
title: Sequential cut-off threshold set/get test with cache load between
description: |
Test if after cache load sequential cut-off threshold
value previously set is being loaded correctly. Each of possible sequential cut-off
policies is set for different core.
title: Sequential cutoff threshold after loading cache
description: Verify sequential cutoff threshold value after reloading the cache.
pass_criteria:
- Sequential cut-off threshold obtained from get-param command after cache load
- Sequential cutoff threshold obtained from get-param command after cache load
must be the same as the one used in set-param command before cache stop
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte)
_threshold = Size(threshold, Unit.KibiByte)
with TestRun.step(f"Setting cache sequential cut off threshold to "
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)
with TestRun.step(f"Setting cache sequential cutoff threshold to "
f"{_threshold}"):
cores[0].set_seq_cutoff_threshold(_threshold)
core.set_seq_cutoff_threshold(_threshold)
with TestRun.step("Stopping cache"):
cache.stop()
@ -254,28 +329,8 @@ def test_seq_cutoff_threshold_load(threshold):
with TestRun.step("Getting core from loaded cache"):
cores_load = loaded_cache.get_core_devices()
with TestRun.step("Check if proper sequential cut off policy was loaded"):
with TestRun.step("Check if proper sequential cutoff policy was loaded"):
if cores_load[0].get_seq_cut_off_threshold() != _threshold:
TestRun.fail(f"Wrong sequential cut off threshold set: "
TestRun.fail(f"Wrong sequential cutoff threshold set: "
f"{cores_load[0].get_seq_cut_off_threshold()} "
f"should be {_threshold}")
def prepare(cores_count=1):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
partitions = []
for x in range(cores_count):
partitions.append(Size(1, Unit.GibiByte))
core_device.create_partitions(partitions)
cache_part = cache_device.partitions[0]
core_parts = core_device.partitions
TestRun.LOGGER.info("Staring cache")
cache = casadm.start_cache(cache_part, force=True)
TestRun.LOGGER.info("Adding core devices")
core_list = []
for core_part in core_parts:
core_list.append(cache.add_core(core_dev=core_part))
return cache, core_list

View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -36,80 +36,96 @@ number_of_checks = 10
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_seqcutoff_params(cache_mode):
def test_set_get_seq_cutoff_params(cache_mode):
"""
title: Test for setting and reading sequential cut-off parameters.
description: |
Verify that it is possible to set and read all available sequential cut-off
parameters using casadm --set-param and --get-param options.
pass_criteria:
- All sequential cut-off parameters are set to given values.
- All sequential cut-off parameters displays proper values.
title: Test for setting and reading sequential cutoff parameters.
description: |
Verify that it is possible to set and read all available sequential cutoff
parameters using casadm --set-param and --get-param options.
pass_criteria:
- All sequential cutoff parameters are set to given values.
- All sequential cutoff parameters display proper values.
"""
with TestRun.step("Partition cache and core devices"):
cache_dev, core_dev = storage_prepare()
cache_dev = TestRun.disks["cache"]
cache_parts = [Size(1, Unit.GibiByte)] * caches_count
cache_dev.create_partitions(cache_parts)
core_dev = TestRun.disks["core"]
core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
core_dev.create_partitions(core_parts)
with TestRun.step(
f"Start {caches_count} caches in {cache_mode} cache mode "
f"and add {cores_per_cache} cores per cache"
):
caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
caches = [
casadm.start_cache(part, cache_mode, force=True) for part in cache_dev.partitions
]
with TestRun.step("Check sequential cut-off default parameters"):
default_seqcutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
cores = [
[
caches[i].add_core(
core_dev.partitions[i * cores_per_cache + j]
) for j in range(cores_per_cache)
] for i in range(caches_count)
]
with TestRun.step("Check sequential cutoff default parameters"):
default_seq_cutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
for i in range(caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)
with TestRun.step(
"Set new random values for sequential cut-off parameters for one core only"
"Set new random values for sequential cutoff parameters for one core only"
):
for check in range(number_of_checks):
random_seqcutoff_params = new_seqcutoff_parameters_random_values()
cores[0][0].set_seq_cutoff_parameters(random_seqcutoff_params)
random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
cores[0][0].set_seq_cutoff_parameters(random_seq_cutoff_params)
# Check changed parameters for first core:
check_seqcutoff_parameters(cores[0][0], random_seqcutoff_params)
check_seq_cutoff_parameters(cores[0][0], random_seq_cutoff_params)
# Check default parameters for other cores:
for j in range(1, cores_per_cache):
check_seqcutoff_parameters(cores[0][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[0][j], default_seq_cutoff_params)
for i in range(1, caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)
with TestRun.step(
"Set new random values for sequential cut-off parameters "
"Set new random values for sequential cutoff parameters "
"for all cores within given cache instance"
):
for check in range(number_of_checks):
random_seqcutoff_params = new_seqcutoff_parameters_random_values()
caches[0].set_seq_cutoff_parameters(random_seqcutoff_params)
random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
caches[0].set_seq_cutoff_parameters(random_seq_cutoff_params)
# Check changed parameters for first cache instance:
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[0][j], random_seqcutoff_params)
check_seq_cutoff_parameters(cores[0][j], random_seq_cutoff_params)
# Check default parameters for other cache instances:
for i in range(1, caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)
with TestRun.step(
"Set new random values for sequential cut-off parameters for all cores"
"Set new random values for sequential cutoff parameters for all cores"
):
for check in range(number_of_checks):
seqcutoff_params = []
seq_cutoff_params = []
for i in range(caches_count):
for j in range(cores_per_cache):
random_seqcutoff_params = new_seqcutoff_parameters_random_values()
seqcutoff_params.append(random_seqcutoff_params)
cores[i][j].set_seq_cutoff_parameters(random_seqcutoff_params)
random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
seq_cutoff_params.append(random_seq_cutoff_params)
cores[i][j].set_seq_cutoff_parameters(random_seq_cutoff_params)
for i in range(caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(
cores[i][j], seqcutoff_params[i * cores_per_cache + j]
check_seq_cutoff_parameters(
cores[i][j], seq_cutoff_params[i * cores_per_cache + j]
)
@ -119,24 +135,36 @@ def test_set_get_seqcutoff_params(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_cleaning_params(cache_mode, cleaning_policy):
"""
title: Test for setting and reading cleaning parameters.
description: |
Verify that it is possible to set and read all available cleaning
parameters for all cleaning policies using casadm --set-param and
--get-param options.
pass_criteria:
- All cleaning parameters are set to given values.
- All cleaning parameters displays proper values.
title: Test for setting and reading cleaning parameters.
description: |
Verify that it is possible to set and read all available cleaning
parameters for all cleaning policies using casadm --set-param and
--get-param options.
pass_criteria:
- All cleaning parameters are set to given values.
- All cleaning parameters display proper values.
"""
with TestRun.step("Partition cache and core devices"):
cache_dev, core_dev = storage_prepare()
cache_dev = TestRun.disks["cache"]
cache_parts = [Size(1, Unit.GibiByte)] * caches_count
cache_dev.create_partitions(cache_parts)
core_dev = TestRun.disks["core"]
core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
core_dev.create_partitions(core_parts)
with TestRun.step(
f"Start {caches_count} caches in {cache_mode} cache mode "
f"and add {cores_per_cache} cores per cache"
):
caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
caches = [
casadm.start_cache(part, cache_mode, force=True) for part in cache_dev.partitions
]
for i in range(caches_count):
for j in range(cores_per_cache):
caches[i].add_core(core_dev.partitions[i * cores_per_cache + j])
with TestRun.step(f"Set cleaning policy to {cleaning_policy}"):
if cleaning_policy != CleaningPolicy.DEFAULT:
@ -205,33 +233,7 @@ def test_set_get_cleaning_params(cache_mode, cleaning_policy):
)
def storage_prepare():
cache_dev = TestRun.disks["cache"]
cache_parts = [Size(1, Unit.GibiByte)] * caches_count
cache_dev.create_partitions(cache_parts)
core_dev = TestRun.disks["core"]
core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
core_dev.create_partitions(core_parts)
return cache_dev, core_dev
def cache_prepare(cache_mode, cache_dev, core_dev):
caches = []
for i in range(caches_count):
caches.append(
casadm.start_cache(cache_dev.partitions[i], cache_mode, force=True)
)
cores = [[] for i in range(caches_count)]
for i in range(caches_count):
for j in range(cores_per_cache):
core_partition_nr = i * cores_per_cache + j
cores[i].append(caches[i].add_core(core_dev.partitions[core_partition_nr]))
return caches, cores
def new_seqcutoff_parameters_random_values():
def new_seq_cutoff_parameters_random_values():
return SeqCutOffParameters(
threshold=Size(random.randrange(1, 1000000), Unit.KibiByte),
policy=random.choice(list(SeqCutOffPolicy)),
@ -275,27 +277,27 @@ def new_cleaning_parameters_random_values(cleaning_policy):
return cleaning_params
def check_seqcutoff_parameters(core, seqcutoff_params):
current_seqcutoff_params = core.get_seq_cut_off_parameters()
def check_seq_cutoff_parameters(core, seq_cutoff_params):
current_seq_cutoff_params = core.get_seq_cut_off_parameters()
failed_params = ""
if current_seqcutoff_params.threshold != seqcutoff_params.threshold:
if current_seq_cutoff_params.threshold != seq_cutoff_params.threshold:
failed_params += (
f"Threshold is {current_seqcutoff_params.threshold}, "
f"should be {seqcutoff_params.threshold}\n"
f"Threshold is {current_seq_cutoff_params.threshold}, "
f"should be {seq_cutoff_params.threshold}\n"
)
if current_seqcutoff_params.policy != seqcutoff_params.policy:
if current_seq_cutoff_params.policy != seq_cutoff_params.policy:
failed_params += (
f"Policy is {current_seqcutoff_params.policy}, "
f"should be {seqcutoff_params.policy}\n"
f"Policy is {current_seq_cutoff_params.policy}, "
f"should be {seq_cutoff_params.policy}\n"
)
if current_seqcutoff_params.promotion_count != seqcutoff_params.promotion_count:
if current_seq_cutoff_params.promotion_count != seq_cutoff_params.promotion_count:
failed_params += (
f"Promotion count is {current_seqcutoff_params.promotion_count}, "
f"should be {seqcutoff_params.promotion_count}\n"
f"Promotion count is {current_seq_cutoff_params.promotion_count}, "
f"should be {seq_cutoff_params.promotion_count}\n"
)
if failed_params:
TestRun.LOGGER.error(
f"Sequential cut-off parameters are not correct "
f"Sequential cutoff parameters are not correct "
f"for {core.path}:\n{failed_params}"
)
@ -306,12 +308,12 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
failed_params = ""
if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_cleaning_params.wake_up_time}, "
f"Wake up time is {current_cleaning_params.wake_up_time}, "
f"should be {cleaning_params.wake_up_time}\n"
)
if current_cleaning_params.staleness_time != cleaning_params.staleness_time:
failed_params += (
f"Staleness Time is {current_cleaning_params.staleness_time}, "
f"Staleness time is {current_cleaning_params.staleness_time}, "
f"should be {cleaning_params.staleness_time}\n"
)
if (
@ -319,7 +321,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.flush_max_buffers
):
failed_params += (
f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, "
f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
f"should be {cleaning_params.flush_max_buffers}\n"
)
if (
@ -327,7 +329,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.activity_threshold
):
failed_params += (
f"Activity Threshold is {current_cleaning_params.activity_threshold}, "
f"Activity threshold is {current_cleaning_params.activity_threshold}, "
f"should be {cleaning_params.activity_threshold}\n"
)
if failed_params:
@ -341,7 +343,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
failed_params = ""
if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_cleaning_params.wake_up_time}, "
f"Wake up time is {current_cleaning_params.wake_up_time}, "
f"should be {cleaning_params.wake_up_time}\n"
)
if (
@ -349,7 +351,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.flush_max_buffers
):
failed_params += (
f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, "
f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
f"should be {cleaning_params.flush_max_buffers}\n"
)
if failed_params:

View File

@ -1,6 +1,6 @@
#
# Copyright(c) 2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import time
@ -24,20 +24,23 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_negative_cases():
"""
title: Test for '--zero-metadata' negative cases.
description: |
Test for '--zero-metadata' scenarios with expected failures.
pass_criteria:
- Zeroing metadata without '--force' failed when run on cache.
- Zeroing metadata with '--force' failed when run on cache.
- Zeroing metadata failed when run on system drive.
- Load cache command failed after successfully zeroing metadata on the cache device.
title: Test for '--zero-metadata' negative cases.
description: Test for '--zero-metadata' scenarios with expected failures.
pass_criteria:
- Zeroing metadata without '--force' failed when run on cache.
- Zeroing metadata with '--force' failed when run on cache.
- Zeroing metadata failed when run on system drive.
- Load cache command failed after successfully zeroing metadata on the cache device.
"""
with TestRun.step("Prepare cache and core devices."):
cache_dev, core_dev, cache_disk = prepare_devices()
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(100, Unit.MebiByte)])
cache_dev = cache_disk.partitions[0]
core_disk = TestRun.disks['core']
core_disk.create_partitions([Size(5, Unit.GibiByte)])
with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, force=True)
casadm.start_cache(cache_dev, force=True)
with TestRun.step("Try to zero metadata and validate error message."):
try:
@ -75,7 +78,7 @@ def test_zero_metadata_negative_cases():
with TestRun.step("Load cache."):
try:
cache = casadm.load_cache(cache_dev)
casadm.load_cache(cache_dev)
TestRun.LOGGER.error("Loading cache should fail.")
except CmdException:
TestRun.LOGGER.info("Loading cache failed as expected.")
@ -86,16 +89,19 @@ def test_zero_metadata_negative_cases():
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_zero_metadata_filesystem(filesystem):
"""
title: Test for '--zero-metadata' and filesystem.
description: |
Test for '--zero-metadata' on drive with filesystem.
pass_criteria:
- Zeroing metadata on device with filesystem failed and not removed filesystem.
- Zeroing metadata on mounted device failed.
title: Test for '--zero-metadata' and filesystem.
description: Test for '--zero-metadata' on drive with filesystem.
pass_criteria:
- Zeroing metadata on device with filesystem failed and did not remove the filesystem.
- Zeroing metadata on mounted device failed.
"""
mount_point = "/mnt"
with TestRun.step("Prepare devices."):
cache_dev, core_disk, cache_disk = prepare_devices()
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(100, Unit.MebiByte)])
cache_dev = cache_disk.partitions[0]
core_disk = TestRun.disks['core']
core_disk.create_partitions([Size(5, Unit.GibiByte)])
with TestRun.step("Create filesystem on core device."):
core_disk.create_filesystem(filesystem)
@ -131,17 +137,21 @@ def test_zero_metadata_filesystem(filesystem):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_dirty_data():
"""
title: Test for '--zero-metadata' and dirty data scenario.
description: |
Test for '--zero-metadata' with and without 'force' option if there are dirty data
on cache.
pass_criteria:
- Zeroing metadata without force failed on cache with dirty data.
- Zeroing metadata with force ran successfully on cache with dirty data.
- Cache started successfully after zeroing metadata on cache with dirty data.
title: Test for '--zero-metadata' and dirty data scenario.
description: |
Test for '--zero-metadata' with and without 'force' option if there is dirty data
on cache.
pass_criteria:
- Zeroing metadata without force failed on cache with dirty data.
- Zeroing metadata with force ran successfully on cache with dirty data.
- Cache started successfully after zeroing metadata on cache with dirty data.
"""
with TestRun.step("Prepare cache and core devices."):
cache_dev, core_disk, cache_disk = prepare_devices()
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(100, Unit.MebiByte)])
cache_dev = cache_disk.partitions[0]
core_disk = TestRun.disks['core']
core_disk.create_partitions([Size(5, Unit.GibiByte)])
with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
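Between starting the cache in write-back mode and the failure checks below, the test needs to produce dirty data; a minimal sketch, assuming the framework's Dd tool under test_tools.dd (import path and chained API are assumptions):

    # Hypothetical sketch: direct writes through the exported object in WB mode
    # leave dirty data on the cache until it is flushed.
    from test_tools.dd import Dd

    core = cache.add_core(core_disk.partitions[0])
    Dd().input("/dev/urandom").output(core.path).count(1024) \
        .block_size(Size(4, Unit.KibiByte)).oflag("direct").run()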
@ -165,7 +175,7 @@ def test_zero_metadata_dirty_data():
with TestRun.step("Start cache (expect to fail)."):
try:
cache = casadm.start_cache(cache_dev, CacheMode.WB)
casadm.start_cache(cache_dev, CacheMode.WB)
except CmdException:
TestRun.LOGGER.info("Start cache failed as expected.")
@ -186,7 +196,7 @@ def test_zero_metadata_dirty_data():
with TestRun.step("Start cache without 'force' option."):
try:
cache = casadm.start_cache(cache_dev, CacheMode.WB)
casadm.start_cache(cache_dev, CacheMode.WB)
TestRun.LOGGER.info("Cache started successfully.")
except CmdException:
TestRun.LOGGER.error("Start cache failed.")
@ -196,21 +206,25 @@ def test_zero_metadata_dirty_data():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_dirty_shutdown():
"""
title: Test for '--zero-metadata' and dirty shutdown scenario.
description: |
Test for '--zero-metadata' with and without 'force' option on cache which had been dirty
shut down before.
pass_criteria:
- Zeroing metadata without force failed on cache after dirty shutdown.
- Zeroing metadata with force ran successfully on cache after dirty shutdown.
- Cache started successfully after dirty shutdown and zeroing metadata on cache.
title: Test for '--zero-metadata' and dirty shutdown scenario.
description: |
Test for '--zero-metadata' with and without 'force' option on a cache after
a dirty shutdown.
pass_criteria:
- Zeroing metadata without force failed on cache after dirty shutdown.
- Zeroing metadata with force ran successfully on cache after dirty shutdown.
- Cache started successfully after dirty shutdown and zeroing metadata on cache.
"""
with TestRun.step("Prepare cache and core devices."):
cache_dev, core_disk, cache_disk = prepare_devices()
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(100, Unit.MebiByte)])
cache_dev = cache_disk.partitions[0]
core_disk = TestRun.disks['core']
core_disk.create_partitions([Size(5, Unit.GibiByte)])
with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, CacheMode.WT, force=True)
core = cache.add_core(core_disk)
cache.add_core(core_disk)
with TestRun.step("Unplug cache device."):
cache_disk.unplug()
@ -227,7 +241,7 @@ def test_zero_metadata_dirty_shutdown():
with TestRun.step("Start cache (expect to fail)."):
try:
cache = casadm.start_cache(cache_dev, CacheMode.WT)
casadm.start_cache(cache_dev, CacheMode.WT)
TestRun.LOGGER.error("Starting cache should fail!")
except CmdException:
TestRun.LOGGER.info("Start cache failed as expected.")
@ -249,17 +263,7 @@ def test_zero_metadata_dirty_shutdown():
with TestRun.step("Start cache."):
try:
cache = casadm.start_cache(cache_dev, CacheMode.WT)
casadm.start_cache(cache_dev, CacheMode.WT)
TestRun.LOGGER.info("Cache started successfully.")
except CmdException:
TestRun.LOGGER.error("Start cache failed.")
def prepare_devices():
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(100, Unit.MebiByte)])
cache_part = cache_disk.partitions[0]
core_disk = TestRun.disks['core']
core_disk.create_partitions([Size(5, Unit.GibiByte)])
return cache_part, core_disk, cache_disk