Minor test description and names refactor

Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
Katarzyna Treder 2025-02-28 12:03:10 +01:00
parent d4de219fec
commit ba7d907775
20 changed files with 484 additions and 359 deletions

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2022 Intel Corporation # Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -28,11 +28,10 @@ block_sizes = [1, 2, 4, 5, 8, 16, 32, 64, 128]
@pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.nand]))
def test_support_different_io_size(cache_mode): def test_support_different_io_size(cache_mode):
""" """
title: OpenCAS supports different IO sizes title: Support for different I/O sizes
description: | description: Verify support for I/O of size in range from 512B to 128KiB
OpenCAS supports IO of size in rage from 512b to 128K
pass_criteria: pass_criteria:
- No IO errors - No I/O errors
""" """
with TestRun.step("Prepare cache and core devices"): with TestRun.step("Prepare cache and core devices"):
@ -47,7 +46,7 @@ def test_support_different_io_size(cache_mode):
) )
core = cache.add_core(core_disk.partitions[0]) core = cache.add_core(core_disk.partitions[0])
with TestRun.step("Load the default ioclass config file"): with TestRun.step("Load the default io class config file"):
cache.load_io_class(opencas_ioclass_conf_path) cache.load_io_class(opencas_ioclass_conf_path)
with TestRun.step("Create a filesystem on the core device and mount it"): with TestRun.step("Create a filesystem on the core device and mount it"):

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2022 Intel Corporation # Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -30,20 +30,20 @@ mountpoint = "/mnt"
@pytest.mark.CI @pytest.mark.CI
def test_cas_version(): def test_cas_version():
""" """
title: Test for CAS version title: Test for version number
description: description:
Check if CAS print version cmd returns consistent version with version file Check if version printed by cmd returns value consistent with version file
pass criteria: pass criteria:
- casadm version command succeeds - Version command succeeds
- versions from cmd and file in /var/lib/opencas/cas_version are consistent - Versions from cmd and file in /var/lib/opencas/cas_version are consistent
""" """
with TestRun.step("Read cas version using casadm cmd"): with TestRun.step("Read version using casadm cmd"):
output = casadm.print_version(output_format=OutputFormat.csv) output = casadm.print_version(output_format=OutputFormat.csv)
cmd_version = output.stdout cmd_version = output.stdout
cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]] cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]]
with TestRun.step(f"Read cas version from {version_file_path} location"): with TestRun.step(f"Read version from {version_file_path} location"):
file_read = read_file(version_file_path).split("\n") file_read = read_file(version_file_path).split("\n")
file_cas_version = next( file_cas_version = next(
(line.split("=")[1] for line in file_read if "CAS_VERSION=" in line) (line.split("=")[1] for line in file_read if "CAS_VERSION=" in line)
@ -51,20 +51,20 @@ def test_cas_version():
with TestRun.step("Compare cmd and file versions"): with TestRun.step("Compare cmd and file versions"):
if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions): if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions):
TestRun.LOGGER.error(f"Cmd and file versions doesn`t match") TestRun.LOGGER.error(f"Cmd and file versions doesn't match")
@pytest.mark.CI @pytest.mark.CI
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_negative_start_cache(): def test_negative_start_cache():
""" """
title: Test start cache negative on cache device title: Negative test for starting cache
description: description:
Check for negative cache start scenarios Check starting cache using the same device or cache ID twice
pass criteria: pass criteria:
- Cache start succeeds - Cache start succeeds
- Fails to start cache on the same device with another id - Starting cache on the same device with another ID fails
- Fails to start cache on another partition with the same id - Starting cache on another partition with the same ID fails
""" """
with TestRun.step("Prepare cache device"): with TestRun.step("Prepare cache device"):

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -65,10 +65,10 @@ def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy):
cache.set_cleaning_policy(cleaning_policy=cleaning_policy) cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
set_cleaning_policy_params(cache, cleaning_policy) set_cleaning_policy_params(cache, cleaning_policy)
with TestRun.step("Check for running CAS cleaner"): with TestRun.step("Check for running cleaner process"):
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}") output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
if output.exit_code != 0: if output.exit_code != 0:
TestRun.fail("CAS cleaner process is not running!") TestRun.fail("Cleaner process is not running!")
with TestRun.step(f"Add {cores_count} cores to the cache"): with TestRun.step(f"Add {cores_count} cores to the cache"):
cores = [cache.add_core(partition) for partition in core_dev.partitions] cores = [cache.add_core(partition) for partition in core_dev.partitions]
@ -133,10 +133,10 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
cache.set_cleaning_policy(cleaning_policy=cleaning_policy) cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
set_cleaning_policy_params(cache, cleaning_policy) set_cleaning_policy_params(cache, cleaning_policy)
with TestRun.step("Check for running CAS cleaner"): with TestRun.step("Check for running cleaner process"):
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}") output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
if output.exit_code != 0: if output.exit_code != 0:
TestRun.fail("CAS cleaner process is not running!") TestRun.fail("Cleaner process is not running!")
with TestRun.step(f"Add {cores_count} cores to the cache"): with TestRun.step(f"Add {cores_count} cores to the cache"):
cores = [cache.add_core(partition) for partition in core_dev.partitions] cores = [cache.add_core(partition) for partition in core_dev.partitions]
@ -193,12 +193,12 @@ def set_cleaning_policy_params(cache, cleaning_policy):
if current_acp_params.wake_up_time != acp_params.wake_up_time: if current_acp_params.wake_up_time != acp_params.wake_up_time:
failed_params += ( failed_params += (
f"Wake Up time is {current_acp_params.wake_up_time}, " f"Wake up time is {current_acp_params.wake_up_time}, "
f"should be {acp_params.wake_up_time}\n" f"should be {acp_params.wake_up_time}\n"
) )
if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers: if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
failed_params += ( failed_params += (
f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, " f"Flush max buffers is {current_acp_params.flush_max_buffers}, "
f"should be {acp_params.flush_max_buffers}\n" f"should be {acp_params.flush_max_buffers}\n"
) )
TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}") TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
@ -215,22 +215,22 @@ def set_cleaning_policy_params(cache, cleaning_policy):
failed_params = "" failed_params = ""
if current_alru_params.wake_up_time != alru_params.wake_up_time: if current_alru_params.wake_up_time != alru_params.wake_up_time:
failed_params += ( failed_params += (
f"Wake Up time is {current_alru_params.wake_up_time}, " f"Wake up time is {current_alru_params.wake_up_time}, "
f"should be {alru_params.wake_up_time}\n" f"should be {alru_params.wake_up_time}\n"
) )
if current_alru_params.staleness_time != alru_params.staleness_time: if current_alru_params.staleness_time != alru_params.staleness_time:
failed_params += ( failed_params += (
f"Staleness Time is {current_alru_params.staleness_time}, " f"Staleness time is {current_alru_params.staleness_time}, "
f"should be {alru_params.staleness_time}\n" f"should be {alru_params.staleness_time}\n"
) )
if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers: if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
failed_params += ( failed_params += (
f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, " f"Flush max buffers is {current_alru_params.flush_max_buffers}, "
f"should be {alru_params.flush_max_buffers}\n" f"should be {alru_params.flush_max_buffers}\n"
) )
if current_alru_params.activity_threshold != alru_params.activity_threshold: if current_alru_params.activity_threshold != alru_params.activity_threshold:
failed_params += ( failed_params += (
f"Activity Threshold is {current_alru_params.activity_threshold}, " f"Activity threshold is {current_alru_params.activity_threshold}, "
f"should be {alru_params.activity_threshold}\n" f"should be {alru_params.activity_threshold}\n"
) )
TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}") TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
@ -245,9 +245,9 @@ def check_cleaning_policy_operation(
case CleaningPolicy.alru: case CleaningPolicy.alru:
if core_writes_before_wait_for_cleaning != Size.zero(): if core_writes_before_wait_for_cleaning != Size.zero():
TestRun.LOGGER.error( TestRun.LOGGER.error(
"CAS cleaner started to clean dirty data right after IO! " "Cleaner process started to clean dirty data right after I/O! "
"According to ALRU parameters set in this test cleaner should " "According to ALRU parameters set in this test cleaner should "
"wait 10 seconds after IO before cleaning dirty data" "wait 10 seconds after I/O before cleaning dirty data"
) )
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning: if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
TestRun.LOGGER.error( TestRun.LOGGER.error(
@ -266,9 +266,9 @@ def check_cleaning_policy_operation(
case CleaningPolicy.acp: case CleaningPolicy.acp:
if core_writes_before_wait_for_cleaning == Size.zero(): if core_writes_before_wait_for_cleaning == Size.zero():
TestRun.LOGGER.error( TestRun.LOGGER.error(
"CAS cleaner did not start cleaning dirty data right after IO! " "Cleaner process did not start cleaning dirty data right after I/O! "
"According to ACP policy cleaner should start " "According to ACP policy cleaner should start "
"cleaning dirty data right after IO" "cleaning dirty data right after I/O"
) )
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning: if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
TestRun.LOGGER.error( TestRun.LOGGER.error(

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2020-2021 Intel Corporation # Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -153,7 +153,7 @@ def test_concurrent_caches_flush(cache_mode: CacheMode):
""" """
title: Flush multiple caches simultaneously. title: Flush multiple caches simultaneously.
description: | description: |
CAS should successfully flush multiple caches if there is already other flush in progress. Check for flushing multiple caches if there is already other flush in progress.
pass_criteria: pass_criteria:
- No system crash. - No system crash.
- Flush for each cache should finish successfully. - Flush for each cache should finish successfully.

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -46,7 +46,7 @@ def test_cache_stop_and_load(cache_mode):
""" """
title: Test for stopping and loading cache back with dynamic cache mode switching. title: Test for stopping and loading cache back with dynamic cache mode switching.
description: | description: |
Validate the ability of the CAS to switch cache modes at runtime and Validate the ability to switch cache modes at runtime and
check if all of them are working properly after switching and check if all of them are working properly after switching and
after stopping and reloading cache back. after stopping and reloading cache back.
Check also other parameters consistency after reload. Check also other parameters consistency after reload.
@ -138,10 +138,8 @@ def test_cache_stop_and_load(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode): def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
""" """
title: Test for dynamic cache mode switching during IO. title: Test for dynamic cache mode switching during I/O.
description: | description: Validate the ability to switch cache modes during I/O on exported object.
Validate the ability of CAS to switch cache modes
during working IO on CAS device.
pass_criteria: pass_criteria:
- Cache mode is switched without errors. - Cache mode is switched without errors.
""" """
@ -182,7 +180,7 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mo
): ):
cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush) cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)
with TestRun.step(f"Check if cache mode has switched properly during IO"): with TestRun.step("Check if cache mode has switched properly during I/O"):
cache_mode_after_switch = cache.get_cache_mode() cache_mode_after_switch = cache.get_cache_mode()
if cache_mode_after_switch != cache_mode_2: if cache_mode_after_switch != cache_mode_2:
TestRun.fail( TestRun.fail(
@ -229,7 +227,7 @@ def run_io_and_verify(cache, core, io_mode):
): ):
TestRun.fail( TestRun.fail(
"Write-Back cache mode is not working properly! " "Write-Back cache mode is not working properly! "
"There should be some writes to CAS device and none to the core" "There should be some writes to exported object and none to the core"
) )
case CacheMode.PT: case CacheMode.PT:
if ( if (

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2020-2022 Intel Corporation # Copyright(c) 2020-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -18,11 +18,11 @@ def test_remove_multilevel_core():
""" """
title: Test of the ability to remove a core used in a multilevel cache. title: Test of the ability to remove a core used in a multilevel cache.
description: | description: |
Negative test if OpenCAS does not allow to remove a core when the related exported object Negative test for removing a core when the related exported object
is used as a core device for another cache instance. is used as a core device for another cache instance.
pass_criteria: pass_criteria:
- No system crash. - No system crash.
- OpenCAS does not allow removing a core used in a multilevel cache instance. - Removing a core used in a multilevel cache instance is forbidden.
""" """
with TestRun.step("Prepare cache and core devices"): with TestRun.step("Prepare cache and core devices"):

View File

@ -57,7 +57,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
with TestRun.step("Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step(f"Start cache in Write-Back"): with TestRun.step(f"Start cache in Write-Back cache mode"):
cache_disk = TestRun.disks["cache"] cache_disk = TestRun.disks["cache"]
core_disk = TestRun.disks["core"] core_disk = TestRun.disks["core"]
cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True) cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
@ -105,7 +105,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
with TestRun.step( with TestRun.step(
"Write random number of 4k block requests to each stream and check if all " "Write random number of 4k block requests to each stream and check if all "
"writes were sent in pass-through mode" "writes were sent in pass-through"
): ):
core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk]) core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk])
random.shuffle(offsets) random.shuffle(offsets)
@ -170,7 +170,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
with TestRun.step("Reset core statistics counters"): with TestRun.step("Reset core statistics counters"):
core.reset_counters() core.reset_counters()
with TestRun.step("Run FIO on core device"): with TestRun.step("Run fio on core device"):
stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte)) stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
sequential_streams = streams_seq_rand[0] sequential_streams = streams_seq_rand[0]
random_streams = streams_seq_rand[1] random_streams = streams_seq_rand[1]
@ -216,7 +216,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
- No system crash - No system crash
""" """
with TestRun.step(f"Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step("Create filesystem on core device"): with TestRun.step("Create filesystem on core device"):
@ -231,7 +231,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
with TestRun.step("Mount core"): with TestRun.step("Mount core"):
core.mount(mount_point=mount_point) core.mount(mount_point=mount_point)
with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB"): with TestRun.step("Set sequential cutoff policy to always and threshold to 20MiB"):
core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always) core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte)) core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))
@ -279,7 +279,7 @@ def run_dd(target_path, count, seek):
TestRun.LOGGER.info(f"dd command:\n{dd}") TestRun.LOGGER.info(f"dd command:\n{dd}")
output = dd.run() output = dd.run()
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Error during IO", output) raise CmdException("Error during I/O", output)
def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache): def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache):

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -40,15 +40,14 @@ class VerifyType(Enum):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size): def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
""" """
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores title: Functional sequential cutoff test with multiple cores
description: | description: |
Testing if amount of data written to cache after sequential writes for different Test checking if data is cached properly with sequential cutoff "always" policy
sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4 when sequential and random I/O is running to multiple cores.
cores and random IO against the last core, is correct.
pass_criteria: pass_criteria:
- Amount of written blocks to cache is less or equal than amount set - Amount of written blocks to cache is less or equal than amount set
with sequential cut-off threshold for three first cores. with sequential cutoff threshold for three first cores.
- Amount of written blocks to cache is equal to io size run against last core. - Amount of written blocks to cache is equal to I/O size run against last core.
""" """
with TestRun.step("Prepare cache and core devices"): with TestRun.step("Prepare cache and core devices"):
@ -76,7 +75,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
) )
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts] core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
with TestRun.step("Set sequential cut-off parameters for all cores"): with TestRun.step("Set sequential cutoff parameters for all cores"):
writes_before_list = [] writes_before_list = []
fio_additional_size = Size(10, Unit.Blocks4096) fio_additional_size = Size(10, Unit.Blocks4096)
thresholds_list = [ thresholds_list = [
@ -96,7 +95,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
core.set_seq_cutoff_policy(SeqCutOffPolicy.always) core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold) core.set_seq_cutoff_threshold(threshold)
with TestRun.step("Prepare sequential IO against first three cores"): with TestRun.step("Prepare sequential I/O against first three cores"):
block_size = Size(4, Unit.KibiByte) block_size = Size(4, Unit.KibiByte)
fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True) fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)
@ -107,7 +106,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
fio_job.target(core.path) fio_job.target(core.path)
writes_before_list.append(core.get_statistics().block_stats.cache.writes) writes_before_list.append(core.get_statistics().block_stats.cache.writes)
with TestRun.step("Prepare random IO against the last core"): with TestRun.step("Prepare random I/O against the last core"):
fio_job = fio.add_job(f"core_{core_list[-1].core_id}") fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
fio_job.size(io_sizes_list[-1]) fio_job.size(io_sizes_list[-1])
fio_job.read_write(io_type_last) fio_job.read_write(io_type_last)
@ -117,7 +116,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
with TestRun.step("Run fio against all cores"): with TestRun.step("Run fio against all cores"):
fio.run() fio.run()
with TestRun.step("Verify writes to cache count after IO"): with TestRun.step("Verify writes to cache count after I/O"):
margins = [ margins = [
min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold) min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
for core, threshold in zip(core_list[:-1], thresholds_list[:-1]) for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
@ -159,17 +158,16 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
@pytest.mark.parametrizex("cache_line_size", CacheLineSize) @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cache_line_size): def test_seq_cutoff_multi_core_cpu_pinned(cache_mode, io_type, io_type_last, cache_line_size):
""" """
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores title: Functional sequential cutoff test with multiple cores and cpu pinned I/O
description: | description: |
Testing if amount of data written to cache after sequential writes for different Test checking if data is cached properly with sequential cutoff "always" policy
sequential cut-off thresholds on each core, while running sequential IO, pinned, when sequential and random cpu pinned I/O is running to multiple cores.
on 3 out of 4 cores and random IO against the last core, is correct.
pass_criteria: pass_criteria:
- Amount of written blocks to cache is less or equal than amount set - Amount of written blocks to cache is less or equal than amount set
with sequential cut-off threshold for three first cores. with sequential cutoff threshold for three first cores.
- Amount of written blocks to cache is equal to io size run against last core. - Amount of written blocks to cache is equal to I/O size run against last core.
""" """
with TestRun.step("Partition cache and core devices"): with TestRun.step("Partition cache and core devices"):
@ -198,7 +196,7 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
) )
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts] core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
with TestRun.step(f"Set sequential cut-off parameters for all cores"): with TestRun.step("Set sequential cutoff parameters for all cores"):
writes_before_list = [] writes_before_list = []
fio_additional_size = Size(10, Unit.Blocks4096) fio_additional_size = Size(10, Unit.Blocks4096)
thresholds_list = [ thresholds_list = [
@ -218,7 +216,9 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
core.set_seq_cutoff_policy(SeqCutOffPolicy.always) core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold) core.set_seq_cutoff_threshold(threshold)
with TestRun.step("Prepare sequential IO against first three cores"): with TestRun.step(
"Prepare sequential I/O against first three cores and random I/O against the last one"
):
fio = ( fio = (
Fio() Fio()
.create_command() .create_command()
@ -244,10 +244,10 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
fio_job.target(core_list[-1].path) fio_job.target(core_list[-1].path)
writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes) writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)
with TestRun.step("Running IO against all cores"): with TestRun.step("Running I/O against all cores"):
fio.run() fio.run()
with TestRun.step("Verifying writes to cache count after IO"): with TestRun.step("Verifying writes to cache count after I/O"):
for core, writes, threshold, io_size in zip( for core, writes, threshold, io_size in zip(
core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1] core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
): ):
@ -282,16 +282,14 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type): def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
""" """
title: Sequential cut-off tests for writes and reads for 'never', 'always' and 'full' policies title: Functional test for sequential cutoff threshold parameter
description: | description: |
Testing if amount of data written to cache after sequential writes and reads for different Check if data is cached properly according to sequential cutoff policy and
sequential cut-off policies with cache configured with different cache line size threshold parameter
is valid for sequential cut-off threshold parameter, assuming that cache occupancy
doesn't reach 100% during test.
pass_criteria: pass_criteria:
- Amount of written blocks to cache is less or equal than amount set - Amount of blocks written to cache is less than or equal to amount set
with sequential cut-off parameter in case of 'always' policy. with sequential cutoff parameter in case of 'always' policy.
- Amount of written blocks to cache is at least equal io size in case of 'never' and 'full' - Amount of blocks written to cache is at least equal to io size in case of 'never' and 'full'
policy. policy.
""" """
@ -326,13 +324,13 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
) )
io_size = (threshold + fio_additional_size).align_down(0x1000) io_size = (threshold + fio_additional_size).align_down(0x1000)
with TestRun.step(f"Setting cache sequential cut off policy mode to {policy}"): with TestRun.step(f"Setting cache sequential cutoff policy mode to {policy}"):
cache.set_seq_cutoff_policy(policy) cache.set_seq_cutoff_policy(policy)
with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"): with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
cache.set_seq_cutoff_threshold(threshold) cache.set_seq_cutoff_threshold(threshold)
with TestRun.step("Prepare sequential IO against core"): with TestRun.step("Prepare sequential I/O against core"):
sync() sync()
writes_before = core.get_statistics().block_stats.cache.writes writes_before = core.get_statistics().block_stats.cache.writes
fio = ( fio = (
@ -364,16 +362,15 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_thresh_fill(cache_line_size, io_dir): def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
""" """
title: Sequential cut-off tests during writes and reads on full cache for 'full' policy title: Functional test for sequential cutoff threshold parameter and 'full' policy
description: | description: |
Testing if amount of data written to cache after sequential io against fully occupied Check if data is cached properly according to sequential cutoff 'full' policy and given
cache for 'full' sequential cut-off policy with cache configured with different cache threshold parameter
line sizes is valid for sequential cut-off threshold parameter.
pass_criteria: pass_criteria:
- Amount of written blocks to cache is big enough to fill cache when 'never' sequential - Amount of written blocks to cache is big enough to fill cache when 'never' sequential
cut-off policy is set cutoff policy is set
- Amount of written blocks to cache is less or equal than amount set - Amount of written blocks to cache is less or equal than amount set
with sequential cut-off parameter in case of 'full' policy. with sequential cutoff parameter in case of 'full' policy.
""" """
with TestRun.step("Partition cache and core devices"): with TestRun.step("Partition cache and core devices"):
@ -407,10 +404,10 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
) )
io_size = (threshold + fio_additional_size).align_down(0x1000) io_size = (threshold + fio_additional_size).align_down(0x1000)
with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.never}"): with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.never}"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never) cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
with TestRun.step("Prepare sequential IO against core"): with TestRun.step("Prepare sequential I/O against core"):
sync() sync()
fio = ( fio = (
Fio() Fio()
@ -432,13 +429,13 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
f"Cache occupancy is too small: {occupancy_percentage}, expected at least 95%" f"Cache occupancy is too small: {occupancy_percentage}, expected at least 95%"
) )
with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.full}"): with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.full}"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.full) cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)
with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"): with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
cache.set_seq_cutoff_threshold(threshold) cache.set_seq_cutoff_threshold(threshold)
with TestRun.step(f"Running sequential IO ({io_dir})"): with TestRun.step(f"Running sequential I/O ({io_dir})"):
sync() sync()
writes_before = core.get_statistics().block_stats.cache.writes writes_before = core.get_statistics().block_stats.cache.writes
fio = ( fio = (

View File

@ -1,13 +1,13 @@
# #
# Copyright(c) 2022 Intel Corporation # Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
import pytest import pytest
from api.cas import casadm from api.cas import casadm
from api.cas.cache_config import CacheMode from api.cas.cache_config import CacheMode, CacheModeTrait
from core.test_run import TestRun from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.udev import Udev from test_tools.udev import Udev
@ -20,16 +20,14 @@ dd_count = 100
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WA, CacheMode.WB]) @pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertRead))
@pytest.mark.CI() @pytest.mark.CI()
def test_ci_read(cache_mode): def test_ci_read(cache_mode):
""" """
title: Verification test for write mode: write around title: Verification test for caching reads in various cache modes
description: Verify if write mode: write around, works as expected and cache only reads description: Check if reads are properly cached in various cache modes
and does not cache write
pass criteria: pass criteria:
- writes are not cached - Reads are cached
- reads are cached
""" """
with TestRun.step("Prepare partitions"): with TestRun.step("Prepare partitions"):
@ -45,7 +43,7 @@ def test_ci_read(cache_mode):
with TestRun.step("Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step(f"Start cache with cache_mode={cache_mode}"): with TestRun.step(f"Start cache in {cache_mode} cache mode"):
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True, cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
cache_mode=cache_mode) cache_mode=cache_mode)
casadm.add_core(cache, core_device) casadm.add_core(cache, core_device)
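The actual read check of this test falls outside the visible hunk; it follows the same statistics-delta pattern as the write tests below. A sketch of the idea only: the exported object path /dev/cas1-1, the data size and the occupancy-based check are assumptions.

# Sketch only - after reading from the exported object the data should occupy the cache.
def verify_reads_cached(cache):
    data_read = Size(50, Unit.MebiByte)
    TestRun.executor.run(
        f"dd if=/dev/cas1-1 of=/dev/null bs=1M "
        f"count={int(data_read.get_value(Unit.MebiByte))} iflag=direct"
    )
    occupancy = cache.get_statistics().usage_stats.occupancy
    if occupancy < data_read:
        TestRun.LOGGER.error(
            f"Cache occupancy after reads: {occupancy}, expected at least {data_read}"
        )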
@ -99,6 +97,13 @@ def test_ci_read(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.CI() @pytest.mark.CI()
def test_ci_write_around_write(): def test_ci_write_around_write():
"""
title: Verification test for writes in Write-Around cache mode
description: Validate I/O statistics after writing to exported object in Write-Around cache mode
pass criteria:
- Writes are not cached
- After inserting writes to core, data is read from core and not from cache
"""
with TestRun.step("Prepare partitions"): with TestRun.step("Prepare partitions"):
cache_device = TestRun.disks["cache"] cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"] core_device = TestRun.disks["core"]
@ -112,7 +117,7 @@ def test_ci_write_around_write():
with TestRun.step("Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step("Start CAS Linux in Write Around mode"): with TestRun.step("Start cache in Write-Around mode"):
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True, cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
cache_mode=CacheMode.WA) cache_mode=CacheMode.WA)
casadm.add_core(cache, core_device) casadm.add_core(cache, core_device)
@ -183,14 +188,14 @@ def test_ci_write_around_write():
else: else:
TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta_1} != 0") TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta_1} != 0")
with TestRun.step("Verify that reads propagated to core"): with TestRun.step("Verify that data was read from core"):
read_core_delta_2 = read_core_2 - read_core_1 read_core_delta_2 = read_core_2 - read_core_1
if read_core_delta_2 == data_write: if read_core_delta_2 == data_write:
TestRun.LOGGER.info(f"Reads from core: {read_core_delta_2} == {data_write}") TestRun.LOGGER.info(f"Reads from core: {read_core_delta_2} == {data_write}")
else: else:
TestRun.LOGGER.error(f"Reads from core: {read_core_delta_2} != {data_write}") TestRun.LOGGER.error(f"Reads from core: {read_core_delta_2} != {data_write}")
with TestRun.step("Verify that reads did not occur on cache"): with TestRun.step("Verify that data was not read from cache"):
read_cache_delta_2 = read_cache_2 - read_cache_1 read_cache_delta_2 = read_cache_2 - read_cache_1
if read_cache_delta_2.value == 0: if read_cache_delta_2.value == 0:
TestRun.LOGGER.info(f"Reads from cache: {read_cache_delta_2} == 0") TestRun.LOGGER.info(f"Reads from cache: {read_cache_delta_2} == 0")
@ -203,6 +208,14 @@ def test_ci_write_around_write():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.CI() @pytest.mark.CI()
def test_ci_write_through_write(): def test_ci_write_through_write():
"""
title: Verification test for Write-Through cache mode
description: |
Validate if reads and writes are cached properly for cache in Write-Through mode
pass criteria:
- Writes are inserted to cache and core
- Reads are not cached
"""
with TestRun.step("Prepare partitions"): with TestRun.step("Prepare partitions"):
cache_device = TestRun.disks["cache"] cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"] core_device = TestRun.disks["core"]
@ -216,7 +229,7 @@ def test_ci_write_through_write():
with TestRun.step("Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step("Start CAS Linux in Write Through mode"): with TestRun.step("Start cache in Write-Through mode"):
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True, cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
cache_mode=CacheMode.WT) cache_mode=CacheMode.WT)
casadm.add_core(cache, core_device) casadm.add_core(cache, core_device)

View File

@ -25,51 +25,51 @@ from test_tools.memory import disable_memory_affecting_functions, get_mem_free,
@pytest.mark.os_dependent @pytest.mark.os_dependent
def test_insufficient_memory_for_cas_module(): def test_insufficient_memory_for_cas_module():
""" """
title: Negative test for the ability of CAS to load the kernel module with insufficient memory. title: Load CAS kernel module with insufficient memory
description: | description: |
Check that the CAS kernel module wont be loaded if enough memory is not available Negative test for the ability to load the CAS kernel module with insufficient memory.
pass_criteria: pass_criteria:
- CAS module cannot be loaded with not enough memory. - CAS kernel module cannot be loaded with not enough memory.
- Loading CAS with not enough memory returns error. - Loading CAS kernel module with not enough memory returns error.
""" """
with TestRun.step("Disable caching and memory over-committing"): with TestRun.step("Disable caching and memory over-committing"):
disable_memory_affecting_functions() disable_memory_affecting_functions()
drop_caches() drop_caches()
with TestRun.step("Measure memory usage without OpenCAS module"): with TestRun.step("Measure memory usage without CAS kernel module"):
if is_kernel_module_loaded(CasModule.cache.value): if is_kernel_module_loaded(CasModule.cache.value):
unload_kernel_module(CasModule.cache.value) unload_kernel_module(CasModule.cache.value)
available_mem_before_cas = get_mem_free() available_mem_before_cas = get_mem_free()
with TestRun.step("Load CAS module"): with TestRun.step("Load CAS kernel module"):
load_kernel_module(CasModule.cache.value) load_kernel_module(CasModule.cache.value)
with TestRun.step("Measure memory usage with CAS module"): with TestRun.step("Measure memory usage with CAS kernel module"):
available_mem_with_cas = get_mem_free() available_mem_with_cas = get_mem_free()
memory_used_by_cas = available_mem_before_cas - available_mem_with_cas memory_used_by_cas = available_mem_before_cas - available_mem_with_cas
TestRun.LOGGER.info( TestRun.LOGGER.info(
f"OpenCAS module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM." f"CAS kernel module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
) )
with TestRun.step("Unload CAS module"): with TestRun.step("Unload CAS kernel module"):
unload_kernel_module(CasModule.cache.value) unload_kernel_module(CasModule.cache.value)
with TestRun.step("Allocate memory, leaving not enough memory for CAS module"): with TestRun.step("Allocate memory, leaving not enough memory for CAS module"):
memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4)) memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4))
allocate_memory(memory_to_leave) allocate_memory(memory_to_leave)
TestRun.LOGGER.info( TestRun.LOGGER.info(
f"Memory left for OpenCAS module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB." f"Memory left for CAS kernel module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
) )
with TestRun.step( with TestRun.step(
"Try to load OpenCAS module and check if correct error message is printed on failure" "Try to load CAS kernel module and check if correct error message is printed on failure"
): ):
output = load_kernel_module(CasModule.cache.value) output = load_kernel_module(CasModule.cache.value)
if output.stderr and output.exit_code != 0: if output.stderr and output.exit_code != 0:
TestRun.LOGGER.info(f"Cannot load OpenCAS module as expected.\n{output.stderr}") TestRun.LOGGER.info(f"Cannot load CAS kernel module as expected.\n{output.stderr}")
else: else:
TestRun.LOGGER.error("Loading OpenCAS module successfully finished, but should fail.") TestRun.LOGGER.error("Loading CAS kernel module successfully finished, but should fail.")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@ -118,3 +118,4 @@ def test_attach_cache_min_ram():
with TestRun.step("Unlock RAM memory"): with TestRun.step("Unlock RAM memory"):
unmount_ramfs() unmount_ramfs()

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2022 Intel Corporation # Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -23,14 +23,14 @@ from test_tools.udev import Udev
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policy(): def test_cleaning_policy():
""" """
Title: test_cleaning_policy Title: Basic test for cleaning policy
description: | description: |
The test is to see if dirty data will be removed from the Cache after changing the Verify cleaning behaviour after changing cleaning policy from NOP
cleaning policy from NOP to one that expects a flush. to one that expects a flush.
pass_criteria: pass_criteria:
- Cache is successfully populated with dirty data - Cache is successfully populated with dirty data
- Cleaning policy is changed successfully - Cleaning policy is changed successfully
- There is no dirty data after the policy change - There is no dirty data after the policy change
""" """
wait_time = 60 wait_time = 60
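The flow behind this description is short: populate the cache with dirty data while the NOP policy is set, switch to a flushing policy, then poll the dirty statistic for up to wait_time seconds. A sketch (the usage_stats.dirty attribute is an assumption mirroring the occupancy statistic used elsewhere in this diff):

# Sketch only - assumes dirty data was created under the NOP policy beforehand.
import time

def verify_dirty_data_flushed(cache, wait_time):
    cache.set_cleaning_policy(cleaning_policy=CleaningPolicy.alru)
    deadline = time.time() + wait_time
    while time.time() < deadline:
        if cache.get_statistics().usage_stats.dirty == Size.zero():
            return
        time.sleep(5)
    TestRun.fail("Dirty data remains in cache after the cleaning policy change")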

View File

@ -0,0 +1,126 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import re
import pytest
from api.cas import casadm
from api.cas.casadm_params import OutputFormat
from api.cas.cli_help_messages import *
from api.cas.cli_messages import check_stderr_msg, check_stdout_msg
from core.test_run import TestRun
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_help(shortcut):
"""
title: Test for 'help' command.
description: Test if help for commands displays correct output.
pass_criteria:
- Proper help displays for every command.
"""
TestRun.LOGGER.info("Run 'help' for every 'casadm' command.")
output = casadm.help(shortcut)
check_stdout_msg(output, casadm_help)
output = TestRun.executor.run("casadm" + (" -S" if shortcut else " --start-cache")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, start_cache_help)
output = TestRun.executor.run("casadm" + (" -T" if shortcut else " --stop-cache")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, stop_cache_help)
output = TestRun.executor.run("casadm" + (" -X" if shortcut else " --set-param")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, set_params_help)
output = TestRun.executor.run("casadm" + (" -G" if shortcut else " --get-param")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, get_params_help)
output = TestRun.executor.run("casadm" + (" -Q" if shortcut else " --set-cache-mode")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, set_cache_mode_help)
output = TestRun.executor.run("casadm" + (" -A" if shortcut else " --add-core")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, add_core_help)
output = TestRun.executor.run("casadm" + (" -R" if shortcut else " --remove-core")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, remove_core_help)
output = TestRun.executor.run("casadm" + " --remove-detached"
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, remove_detached_help)
output = TestRun.executor.run("casadm" + (" -L" if shortcut else " --list-caches")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, list_caches_help)
output = TestRun.executor.run("casadm" + (" -P" if shortcut else " --stats")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, stats_help)
output = TestRun.executor.run("casadm" + (" -Z" if shortcut else " --reset-counters")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, reset_counters_help)
output = TestRun.executor.run("casadm" + (" -F" if shortcut else " --flush-cache")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, flush_cache_help)
output = TestRun.executor.run("casadm" + (" -C" if shortcut else " --io-class")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, ioclass_help)
output = TestRun.executor.run("casadm" + (" -V" if shortcut else " --version")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, version_help)
output = TestRun.executor.run("casadm" + (" -H" if shortcut else " --help")
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, help_help)
output = TestRun.executor.run("casadm" + " --standby"
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, standby_help)
output = TestRun.executor.run("casadm" + " --zero-metadata"
+ (" -H" if shortcut else " --help"))
check_stdout_msg(output, zero_metadata_help)
output = TestRun.executor.run("casadm" + (" -Y" if shortcut else " --yell")
+ (" -H" if shortcut else " --help"))
check_stderr_msg(output, unrecognized_stderr)
check_stdout_msg(output, unrecognized_stdout)
@pytest.mark.parametrize("output_format", OutputFormat)
@pytest.mark.parametrize("shortcut", [True, False])
def test_cli_version(shortcut, output_format):
"""
title: Test for 'version' command.
description: Test if 'version' command displays correct output.
pass_criteria:
- Proper component names displayed in table with component versions.
"""
TestRun.LOGGER.info("Check version.")
output = casadm.print_version(output_format, shortcut).stdout
TestRun.LOGGER.info(output)
if not names_in_output(output) or not versions_in_output(output):
TestRun.fail("'Version' command failed.")
def names_in_output(output):
return ("CAS Cache Kernel Module" in output
and "CAS CLI Utility" in output)
def versions_in_output(output):
version_pattern = re.compile(r"(\d){2}\.(\d){2}\.(\d)\.(\d){4}.(\S)")
return len(version_pattern.findall(output)) == 2
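To illustrate what the pattern above accepts: each component row of the CSV output must carry a version in the NN.NN.N.NNNN.suffix form. The version string below is made up purely for illustration.

import re

version_pattern = re.compile(r"(\d){2}\.(\d){2}\.(\d)\.(\d){4}.(\S)")
sample = (
    "Name,Version\n"
    "CAS Cache Kernel Module,24.09.0.0001.master\n"
    "CAS CLI Utility,24.09.0.0001.master"
)
# Two matching rows are expected, which is exactly what versions_in_output() checks.
assert len(version_pattern.findall(sample)) == 2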

View File

@ -14,7 +14,7 @@ def test_cli_help_spelling():
title: Spelling test for 'help' command title: Spelling test for 'help' command
description: Validates spelling of 'help' in CLI description: Validates spelling of 'help' in CLI
pass criteria: pass criteria:
- no spelling mistakes are found - No spelling mistakes are found
""" """
cas_dictionary = os.path.join(TestRun.usr.repo_dir, "test", "functional", "resources") cas_dictionary = os.path.join(TestRun.usr.repo_dir, "test", "functional", "resources")

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2020-2021 Intel Corporation # Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -20,12 +20,11 @@ from test_tools.dd import Dd
@pytest.mark.parametrize("purge_target", ["cache", "core"]) @pytest.mark.parametrize("purge_target", ["cache", "core"])
def test_purge(purge_target): def test_purge(purge_target):
""" """
title: Call purge without and with `--script` switch title: Basic test for purge command
description: | description: Check purge command behaviour with and without '--script' flag
Check if purge is called only when `--script` switch is used.
pass_criteria: pass_criteria:
- casadm returns an error when `--script` is missing - Error returned when '--script' is missing
- cache is wiped when purge command is used properly - Cache is wiped when purge command is used properly
""" """
with TestRun.step("Prepare devices"): with TestRun.step("Prepare devices"):
cache_device = TestRun.disks["cache"] cache_device = TestRun.disks["cache"]
@ -41,7 +40,7 @@ def test_purge(purge_target):
cache = casadm.start_cache(cache_device, force=True) cache = casadm.start_cache(cache_device, force=True)
core = casadm.add_core(cache, core_device) core = casadm.add_core(cache, core_device)
with TestRun.step("Trigger IO to prepared cache instance"): with TestRun.step("Trigger I/O to prepared cache instance"):
dd = ( dd = (
Dd() Dd()
.input("/dev/zero") .input("/dev/zero")
@ -79,8 +78,3 @@ def test_purge(purge_target):
if cache.get_statistics().usage_stats.occupancy.get_value() != 0: if cache.get_statistics().usage_stats.occupancy.get_value() != 0:
TestRun.fail(f"{cache.get_statistics().usage_stats.occupancy.get_value()}") TestRun.fail(f"{cache.get_statistics().usage_stats.occupancy.get_value()}")
TestRun.fail(f"Purge {purge_target} should invalidate all cache lines!") TestRun.fail(f"Purge {purge_target} should invalidate all cache lines!")
with TestRun.step(
f"Stop cache"
):
casadm.stop_all_caches()

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies # Copyright(c) 2024-2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -44,8 +44,8 @@ def test_standby_neg_cli_params():
""" """
title: Verifying parameters for starting a standby cache instance title: Verifying parameters for starting a standby cache instance
description: | description: |
Try executing the standby init command with required arguments missing or Try executing the standby init command with required arguments missing or
disallowed arguments present. disallowed arguments present.
pass_criteria: pass_criteria:
- The execution is unsuccessful for all improper argument combinations - The execution is unsuccessful for all improper argument combinations
- A proper error message is displayed for unsuccessful executions - A proper error message is displayed for unsuccessful executions
@ -272,8 +272,8 @@ def test_start_neg_cli_flags():
""" """
title: Blocking standby start command with mutually exclusive flags title: Blocking standby start command with mutually exclusive flags
description: | description: |
Try executing the standby start command with different combinations of mutually Try executing the standby start command with different combinations of mutually
exclusive flags. exclusive flags.
pass_criteria: pass_criteria:
- The command execution is unsuccessful for commands with mutually exclusive flags - The command execution is unsuccessful for commands with mutually exclusive flags
- A proper error message is displayed - A proper error message is displayed
@ -327,7 +327,7 @@ def test_activate_without_detach():
""" """
title: Activate cache without detach command. title: Activate cache without detach command.
description: | description: |
Try activate passive cache without detach command before activation. Try to activate passive cache without detach command before activation.
pass_criteria: pass_criteria:
- The activation is not possible - The activation is not possible
- The cache remains in Standby state after unsuccessful activation - The cache remains in Standby state after unsuccessful activation
@ -390,14 +390,14 @@ def test_activate_without_detach():
@pytest.mark.require_disk("standby_cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @pytest.mark.require_disk("standby_cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_activate_neg_cache_line_size(): def test_activate_neg_cache_line_size():
""" """
title: Blocking cache with mismatching cache line size activation. title: Blocking cache with mismatching cache line size activation.
description: | description: |
Try restoring cache operations from a replicated cache that was initialized Try restoring cache operations from a replicated cache that was initialized
with different cache line size than the original cache. with different cache line size than the original cache.
pass_criteria: pass_criteria:
- The activation is cancelled - The activation is cancelled
- The cache remains in Standby detached state after an unsuccessful activation - The cache remains in Standby detached state after an unsuccessful activation
- A proper error message is displayed - A proper error message is displayed
""" """
with TestRun.step("Prepare cache devices"): with TestRun.step("Prepare cache devices"):
@ -593,7 +593,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
@pytest.mark.require_disk("core", DiskTypeLowerThan("caches")) @pytest.mark.require_disk("core", DiskTypeLowerThan("caches"))
def test_standby_activate_with_corepool(): def test_standby_activate_with_corepool():
""" """
title: Activate standby cache instance with corepool title: Activate standby cache instance with core pool
description: | description: |
Activation of standby cache with core taken from core pool Activation of standby cache with core taken from core pool
pass_criteria: pass_criteria:
@ -652,12 +652,12 @@ def test_standby_activate_with_corepool():
@pytest.mark.parametrizex("cache_line_size", CacheLineSize) @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_standby_start_stop(cache_line_size): def test_standby_start_stop(cache_line_size):
""" """
title: Start and stop a standby cache instance. title: Start and stop a standby cache instance.
description: Test if cache can be started in standby state and stopped without activation. description: Test if cache can be started in standby state and stopped without activation.
pass_criteria: pass_criteria:
- A cache exported object appears after starting a cache in standby state - A cache exported object appears after starting a cache in standby state
- The data written to the cache exported object is committed to the underlying cache device - The data written to the cache exported object is committed to the underlying cache device
- The cache exported object disappears after stopping the standby cache instance - The cache exported object disappears after stopping the standby cache instance
""" """
with TestRun.step("Prepare a cache device"): with TestRun.step("Prepare a cache device"):
cache_size = Size(500, Unit.MebiByte) cache_size = Size(500, Unit.MebiByte)

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -21,12 +21,12 @@ CORE_ID_RANGE = (0, 4095)
@pytest.mark.parametrize("shortcut", [True, False]) @pytest.mark.parametrize("shortcut", [True, False])
def test_cli_start_stop_default_id(shortcut): def test_cli_start_stop_default_id(shortcut):
""" """
title: Test for starting a cache with a default ID - short and long command title: Test for starting a cache with a default ID - short and long command
description: | description: |
Start a new cache with a default ID and then stop this cache. Start a new cache with a default ID and then stop this cache.
pass_criteria: pass_criteria:
- The cache has successfully started with a default ID - The cache has successfully started with a default ID
- The cache has successfully stopped - The cache has successfully stopped
""" """
with TestRun.step("Prepare the device for the cache."): with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks['cache'] cache_device = TestRun.disks['cache']
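For context, the short and long command forms that the shortcut parametrization exercises boil down to invocations along the lines of the sketch below; the device path and cache ID are placeholders, and the exact flag spellings should be confirmed against the installed casadm help:

    # Illustrative sketch only -- placeholders, not the test's actual implementation.
    start_short = "casadm -S -d /dev/nvme0n1p1"                         # shortcut=True
    start_long = "casadm --start-cache --cache-device /dev/nvme0n1p1"   # shortcut=False
    stop_short = "casadm -T -i 1"
    stop_long = "casadm --stop-cache --cache-id 1"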
@ -62,12 +62,12 @@ def test_cli_start_stop_default_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False]) @pytest.mark.parametrize("shortcut", [True, False])
def test_cli_start_stop_custom_id(shortcut): def test_cli_start_stop_custom_id(shortcut):
""" """
title: Test for starting a cache with a custom ID - short and long command title: Test for starting a cache with a custom ID - short and long command
description: | description: |
Start a new cache with a random ID (from allowed pool) and then stop this cache. Start a new cache with a random ID (from allowed pool) and then stop this cache.
pass_criteria: pass_criteria:
- The cache has successfully started with a custom ID - The cache has successfully started with a custom ID
- The cache has successfully stopped - The cache has successfully stopped
""" """
with TestRun.step("Prepare the device for the cache."): with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks['cache'] cache_device = TestRun.disks['cache']
@ -106,13 +106,13 @@ def test_cli_start_stop_custom_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False]) @pytest.mark.parametrize("shortcut", [True, False])
def test_cli_add_remove_default_id(shortcut): def test_cli_add_remove_default_id(shortcut):
""" """
title: Test for adding and removing a core with a default ID - short and long command title: Test for adding and removing a core with a default ID - short and long command
description: | description: |
Start a new cache and add a core to it without passing a core ID as an argument Start a new cache and add a core to it without passing a core ID as an argument
and then remove this core from the cache. and then remove this core from the cache.
pass_criteria: pass_criteria:
- The core is added to the cache with a default ID - The core is added to the cache with a default ID
- The core is successfully removed from the cache - The core is successfully removed from the cache
""" """
with TestRun.step("Prepare the devices."): with TestRun.step("Prepare the devices."):
cache_disk = TestRun.disks['cache'] cache_disk = TestRun.disks['cache']
@ -157,13 +157,13 @@ def test_cli_add_remove_default_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False]) @pytest.mark.parametrize("shortcut", [True, False])
def test_cli_add_remove_custom_id(shortcut): def test_cli_add_remove_custom_id(shortcut):
""" """
title: Test for adding and removing a core with a custom ID - short and long command title: Test for adding and removing a core with a custom ID - short and long command
description: | description: |
Start a new cache and add a core to it with passing a random core ID Start a new cache and add a core to it with passing a random core ID
(from allowed pool) as an argument and then remove this core from the cache. (from allowed pool) as an argument and then remove this core from the cache.
pass_criteria: pass_criteria:
- The core is added to the cache with a custom ID - The core is added to the cache with a custom ID
- The core is successfully removed from the cache - The core is successfully removed from the cache
""" """
with TestRun.step("Prepare the devices."): with TestRun.step("Prepare the devices."):
cache_disk = TestRun.disks['cache'] cache_disk = TestRun.disks['cache']
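Similarly, for the add/remove-core pair with a custom ID, the short and long invocations being compared look roughly like the sketch below; the cache ID, core ID and device path are placeholders, and the flags should be double-checked against casadm help:

    # Illustrative sketch only -- IDs and paths are placeholders.
    add_short = "casadm -A -i 1 -j 42 -d /dev/sdb1"                                  # shortcut=True
    add_long = "casadm --add-core --cache-id 1 --core-id 42 --core-device /dev/sdb1"
    remove_short = "casadm -R -i 1 -j 42"
    remove_long = "casadm --remove-core --cache-id 1 --core-id 42"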
@ -209,13 +209,13 @@ def test_cli_add_remove_custom_id(shortcut):
@pytest.mark.parametrize("shortcut", [True, False]) @pytest.mark.parametrize("shortcut", [True, False])
def test_cli_load_and_force(shortcut): def test_cli_load_and_force(shortcut):
""" """
title: Test if it is possible to use start command with 'load' and 'force' flags at once title: Test if it is possible to use start command with 'load' and 'force' flags at once
description: | description: |
Try to start cache with 'load' and 'force' options at the same time Try to start cache with 'load' and 'force' options at the same time
and check that the command is rejected and check that the command is rejected
pass_criteria: pass_criteria:
- Start cache command with both 'force' and 'load' options should fail - Start cache command with both 'force' and 'load' options should fail
- Proper message should be received - Proper message should be received
""" """
with TestRun.step("Prepare cache."): with TestRun.step("Prepare cache."):
cache_device = TestRun.disks['cache'] cache_device = TestRun.disks['cache']
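The failure check itself is outside this hunk; a minimal sketch of the pattern follows, assuming a start_cmd CLI helper (analogous to set_param_cutoff_cmd used later in this changeset) and the executor interface -- both are assumptions here, not API confirmed by this diff:

    # Sketch only: start_cmd, TestRun.executor.run and the behaviour of the combined
    # flags are assumptions for illustration.
    command = start_cmd(
        cache_dev=cache_device.path, force=True, load=True, shortcut=shortcut
    )
    output = TestRun.executor.run(command)
    if output.exit_code == 0:
        TestRun.fail("Start cache with both 'load' and 'force' succeeded (should fail)!")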

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2022 Intel Corporation # Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -24,9 +24,10 @@ from test_tools.udev import Udev
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policy(): def test_cleaning_policy():
""" """
Title: test manual casadm flush title: Test for manual cache and core flushing
description: | The test is to see if dirty data will be removed from the Cache description: |
or Core after using the casadm command with the corresponding parameter. The test is to see if dirty data will be removed from the cache
or core after using the casadm command with the corresponding parameter.
pass_criteria: pass_criteria:
- Cache and core are filled with dirty data. - Cache and core are filled with dirty data.
- After cache and core flush, dirty data are cleared. - After cache and core flush, dirty data are cleared.
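The flush step that drives these criteria is not shown in this hunk; a minimal sketch of the idea, assuming flush helpers and a dirty-statistics accessor in the spirit of the rest of this framework (all names below are illustrative assumptions):

    # Illustrative sketch -- flush_cache/flush_core and get_dirty_blocks are assumed
    # helper names, not confirmed by this diff.
    cache.flush_cache()    # equivalent of the casadm flush-cache operation
    core.flush_core()      # equivalent of the casadm flush-core operation
    if cache.get_dirty_blocks() > Size(0, Unit.Byte):
        TestRun.fail("Dirty data remained on the cache after flush")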

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -22,26 +22,26 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_default_params(): def test_seq_cutoff_default_params():
""" """
title: Default sequential cut-off threshold & policy test title: Default sequential cutoff threshold & policy test
description: Test if proper default threshold and policy are set after cache start description: Test if proper default threshold and policy are set after cache start
pass_criteria: pass_criteria:
- "Full" shall be default sequential cut-off policy - "Full" shall be default sequential cutoff policy
- There shall be default 1MiB (1024kiB) value for sequential cut-off threshold - There shall be default 1MiB (1024kiB) value for sequential cutoff threshold
""" """
with TestRun.step("Test prepare (start cache and add core)"): with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare() cache, cores = prepare()
with TestRun.step("Getting sequential cut-off parameters"): with TestRun.step("Getting sequential cutoff parameters"):
params = cores[0].get_seq_cut_off_parameters() params = cores[0].get_seq_cut_off_parameters()
with TestRun.step("Check if proper sequential cut off policy is set as a default"): with TestRun.step("Check if proper sequential cutoff policy is set as a default"):
if params.policy != SeqCutOffPolicy.DEFAULT: if params.policy != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong sequential cut off policy set: {params.policy} " TestRun.fail(f"Wrong sequential cutoff policy set: {params.policy} "
f"should be {SeqCutOffPolicy.DEFAULT}") f"should be {SeqCutOffPolicy.DEFAULT}")
with TestRun.step("Check if proper sequential cut off threshold is set as a default"): with TestRun.step("Check if proper sequential cutoff threshold is set as a default"):
if params.threshold != SEQ_CUT_OFF_THRESHOLD_DEFAULT: if params.threshold != SEQ_CUT_OFF_THRESHOLD_DEFAULT:
TestRun.fail(f"Wrong sequential cut off threshold set: {params.threshold} " TestRun.fail(f"Wrong sequential cutoff threshold set: {params.threshold} "
f"should be {SEQ_CUT_OFF_THRESHOLD_DEFAULT}") f"should be {SEQ_CUT_OFF_THRESHOLD_DEFAULT}")
@ -50,32 +50,31 @@ def test_seq_cutoff_default_params():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_core(policy): def test_seq_cutoff_set_get_policy_core(policy):
""" """
title: Sequential cut-off policy set/get test for core title: Sequential cutoff policy set/get test for core
description: | description: |
Test if CAS is setting proper sequential cut-off policy for core and Verify if it is possible to set and get a sequential cutoff policy per core
returns previously set value
pass_criteria: pass_criteria:
- Sequential cut-off policy obtained from get-param command for the first core must be - Sequential cutoff policy obtained from get-param command for the first core must be
the same as the one used in set-param command the same as the one used in set-param command
- Sequential cut-off policy obtained from get-param command for the second core must be - Sequential cutoff policy obtained from get-param command for the second core must be
proper default value proper default value
""" """
with TestRun.step("Test prepare (start cache and add 2 cores)"): with TestRun.step("Test prepare (start cache and add 2 cores)"):
cache, cores = prepare(cores_count=2) cache, cores = prepare(cores_count=2)
with TestRun.step(f"Setting core sequential cut off policy mode to {policy}"): with TestRun.step(f"Setting core sequential cutoff policy mode to {policy}"):
cores[0].set_seq_cutoff_policy(policy) cores[0].set_seq_cutoff_policy(policy)
with TestRun.step("Check if proper sequential cut off policy was set for the first core"): with TestRun.step("Check if proper sequential cutoff policy was set for the first core"):
if cores[0].get_seq_cut_off_policy() != policy: if cores[0].get_seq_cut_off_policy() != policy:
TestRun.fail(f"Wrong sequential cut off policy set: " TestRun.fail(f"Wrong sequential cutoff policy set: "
f"{cores[0].get_seq_cut_off_policy()} " f"{cores[0].get_seq_cut_off_policy()} "
f"should be {policy}") f"should be {policy}")
with TestRun.step("Check if proper default sequential cut off policy was set for the " with TestRun.step("Check if proper default sequential cutoff policy was set for the "
"second core"): "second core"):
if cores[1].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT: if cores[1].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong default sequential cut off policy: " TestRun.fail(f"Wrong default sequential cutoff policy: "
f"{cores[1].get_seq_cut_off_policy()} " f"{cores[1].get_seq_cut_off_policy()} "
f"should be {SeqCutOffPolicy.DEFAULT}") f"should be {SeqCutOffPolicy.DEFAULT}")
@ -85,24 +84,23 @@ def test_seq_cutoff_set_get_policy_core(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_cache(policy): def test_seq_cutoff_set_get_policy_cache(policy):
""" """
title: Sequential cut-off policy set/get test for cache title: Sequential cutoff policy set/get test for cache
description: | description: |
Test if CAS is setting proper sequential cut-off policy for whole cache and Verify if it is possible to set and get a sequential cutoff policy for the whole cache
returns previously set value
pass_criteria: pass_criteria:
- Sequential cut-off policy obtained from get-param command for each of 3 cores must be the - Sequential cutoff policy obtained from get-param command for each of 3 cores must be the
same as the one used in set-param command for cache same as the one used in set-param command for cache
""" """
with TestRun.step("Test prepare (start cache and add 3 cores)"): with TestRun.step("Test prepare (start cache and add 3 cores)"):
cache, cores = prepare(cores_count=3) cache, cores = prepare(cores_count=3)
with TestRun.step(f"Setting sequential cut off policy mode {policy} for cache"): with TestRun.step(f"Setting sequential cutoff policy mode {policy} for cache"):
cache.set_seq_cutoff_policy(policy) cache.set_seq_cutoff_policy(policy)
for i in TestRun.iteration(range(0, len(cores)), "Verifying if proper policy was set"): for i in TestRun.iteration(range(0, len(cores)), "Verifying if proper policy was set"):
with TestRun.step(f"Check if proper sequential cut off policy was set for core"): with TestRun.step(f"Check if proper sequential cutoff policy was set for core"):
if cores[i].get_seq_cut_off_policy() != policy: if cores[i].get_seq_cut_off_policy() != policy:
TestRun.fail(f"Wrong core sequential cut off policy: " TestRun.fail(f"Wrong core sequential cutoff policy: "
f"{cores[i].get_seq_cut_off_policy()} " f"{cores[i].get_seq_cut_off_policy()} "
f"should be {policy}") f"should be {policy}")
@ -111,23 +109,25 @@ def test_seq_cutoff_set_get_policy_cache(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_policy_load(): def test_seq_cutoff_policy_load():
""" """
title: Sequential cut-off policy set/get test with cache load between title: Sequential cutoff policy after loading cache
description: | description: |
Set each possible policy for a different core, stop the cache, and test if after cache load Set each possible policy for a different core, stop the cache, and test if after cache load
sequential cut-off policy value previously set is being loaded correctly for each core. sequential cutoff policy value previously set is being loaded correctly for each core.
pass_criteria: pass_criteria:
- Sequential cut-off policy obtained from get-param command after cache load - Sequential cutoff policy obtained from get-param command after cache load
must be the same as the one used in set-param command before cache stop must be the same as the one used in set-param command before cache stop
- Sequential cut-off policy loaded for the last core should be the default one - Sequential cutoff policy loaded for the last core should be the default one
""" """
with TestRun.step(f"Test prepare (start cache and add {len(SeqCutOffPolicy) + 1} cores)"): with TestRun.step(f"Test prepare (start cache and add {len(SeqCutOffPolicy) + 1} cores)"):
# Create as many cores as there are possible policies, including the default one # Create as many cores as there are possible policies, including the default one
cache, cores = prepare(cores_count=len(SeqCutOffPolicy) + 1) cache, cores = prepare(cores_count=len(SeqCutOffPolicy) + 1)
policies = [policy for policy in SeqCutOffPolicy] policies = [policy for policy in SeqCutOffPolicy]
for i, core in TestRun.iteration(enumerate(cores[:-1]), "Set all possible policies " for i, core in TestRun.iteration(
"except the default one"): enumerate(cores[:-1]),
with TestRun.step(f"Setting cache sequential cut off policy mode to " "Set all possible policies except the default one"
):
with TestRun.step(f"Setting cache sequential cutoff policy mode to "
f"{policies[i]}"): f"{policies[i]}"):
cores[i].set_seq_cutoff_policy(policies[i]) cores[i].set_seq_cutoff_policy(policies[i])
@ -140,18 +140,21 @@ def test_seq_cutoff_policy_load():
with TestRun.step("Getting cores from loaded cache"): with TestRun.step("Getting cores from loaded cache"):
cores = loaded_cache.get_core_devices() cores = loaded_cache.get_core_devices()
for i, core in TestRun.iteration(enumerate(cores[:-1]), "Check if proper policies have " for i, core in TestRun.iteration(
"been loaded"): enumerate(cores[:-1]),
with TestRun.step(f"Check if proper sequential cut off policy was loaded"): "Check if proper policies have been loaded"
):
with TestRun.step(f"Check if proper sequential cutoff policy was loaded"):
if cores[i].get_seq_cut_off_policy() != policies[i]: if cores[i].get_seq_cut_off_policy() != policies[i]:
TestRun.fail(f"Wrong sequential cut off policy loaded: " TestRun.fail(f"Wrong sequential cutoff policy loaded: "
f"{cores[i].get_seq_cut_off_policy()} " f"{cores[i].get_seq_cut_off_policy()} "
f"should be {policies[i]}") f"should be {policies[i]}")
with TestRun.step(f"Check if proper (default) sequential cut off policy was loaded for " with TestRun.step(
f"last core"): "Check if proper (default) sequential cutoff policy was loaded for last core"
):
if cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT: if cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong sequential cut off policy loaded: " TestRun.fail(f"Wrong sequential cutoff policy loaded: "
f"{cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy()} " f"{cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy()} "
f"should be {SeqCutOffPolicy.DEFAULT}") f"should be {SeqCutOffPolicy.DEFAULT}")
@ -163,16 +166,16 @@ def test_seq_cutoff_policy_load():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_invalid_threshold(threshold): def test_seq_cutoff_set_invalid_threshold(threshold):
""" """
title: Invalid sequential cut-off threshold test title: Invalid sequential cutoff threshold test
description: Test if CAS is allowing setting invalid sequential cut-off threshold description: Verify that setting an invalid sequential cutoff threshold is blocked
pass_criteria: pass_criteria:
- Setting invalid sequential cut-off threshold should be blocked - Setting invalid sequential cutoff threshold should be blocked
""" """
with TestRun.step("Test prepare (start cache and add core)"): with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare() cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte) _threshold = Size(threshold, Unit.KibiByte)
with TestRun.step(f"Setting cache sequential cut off threshold to out of range value: " with TestRun.step(f"Setting cache sequential cutoff threshold to out of range value: "
f"{_threshold}"): f"{_threshold}"):
command = set_param_cutoff_cmd( command = set_param_cutoff_cmd(
cache_id=str(cache.cache_id), core_id=str(cores[0].core_id), cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
@ -182,7 +185,7 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
not in output.stderr: not in output.stderr:
TestRun.fail("Command succeeded (should fail)!") TestRun.fail("Command succeeded (should fail)!")
with TestRun.step(f"Setting cache sequential cut off threshold " with TestRun.step(f"Setting cache sequential cutoff threshold "
f"to value passed as a float"): f"to value passed as a float"):
command = set_param_cutoff_cmd( command = set_param_cutoff_cmd(
cache_id=str(cache.cache_id), core_id=str(cores[0].core_id), cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
@ -199,25 +202,23 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_threshold(threshold): def test_seq_cutoff_set_get_threshold(threshold):
""" """
title: Sequential cut-off threshold set/get test title: Sequential cutoff threshold set/get test
description: | description: Verify setting and getting value of sequential cutoff threshold
Test if CAS is setting proper sequential cut-off threshold and returns
previously set value
pass_criteria: pass_criteria:
- Sequential cut-off threshold obtained from get-param command must be the same as - Sequential cutoff threshold obtained from get-param command must be the same as
the one used in set-param command the one used in set-param command
""" """
with TestRun.step("Test prepare (start cache and add core)"): with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare() cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte) _threshold = Size(threshold, Unit.KibiByte)
with TestRun.step(f"Setting cache sequential cut off threshold to " with TestRun.step(f"Setting cache sequential cutoff threshold to "
f"{_threshold}"): f"{_threshold}"):
cores[0].set_seq_cutoff_threshold(_threshold) cores[0].set_seq_cutoff_threshold(_threshold)
with TestRun.step("Check if proper sequential cut off threshold was set"): with TestRun.step("Check if proper sequential cutoff threshold was set"):
if cores[0].get_seq_cut_off_threshold() != _threshold: if cores[0].get_seq_cut_off_threshold() != _threshold:
TestRun.fail(f"Wrong sequential cut off threshold set: " TestRun.fail(f"Wrong sequential cutoff threshold set: "
f"{cores[0].get_seq_cut_off_threshold()} " f"{cores[0].get_seq_cut_off_threshold()} "
f"should be {_threshold}") f"should be {_threshold}")
@ -228,20 +229,17 @@ def test_seq_cutoff_set_get_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_threshold_load(threshold): def test_seq_cutoff_threshold_load(threshold):
""" """
title: Sequential cut-off threshold set/get test with cache load between title: Sequential cutoff threshold after loading cache
description: | description: Verify sequential cutoff threshold value after reloading the cache.
Test if after cache load sequential cut-off threshold
value previously set is being loaded correctly. Each of possible sequential cut-off
policies is set for different core.
pass_criteria: pass_criteria:
- Sequential cut-off threshold obtained from get-param command after cache load - Sequential cutoff threshold obtained from get-param command after cache load
must be the same as the one used in set-param command before cache stop must be the same as the one used in set-param command before cache stop
""" """
with TestRun.step("Test prepare (start cache and add core)"): with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare() cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte) _threshold = Size(threshold, Unit.KibiByte)
with TestRun.step(f"Setting cache sequential cut off threshold to " with TestRun.step(f"Setting cache sequential cutoff threshold to "
f"{_threshold}"): f"{_threshold}"):
cores[0].set_seq_cutoff_threshold(_threshold) cores[0].set_seq_cutoff_threshold(_threshold)
@ -254,9 +252,9 @@ def test_seq_cutoff_threshold_load(threshold):
with TestRun.step("Getting core from loaded cache"): with TestRun.step("Getting core from loaded cache"):
cores_load = loaded_cache.get_core_devices() cores_load = loaded_cache.get_core_devices()
with TestRun.step("Check if proper sequential cut off policy was loaded"): with TestRun.step("Check if proper sequential cutoff policy was loaded"):
if cores_load[0].get_seq_cut_off_threshold() != _threshold: if cores_load[0].get_seq_cut_off_threshold() != _threshold:
TestRun.fail(f"Wrong sequential cut off threshold set: " TestRun.fail(f"Wrong sequential cutoff threshold set: "
f"{cores_load[0].get_seq_cut_off_threshold()} " f"{cores_load[0].get_seq_cut_off_threshold()} "
f"should be {_threshold}") f"should be {_threshold}")

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2020-2021 Intel Corporation # Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -36,15 +36,15 @@ number_of_checks = 10
@pytest.mark.parametrizex("cache_mode", CacheMode) @pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_seqcutoff_params(cache_mode): def test_set_get_seq_cutoff_params(cache_mode):
""" """
title: Test for setting and reading sequential cut-off parameters. title: Test for setting and reading sequential cutoff parameters.
description: | description: |
Verify that it is possible to set and read all available sequential cut-off Verify that it is possible to set and read all available sequential cutoff
parameters using casadm --set-param and --get-param options. parameters using casadm --set-param and --get-param options.
pass_criteria: pass_criteria:
- All sequential cut-off parameters are set to given values. - All sequential cutoff parameters are set to given values.
- All sequential cut-off parameters displays proper values. - All sequential cutoff parameters display proper values.
""" """
with TestRun.step("Partition cache and core devices"): with TestRun.step("Partition cache and core devices"):
@ -56,60 +56,60 @@ def test_set_get_seqcutoff_params(cache_mode):
): ):
caches, cores = cache_prepare(cache_mode, cache_dev, core_dev) caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
with TestRun.step("Check sequential cut-off default parameters"): with TestRun.step("Check sequential cutoff default parameters"):
default_seqcutoff_params = SeqCutOffParameters.default_seq_cut_off_params() default_seq_cutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
for i in range(caches_count): for i in range(caches_count):
for j in range(cores_per_cache): for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params) check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)
with TestRun.step( with TestRun.step(
"Set new random values for sequential cut-off parameters for one core only" "Set new random values for sequential cutoff parameters for one core only"
): ):
for check in range(number_of_checks): for check in range(number_of_checks):
random_seqcutoff_params = new_seqcutoff_parameters_random_values() random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
cores[0][0].set_seq_cutoff_parameters(random_seqcutoff_params) cores[0][0].set_seq_cutoff_parameters(random_seq_cutoff_params)
# Check changed parameters for first core: # Check changed parameters for first core:
check_seqcutoff_parameters(cores[0][0], random_seqcutoff_params) check_seq_cutoff_parameters(cores[0][0], random_seq_cutoff_params)
# Check default parameters for other cores: # Check default parameters for other cores:
for j in range(1, cores_per_cache): for j in range(1, cores_per_cache):
check_seqcutoff_parameters(cores[0][j], default_seqcutoff_params) check_seq_cutoff_parameters(cores[0][j], default_seq_cutoff_params)
for i in range(1, caches_count): for i in range(1, caches_count):
for j in range(cores_per_cache): for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params) check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)
with TestRun.step( with TestRun.step(
"Set new random values for sequential cut-off parameters " "Set new random values for sequential cutoff parameters "
"for all cores within given cache instance" "for all cores within given cache instance"
): ):
for check in range(number_of_checks): for check in range(number_of_checks):
random_seqcutoff_params = new_seqcutoff_parameters_random_values() random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
caches[0].set_seq_cutoff_parameters(random_seqcutoff_params) caches[0].set_seq_cutoff_parameters(random_seq_cutoff_params)
# Check changed parameters for first cache instance: # Check changed parameters for first cache instance:
for j in range(cores_per_cache): for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[0][j], random_seqcutoff_params) check_seq_cutoff_parameters(cores[0][j], random_seq_cutoff_params)
# Check default parameters for other cache instances: # Check default parameters for other cache instances:
for i in range(1, caches_count): for i in range(1, caches_count):
for j in range(cores_per_cache): for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params) check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)
with TestRun.step( with TestRun.step(
"Set new random values for sequential cut-off parameters for all cores" "Set new random values for sequential cutoff parameters for all cores"
): ):
for check in range(number_of_checks): for check in range(number_of_checks):
seqcutoff_params = [] seq_cutoff_params = []
for i in range(caches_count): for i in range(caches_count):
for j in range(cores_per_cache): for j in range(cores_per_cache):
random_seqcutoff_params = new_seqcutoff_parameters_random_values() random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
seqcutoff_params.append(random_seqcutoff_params) seq_cutoff_params.append(random_seq_cutoff_params)
cores[i][j].set_seq_cutoff_parameters(random_seqcutoff_params) cores[i][j].set_seq_cutoff_parameters(random_seq_cutoff_params)
for i in range(caches_count): for i in range(caches_count):
for j in range(cores_per_cache): for j in range(cores_per_cache):
check_seqcutoff_parameters( check_seq_cutoff_parameters(
cores[i][j], seqcutoff_params[i * cores_per_cache + j] cores[i][j], seq_cutoff_params[i * cores_per_cache + j]
) )
@ -119,14 +119,14 @@ def test_set_get_seqcutoff_params(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_cleaning_params(cache_mode, cleaning_policy): def test_set_get_cleaning_params(cache_mode, cleaning_policy):
""" """
title: Test for setting and reading cleaning parameters. title: Test for setting and reading cleaning parameters.
description: | description: |
Verify that it is possible to set and read all available cleaning Verify that it is possible to set and read all available cleaning
parameters for all cleaning policies using casadm --set-param and parameters for all cleaning policies using casadm --set-param and
--get-param options. --get-param options.
pass_criteria: pass_criteria:
- All cleaning parameters are set to given values. - All cleaning parameters are set to given values.
- All cleaning parameters display proper values. - All cleaning parameters display proper values.
""" """
with TestRun.step("Partition cache and core devices"): with TestRun.step("Partition cache and core devices"):
@ -231,7 +231,7 @@ def cache_prepare(cache_mode, cache_dev, core_dev):
return caches, cores return caches, cores
def new_seqcutoff_parameters_random_values(): def new_seq_cutoff_parameters_random_values():
return SeqCutOffParameters( return SeqCutOffParameters(
threshold=Size(random.randrange(1, 1000000), Unit.KibiByte), threshold=Size(random.randrange(1, 1000000), Unit.KibiByte),
policy=random.choice(list(SeqCutOffPolicy)), policy=random.choice(list(SeqCutOffPolicy)),
@ -275,27 +275,27 @@ def new_cleaning_parameters_random_values(cleaning_policy):
return cleaning_params return cleaning_params
def check_seqcutoff_parameters(core, seqcutoff_params): def check_seq_cutoff_parameters(core, seq_cutoff_params):
current_seqcutoff_params = core.get_seq_cut_off_parameters() current_seq_cutoff_params = core.get_seq_cut_off_parameters()
failed_params = "" failed_params = ""
if current_seqcutoff_params.threshold != seqcutoff_params.threshold: if current_seq_cutoff_params.threshold != seq_cutoff_params.threshold:
failed_params += ( failed_params += (
f"Threshold is {current_seqcutoff_params.threshold}, " f"Threshold is {current_seq_cutoff_params.threshold}, "
f"should be {seqcutoff_params.threshold}\n" f"should be {seq_cutoff_params.threshold}\n"
) )
if current_seqcutoff_params.policy != seqcutoff_params.policy: if current_seq_cutoff_params.policy != seq_cutoff_params.policy:
failed_params += ( failed_params += (
f"Policy is {current_seqcutoff_params.policy}, " f"Policy is {current_seq_cutoff_params.policy}, "
f"should be {seqcutoff_params.policy}\n" f"should be {seq_cutoff_params.policy}\n"
) )
if current_seqcutoff_params.promotion_count != seqcutoff_params.promotion_count: if current_seq_cutoff_params.promotion_count != seq_cutoff_params.promotion_count:
failed_params += ( failed_params += (
f"Promotion count is {current_seqcutoff_params.promotion_count}, " f"Promotion count is {current_seq_cutoff_params.promotion_count}, "
f"should be {seqcutoff_params.promotion_count}\n" f"should be {seq_cutoff_params.promotion_count}\n"
) )
if failed_params: if failed_params:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Sequential cut-off parameters are not correct " f"Sequential cutoff parameters are not correct "
f"for {core.path}:\n{failed_params}" f"for {core.path}:\n{failed_params}"
) )
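A short usage sketch of this helper; the parameter values are arbitrary and the SeqCutOffPolicy member name used below is an assumption:

    # Usage sketch -- arbitrary values; SeqCutOffPolicy.never is an assumed member name.
    params = SeqCutOffParameters(
        threshold=Size(2048, Unit.KibiByte),
        policy=SeqCutOffPolicy.never,
        promotion_count=8,
    )
    cores[0][0].set_seq_cutoff_parameters(params)
    check_seq_cutoff_parameters(cores[0][0], params)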
@ -306,12 +306,12 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
failed_params = "" failed_params = ""
if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time: if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
failed_params += ( failed_params += (
f"Wake Up time is {current_cleaning_params.wake_up_time}, " f"Wake up time is {current_cleaning_params.wake_up_time}, "
f"should be {cleaning_params.wake_up_time}\n" f"should be {cleaning_params.wake_up_time}\n"
) )
if current_cleaning_params.staleness_time != cleaning_params.staleness_time: if current_cleaning_params.staleness_time != cleaning_params.staleness_time:
failed_params += ( failed_params += (
f"Staleness Time is {current_cleaning_params.staleness_time}, " f"Staleness time is {current_cleaning_params.staleness_time}, "
f"should be {cleaning_params.staleness_time}\n" f"should be {cleaning_params.staleness_time}\n"
) )
if ( if (
@ -319,7 +319,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.flush_max_buffers != cleaning_params.flush_max_buffers
): ):
failed_params += ( failed_params += (
f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, " f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
f"should be {cleaning_params.flush_max_buffers}\n" f"should be {cleaning_params.flush_max_buffers}\n"
) )
if ( if (
@ -327,7 +327,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.activity_threshold != cleaning_params.activity_threshold
): ):
failed_params += ( failed_params += (
f"Activity Threshold is {current_cleaning_params.activity_threshold}, " f"Activity threshold is {current_cleaning_params.activity_threshold}, "
f"should be {cleaning_params.activity_threshold}\n" f"should be {cleaning_params.activity_threshold}\n"
) )
if failed_params: if failed_params:
@ -341,7 +341,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
failed_params = "" failed_params = ""
if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time: if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
failed_params += ( failed_params += (
f"Wake Up time is {current_cleaning_params.wake_up_time}, " f"Wake up time is {current_cleaning_params.wake_up_time}, "
f"should be {cleaning_params.wake_up_time}\n" f"should be {cleaning_params.wake_up_time}\n"
) )
if ( if (
@ -349,7 +349,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.flush_max_buffers != cleaning_params.flush_max_buffers
): ):
failed_params += ( failed_params += (
f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, " f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
f"should be {cleaning_params.flush_max_buffers}\n" f"should be {cleaning_params.flush_max_buffers}\n"
) )
if failed_params: if failed_params:

View File

@ -1,6 +1,6 @@
# #
# Copyright(c) 2021 Intel Corporation # Copyright(c) 2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd. # Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
import time import time
@ -24,20 +24,19 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_negative_cases(): def test_zero_metadata_negative_cases():
""" """
title: Test for '--zero-metadata' negative cases. title: Test for '--zero-metadata' negative cases.
description: | description: Test for '--zero-metadata' scenarios with expected failures.
Test for '--zero-metadata' scenarios with expected failures. pass_criteria:
pass_criteria: - Zeroing metadata without '--force' failed when run on cache.
- Zeroing metadata without '--force' failed when run on cache. - Zeroing metadata with '--force' failed when run on cache.
- Zeroing metadata with '--force' failed when run on cache. - Zeroing metadata failed when run on system drive.
- Zeroing metadata failed when run on system drive. - Load cache command failed after successfully zeroing metadata on the cache device.
- Load cache command failed after successfully zeroing metadata on the cache device.
""" """
with TestRun.step("Prepare cache and core devices."): with TestRun.step("Prepare cache and core devices."):
cache_dev, core_dev, cache_disk = prepare_devices() cache_dev, core_dev, cache_disk = prepare_devices()
with TestRun.step("Start cache."): with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, force=True) casadm.start_cache(cache_dev, force=True)
with TestRun.step("Try to zero metadata and validate error message."): with TestRun.step("Try to zero metadata and validate error message."):
try: try:
@ -75,7 +74,7 @@ def test_zero_metadata_negative_cases():
with TestRun.step("Load cache."): with TestRun.step("Load cache."):
try: try:
cache = casadm.load_cache(cache_dev) casadm.load_cache(cache_dev)
TestRun.LOGGER.error("Loading cache should fail.") TestRun.LOGGER.error("Loading cache should fail.")
except CmdException: except CmdException:
TestRun.LOGGER.info("Loading cache failed as expected.") TestRun.LOGGER.info("Loading cache failed as expected.")
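The same expect-failure pattern recurs across these negative cases; a condensed sketch is below, where the zero_metadata helper name and the idea of validating the error text are assumptions rather than API confirmed by this diff:

    # Condensed sketch of the negative check -- casadm.zero_metadata is an assumed helper name.
    try:
        casadm.zero_metadata(cache_dev)
        TestRun.LOGGER.error("Zeroing metadata on a running cache should fail.")
    except CmdException:
        TestRun.LOGGER.info("Zeroing metadata failed as expected.")
        # Optionally validate here that the error message points at the device being in use.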
@ -86,12 +85,11 @@ def test_zero_metadata_negative_cases():
@pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("filesystem", Filesystem)
def test_zero_metadata_filesystem(filesystem): def test_zero_metadata_filesystem(filesystem):
""" """
title: Test for '--zero-metadata' and filesystem. title: Test for '--zero-metadata' and filesystem.
description: | description: Test for '--zero-metadata' on drive with filesystem.
Test for '--zero-metadata' on drive with filesystem. pass_criteria:
pass_criteria: - Zeroing metadata on a device with a filesystem failed and did not remove the filesystem.
- Zeroing metadata on device with filesystem failed and not removed filesystem. - Zeroing metadata on mounted device failed.
- Zeroing metadata on mounted device failed.
""" """
mount_point = "/mnt" mount_point = "/mnt"
with TestRun.step("Prepare devices."): with TestRun.step("Prepare devices."):
@ -131,14 +129,14 @@ def test_zero_metadata_filesystem(filesystem):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_dirty_data(): def test_zero_metadata_dirty_data():
""" """
title: Test for '--zero-metadata' and dirty data scenario. title: Test for '--zero-metadata' and dirty data scenario.
description: | description: |
Test for '--zero-metadata' with and without 'force' option if there are dirty data Test for '--zero-metadata' with and without 'force' option if there are dirty data
on cache. on cache.
pass_criteria: pass_criteria:
- Zeroing metadata without force failed on cache with dirty data. - Zeroing metadata without force failed on cache with dirty data.
- Zeroing metadata with force ran successfully on cache with dirty data. - Zeroing metadata with force ran successfully on cache with dirty data.
- Cache started successfully after zeroing metadata on cache with dirty data. - Cache started successfully after zeroing metadata on cache with dirty data.
""" """
with TestRun.step("Prepare cache and core devices."): with TestRun.step("Prepare cache and core devices."):
cache_dev, core_disk, cache_disk = prepare_devices() cache_dev, core_disk, cache_disk = prepare_devices()
@ -165,7 +163,7 @@ def test_zero_metadata_dirty_data():
with TestRun.step("Start cache (expect to fail)."): with TestRun.step("Start cache (expect to fail)."):
try: try:
cache = casadm.start_cache(cache_dev, CacheMode.WB) casadm.start_cache(cache_dev, CacheMode.WB)
except CmdException: except CmdException:
TestRun.LOGGER.info("Start cache failed as expected.") TestRun.LOGGER.info("Start cache failed as expected.")
@ -186,7 +184,7 @@ def test_zero_metadata_dirty_data():
with TestRun.step("Start cache without 'force' option."): with TestRun.step("Start cache without 'force' option."):
try: try:
cache = casadm.start_cache(cache_dev, CacheMode.WB) casadm.start_cache(cache_dev, CacheMode.WB)
TestRun.LOGGER.info("Cache started successfully.") TestRun.LOGGER.info("Cache started successfully.")
except CmdException: except CmdException:
TestRun.LOGGER.error("Start cache failed.") TestRun.LOGGER.error("Start cache failed.")
@ -196,21 +194,21 @@ def test_zero_metadata_dirty_data():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_dirty_shutdown(): def test_zero_metadata_dirty_shutdown():
""" """
title: Test for '--zero-metadata' and dirty shutdown scenario. title: Test for '--zero-metadata' and dirty shutdown scenario.
description: | description: |
Test for '--zero-metadata' with and without 'force' option on cache which had been dirty Test for '--zero-metadata' with and without 'force' option on cache which had been dirty
shut down before. shut down before.
pass_criteria: pass_criteria:
- Zeroing metadata without force failed on cache after dirty shutdown. - Zeroing metadata without force failed on cache after dirty shutdown.
- Zeroing metadata with force ran successfully on cache after dirty shutdown. - Zeroing metadata with force ran successfully on cache after dirty shutdown.
- Cache started successfully after dirty shutdown and zeroing metadata on cache. - Cache started successfully after dirty shutdown and zeroing metadata on cache.
""" """
with TestRun.step("Prepare cache and core devices."): with TestRun.step("Prepare cache and core devices."):
cache_dev, core_disk, cache_disk = prepare_devices() cache_dev, core_disk, cache_disk = prepare_devices()
with TestRun.step("Start cache."): with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, CacheMode.WT, force=True) cache = casadm.start_cache(cache_dev, CacheMode.WT, force=True)
core = cache.add_core(core_disk) cache.add_core(core_disk)
with TestRun.step("Unplug cache device."): with TestRun.step("Unplug cache device."):
cache_disk.unplug() cache_disk.unplug()
@ -227,7 +225,7 @@ def test_zero_metadata_dirty_shutdown():
with TestRun.step("Start cache (expect to fail)."): with TestRun.step("Start cache (expect to fail)."):
try: try:
cache = casadm.start_cache(cache_dev, CacheMode.WT) casadm.start_cache(cache_dev, CacheMode.WT)
TestRun.LOGGER.error("Starting cache should fail!") TestRun.LOGGER.error("Starting cache should fail!")
except CmdException: except CmdException:
TestRun.LOGGER.info("Start cache failed as expected.") TestRun.LOGGER.info("Start cache failed as expected.")
@ -249,7 +247,7 @@ def test_zero_metadata_dirty_shutdown():
with TestRun.step("Start cache."): with TestRun.step("Start cache."):
try: try:
cache = casadm.start_cache(cache_dev, CacheMode.WT) casadm.start_cache(cache_dev, CacheMode.WT)
TestRun.LOGGER.info("Cache started successfully.") TestRun.LOGGER.info("Cache started successfully.")
except CmdException: except CmdException:
TestRun.LOGGER.error("Start cache failed.") TestRun.LOGGER.error("Start cache failed.")