Minor test description and names refactor
Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
parent d4de219fec
commit ba7d907775
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -28,11 +28,10 @@ block_sizes = [1, 2, 4, 5, 8, 16, 32, 64, 128]
 @pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.nand]))
 def test_support_different_io_size(cache_mode):
     """
-    title: OpenCAS supports different IO sizes
-    description: |
-        OpenCAS supports IO of size in rage from 512b to 128K
+    title: Support for different I/O sizes
+    description: Verify support for I/O of size in range from 512B to 128KiB
     pass_criteria:
-      - No IO errors
+      - No I/O errors
     """

     with TestRun.step("Prepare cache and core devices"):
@@ -47,7 +46,7 @@ def test_support_different_io_size(cache_mode):
         )
         core = cache.add_core(core_disk.partitions[0])

-    with TestRun.step("Load the default ioclass config file"):
+    with TestRun.step("Load the default io class config file"):
         cache.load_io_class(opencas_ioclass_conf_path)

     with TestRun.step("Create a filesystem on the core device and mount it"):
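A minimal sketch of the block-size sweep this test performs, composed only of Fio wrapper calls that appear verbatim elsewhere in this diff (the job name and sizes are illustrative, not taken from the test):

    for block_size in [Size(512, Unit.Byte), Size(4, Unit.KibiByte), Size(128, Unit.KibiByte)]:
        fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)
        fio_job = fio.add_job("io_size_sweep")  # hypothetical job name
        fio_job.target(core.path)
        fio_job.size(Size(10, Unit.MebiByte))
        fio.run()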
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -30,20 +30,20 @@ mountpoint = "/mnt"
 @pytest.mark.CI
 def test_cas_version():
     """
-    title: Test for CAS version
+    title: Test for version number
     description:
-        Check if CAS print version cmd returns consistent version with version file
+        Check if version printed by cmd returns value consistent with version file
     pass criteria:
-      - casadm version command succeeds
-      - versions from cmd and file in /var/lib/opencas/cas_version are consistent
+      - Version command succeeds
+      - Versions from cmd and file in /var/lib/opencas/cas_version are consistent
     """

-    with TestRun.step("Read cas version using casadm cmd"):
+    with TestRun.step("Read version using casadm cmd"):
         output = casadm.print_version(output_format=OutputFormat.csv)
         cmd_version = output.stdout
         cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]]

-    with TestRun.step(f"Read cas version from {version_file_path} location"):
+    with TestRun.step(f"Read version from {version_file_path} location"):
         file_read = read_file(version_file_path).split("\n")
         file_cas_version = next(
             (line.split("=")[1] for line in file_read if "CAS_VERSION=" in line)
@@ -51,20 +51,20 @@ def test_cas_version():

     with TestRun.step("Compare cmd and file versions"):
         if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions):
-            TestRun.LOGGER.error(f"Cmd and file versions doesn`t match")
+            TestRun.LOGGER.error("Cmd and file versions don't match")


 @pytest.mark.CI
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
 def test_negative_start_cache():
     """
-    title: Test start cache negative on cache device
+    title: Negative test for starting cache
     description:
-        Check for negative cache start scenarios
+        Check starting cache using the same device or cache ID twice
     pass criteria:
       - Cache start succeeds
-      - Fails to start cache on the same device with another id
-      - Fails to start cache on another partition with the same id
+      - Starting cache on the same device with another ID fails
+      - Starting cache on another partition with the same ID fails
     """

     with TestRun.step("Prepare cache device"):
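The version comparison above reduces to a few lines; a self-contained sketch with made-up data (the CSV layout and the KEY=VALUE file format are inferred from the parsing code in this hunk):

    csv_output = "Name,Version\nCAS Cache Kernel Module,24.09.0\nCAS CLI Utility,24.09.0"
    file_content = "CAS_VERSION=24.09.0"

    cmd_cas_versions = [row.split(",")[1] for row in csv_output.split("\n")[1:]]
    file_cas_version = next(
        line.split("=")[1] for line in file_content.split("\n") if "CAS_VERSION=" in line
    )
    assert all(file_cas_version == version for version in cmd_cas_versions)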
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -65,10 +65,10 @@ def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy):
         cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
         set_cleaning_policy_params(cache, cleaning_policy)

-    with TestRun.step("Check for running CAS cleaner"):
+    with TestRun.step("Check for running cleaner process"):
         output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
         if output.exit_code != 0:
-            TestRun.fail("CAS cleaner process is not running!")
+            TestRun.fail("Cleaner process is not running!")

     with TestRun.step(f"Add {cores_count} cores to the cache"):
         cores = [cache.add_core(partition) for partition in core_dev.partitions]
@@ -133,10 +133,10 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
         cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
         set_cleaning_policy_params(cache, cleaning_policy)

-    with TestRun.step("Check for running CAS cleaner"):
+    with TestRun.step("Check for running cleaner process"):
         output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
         if output.exit_code != 0:
-            TestRun.fail("CAS cleaner process is not running!")
+            TestRun.fail("Cleaner process is not running!")

     with TestRun.step(f"Add {cores_count} cores to the cache"):
         cores = [cache.add_core(partition) for partition in core_dev.partitions]
@@ -193,12 +193,12 @@ def set_cleaning_policy_params(cache, cleaning_policy):

            if current_acp_params.wake_up_time != acp_params.wake_up_time:
                failed_params += (
-                    f"Wake Up time is {current_acp_params.wake_up_time}, "
+                    f"Wake up time is {current_acp_params.wake_up_time}, "
                    f"should be {acp_params.wake_up_time}\n"
                )
            if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
                failed_params += (
-                    f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
+                    f"Flush max buffers is {current_acp_params.flush_max_buffers}, "
                    f"should be {acp_params.flush_max_buffers}\n"
                )
            TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
@@ -215,22 +215,22 @@ def set_cleaning_policy_params(cache, cleaning_policy):
            failed_params = ""
            if current_alru_params.wake_up_time != alru_params.wake_up_time:
                failed_params += (
-                    f"Wake Up time is {current_alru_params.wake_up_time}, "
+                    f"Wake up time is {current_alru_params.wake_up_time}, "
                    f"should be {alru_params.wake_up_time}\n"
                )
            if current_alru_params.staleness_time != alru_params.staleness_time:
                failed_params += (
-                    f"Staleness Time is {current_alru_params.staleness_time}, "
+                    f"Staleness time is {current_alru_params.staleness_time}, "
                    f"should be {alru_params.staleness_time}\n"
                )
            if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
                failed_params += (
-                    f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
+                    f"Flush max buffers is {current_alru_params.flush_max_buffers}, "
                    f"should be {alru_params.flush_max_buffers}\n"
                )
            if current_alru_params.activity_threshold != alru_params.activity_threshold:
                failed_params += (
-                    f"Activity Threshold is {current_alru_params.activity_threshold}, "
+                    f"Activity threshold is {current_alru_params.activity_threshold}, "
                    f"should be {alru_params.activity_threshold}\n"
                )
            TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
@@ -245,9 +245,9 @@ def check_cleaning_policy_operation(
        case CleaningPolicy.alru:
            if core_writes_before_wait_for_cleaning != Size.zero():
                TestRun.LOGGER.error(
-                    "CAS cleaner started to clean dirty data right after IO! "
+                    "Cleaner process started to clean dirty data right after I/O! "
                    "According to ALRU parameters set in this test cleaner should "
-                    "wait 10 seconds after IO before cleaning dirty data"
+                    "wait 10 seconds after I/O before cleaning dirty data"
                )
            if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
                TestRun.LOGGER.error(
@@ -266,9 +266,9 @@ def check_cleaning_policy_operation(
        case CleaningPolicy.acp:
            if core_writes_before_wait_for_cleaning == Size.zero():
                TestRun.LOGGER.error(
-                    "CAS cleaner did not start cleaning dirty data right after IO! "
+                    "Cleaner process did not start cleaning dirty data right after I/O! "
                    "According to ACP policy cleaner should start "
-                    "cleaning dirty data right after IO"
+                    "cleaning dirty data right after I/O"
                )
            if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
                TestRun.LOGGER.error(
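The ALRU/ACP expectations verified above reduce to one rule per policy; a sketch using the same Size.zero() comparison as the test (the helper name is hypothetical):

    def cleaning_start_is_correct(policy, core_writes_right_after_io):
        if policy == CleaningPolicy.alru:
            # ALRU must stay idle right after I/O and start only after its configured delay
            return core_writes_right_after_io == Size.zero()
        if policy == CleaningPolicy.acp:
            # ACP must begin flushing dirty data immediately
            return core_writes_right_after_io != Size.zero()
        return True  # NOP: no automatic cleaning expected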
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -153,7 +153,7 @@ def test_concurrent_caches_flush(cache_mode: CacheMode):
     """
     title: Flush multiple caches simultaneously.
     description: |
-        CAS should successfully flush multiple caches if there is already other flush in progress.
+        Check for flushing multiple caches if there is already other flush in progress.
     pass_criteria:
       - No system crash.
       - Flush for each cache should finish successfully.
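A hedged sketch of what "simultaneously" means in this test: one flush per cache, launched while the others are still in progress (the ThreadPoolExecutor usage is illustrative; cache.flush_cache() mirrors the framework API but is an assumption here):

    from concurrent.futures import ThreadPoolExecutor

    def flush_all_caches(caches):
        # each flush starts while the other flushes are still running
        with ThreadPoolExecutor(max_workers=len(caches)) as pool:
            list(pool.map(lambda cache: cache.flush_cache(), caches))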
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -46,7 +46,7 @@ def test_cache_stop_and_load(cache_mode):
     """
     title: Test for stopping and loading cache back with dynamic cache mode switching.
     description: |
-        Validate the ability of the CAS to switch cache modes at runtime and
+        Validate the ability to switch cache modes at runtime and
         check if all of them are working properly after switching and
         after stopping and reloading cache back.
         Check also other parameters consistency after reload.
@@ -138,10 +138,8 @@ def test_cache_stop_and_load(cache_mode):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
     """
-    title: Test for dynamic cache mode switching during IO.
-    description: |
-        Validate the ability of CAS to switch cache modes
-        during working IO on CAS device.
+    title: Test for dynamic cache mode switching during I/O.
+    description: Validate the ability to switch cache modes during I/O on exported object.
     pass_criteria:
       - Cache mode is switched without errors.
     """
@@ -182,7 +180,7 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mo
     ):
         cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)

-    with TestRun.step(f"Check if cache mode has switched properly during IO"):
+    with TestRun.step("Check if cache mode has switched properly during I/O"):
         cache_mode_after_switch = cache.get_cache_mode()
         if cache_mode_after_switch != cache_mode_2:
             TestRun.fail(
@@ -229,7 +227,7 @@ def run_io_and_verify(cache, core, io_mode):
            ):
                TestRun.fail(
                    "Write-Back cache mode is not working properly! "
-                    "There should be some writes to CAS device and none to the core"
+                    "There should be some writes to exported object and none to the core"
                )
        case CacheMode.PT:
            if (
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -18,11 +18,11 @@ def test_remove_multilevel_core():
     """
     title: Test of the ability to remove a core used in a multilevel cache.
     description: |
-        Negative test if OpenCAS does not allow to remove a core when the related exported object
+        Negative test for removing a core when the related exported object
         is used as a core device for another cache instance.
     pass_criteria:
       - No system crash.
-      - OpenCAS does not allow removing a core used in a multilevel cache instance.
+      - Removing a core used in a multilevel cache instance is forbidden.
     """

     with TestRun.step("Prepare cache and core devices"):
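A sketch of the negative check this test performs; CmdException appears elsewhere in this diff, while the remove_core call and the cache names are assumptions:

    try:
        cache_level_1.remove_core(core_id=1)  # its exported object backs cache_level_2
        TestRun.fail("Removing a core used by a multilevel cache should be forbidden")
    except CmdException:
        TestRun.LOGGER.info("Core removal was blocked, as expected")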
@@ -57,7 +57,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
     with TestRun.step("Disable udev"):
         Udev.disable()

-    with TestRun.step(f"Start cache in Write-Back"):
+    with TestRun.step(f"Start cache in Write-Back cache mode"):
         cache_disk = TestRun.disks["cache"]
         core_disk = TestRun.disks["core"]
         cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
@@ -105,7 +105,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):

     with TestRun.step(
         "Write random number of 4k block requests to each stream and check if all "
-        "writes were sent in pass-through mode"
+        "writes were sent in pass-through"
     ):
         core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk])
         random.shuffle(offsets)
@@ -170,7 +170,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
     with TestRun.step("Reset core statistics counters"):
         core.reset_counters()

-    with TestRun.step("Run FIO on core device"):
+    with TestRun.step("Run fio on core device"):
         stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
         sequential_streams = streams_seq_rand[0]
         random_streams = streams_seq_rand[1]
@@ -216,7 +216,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
       - No system crash
     """

-    with TestRun.step(f"Disable udev"):
+    with TestRun.step("Disable udev"):
         Udev.disable()

     with TestRun.step("Create filesystem on core device"):
@@ -231,7 +231,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
     with TestRun.step("Mount core"):
         core.mount(mount_point=mount_point)

-    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB"):
+    with TestRun.step("Set sequential cutoff policy to always and threshold to 20MiB"):
         core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
         core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))

@@ -279,7 +279,7 @@ def run_dd(target_path, count, seek):
     TestRun.LOGGER.info(f"dd command:\n{dd}")
     output = dd.run()
     if output.exit_code != 0:
-        raise CmdException("Error during IO", output)
+        raise CmdException("Error during I/O", output)


 def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache):
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -40,15 +40,14 @@ class VerifyType(Enum):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
     """
-    title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
+    title: Functional sequential cutoff test with multiple cores
     description: |
-        Testing if amount of data written to cache after sequential writes for different
-        sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4
-        cores and random IO against the last core, is correct.
+        Test checking if data is cached properly with sequential cutoff "always" policy
+        when sequential and random I/O is running to multiple cores.
     pass_criteria:
       - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off threshold for three first cores.
-      - Amount of written blocks to cache is equal to io size run against last core.
+        with sequential cutoff threshold for three first cores.
+      - Amount of written blocks to cache is equal to I/O size run against last core.
     """

     with TestRun.step("Prepare cache and core devices"):
@@ -76,7 +75,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
         )
         core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]

-    with TestRun.step("Set sequential cut-off parameters for all cores"):
+    with TestRun.step("Set sequential cutoff parameters for all cores"):
         writes_before_list = []
         fio_additional_size = Size(10, Unit.Blocks4096)
         thresholds_list = [
@@ -96,7 +95,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
             core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
             core.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Prepare sequential IO against first three cores"):
+    with TestRun.step("Prepare sequential I/O against first three cores"):
         block_size = Size(4, Unit.KibiByte)
         fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)

@@ -107,7 +106,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
             fio_job.target(core.path)
             writes_before_list.append(core.get_statistics().block_stats.cache.writes)

-    with TestRun.step("Prepare random IO against the last core"):
+    with TestRun.step("Prepare random I/O against the last core"):
         fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
         fio_job.size(io_sizes_list[-1])
         fio_job.read_write(io_type_last)
@@ -117,7 +116,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
     with TestRun.step("Run fio against all cores"):
         fio.run()

-    with TestRun.step("Verify writes to cache count after IO"):
+    with TestRun.step("Verify writes to cache count after I/O"):
         margins = [
             min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
             for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
@@ -159,17 +158,16 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
 @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cache_line_size):
+def test_seq_cutoff_multi_core_cpu_pinned(cache_mode, io_type, io_type_last, cache_line_size):
     """
-    title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
+    title: Functional sequential cutoff test with multiple cores and cpu pinned I/O
     description: |
-        Testing if amount of data written to cache after sequential writes for different
-        sequential cut-off thresholds on each core, while running sequential IO, pinned,
-        on 3 out of 4 cores and random IO against the last core, is correct.
+        Test checking if data is cached properly with sequential cutoff "always" policy
+        when sequential and random cpu pinned I/O is running to multiple cores.
     pass_criteria:
       - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off threshold for three first cores.
-      - Amount of written blocks to cache is equal to io size run against last core.
+        with sequential cutoff threshold for three first cores.
+      - Amount of written blocks to cache is equal to I/O size run against last core.
     """

     with TestRun.step("Partition cache and core devices"):
@@ -198,7 +196,7 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
         )
         core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]

-    with TestRun.step(f"Set sequential cut-off parameters for all cores"):
+    with TestRun.step("Set sequential cutoff parameters for all cores"):
         writes_before_list = []
         fio_additional_size = Size(10, Unit.Blocks4096)
         thresholds_list = [
@@ -218,7 +216,9 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
             core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
             core.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Prepare sequential IO against first three cores"):
+    with TestRun.step(
+        "Prepare sequential I/O against first three cores and random I/O against the last one"
+    ):
         fio = (
             Fio()
             .create_command()
@@ -244,10 +244,10 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
         fio_job.target(core_list[-1].path)
         writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)

-    with TestRun.step("Running IO against all cores"):
+    with TestRun.step("Running I/O against all cores"):
         fio.run()

-    with TestRun.step("Verifying writes to cache count after IO"):
+    with TestRun.step("Verifying writes to cache count after I/O"):
         for core, writes, threshold, io_size in zip(
             core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
         ):
@@ -282,16 +282,14 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
     """
-    title: Sequential cut-off tests for writes and reads for 'never', 'always' and 'full' policies
+    title: Functional test for sequential cutoff threshold parameter
     description: |
-        Testing if amount of data written to cache after sequential writes and reads for different
-        sequential cut-off policies with cache configured with different cache line size
-        is valid for sequential cut-off threshold parameter, assuming that cache occupancy
-        doesn't reach 100% during test.
+        Check if data is cached properly according to sequential cutoff policy and
+        threshold parameter
     pass_criteria:
-      - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off parameter in case of 'always' policy.
-      - Amount of written blocks to cache is at least equal io size in case of 'never' and 'full'
+      - Amount of blocks written to cache is less than or equal to amount set
+        with sequential cutoff parameter in case of 'always' policy.
+      - Amount of blocks written to cache is at least equal to io size in case of 'never' and 'full'
        policy.
     """

@@ -326,13 +324,13 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
     )
     io_size = (threshold + fio_additional_size).align_down(0x1000)

-    with TestRun.step(f"Setting cache sequential cut off policy mode to {policy}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy mode to {policy}"):
         cache.set_seq_cutoff_policy(policy)

-    with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
         cache.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Prepare sequential IO against core"):
+    with TestRun.step("Prepare sequential I/O against core"):
         sync()
         writes_before = core.get_statistics().block_stats.cache.writes
         fio = (
@@ -364,16 +362,15 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
     """
-    title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
+    title: Functional test for sequential cutoff threshold parameter and 'full' policy
     description: |
-        Testing if amount of data written to cache after sequential io against fully occupied
-        cache for 'full' sequential cut-off policy with cache configured with different cache
-        line sizes is valid for sequential cut-off threshold parameter.
+        Check if data is cached properly according to sequential cutoff 'full' policy and given
+        threshold parameter
     pass_criteria:
       - Amount of written blocks to cache is big enough to fill cache when 'never' sequential
-        cut-off policy is set
+        cutoff policy is set
       - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off parameter in case of 'full' policy.
+        with sequential cutoff parameter in case of 'full' policy.
     """

     with TestRun.step("Partition cache and core devices"):
@@ -407,10 +404,10 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
     )
     io_size = (threshold + fio_additional_size).align_down(0x1000)

-    with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.never}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.never}"):
         cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

-    with TestRun.step("Prepare sequential IO against core"):
+    with TestRun.step("Prepare sequential I/O against core"):
         sync()
         fio = (
             Fio()
@@ -432,13 +429,13 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
             f"Cache occupancy is too small: {occupancy_percentage}, expected at least 95%"
         )

-    with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.full}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.full}"):
         cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)

-    with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
         cache.set_seq_cutoff_threshold(threshold)

-    with TestRun.step(f"Running sequential IO ({io_dir})"):
+    with TestRun.step(f"Running sequential I/O ({io_dir})"):
         sync()
         writes_before = core.get_statistics().block_stats.cache.writes
         fio = (
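The 'always' policy criterion above can be stated compactly; a sketch built around the margin computed in the test (the helper name is hypothetical):

    def verify_always_policy(writes_to_cache, threshold, margin):
        # with 'always', sequential I/O beyond the threshold goes pass-through,
        # so cache writes must stay within threshold plus the promotion margin
        if writes_to_cache > threshold + margin:
            raise AssertionError(
                f"{writes_to_cache} written to cache, expected at most {threshold + margin}"
            )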
@@ -1,13 +1,13 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

 import pytest

 from api.cas import casadm
-from api.cas.cache_config import CacheMode
+from api.cas.cache_config import CacheMode, CacheModeTrait
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.udev import Udev
@@ -20,16 +20,14 @@ dd_count = 100

 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WA, CacheMode.WB])
+@pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertRead))
 @pytest.mark.CI()
 def test_ci_read(cache_mode):
     """
-    title: Verification test for write mode: write around
-    description: Verify if write mode: write around, works as expected and cache only reads
-        and does not cache write
+    title: Verification test for caching reads in various cache modes
+    description: Check if reads are properly cached in various cache modes
     pass criteria:
-      - writes are not cached
-      - reads are cached
+      - Reads are cached
     """

     with TestRun.step("Prepare partitions"):
@@ -45,7 +43,7 @@ def test_ci_read(cache_mode):
     with TestRun.step("Disable udev"):
         Udev.disable()

-    with TestRun.step(f"Start cache with cache_mode={cache_mode}"):
+    with TestRun.step(f"Start cache in {cache_mode} cache mode"):
         cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
                                    cache_mode=cache_mode)
         casadm.add_core(cache, core_device)
@@ -99,6 +97,13 @@ def test_ci_read(cache_mode):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 @pytest.mark.CI()
 def test_ci_write_around_write():
+    """
+    title: Verification test for writes in Write-Around cache mode
+    description: Validate I/O statistics after writing to exported object in Write-Around cache mode
+    pass criteria:
+      - Writes are not cached
+      - After inserting writes to core, data is read from core and not from cache
+    """
     with TestRun.step("Prepare partitions"):
         cache_device = TestRun.disks["cache"]
         core_device = TestRun.disks["core"]
@@ -112,7 +117,7 @@ def test_ci_write_around_write():
     with TestRun.step("Disable udev"):
         Udev.disable()

-    with TestRun.step("Start CAS Linux in Write Around mode"):
+    with TestRun.step("Start cache in Write-Around mode"):
         cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
                                    cache_mode=CacheMode.WA)
         casadm.add_core(cache, core_device)
@@ -183,14 +188,14 @@ def test_ci_write_around_write():
         else:
             TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta_1} != 0")

-    with TestRun.step("Verify that reads propagated to core"):
+    with TestRun.step("Verify that data was read from core"):
         read_core_delta_2 = read_core_2 - read_core_1
         if read_core_delta_2 == data_write:
             TestRun.LOGGER.info(f"Reads from core: {read_core_delta_2} == {data_write}")
         else:
             TestRun.LOGGER.error(f"Reads from core: {read_core_delta_2} != {data_write}")

-    with TestRun.step("Verify that reads did not occur on cache"):
+    with TestRun.step("Verify that data was not read from cache"):
         read_cache_delta_2 = read_cache_2 - read_cache_1
         if read_cache_delta_2.value == 0:
             TestRun.LOGGER.info(f"Reads from cache: {read_cache_delta_2} == 0")
@@ -203,6 +208,14 @@ def test_ci_write_around_write():
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 @pytest.mark.CI()
 def test_ci_write_through_write():
+    """
+    title: Verification test for Write-Through cache mode
+    description: |
+        Validate if reads and writes are cached properly for cache in Write-Through mode
+    pass criteria:
+      - Writes are inserted to cache and core
+      - Reads are not cached
+    """
     with TestRun.step("Prepare partitions"):
         cache_device = TestRun.disks["cache"]
         core_device = TestRun.disks["core"]
@@ -216,7 +229,7 @@ def test_ci_write_through_write():
     with TestRun.step("Disable udev"):
         Udev.disable()

-    with TestRun.step("Start CAS Linux in Write Through mode"):
+    with TestRun.step("Start cache in Write-Through mode"):
         cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
                                    cache_mode=CacheMode.WT)
         casadm.add_core(cache, core_device)
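CacheMode.with_traits(CacheModeTrait.InsertRead), used in the parametrization above, selects every cache mode that inserts reads; a simplified model of the idea (the trait table below is an assumption for illustration and omits modes not shown in this diff):

    from enum import Flag, auto

    class CacheModeTrait(Flag):
        InsertRead = auto()
        InsertWrite = auto()

    traits = {  # assumed mapping, for illustration only
        "WT": CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite,
        "WB": CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite,
        "WA": CacheModeTrait.InsertRead,
        "PT": CacheModeTrait(0),
    }

    def with_traits(required):
        return [mode for mode, trait in traits.items() if required in trait]

    # with_traits(CacheModeTrait.InsertRead) -> ["WT", "WB", "WA"]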
@@ -25,51 +25,51 @@ from test_tools.memory import disable_memory_affecting_functions, get_mem_free,
 @pytest.mark.os_dependent
 def test_insufficient_memory_for_cas_module():
     """
-    title: Negative test for the ability of CAS to load the kernel module with insufficient memory.
+    title: Load CAS kernel module with insufficient memory
     description: |
-        Check that the CAS kernel module won’t be loaded if enough memory is not available
+        Negative test for the ability to load the CAS kernel module with insufficient memory.
     pass_criteria:
-      - CAS module cannot be loaded with not enough memory.
-      - Loading CAS with not enough memory returns error.
+      - CAS kernel module cannot be loaded with not enough memory.
+      - Loading CAS kernel module with not enough memory returns error.
     """

     with TestRun.step("Disable caching and memory over-committing"):
         disable_memory_affecting_functions()
         drop_caches()

-    with TestRun.step("Measure memory usage without OpenCAS module"):
+    with TestRun.step("Measure memory usage without CAS kernel module"):
         if is_kernel_module_loaded(CasModule.cache.value):
             unload_kernel_module(CasModule.cache.value)
         available_mem_before_cas = get_mem_free()

-    with TestRun.step("Load CAS module"):
+    with TestRun.step("Load CAS kernel module"):
         load_kernel_module(CasModule.cache.value)

-    with TestRun.step("Measure memory usage with CAS module"):
+    with TestRun.step("Measure memory usage with CAS kernel module"):
         available_mem_with_cas = get_mem_free()
         memory_used_by_cas = available_mem_before_cas - available_mem_with_cas
         TestRun.LOGGER.info(
-            f"OpenCAS module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
+            f"CAS kernel module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
         )

-    with TestRun.step("Unload CAS module"):
+    with TestRun.step("Unload CAS kernel module"):
         unload_kernel_module(CasModule.cache.value)

     with TestRun.step("Allocate memory, leaving not enough memory for CAS module"):
         memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4))
         allocate_memory(memory_to_leave)
         TestRun.LOGGER.info(
-            f"Memory left for OpenCAS module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
+            f"Memory left for CAS kernel module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
         )

     with TestRun.step(
-        "Try to load OpenCAS module and check if correct error message is printed on failure"
+        "Try to load CAS kernel module and check if correct error message is printed on failure"
     ):
         output = load_kernel_module(CasModule.cache.value)
         if output.stderr and output.exit_code != 0:
-            TestRun.LOGGER.info(f"Cannot load OpenCAS module as expected.\n{output.stderr}")
+            TestRun.LOGGER.info(f"Cannot load CAS kernel module as expected.\n{output.stderr}")
         else:
-            TestRun.LOGGER.error("Loading OpenCAS module successfully finished, but should fail.")
+            TestRun.LOGGER.error("Loading CAS kernel module successfully finished, but should fail.")


 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@@ -118,3 +118,4 @@ def test_attach_cache_min_ram():

     with TestRun.step("Unlock RAM memory"):
         unmount_ramfs()
+
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -23,14 +23,14 @@ from test_tools.udev import Udev
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cleaning_policy():
     """
-    Title: test_cleaning_policy
+    Title: Basic test for cleaning policy
     description: |
-        The test is to see if dirty data will be removed from the Cache after changing the
-        cleaning policy from NOP to one that expects a flush.
+        Verify cleaning behaviour after changing cleaning policy from NOP
+        to one that expects a flush.
     pass_criteria:
-      - Cache is successfully populated with dirty data
-      - Cleaning policy is changed successfully
-      - There is no dirty data after the policy change
+      - Cache is successfully populated with dirty data
+      - Cleaning policy is changed successfully
+      - There is no dirty data after the policy change
     """
     wait_time = 60

test/functional/tests/cli/test_cli_help_and_version.py (new file, 126 lines)
@@ -0,0 +1,126 @@
+#
+# Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import re
+import pytest
+
+from api.cas import casadm
+from api.cas.casadm_params import OutputFormat
+from api.cas.cli_help_messages import *
+from api.cas.cli_messages import check_stderr_msg, check_stdout_msg
+from core.test_run import TestRun
+
+
+@pytest.mark.parametrize("shortcut", [True, False])
+def test_cli_help(shortcut):
+    """
+    title: Test for 'help' command.
+    description: Test if help for commands displays correct output.
+    pass_criteria:
+      - Proper help displays for every command.
+    """
+    TestRun.LOGGER.info("Run 'help' for every 'casadm' command.")
+    output = casadm.help(shortcut)
+    check_stdout_msg(output, casadm_help)
+
+    output = TestRun.executor.run("casadm" + (" -S" if shortcut else " --start-cache")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, start_cache_help)
+
+    output = TestRun.executor.run("casadm" + (" -T" if shortcut else " --stop-cache")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, stop_cache_help)
+
+    output = TestRun.executor.run("casadm" + (" -X" if shortcut else " --set-param")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, set_params_help)
+
+    output = TestRun.executor.run("casadm" + (" -G" if shortcut else " --get-param")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, get_params_help)
+
+    output = TestRun.executor.run("casadm" + (" -Q" if shortcut else " --set-cache-mode")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, set_cache_mode_help)
+
+    output = TestRun.executor.run("casadm" + (" -A" if shortcut else " --add-core")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, add_core_help)
+
+    output = TestRun.executor.run("casadm" + (" -R" if shortcut else " --remove-core")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, remove_core_help)
+
+    output = TestRun.executor.run("casadm" + " --remove-detached"
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, remove_detached_help)
+
+    output = TestRun.executor.run("casadm" + (" -L" if shortcut else " --list-caches")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, list_caches_help)
+
+    output = TestRun.executor.run("casadm" + (" -P" if shortcut else " --stats")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, stats_help)
+
+    output = TestRun.executor.run("casadm" + (" -Z" if shortcut else " --reset-counters")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, reset_counters_help)
+
+    output = TestRun.executor.run("casadm" + (" -F" if shortcut else " --flush-cache")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, flush_cache_help)
+
+    output = TestRun.executor.run("casadm" + (" -C" if shortcut else " --io-class")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, ioclass_help)
+
+    output = TestRun.executor.run("casadm" + (" -V" if shortcut else " --version")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, version_help)
+
+    output = TestRun.executor.run("casadm" + (" -H" if shortcut else " --help")
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, help_help)
+
+    output = TestRun.executor.run("casadm" + " --standby"
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, standby_help)
+
+    output = TestRun.executor.run("casadm" + " --zero-metadata"
+                                  + (" -H" if shortcut else " --help"))
+    check_stdout_msg(output, zero_metadata_help)
+
+    output = TestRun.executor.run("casadm" + (" -Y" if shortcut else " --yell")
+                                  + (" -H" if shortcut else " --help"))
+    check_stderr_msg(output, unrecognized_stderr)
+    check_stdout_msg(output, unrecognized_stdout)
+
+
+@pytest.mark.parametrize("output_format", OutputFormat)
+@pytest.mark.parametrize("shortcut", [True, False])
+def test_cli_version(shortcut, output_format):
+    """
+    title: Test for 'version' command.
+    description: Test if 'version' command displays correct output.
+    pass_criteria:
+      - Proper component names displayed in table with component versions.
+    """
+    TestRun.LOGGER.info("Check version.")
+    output = casadm.print_version(output_format, shortcut).stdout
+    TestRun.LOGGER.info(output)
+    if not names_in_output(output) or not versions_in_output(output):
+        TestRun.fail("'Version' command failed.")
+
+
+def names_in_output(output):
+    return ("CAS Cache Kernel Module" in output
+            and "CAS CLI Utility" in output)
+
+
+def versions_in_output(output):
+    version_pattern = re.compile(r"(\d){2}\.(\d){2}\.(\d)\.(\d){4}.(\S)")
+    return len(version_pattern.findall(output)) == 2
@@ -14,7 +14,7 @@ def test_cli_help_spelling():
     title: Spelling test for 'help' command
     description: Validates spelling of 'help' in CLI
     pass criteria:
-      - no spelling mistakes are found
+      - No spelling mistakes are found
     """

     cas_dictionary = os.path.join(TestRun.usr.repo_dir, "test", "functional", "resources")
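Each invocation in the new help test is assembled from a short/long flag pair plus a help switch; the repeated pattern generalizes to a small helper (the function name is hypothetical):

    def casadm_help_cmd(short_flag, long_flag, shortcut):
        return ("casadm"
                + (f" {short_flag}" if shortcut else f" {long_flag}")
                + (" -H" if shortcut else " --help"))

    # casadm_help_cmd("-S", "--start-cache", shortcut=True) == "casadm -S -H"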
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -20,12 +20,11 @@ from test_tools.dd import Dd
 @pytest.mark.parametrize("purge_target", ["cache", "core"])
 def test_purge(purge_target):
     """
-    title: Call purge without and with `--script` switch
-    description: |
-        Check if purge is called only when `--script` switch is used.
+    title: Basic test for purge command
+    description: Check purge command behaviour with and without '--script' flag
     pass_criteria:
-      - casadm returns an error when `--script` is missing
-      - cache is wiped when purge command is used properly
+      - Error returned when '--script' is missing
+      - Cache is wiped when purge command is used properly
     """
     with TestRun.step("Prepare devices"):
         cache_device = TestRun.disks["cache"]
@@ -41,7 +40,7 @@ def test_purge(purge_target):
         cache = casadm.start_cache(cache_device, force=True)
         core = casadm.add_core(cache, core_device)

-    with TestRun.step("Trigger IO to prepared cache instance"):
+    with TestRun.step("Trigger I/O to prepared cache instance"):
         dd = (
             Dd()
             .input("/dev/zero")
@@ -79,8 +78,3 @@ def test_purge(purge_target):
         if cache.get_statistics().usage_stats.occupancy.get_value() != 0:
-            TestRun.fail(f"{cache.get_statistics().usage_stats.occupancy.get_value()}")
+            TestRun.fail(f"Purge {purge_target} should invalidate all cache lines!")
-
-    with TestRun.step(
-        f"Stop cache"
-    ):
-        casadm.stop_all_caches()
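A sketch of the two invocations the purge test contrasts; '--script' comes from the docstring, while the exact purge command spelling and the cache ID are assumptions:

    output = TestRun.executor.run("casadm --purge-cache --cache-id 1")
    # expected: non-zero exit code because '--script' is missing

    output = TestRun.executor.run("casadm --script --purge-cache --cache-id 1")
    # expected: success, after which cache occupancy drops to zero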
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies
+# Copyright(c) 2024-2025 Huawei Technologies
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -44,8 +44,8 @@ def test_standby_neg_cli_params():
     """
     title: Verifying parameters for starting a standby cache instance
     description: |
-        Try executing the standby init command with required arguments missing or
-        disallowed arguments present.
+        Try executing the standby init command with required arguments missing or
+        disallowed arguments present.
     pass_criteria:
       - The execution is unsuccessful for all improper argument combinations
       - A proper error message is displayed for unsuccessful executions
@@ -272,8 +272,8 @@ def test_start_neg_cli_flags():
     """
     title: Blocking standby start command with mutually exclusive flags
     description: |
-        Try executing the standby start command with different combinations of mutually
-        exclusive flags.
+        Try executing the standby start command with different combinations of mutually
+        exclusive flags.
     pass_criteria:
       - The command execution is unsuccessful for commands with mutually exclusive flags
       - A proper error message is displayed
@@ -327,7 +327,7 @@ def test_activate_without_detach():
     """
     title: Activate cache without detach command.
     description: |
-        Try activate passive cache without detach command before activation.
+        Try to activate passive cache without detach command before activation.
     pass_criteria:
       - The activation is not possible
       - The cache remains in Standby state after unsuccessful activation
@@ -390,14 +390,14 @@ def test_activate_without_detach():
 @pytest.mark.require_disk("standby_cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
 def test_activate_neg_cache_line_size():
     """
-    title: Blocking cache with mismatching cache line size activation.
-    description: |
-        Try restoring cache operations from a replicated cache that was initialized
-        with different cache line size than the original cache.
-    pass_criteria:
-      - The activation is cancelled
-      - The cache remains in Standby detached state after an unsuccessful activation
-      - A proper error message is displayed
+    title: Blocking cache with mismatching cache line size activation.
+    description: |
+        Try restoring cache operations from a replicated cache that was initialized
+        with different cache line size than the original cache.
+    pass_criteria:
+      - The activation is cancelled
+      - The cache remains in Standby detached state after an unsuccessful activation
+      - A proper error message is displayed
     """

     with TestRun.step("Prepare cache devices"):
@@ -593,7 +593,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("caches"))
 def test_standby_activate_with_corepool():
     """
-    title: Activate standby cache instance with corepool
+    title: Activate standby cache instance with core pool
     description: |
         Activation of standby cache with core taken from core pool
     pass_criteria:
@@ -652,12 +652,12 @@ def test_standby_activate_with_corepool():
 @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 def test_standby_start_stop(cache_line_size):
     """
-    title: Start and stop a standby cache instance.
-    description: Test if cache can be started in standby state and stopped without activation.
-    pass_criteria:
-      - A cache exported object appears after starting a cache in standby state
-      - The data written to the cache exported object committed on the underlying cache device
-      - The cache exported object disappears after stopping the standby cache instance
+    title: Start and stop a standby cache instance.
+    description: Test if cache can be started in standby state and stopped without activation.
+    pass_criteria:
+      - A cache exported object appears after starting a cache in standby state
+      - The data written to the cache exported object committed on the underlying cache device
+      - The cache exported object disappears after stopping the standby cache instance
     """
     with TestRun.step("Prepare a cache device"):
         cache_size = Size(500, Unit.MebiByte)
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -21,12 +21,12 @@ CORE_ID_RANGE = (0, 4095)
 @pytest.mark.parametrize("shortcut", [True, False])
 def test_cli_start_stop_default_id(shortcut):
     """
-    title: Test for starting a cache with a default ID - short and long command
-    description: |
-        Start a new cache with a default ID and then stop this cache.
-    pass_criteria:
-      - The cache has successfully started with default ID
-      - The cache has successfully stopped
+    title: Test for starting a cache with a default ID - short and long command
+    description: |
+        Start a new cache with a default ID and then stop this cache.
+    pass_criteria:
+      - The cache has successfully started with default ID
+      - The cache has successfully stopped
     """
     with TestRun.step("Prepare the device for the cache."):
         cache_device = TestRun.disks['cache']
@@ -62,12 +62,12 @@ def test_cli_start_stop_default_id(shortcut):
 @pytest.mark.parametrize("shortcut", [True, False])
 def test_cli_start_stop_custom_id(shortcut):
     """
-    title: Test for starting a cache with a custom ID - short and long command
-    description: |
-        Start a new cache with a random ID (from allowed pool) and then stop this cache.
-    pass_criteria:
-      - The cache has successfully started with a custom ID
-      - The cache has successfully stopped
+    title: Test for starting a cache with a custom ID - short and long command
+    description: |
+        Start a new cache with a random ID (from allowed pool) and then stop this cache.
+    pass_criteria:
+      - The cache has successfully started with a custom ID
+      - The cache has successfully stopped
     """
     with TestRun.step("Prepare the device for the cache."):
         cache_device = TestRun.disks['cache']
@@ -106,13 +106,13 @@ def test_cli_start_stop_custom_id(shortcut):
 @pytest.mark.parametrize("shortcut", [True, False])
 def test_cli_add_remove_default_id(shortcut):
     """
-    title: Test for adding and removing a core with a default ID - short and long command
-    description: |
-        Start a new cache and add a core to it without passing a core ID as an argument
-        and then remove this core from the cache.
-    pass_criteria:
-      - The core is added to the cache with a default ID
-      - The core is successfully removed from the cache
+    title: Test for adding and removing a core with a default ID - short and long command
+    description: |
+        Start a new cache and add a core to it without passing a core ID as an argument
+        and then remove this core from the cache.
+    pass_criteria:
+      - The core is added to the cache with a default ID
+      - The core is successfully removed from the cache
     """
     with TestRun.step("Prepare the devices."):
         cache_disk = TestRun.disks['cache']
@@ -157,13 +157,13 @@ def test_cli_add_remove_default_id(shortcut):
 @pytest.mark.parametrize("shortcut", [True, False])
 def test_cli_add_remove_custom_id(shortcut):
     """
-    title: Test for adding and removing a core with a custom ID - short and long command
-    description: |
-        Start a new cache and add a core to it with passing a random core ID
-        (from allowed pool) as an argument and then remove this core from the cache.
-    pass_criteria:
-      - The core is added to the cache with a default ID
-      - The core is successfully removed from the cache
+    title: Test for adding and removing a core with a custom ID - short and long command
+    description: |
+        Start a new cache and add a core to it with passing a random core ID
+        (from allowed pool) as an argument and then remove this core from the cache.
+    pass_criteria:
+      - The core is added to the cache with a default ID
+      - The core is successfully removed from the cache
     """
     with TestRun.step("Prepare the devices."):
         cache_disk = TestRun.disks['cache']
@@ -209,13 +209,13 @@ def test_cli_add_remove_custom_id(shortcut):
 @pytest.mark.parametrize("shortcut", [True, False])
 def test_cli_load_and_force(shortcut):
     """
-    title: Test if it is possible to use start command with 'load' and 'force' flag at once
-    description: |
-        Try to start cache with 'load' and 'force' options at the same time
-        and check if it is not possible to do
-    pass_criteria:
-      - Start cache command with both 'force' and 'load' options should fail
-      - Proper message should be received
+    title: Test if it is possible to use start command with 'load' and 'force' flag at once
+    description: |
+        Try to start cache with 'load' and 'force' options at the same time
+        and check if it is not possible to do
+    pass_criteria:
+      - Start cache command with both 'force' and 'load' options should fail
+      - Proper message should be received
     """
     with TestRun.step("Prepare cache."):
         cache_device = TestRun.disks['cache']
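The default-ID lifecycle these tests exercise, using only calls that appear verbatim elsewhere in this diff (starting a cache or adding a core without an ID argument lets the driver assign the default one):

    cache = casadm.start_cache(cache_device, force=True)  # no cache ID passed
    core = casadm.add_core(cache, core_device)            # no core ID passed
    casadm.stop_all_caches()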
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -24,9 +24,10 @@ from test_tools.udev import Udev
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cleaning_policy():
     """
-    Title: test manual casadm flush
-    description: | The test is to see if dirty data will be removed from the Cache
-        or Core after using the casadm command with the corresponding parameter.
+    title: Test for manual cache and core flushing
+    description: |
+        The test is to see if dirty data will be removed from the cache
+        or core after using the casadm command with the corresponding parameter.
     pass_criteria:
       - Cache and core are filled with dirty data.
       - After cache and core flush dirty data are cleared.
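The manual flush operations this test drives; the wrapper method names mirror the framework style used throughout this diff but are assumptions:

    cache.flush_cache()  # flush dirty data for the whole cache
    core.flush_core()    # flush dirty data for a single core only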
@ -1,6 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -22,26 +22,26 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_default_params():
    """
    title: Default sequential cut-off threshold & policy test
    title: Default sequential cutoff threshold & policy test
    description: Test if proper default threshold and policy are set after cache start
    pass_criteria:
    - "Full" shall be default sequential cut-off policy
    - There shall be default 1MiB (1024kiB) value for sequential cut-off threshold
    - "Full" shall be default sequential cutoff policy
    - There shall be default 1MiB (1024KiB) value for sequential cutoff threshold
    """
    with TestRun.step("Test prepare (start cache and add core)"):
        cache, cores = prepare()

    with TestRun.step("Getting sequential cut-off parameters"):
    with TestRun.step("Getting sequential cutoff parameters"):
        params = cores[0].get_seq_cut_off_parameters()

    with TestRun.step("Check if proper sequential cut off policy is set as a default"):
    with TestRun.step("Check if proper sequential cutoff policy is set as a default"):
        if params.policy != SeqCutOffPolicy.DEFAULT:
            TestRun.fail(f"Wrong sequential cut off policy set: {params.policy} "
            TestRun.fail(f"Wrong sequential cutoff policy set: {params.policy} "
                         f"should be {SeqCutOffPolicy.DEFAULT}")

    with TestRun.step("Check if proper sequential cut off threshold is set as a default"):
    with TestRun.step("Check if proper sequential cutoff threshold is set as a default"):
        if params.threshold != SEQ_CUT_OFF_THRESHOLD_DEFAULT:
            TestRun.fail(f"Wrong sequential cut off threshold set: {params.threshold} "
            TestRun.fail(f"Wrong sequential cutoff threshold set: {params.threshold} "
                         f"should be {SEQ_CUT_OFF_THRESHOLD_DEFAULT}")
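
For reference, the same defaults can be inspected by hand with casadm's get-param command. This is a sketch only: the flag spelling follows casadm's get-param/seq-cutoff usage, and the cache and core IDs are assumed to be 1:

from core.test_run import TestRun

output = TestRun.executor.run(
    "casadm --get-param --name seq-cutoff --cache-id 1 --core-id 1 --output-format csv"
)
# The output is expected to report the "full" policy and a 1024 KiB threshold.
TestRun.LOGGER.info(output.stdout)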
@ -50,32 +50,31 @@ def test_seq_cutoff_default_params():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_core(policy):
    """
    title: Sequential cut-off policy set/get test for core
    title: Sequential cutoff policy set/get test for core
    description: |
        Test if CAS is setting proper sequential cut-off policy for core and
        returns previously set value
        Verify if it is possible to set and get a sequential cutoff policy per core
    pass_criteria:
    - Sequential cut-off policy obtained from get-param command for the first core must be
    - Sequential cutoff policy obtained from get-param command for the first core must be
      the same as the one used in set-param command
    - Sequential cut-off policy obtained from get-param command for the second core must be
    - Sequential cutoff policy obtained from get-param command for the second core must be
      proper default value
    """
    with TestRun.step("Test prepare (start cache and add 2 cores)"):
        cache, cores = prepare(cores_count=2)

    with TestRun.step(f"Setting core sequential cut off policy mode to {policy}"):
    with TestRun.step(f"Setting core sequential cutoff policy mode to {policy}"):
        cores[0].set_seq_cutoff_policy(policy)

    with TestRun.step("Check if proper sequential cut off policy was set for the first core"):
    with TestRun.step("Check if proper sequential cutoff policy was set for the first core"):
        if cores[0].get_seq_cut_off_policy() != policy:
            TestRun.fail(f"Wrong sequential cut off policy set: "
            TestRun.fail(f"Wrong sequential cutoff policy set: "
                         f"{cores[0].get_seq_cut_off_policy()} "
                         f"should be {policy}")

    with TestRun.step("Check if proper default sequential cut off policy was set for the "
    with TestRun.step("Check if proper default sequential cutoff policy was set for the "
                      "second core"):
        if cores[1].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
            TestRun.fail(f"Wrong default sequential cut off policy: "
            TestRun.fail(f"Wrong default sequential cutoff policy: "
                         f"{cores[1].get_seq_cut_off_policy()} "
                         f"should be {SeqCutOffPolicy.DEFAULT}")
@ -85,24 +84,23 @@ def test_seq_cutoff_set_get_policy_core(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_cache(policy):
    """
    title: Sequential cut-off policy set/get test for cache
    title: Sequential cutoff policy set/get test for cache
    description: |
        Test if CAS is setting proper sequential cut-off policy for whole cache and
        returns previously set value
        Verify if it is possible to set and get a sequential cutoff policy for the whole cache
    pass_criteria:
    - Sequential cut-off policy obtained from get-param command for each of 3 cores must be the
    - Sequential cutoff policy obtained from get-param command for each of 3 cores must be the
      same as the one used in set-param command for cache
    """
    with TestRun.step("Test prepare (start cache and add 3 cores)"):
        cache, cores = prepare(cores_count=3)

    with TestRun.step(f"Setting sequential cut off policy mode {policy} for cache"):
    with TestRun.step(f"Setting sequential cutoff policy mode {policy} for cache"):
        cache.set_seq_cutoff_policy(policy)

    for i in TestRun.iteration(range(0, len(cores)), "Verifying if proper policy was set"):
        with TestRun.step(f"Check if proper sequential cut off policy was set for core"):
        with TestRun.step(f"Check if proper sequential cutoff policy was set for core"):
            if cores[i].get_seq_cut_off_policy() != policy:
                TestRun.fail(f"Wrong core sequential cut off policy: "
                TestRun.fail(f"Wrong core sequential cutoff policy: "
                             f"{cores[i].get_seq_cut_off_policy()} "
                             f"should be {policy}")
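
The two tests above differ only in granularity, which a short sketch makes explicit. Method and enum names follow the framework API visible in this diff; prepare() is the local test helper, and the specific enum members chosen here are illustrative:

cache, cores = prepare(cores_count=2)

# Per-core override: affects only the first core.
cores[0].set_seq_cutoff_policy(SeqCutOffPolicy.never)

# Cache-wide setting: propagated to every core of this cache.
cache.set_seq_cutoff_policy(SeqCutOffPolicy.always)

for core in cores:
    if core.get_seq_cut_off_policy() != SeqCutOffPolicy.always:
        TestRun.fail("Cache-wide sequential cutoff policy was not applied.")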
@ -111,23 +109,25 @@ def test_seq_cutoff_set_get_policy_cache(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_policy_load():
    """
    title: Sequential cut-off policy set/get test with cache load between
    title: Sequential cutoff policy set/get test with cache load between
    description: |
        Set each possible policy for different core, stop cache, test if after cache load
        sequential cut-off policy value previously set is being loaded correctly for each core.
        Set each possible policy for different core, stop cache, test if after cache load
        sequential cutoff policy value previously set is being loaded correctly for each core.
    pass_criteria:
    - Sequential cut-off policy obtained from get-param command after cache load
    - Sequential cutoff policy obtained from get-param command after cache load
      must be the same as the one used in set-param command before cache stop
    - Sequential cut-off policy loaded for the last core should be the default one
    - Sequential cutoff policy loaded for the last core should be the default one
    """
    with TestRun.step(f"Test prepare (start cache and add {len(SeqCutOffPolicy) + 1} cores)"):
        # Create as many cores as many possible policies including default one
        cache, cores = prepare(cores_count=len(SeqCutOffPolicy) + 1)
        policies = [policy for policy in SeqCutOffPolicy]

    for i, core in TestRun.iteration(enumerate(cores[:-1]), "Set all possible policies "
                                     "except the default one"):
        with TestRun.step(f"Setting cache sequential cut off policy mode to "
    for i, core in TestRun.iteration(
            enumerate(cores[:-1]),
            "Set all possible policies except the default one"
    ):
        with TestRun.step(f"Setting cache sequential cutoff policy mode to "
                          f"{policies[i]}"):
            cores[i].set_seq_cutoff_policy(policies[i])

@ -140,18 +140,21 @@ def test_seq_cutoff_policy_load():
    with TestRun.step("Getting cores from loaded cache"):
        cores = loaded_cache.get_core_devices()

    for i, core in TestRun.iteration(enumerate(cores[:-1]), "Check if proper policies have "
                                     "been loaded"):
        with TestRun.step(f"Check if proper sequential cut off policy was loaded"):
    for i, core in TestRun.iteration(
            enumerate(cores[:-1]),
            "Check if proper policies have been loaded"
    ):
        with TestRun.step(f"Check if proper sequential cutoff policy was loaded"):
            if cores[i].get_seq_cut_off_policy() != policies[i]:
                TestRun.fail(f"Wrong sequential cut off policy loaded: "
                TestRun.fail(f"Wrong sequential cutoff policy loaded: "
                             f"{cores[i].get_seq_cut_off_policy()} "
                             f"should be {policies[i]}")

    with TestRun.step(f"Check if proper (default) sequential cut off policy was loaded for "
                      f"last core"):
    with TestRun.step(
            "Check if proper (default) sequential cutoff policy was loaded for last core"
    ):
        if cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
            TestRun.fail(f"Wrong sequential cut off policy loaded: "
            TestRun.fail(f"Wrong sequential cutoff policy loaded: "
                         f"{cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy()} "
                         f"should be {SeqCutOffPolicy.DEFAULT}")
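
The persistence round trip this test relies on can be condensed as follows. casadm.load_cache() and get_core_devices() appear elsewhere in this diff; cache.stop(), the enum member and the cache_dev variable (the device the cache was started on) are assumptions about the framework API:

cores[0].set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.stop()

# Settings live in the on-disk cache metadata, so they survive a stop/load cycle.
loaded_cache = casadm.load_cache(cache_dev)
loaded_cores = loaded_cache.get_core_devices()
if loaded_cores[0].get_seq_cut_off_policy() != SeqCutOffPolicy.never:
    TestRun.fail("Sequential cutoff policy was not restored on cache load.")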
@ -163,16 +166,16 @@ def test_seq_cutoff_policy_load():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_invalid_threshold(threshold):
    """
    title: Invalid sequential cut-off threshold test
    description: Test if CAS is allowing setting invalid sequential cut-off threshold
    title: Invalid sequential cutoff threshold test
    description: Verify that setting an invalid sequential cutoff threshold is rejected
    pass_criteria:
    - Setting invalid sequential cut-off threshold should be blocked
    - Setting invalid sequential cutoff threshold should be blocked
    """
    with TestRun.step("Test prepare (start cache and add core)"):
        cache, cores = prepare()
        _threshold = Size(threshold, Unit.KibiByte)

    with TestRun.step(f"Setting cache sequential cut off threshold to out of range value: "
    with TestRun.step(f"Setting cache sequential cutoff threshold to out of range value: "
                      f"{_threshold}"):
        command = set_param_cutoff_cmd(
            cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
@ -182,7 +185,7 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
                not in output.stderr:
            TestRun.fail("Command succeeded (should fail)!")

    with TestRun.step(f"Setting cache sequential cut off threshold "
    with TestRun.step(f"Setting cache sequential cutoff threshold "
                      f"to value passed as a float"):
        command = set_param_cutoff_cmd(
            cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
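
The negative set-param calls built above follow this shape. The threshold keyword of set_param_cutoff_cmd is an assumption inferred from its usage in this diff:

command = set_param_cutoff_cmd(
    cache_id=str(cache.cache_id),
    core_id=str(cores[0].core_id),
    threshold="-1",  # out-of-range value, expected to be rejected
)
output = TestRun.executor.run(command)
if output.exit_code == 0:
    TestRun.fail("Command succeeded (should fail)!")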
@ -199,25 +202,23 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_threshold(threshold):
    """
    title: Sequential cut-off threshold set/get test
    description: |
        Test if CAS is setting proper sequential cut-off threshold and returns
        previously set value
    title: Sequential cutoff threshold set/get test
    description: Verify setting and getting value of sequential cutoff threshold
    pass_criteria:
    - Sequential cut-off threshold obtained from get-param command must be the same as
    - Sequential cutoff threshold obtained from get-param command must be the same as
      the one used in set-param command
    """
    with TestRun.step("Test prepare (start cache and add core)"):
        cache, cores = prepare()
        _threshold = Size(threshold, Unit.KibiByte)

    with TestRun.step(f"Setting cache sequential cut off threshold to "
    with TestRun.step(f"Setting cache sequential cutoff threshold to "
                      f"{_threshold}"):
        cores[0].set_seq_cutoff_threshold(_threshold)

    with TestRun.step("Check if proper sequential cut off threshold was set"):
    with TestRun.step("Check if proper sequential cutoff threshold was set"):
        if cores[0].get_seq_cut_off_threshold() != _threshold:
            TestRun.fail(f"Wrong sequential cut off threshold set: "
            TestRun.fail(f"Wrong sequential cutoff threshold set: "
                         f"{cores[0].get_seq_cut_off_threshold()} "
                         f"should be {_threshold}")
@ -228,20 +229,17 @@ def test_seq_cutoff_set_get_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_threshold_load(threshold):
    """
    title: Sequential cut-off threshold set/get test with cache load between
    description: |
        Test if after cache load sequential cut-off threshold
        value previously set is being loaded correctly. Each of possible sequential cut-off
        policies is set for different core.
    title: Sequential cutoff threshold after loading cache
    description: Verify sequential cutoff threshold value after reloading the cache.
    pass_criteria:
    - Sequential cut-off threshold obtained from get-param command after cache load
    - Sequential cutoff threshold obtained from get-param command after cache load
      must be the same as the one used in set-param command before cache stop
    """
    with TestRun.step("Test prepare (start cache and add core)"):
        cache, cores = prepare()
        _threshold = Size(threshold, Unit.KibiByte)

    with TestRun.step(f"Setting cache sequential cut off threshold to "
    with TestRun.step(f"Setting cache sequential cutoff threshold to "
                      f"{_threshold}"):
        cores[0].set_seq_cutoff_threshold(_threshold)

@ -254,9 +252,9 @@ def test_seq_cutoff_threshold_load(threshold):
    with TestRun.step("Getting core from loaded cache"):
        cores_load = loaded_cache.get_core_devices()

    with TestRun.step("Check if proper sequential cut off policy was loaded"):
    with TestRun.step("Check if proper sequential cutoff threshold was loaded"):
        if cores_load[0].get_seq_cut_off_threshold() != _threshold:
            TestRun.fail(f"Wrong sequential cut off threshold set: "
            TestRun.fail(f"Wrong sequential cutoff threshold set: "
                         f"{cores_load[0].get_seq_cut_off_threshold()} "
                         f"should be {_threshold}")

@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -36,15 +36,15 @@ number_of_checks = 10
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_seqcutoff_params(cache_mode):
def test_set_get_seq_cutoff_params(cache_mode):
    """
    title: Test for setting and reading sequential cut-off parameters.
    description: |
        Verify that it is possible to set and read all available sequential cut-off
        parameters using casadm --set-param and --get-param options.
    pass_criteria:
    - All sequential cut-off parameters are set to given values.
    - All sequential cut-off parameters displays proper values.
    title: Test for setting and reading sequential cutoff parameters.
    description: |
        Verify that it is possible to set and read all available sequential cutoff
        parameters using casadm --set-param and --get-param options.
    pass_criteria:
    - All sequential cutoff parameters are set to given values.
    - All sequential cutoff parameters display proper values.
    """

    with TestRun.step("Partition cache and core devices"):
@ -56,60 +56,60 @@ def test_set_get_seqcutoff_params(cache_mode):
    ):
        caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)

    with TestRun.step("Check sequential cut-off default parameters"):
        default_seqcutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
    with TestRun.step("Check sequential cutoff default parameters"):
        default_seq_cutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
        for i in range(caches_count):
            for j in range(cores_per_cache):
                check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
                check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)

    with TestRun.step(
        "Set new random values for sequential cut-off parameters for one core only"
        "Set new random values for sequential cutoff parameters for one core only"
    ):
        for check in range(number_of_checks):
            random_seqcutoff_params = new_seqcutoff_parameters_random_values()
            cores[0][0].set_seq_cutoff_parameters(random_seqcutoff_params)
            random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
            cores[0][0].set_seq_cutoff_parameters(random_seq_cutoff_params)

            # Check changed parameters for first core:
            check_seqcutoff_parameters(cores[0][0], random_seqcutoff_params)
            check_seq_cutoff_parameters(cores[0][0], random_seq_cutoff_params)

            # Check default parameters for other cores:
            for j in range(1, cores_per_cache):
                check_seqcutoff_parameters(cores[0][j], default_seqcutoff_params)
                check_seq_cutoff_parameters(cores[0][j], default_seq_cutoff_params)
            for i in range(1, caches_count):
                for j in range(cores_per_cache):
                    check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
                    check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)

    with TestRun.step(
        "Set new random values for sequential cut-off parameters "
        "Set new random values for sequential cutoff parameters "
        "for all cores within given cache instance"
    ):
        for check in range(number_of_checks):
            random_seqcutoff_params = new_seqcutoff_parameters_random_values()
            caches[0].set_seq_cutoff_parameters(random_seqcutoff_params)
            random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
            caches[0].set_seq_cutoff_parameters(random_seq_cutoff_params)

            # Check changed parameters for first cache instance:
            for j in range(cores_per_cache):
                check_seqcutoff_parameters(cores[0][j], random_seqcutoff_params)
                check_seq_cutoff_parameters(cores[0][j], random_seq_cutoff_params)

            # Check default parameters for other cache instances:
            for i in range(1, caches_count):
                for j in range(cores_per_cache):
                    check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
                    check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)

    with TestRun.step(
        "Set new random values for sequential cut-off parameters for all cores"
        "Set new random values for sequential cutoff parameters for all cores"
    ):
        for check in range(number_of_checks):
            seqcutoff_params = []
            seq_cutoff_params = []
            for i in range(caches_count):
                for j in range(cores_per_cache):
                    random_seqcutoff_params = new_seqcutoff_parameters_random_values()
                    seqcutoff_params.append(random_seqcutoff_params)
                    cores[i][j].set_seq_cutoff_parameters(random_seqcutoff_params)
                    random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
                    seq_cutoff_params.append(random_seq_cutoff_params)
                    cores[i][j].set_seq_cutoff_parameters(random_seq_cutoff_params)
            for i in range(caches_count):
                for j in range(cores_per_cache):
                    check_seqcutoff_parameters(
                        cores[i][j], seqcutoff_params[i * cores_per_cache + j]
                    check_seq_cutoff_parameters(
                        cores[i][j], seq_cutoff_params[i * cores_per_cache + j]
                    )
@ -119,14 +119,14 @@ def test_set_get_seqcutoff_params(cache_mode):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_cleaning_params(cache_mode, cleaning_policy):
    """
    title: Test for setting and reading cleaning parameters.
    description: |
        Verify that it is possible to set and read all available cleaning
        parameters for all cleaning policies using casadm --set-param and
        --get-param options.
    pass_criteria:
    - All cleaning parameters are set to given values.
    - All cleaning parameters displays proper values.
    title: Test for setting and reading cleaning parameters.
    description: |
        Verify that it is possible to set and read all available cleaning
        parameters for all cleaning policies using casadm --set-param and
        --get-param options.
    pass_criteria:
    - All cleaning parameters are set to given values.
    - All cleaning parameters display proper values.
    """

    with TestRun.step("Partition cache and core devices"):
@ -231,7 +231,7 @@ def cache_prepare(cache_mode, cache_dev, core_dev):
    return caches, cores


def new_seqcutoff_parameters_random_values():
def new_seq_cutoff_parameters_random_values():
    return SeqCutOffParameters(
        threshold=Size(random.randrange(1, 1000000), Unit.KibiByte),
        policy=random.choice(list(SeqCutOffPolicy)),
@ -275,27 +275,27 @@ def new_cleaning_parameters_random_values(cleaning_policy):
    return cleaning_params


def check_seqcutoff_parameters(core, seqcutoff_params):
    current_seqcutoff_params = core.get_seq_cut_off_parameters()
def check_seq_cutoff_parameters(core, seq_cutoff_params):
    current_seq_cutoff_params = core.get_seq_cut_off_parameters()
    failed_params = ""
    if current_seqcutoff_params.threshold != seqcutoff_params.threshold:
    if current_seq_cutoff_params.threshold != seq_cutoff_params.threshold:
        failed_params += (
            f"Threshold is {current_seqcutoff_params.threshold}, "
            f"should be {seqcutoff_params.threshold}\n"
            f"Threshold is {current_seq_cutoff_params.threshold}, "
            f"should be {seq_cutoff_params.threshold}\n"
        )
    if current_seqcutoff_params.policy != seqcutoff_params.policy:
    if current_seq_cutoff_params.policy != seq_cutoff_params.policy:
        failed_params += (
            f"Policy is {current_seqcutoff_params.policy}, "
            f"should be {seqcutoff_params.policy}\n"
            f"Policy is {current_seq_cutoff_params.policy}, "
            f"should be {seq_cutoff_params.policy}\n"
        )
    if current_seqcutoff_params.promotion_count != seqcutoff_params.promotion_count:
    if current_seq_cutoff_params.promotion_count != seq_cutoff_params.promotion_count:
        failed_params += (
            f"Promotion count is {current_seqcutoff_params.promotion_count}, "
            f"should be {seqcutoff_params.promotion_count}\n"
            f"Promotion count is {current_seq_cutoff_params.promotion_count}, "
            f"should be {seq_cutoff_params.promotion_count}\n"
        )
    if failed_params:
        TestRun.LOGGER.error(
            f"Sequential cut-off parameters are not correct "
            f"Sequential cutoff parameters are not correct "
            f"for {core.path}:\n{failed_params}"
        )
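
A usage sketch pairing the two helpers defined above: draw random parameters, apply them to a single core, then verify what is read back. The cores[i][j] indexing follows the per-cache core lists built by cache_prepare() in this file:

random_params = new_seq_cutoff_parameters_random_values()
cores[0][0].set_seq_cutoff_parameters(random_params)
check_seq_cutoff_parameters(cores[0][0], random_params)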
@ -306,12 +306,12 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
        failed_params = ""
        if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
            failed_params += (
                f"Wake Up time is {current_cleaning_params.wake_up_time}, "
                f"Wake up time is {current_cleaning_params.wake_up_time}, "
                f"should be {cleaning_params.wake_up_time}\n"
            )
        if current_cleaning_params.staleness_time != cleaning_params.staleness_time:
            failed_params += (
                f"Staleness Time is {current_cleaning_params.staleness_time}, "
                f"Staleness time is {current_cleaning_params.staleness_time}, "
                f"should be {cleaning_params.staleness_time}\n"
            )
        if (
@ -319,7 +319,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
            != cleaning_params.flush_max_buffers
        ):
            failed_params += (
                f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, "
                f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
                f"should be {cleaning_params.flush_max_buffers}\n"
            )
        if (
@ -327,7 +327,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
            != cleaning_params.activity_threshold
        ):
            failed_params += (
                f"Activity Threshold is {current_cleaning_params.activity_threshold}, "
                f"Activity threshold is {current_cleaning_params.activity_threshold}, "
                f"should be {cleaning_params.activity_threshold}\n"
            )
        if failed_params:
@ -341,7 +341,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
        failed_params = ""
        if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
            failed_params += (
                f"Wake Up time is {current_cleaning_params.wake_up_time}, "
                f"Wake up time is {current_cleaning_params.wake_up_time}, "
                f"should be {cleaning_params.wake_up_time}\n"
            )
        if (
@ -349,7 +349,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
            != cleaning_params.flush_max_buffers
        ):
            failed_params += (
                f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, "
                f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
                f"should be {cleaning_params.flush_max_buffers}\n"
            )
        if failed_params:

@ -1,6 +1,6 @@
#
# Copyright(c) 2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import time
@ -24,20 +24,19 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_negative_cases():
    """
    title: Test for '--zero-metadata' negative cases.
    description: |
        Test for '--zero-metadata' scenarios with expected failures.
    pass_criteria:
    - Zeroing metadata without '--force' failed when run on cache.
    - Zeroing metadata with '--force' failed when run on cache.
    - Zeroing metadata failed when run on system drive.
    - Load cache command failed after successfully zeroing metadata on the cache device.
    title: Test for '--zero-metadata' negative cases.
    description: Test for '--zero-metadata' scenarios with expected failures.
    pass_criteria:
    - Zeroing metadata without '--force' failed when run on cache.
    - Zeroing metadata with '--force' failed when run on cache.
    - Zeroing metadata failed when run on system drive.
    - Load cache command failed after successfully zeroing metadata on the cache device.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_dev, core_dev, cache_disk = prepare_devices()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, force=True)
        casadm.start_cache(cache_dev, force=True)

    with TestRun.step("Try to zero metadata and validate error message."):
        try:
@ -75,7 +74,7 @@ def test_zero_metadata_negative_cases():

    with TestRun.step("Load cache."):
        try:
            cache = casadm.load_cache(cache_dev)
            casadm.load_cache(cache_dev)
            TestRun.LOGGER.error("Loading cache should fail.")
        except CmdException:
            TestRun.LOGGER.info("Loading cache failed as expected.")
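
The zero-metadata flow these tests revolve around can be sketched as below. The casadm.zero_metadata() wrapper, its force flag, the no_data_flush stop option and the CmdException import path are assumptions based on the casadm API used throughout this diff:

from api.cas import casadm
from connection.utils.output import CmdException  # import path assumed

cache.stop(no_data_flush=True)  # leave metadata (and dirty data) behind
try:
    casadm.zero_metadata(cache_dev)  # refused while dirty data is present
except CmdException:
    casadm.zero_metadata(cache_dev, force=True)  # succeeds when forced
# After zeroing, loading the cache is expected to fail; a fresh start is required.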
@ -86,12 +85,11 @@ def test_zero_metadata_negative_cases():
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_zero_metadata_filesystem(filesystem):
    """
    title: Test for '--zero-metadata' and filesystem.
    description: |
        Test for '--zero-metadata' on drive with filesystem.
    pass_criteria:
    - Zeroing metadata on device with filesystem failed and did not remove the filesystem.
    - Zeroing metadata on mounted device failed.
    title: Test for '--zero-metadata' and filesystem.
    description: Test for '--zero-metadata' on drive with filesystem.
    pass_criteria:
    - Zeroing metadata on device with filesystem failed and did not remove the filesystem.
    - Zeroing metadata on mounted device failed.
    """
    mount_point = "/mnt"
    with TestRun.step("Prepare devices."):
@ -131,14 +129,14 @@ def test_zero_metadata_filesystem(filesystem):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_dirty_data():
    """
    title: Test for '--zero-metadata' and dirty data scenario.
    description: |
        Test for '--zero-metadata' with and without 'force' option if there are dirty data
        on cache.
    pass_criteria:
    - Zeroing metadata without force failed on cache with dirty data.
    - Zeroing metadata with force ran successfully on cache with dirty data.
    - Cache started successfully after zeroing metadata on cache with dirty data.
    title: Test for '--zero-metadata' and dirty data scenario.
    description: |
        Test for '--zero-metadata' with and without 'force' option if there are dirty data
        on cache.
    pass_criteria:
    - Zeroing metadata without force failed on cache with dirty data.
    - Zeroing metadata with force ran successfully on cache with dirty data.
    - Cache started successfully after zeroing metadata on cache with dirty data.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_dev, core_disk, cache_disk = prepare_devices()
@ -165,7 +163,7 @@ def test_zero_metadata_dirty_data():

    with TestRun.step("Start cache (expect to fail)."):
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WB)
            casadm.start_cache(cache_dev, CacheMode.WB)
        except CmdException:
            TestRun.LOGGER.info("Start cache failed as expected.")

@ -186,7 +184,7 @@ def test_zero_metadata_dirty_data():

    with TestRun.step("Start cache without 'force' option."):
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WB)
            casadm.start_cache(cache_dev, CacheMode.WB)
            TestRun.LOGGER.info("Cache started successfully.")
        except CmdException:
            TestRun.LOGGER.error("Start cache failed.")
@ -196,21 +194,21 @@ def test_zero_metadata_dirty_data():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_zero_metadata_dirty_shutdown():
    """
    title: Test for '--zero-metadata' and dirty shutdown scenario.
    description: |
        Test for '--zero-metadata' with and without 'force' option on cache which had been dirty
        shut down before.
    pass_criteria:
    - Zeroing metadata without force failed on cache after dirty shutdown.
    - Zeroing metadata with force ran successfully on cache after dirty shutdown.
    - Cache started successfully after dirty shutdown and zeroing metadata on cache.
    title: Test for '--zero-metadata' and dirty shutdown scenario.
    description: |
        Test for '--zero-metadata' with and without 'force' option on cache which had been dirty
        shut down before.
    pass_criteria:
    - Zeroing metadata without force failed on cache after dirty shutdown.
    - Zeroing metadata with force ran successfully on cache after dirty shutdown.
    - Cache started successfully after dirty shutdown and zeroing metadata on cache.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_dev, core_disk, cache_disk = prepare_devices()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, CacheMode.WT, force=True)
        core = cache.add_core(core_disk)
        cache.add_core(core_disk)

    with TestRun.step("Unplug cache device."):
        cache_disk.unplug()
@ -227,7 +225,7 @@ def test_zero_metadata_dirty_shutdown():

    with TestRun.step("Start cache (expect to fail)."):
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WT)
            casadm.start_cache(cache_dev, CacheMode.WT)
            TestRun.LOGGER.error("Starting cache should fail!")
        except CmdException:
            TestRun.LOGGER.info("Start cache failed as expected.")
@ -249,7 +247,7 @@ def test_zero_metadata_dirty_shutdown():

    with TestRun.step("Start cache."):
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WT)
            casadm.start_cache(cache_dev, CacheMode.WT)
            TestRun.LOGGER.info("Cache started successfully.")
        except CmdException:
            TestRun.LOGGER.error("Start cache failed.")