Merge pull request #1618 from katlapinka/kasiat/refactor-tests-description
Cleanup tests descriptions, prepare steps and values naming PART-1
commit 4d23c5f586
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -28,11 +28,10 @@ block_sizes = [1, 2, 4, 5, 8, 16, 32, 64, 128]
|
|||||||
@pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.nand]))
|
@pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.nand]))
|
||||||
def test_support_different_io_size(cache_mode):
|
def test_support_different_io_size(cache_mode):
|
||||||
"""
|
"""
|
||||||
title: OpenCAS supports different IO sizes
|
title: Support for different I/O sizes
|
||||||
description: |
|
description: Verify support for I/O of size in range from 512B to 128KiB
|
||||||
OpenCAS supports IO of size in rage from 512b to 128K
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- No IO errors
|
- No I/O errors
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step("Prepare cache and core devices"):
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -30,20 +30,20 @@ mountpoint = "/mnt"
|
|||||||
@pytest.mark.CI
|
@pytest.mark.CI
|
||||||
def test_cas_version():
|
def test_cas_version():
|
||||||
"""
|
"""
|
||||||
title: Test for CAS version
|
title: Test for version number
|
||||||
description:
|
description:
|
||||||
Check if CAS print version cmd returns consistent version with version file
|
Check if the version printed by the casadm command is consistent with the version file
|
||||||
pass criteria:
|
pass criteria:
|
||||||
- casadm version command succeeds
|
- Version command succeeds
|
||||||
- versions from cmd and file in /var/lib/opencas/cas_version are consistent
|
- Versions from cmd and file in /var/lib/opencas/cas_version are consistent
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step("Read cas version using casadm cmd"):
|
with TestRun.step("Read version using casadm cmd"):
|
||||||
output = casadm.print_version(output_format=OutputFormat.csv)
|
output = casadm.print_version(output_format=OutputFormat.csv)
|
||||||
cmd_version = output.stdout
|
cmd_version = output.stdout
|
||||||
cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]]
|
cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]]
|
||||||
|
|
||||||
with TestRun.step(f"Read cas version from {version_file_path} location"):
|
with TestRun.step(f"Read version from {version_file_path} location"):
|
||||||
file_read = read_file(version_file_path).split("\n")
|
file_read = read_file(version_file_path).split("\n")
|
||||||
file_cas_version = next(
|
file_cas_version = next(
|
||||||
(line.split("=")[1] for line in file_read if "CAS_VERSION=" in line)
|
(line.split("=")[1] for line in file_read if "CAS_VERSION=" in line)
|
||||||
@ -51,25 +51,24 @@ def test_cas_version():
|
|||||||
|
|
||||||
with TestRun.step("Compare cmd and file versions"):
|
with TestRun.step("Compare cmd and file versions"):
|
||||||
if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions):
|
if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions):
|
||||||
TestRun.LOGGER.error(f"Cmd and file versions doesn`t match")
|
TestRun.LOGGER.error("Cmd and file versions don't match")
|
||||||
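For reference, the comparison implemented in this test reduces to the following standalone sketch, assuming the CSV layout printed by the version command (header row first, version in the second column) and the KEY=VALUE format of /var/lib/opencas/cas_version shown above:

```python
# Minimal sketch of the version-consistency check (formats assumed as above).
def versions_consistent(cmd_csv: str, version_file: str) -> bool:
    # Second CSV column of every non-header row printed by the version command
    cmd_versions = [line.split(",")[1] for line in cmd_csv.splitlines()[1:] if line]
    # CAS_VERSION=x.y.z entry from the version file
    file_version = next(
        line.split("=")[1]
        for line in version_file.splitlines()
        if line.startswith("CAS_VERSION=")
    )
    return all(version == file_version for version in cmd_versions)
```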
|
|
||||||
|
|
||||||
@pytest.mark.CI
|
@pytest.mark.CI
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
|
||||||
def test_negative_start_cache():
|
def test_negative_start_cache():
|
||||||
"""
|
"""
|
||||||
title: Test start cache negative on cache device
|
title: Negative test for starting cache
|
||||||
description:
|
description:
|
||||||
Check for negative cache start scenarios
|
Check starting cache using the same device or cache ID twice
|
||||||
pass criteria:
|
pass criteria:
|
||||||
- Cache start succeeds
|
- Cache start succeeds
|
||||||
- Fails to start cache on the same device with another id
|
- Starting cache on the same device with another ID fails
|
||||||
- Fails to start cache on another partition with the same id
|
- Starting cache on another partition with the same ID fails
|
||||||
"""
|
"""
|
||||||
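A minimal sketch of the expected-failure pattern this negative test relies on; `casadm.start_cache` and `TestRun` come from the framework used throughout this diff, while treating a generic exception as the failure signal is an assumption:

```python
# Hypothetical sketch: a second start on an already-used device must fail.
from api.cas import casadm
from core.test_run import TestRun

def start_same_device_twice(cache_dev):
    casadm.start_cache(cache_dev, cache_id=1, force=True)
    try:
        # Same device, different ID - expected to be rejected by casadm
        casadm.start_cache(cache_dev, cache_id=2, force=True)
        TestRun.LOGGER.error("Second cache start unexpectedly succeeded")
    except Exception as error:  # framework raises on non-zero exit code (assumed)
        TestRun.LOGGER.info(f"Cache start failed as expected: {error}")
```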
|
|
||||||
with TestRun.step("Prepare cache device"):
|
with TestRun.step("Prepare cache device"):
|
||||||
cache_dev = TestRun.disks["cache"]
|
cache_dev = TestRun.disks["cache"]
|
||||||
|
|
||||||
cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 2)
|
cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 2)
|
||||||
|
|
||||||
cache_dev_1 = cache_dev.partitions[0]
|
cache_dev_1 = cache_dev.partitions[0]
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -65,10 +65,10 @@ def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy):
|
|||||||
cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
|
cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
|
||||||
set_cleaning_policy_params(cache, cleaning_policy)
|
set_cleaning_policy_params(cache, cleaning_policy)
|
||||||
|
|
||||||
with TestRun.step("Check for running CAS cleaner"):
|
with TestRun.step("Check for running cleaner process"):
|
||||||
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
|
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
TestRun.fail("CAS cleaner process is not running!")
|
TestRun.fail("Cleaner process is not running!")
|
||||||
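The same pgrep check appears twice in this file (Write-Back and Write-Through variants) and could be factored into one helper; a sketch using only calls visible in this diff:

```python
# Sketch of a reusable cleaner-process check; pgrep exits non-zero
# when no process matches the given name.
from core.test_run import TestRun

def check_cleaner_running(process_name: str) -> None:
    output = TestRun.executor.run(f"pgrep {process_name}")
    if output.exit_code != 0:
        TestRun.fail("Cleaner process is not running!")
```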
|
|
||||||
with TestRun.step(f"Add {cores_count} cores to the cache"):
|
with TestRun.step(f"Add {cores_count} cores to the cache"):
|
||||||
cores = [cache.add_core(partition) for partition in core_dev.partitions]
|
cores = [cache.add_core(partition) for partition in core_dev.partitions]
|
||||||
@ -133,10 +133,10 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
|
|||||||
cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
|
cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
|
||||||
set_cleaning_policy_params(cache, cleaning_policy)
|
set_cleaning_policy_params(cache, cleaning_policy)
|
||||||
|
|
||||||
with TestRun.step("Check for running CAS cleaner"):
|
with TestRun.step("Check for running cleaner process"):
|
||||||
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
|
output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
TestRun.fail("CAS cleaner process is not running!")
|
TestRun.fail("Cleaner process is not running!")
|
||||||
|
|
||||||
with TestRun.step(f"Add {cores_count} cores to the cache"):
|
with TestRun.step(f"Add {cores_count} cores to the cache"):
|
||||||
cores = [cache.add_core(partition) for partition in core_dev.partitions]
|
cores = [cache.add_core(partition) for partition in core_dev.partitions]
|
||||||
@ -193,12 +193,12 @@ def set_cleaning_policy_params(cache, cleaning_policy):
|
|||||||
|
|
||||||
if current_acp_params.wake_up_time != acp_params.wake_up_time:
|
if current_acp_params.wake_up_time != acp_params.wake_up_time:
|
||||||
failed_params += (
|
failed_params += (
|
||||||
f"Wake Up time is {current_acp_params.wake_up_time}, "
|
f"Wake up time is {current_acp_params.wake_up_time}, "
|
||||||
f"should be {acp_params.wake_up_time}\n"
|
f"should be {acp_params.wake_up_time}\n"
|
||||||
)
|
)
|
||||||
if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
|
if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
|
||||||
failed_params += (
|
failed_params += (
|
||||||
f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
|
f"Flush max buffers is {current_acp_params.flush_max_buffers}, "
|
||||||
f"should be {acp_params.flush_max_buffers}\n"
|
f"should be {acp_params.flush_max_buffers}\n"
|
||||||
)
|
)
|
||||||
TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
|
TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
|
||||||
@ -215,22 +215,22 @@ def set_cleaning_policy_params(cache, cleaning_policy):
|
|||||||
failed_params = ""
|
failed_params = ""
|
||||||
if current_alru_params.wake_up_time != alru_params.wake_up_time:
|
if current_alru_params.wake_up_time != alru_params.wake_up_time:
|
||||||
failed_params += (
|
failed_params += (
|
||||||
f"Wake Up time is {current_alru_params.wake_up_time}, "
|
f"Wake up time is {current_alru_params.wake_up_time}, "
|
||||||
f"should be {alru_params.wake_up_time}\n"
|
f"should be {alru_params.wake_up_time}\n"
|
||||||
)
|
)
|
||||||
if current_alru_params.staleness_time != alru_params.staleness_time:
|
if current_alru_params.staleness_time != alru_params.staleness_time:
|
||||||
failed_params += (
|
failed_params += (
|
||||||
f"Staleness Time is {current_alru_params.staleness_time}, "
|
f"Staleness time is {current_alru_params.staleness_time}, "
|
||||||
f"should be {alru_params.staleness_time}\n"
|
f"should be {alru_params.staleness_time}\n"
|
||||||
)
|
)
|
||||||
if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
|
if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
|
||||||
failed_params += (
|
failed_params += (
|
||||||
f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
|
f"Flush max buffers is {current_alru_params.flush_max_buffers}, "
|
||||||
f"should be {alru_params.flush_max_buffers}\n"
|
f"should be {alru_params.flush_max_buffers}\n"
|
||||||
)
|
)
|
||||||
if current_alru_params.activity_threshold != alru_params.activity_threshold:
|
if current_alru_params.activity_threshold != alru_params.activity_threshold:
|
||||||
failed_params += (
|
failed_params += (
|
||||||
f"Activity Threshold is {current_alru_params.activity_threshold}, "
|
f"Activity threshold is {current_alru_params.activity_threshold}, "
|
||||||
f"should be {alru_params.activity_threshold}\n"
|
f"should be {alru_params.activity_threshold}\n"
|
||||||
)
|
)
|
||||||
TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
|
TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
|
||||||
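The repeated compare-and-report blocks above all follow one pattern and could be collapsed into a loop; a sketch, assuming the parameter objects expose attributes under the names used in this diff:

```python
# Sketch: build the failed-parameters report generically for ALRU/ACP params.
ALRU_PARAM_NAMES = ("wake_up_time", "staleness_time",
                    "flush_max_buffers", "activity_threshold")

def diff_params(current, expected, names=ALRU_PARAM_NAMES) -> str:
    failed_params = ""
    for name in names:
        current_value, expected_value = getattr(current, name), getattr(expected, name)
        if current_value != expected_value:
            label = name.replace("_", " ").capitalize()
            failed_params += f"{label} is {current_value}, should be {expected_value}\n"
    return failed_params
```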
@ -245,9 +245,9 @@ def check_cleaning_policy_operation(
|
|||||||
case CleaningPolicy.alru:
|
case CleaningPolicy.alru:
|
||||||
if core_writes_before_wait_for_cleaning != Size.zero():
|
if core_writes_before_wait_for_cleaning != Size.zero():
|
||||||
TestRun.LOGGER.error(
|
TestRun.LOGGER.error(
|
||||||
"CAS cleaner started to clean dirty data right after IO! "
|
"Cleaner process started to clean dirty data right after I/O! "
|
||||||
"According to ALRU parameters set in this test cleaner should "
|
"According to ALRU parameters set in this test cleaner should "
|
||||||
"wait 10 seconds after IO before cleaning dirty data"
|
"wait 10 seconds after I/O before cleaning dirty data"
|
||||||
)
|
)
|
||||||
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
|
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
|
||||||
TestRun.LOGGER.error(
|
TestRun.LOGGER.error(
|
||||||
@ -266,9 +266,9 @@ def check_cleaning_policy_operation(
|
|||||||
case CleaningPolicy.acp:
|
case CleaningPolicy.acp:
|
||||||
if core_writes_before_wait_for_cleaning == Size.zero():
|
if core_writes_before_wait_for_cleaning == Size.zero():
|
||||||
TestRun.LOGGER.error(
|
TestRun.LOGGER.error(
|
||||||
"CAS cleaner did not start cleaning dirty data right after IO! "
|
"Cleaner process did not start cleaning dirty data right after I/O! "
|
||||||
"According to ACP policy cleaner should start "
|
"According to ACP policy cleaner should start "
|
||||||
"cleaning dirty data right after IO"
|
"cleaning dirty data right after I/O"
|
||||||
)
|
)
|
||||||
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
|
if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
|
||||||
TestRun.LOGGER.error(
|
TestRun.LOGGER.error(
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2020-2021 Intel Corporation
|
# Copyright(c) 2020-2021 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -153,7 +153,7 @@ def test_concurrent_caches_flush(cache_mode: CacheMode):
|
|||||||
"""
|
"""
|
||||||
title: Flush multiple caches simultaneously.
|
title: Flush multiple caches simultaneously.
|
||||||
description: |
|
description: |
|
||||||
CAS should successfully flush multiple caches if there is already other flush in progress.
|
Check flushing of multiple caches when another flush is already in progress.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- No system crash.
|
- No system crash.
|
||||||
- Flush for each cache should finish successfully.
|
- Flush for each cache should finish successfully.
|
||||||
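A sketch of how simultaneous flushes can be driven from the test; the thread-pool approach and the `cache.flush_cache()` entry point are assumptions based on the framework style seen in this diff:

```python
# Hypothetical sketch: flush several caches concurrently and require success.
from concurrent.futures import ThreadPoolExecutor

def flush_concurrently(caches) -> None:
    with ThreadPoolExecutor(max_workers=len(caches)) as pool:
        futures = [pool.submit(cache.flush_cache) for cache in caches]
        for future in futures:
            future.result()  # re-raises if any flush failed
```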
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -46,7 +46,7 @@ def test_cache_stop_and_load(cache_mode):
|
|||||||
"""
|
"""
|
||||||
title: Test for stopping and loading cache back with dynamic cache mode switching.
|
title: Test for stopping and loading cache back with dynamic cache mode switching.
|
||||||
description: |
|
description: |
|
||||||
Validate the ability of the CAS to switch cache modes at runtime and
|
Validate the ability to switch cache modes at runtime and
|
||||||
check if all of them are working properly after switching and
|
check if all of them are working properly after switching and
|
||||||
after stopping and reloading cache back.
|
after stopping and reloading cache back.
|
||||||
Check also other parameters consistency after reload.
|
Check also other parameters consistency after reload.
|
||||||
@ -138,10 +138,8 @@ def test_cache_stop_and_load(cache_mode):
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
|
def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
|
||||||
"""
|
"""
|
||||||
title: Test for dynamic cache mode switching during IO.
|
title: Test for dynamic cache mode switching during I/O.
|
||||||
description: |
|
description: Validate the ability to switch cache modes during I/O on exported object.
|
||||||
Validate the ability of CAS to switch cache modes
|
|
||||||
during working IO on CAS device.
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Cache mode is switched without errors.
|
- Cache mode is switched without errors.
|
||||||
"""
|
"""
|
||||||
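The flow under test reduces to: start I/O in the background, switch the mode, verify. A sketch; `fio.run_in_background()` is an assumption, the remaining calls appear in this diff:

```python
# Sketch of switching the cache mode while fio keeps the device busy.
from core.test_run import TestRun

def switch_mode_during_io(cache, fio, cache_mode_2, flush) -> None:
    fio.run_in_background()  # assumed async-run helper
    cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)
    if cache.get_cache_mode() != cache_mode_2:
        TestRun.fail("Cache mode did not switch properly during I/O")
```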
@ -182,7 +180,7 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mo
|
|||||||
):
|
):
|
||||||
cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)
|
cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)
|
||||||
|
|
||||||
with TestRun.step(f"Check if cache mode has switched properly during IO"):
|
with TestRun.step("Check if cache mode has switched properly during I/O"):
|
||||||
cache_mode_after_switch = cache.get_cache_mode()
|
cache_mode_after_switch = cache.get_cache_mode()
|
||||||
if cache_mode_after_switch != cache_mode_2:
|
if cache_mode_after_switch != cache_mode_2:
|
||||||
TestRun.fail(
|
TestRun.fail(
|
||||||
@ -229,7 +227,7 @@ def run_io_and_verify(cache, core, io_mode):
|
|||||||
):
|
):
|
||||||
TestRun.fail(
|
TestRun.fail(
|
||||||
"Write-Back cache mode is not working properly! "
|
"Write-Back cache mode is not working properly! "
|
||||||
"There should be some writes to CAS device and none to the core"
|
"There should be some writes to exported object and none to the core"
|
||||||
)
|
)
|
||||||
case CacheMode.PT:
|
case CacheMode.PT:
|
||||||
if (
|
if (
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2020-2022 Intel Corporation
|
# Copyright(c) 2020-2022 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -18,11 +18,11 @@ def test_remove_multilevel_core():
|
|||||||
"""
|
"""
|
||||||
title: Test of the ability to remove a core used in a multilevel cache.
|
title: Test of the ability to remove a core used in a multilevel cache.
|
||||||
description: |
|
description: |
|
||||||
Negative test if OpenCAS does not allow to remove a core when the related exported object
|
Negative test for removing a core when the related exported object
|
||||||
is used as a core device for another cache instance.
|
is used as a core device for another cache instance.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- No system crash.
|
- No system crash.
|
||||||
- OpenCAS does not allow removing a core used in a multilevel cache instance.
|
- Removing a core used in a multilevel cache instance is forbidden.
|
||||||
"""
|
"""
|
||||||
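In a multilevel setup the first cache's exported object serves as the core device of a second cache, so removing that core must be rejected while it is in use. A hypothetical sketch; the removal call signature and the failure handling are assumptions:

```python
# Hypothetical sketch of the multilevel topology and the forbidden removal.
def try_remove_multilevel_core(cache_dev_1, cache_dev_2, core_dev):
    cache_1 = casadm.start_cache(cache_dev_1, cache_id=1, force=True)
    core_1 = cache_1.add_core(core_dev)
    cache_2 = casadm.start_cache(cache_dev_2, cache_id=2, force=True)
    cache_2.add_core(core_1)  # exported object of cache_1 backs cache_2
    try:
        cache_1.remove_core(core_1.core_id)  # must fail while in use (assumed call)
        TestRun.LOGGER.error("Removing an in-use multilevel core succeeded")
    except Exception:
        TestRun.LOGGER.info("Core removal forbidden as expected")
```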
|
|
||||||
with TestRun.step("Prepare cache and core devices"):
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2020-2022 Intel Corporation
|
# Copyright(c) 2020-2022 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -57,7 +57,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
|
|||||||
with TestRun.step("Disable udev"):
|
with TestRun.step("Disable udev"):
|
||||||
Udev.disable()
|
Udev.disable()
|
||||||
|
|
||||||
with TestRun.step(f"Start cache in Write-Back"):
|
with TestRun.step(f"Start cache in Write-Back cache mode"):
|
||||||
cache_disk = TestRun.disks["cache"]
|
cache_disk = TestRun.disks["cache"]
|
||||||
core_disk = TestRun.disks["core"]
|
core_disk = TestRun.disks["core"]
|
||||||
cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
|
cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
|
||||||
@ -105,7 +105,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
|
|||||||
|
|
||||||
with TestRun.step(
|
with TestRun.step(
|
||||||
"Write random number of 4k block requests to each stream and check if all "
|
"Write random number of 4k block requests to each stream and check if all "
|
||||||
"writes were sent in pass-through mode"
|
"writes were sent in pass-through"
|
||||||
):
|
):
|
||||||
core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk])
|
core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk])
|
||||||
random.shuffle(offsets)
|
random.shuffle(offsets)
|
||||||
@ -170,7 +170,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
|
|||||||
with TestRun.step("Reset core statistics counters"):
|
with TestRun.step("Reset core statistics counters"):
|
||||||
core.reset_counters()
|
core.reset_counters()
|
||||||
|
|
||||||
with TestRun.step("Run FIO on core device"):
|
with TestRun.step("Run fio on core device"):
|
||||||
stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
|
stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
|
||||||
sequential_streams = streams_seq_rand[0]
|
sequential_streams = streams_seq_rand[0]
|
||||||
random_streams = streams_seq_rand[1]
|
random_streams = streams_seq_rand[1]
|
||||||
@ -216,12 +216,14 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
|
|||||||
- No system crash
|
- No system crash
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step(f"Disable udev"):
|
with TestRun.step("Disable udev"):
|
||||||
Udev.disable()
|
Udev.disable()
|
||||||
|
|
||||||
with TestRun.step("Create filesystem on core device"):
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
cache_disk = TestRun.disks["cache"]
|
cache_disk = TestRun.disks["cache"]
|
||||||
core_disk = TestRun.disks["core"]
|
core_disk = TestRun.disks["core"]
|
||||||
|
|
||||||
|
with TestRun.step("Create filesystem on core device"):
|
||||||
core_disk.create_filesystem(filesystem)
|
core_disk.create_filesystem(filesystem)
|
||||||
|
|
||||||
with TestRun.step("Start cache and add core"):
|
with TestRun.step("Start cache and add core"):
|
||||||
@ -231,7 +233,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
|
|||||||
with TestRun.step("Mount core"):
|
with TestRun.step("Mount core"):
|
||||||
core.mount(mount_point=mount_point)
|
core.mount(mount_point=mount_point)
|
||||||
|
|
||||||
with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB"):
|
with TestRun.step("Set sequential cutoff policy to always and threshold to 20MiB"):
|
||||||
core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
|
core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
|
||||||
core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))
|
core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))
|
||||||
|
|
||||||
@ -279,7 +281,7 @@ def run_dd(target_path, count, seek):
|
|||||||
TestRun.LOGGER.info(f"dd command:\n{dd}")
|
TestRun.LOGGER.info(f"dd command:\n{dd}")
|
||||||
output = dd.run()
|
output = dd.run()
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Error during IO", output)
|
raise CmdException("Error during I/O", output)
|
||||||
|
|
||||||
|
|
||||||
def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache):
|
def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache):
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -40,15 +40,14 @@ class VerifyType(Enum):
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
|
def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
|
||||||
"""
|
"""
|
||||||
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
|
title: Functional sequential cutoff test with multiple cores
|
||||||
description: |
|
description: |
|
||||||
Testing if amount of data written to cache after sequential writes for different
|
Test checking if data is cached properly with sequential cutoff "always" policy
|
||||||
sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4
|
while sequential and random I/O is running against multiple cores.
|
||||||
cores and random IO against the last core, is correct.
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of written blocks to cache is less or equal than amount set
|
||||||
with sequential cut-off threshold for three first cores.
|
with sequential cutoff threshold for three first cores.
|
||||||
- Amount of written blocks to cache is equal to io size run against last core.
|
- Amount of written blocks to cache is equal to I/O size run against last core.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step("Prepare cache and core devices"):
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
@ -76,7 +75,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
|
|||||||
)
|
)
|
||||||
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
|
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
|
||||||
|
|
||||||
with TestRun.step("Set sequential cut-off parameters for all cores"):
|
with TestRun.step("Set sequential cutoff parameters for all cores"):
|
||||||
writes_before_list = []
|
writes_before_list = []
|
||||||
fio_additional_size = Size(10, Unit.Blocks4096)
|
fio_additional_size = Size(10, Unit.Blocks4096)
|
||||||
thresholds_list = [
|
thresholds_list = [
|
||||||
@ -96,7 +95,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
|
|||||||
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
|
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
|
||||||
core.set_seq_cutoff_threshold(threshold)
|
core.set_seq_cutoff_threshold(threshold)
|
||||||
|
|
||||||
with TestRun.step("Prepare sequential IO against first three cores"):
|
with TestRun.step("Prepare sequential I/O against first three cores"):
|
||||||
block_size = Size(4, Unit.KibiByte)
|
block_size = Size(4, Unit.KibiByte)
|
||||||
fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)
|
fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)
|
||||||
|
|
||||||
@ -107,7 +106,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
|
|||||||
fio_job.target(core.path)
|
fio_job.target(core.path)
|
||||||
writes_before_list.append(core.get_statistics().block_stats.cache.writes)
|
writes_before_list.append(core.get_statistics().block_stats.cache.writes)
|
||||||
|
|
||||||
with TestRun.step("Prepare random IO against the last core"):
|
with TestRun.step("Prepare random I/O against the last core"):
|
||||||
fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
|
fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
|
||||||
fio_job.size(io_sizes_list[-1])
|
fio_job.size(io_sizes_list[-1])
|
||||||
fio_job.read_write(io_type_last)
|
fio_job.read_write(io_type_last)
|
||||||
@ -117,7 +116,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
|
|||||||
with TestRun.step("Run fio against all cores"):
|
with TestRun.step("Run fio against all cores"):
|
||||||
fio.run()
|
fio.run()
|
||||||
|
|
||||||
with TestRun.step("Verify writes to cache count after IO"):
|
with TestRun.step("Verify writes to cache count after I/O"):
|
||||||
margins = [
|
margins = [
|
||||||
min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
|
min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
|
||||||
for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
|
for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
|
||||||
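The margin computed above bounds how much data may still reach the cache before cutoff engages: per core it is the smaller of the configured threshold and the data admitted while the promotion counter is still counting. A worked example with illustrative numbers (assumed, not taken from the test):

```python
# Worked example of the per-core margin bound (illustrative values).
block_size_b = 4096            # 4 KiB blocks, as in the test
promotion_count = 8            # assumed promotion-count setting
threshold_b = 1024 * 1024      # assumed 1 MiB cutoff threshold
margin_b = min(block_size_b * (promotion_count - 1), threshold_b)
assert margin_b == 28672       # up to 28 KiB may be cached before cutoff
```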
@ -159,17 +158,16 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
|
|||||||
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
|
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cache_line_size):
|
def test_seq_cutoff_multi_core_cpu_pinned(cache_mode, io_type, io_type_last, cache_line_size):
|
||||||
"""
|
"""
|
||||||
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
|
title: Functional sequential cutoff test with multiple cores and CPU-pinned I/O
|
||||||
description: |
|
description: |
|
||||||
Testing if amount of data written to cache after sequential writes for different
|
Test checking if data is cached properly with sequential cutoff "always" policy
|
||||||
sequential cut-off thresholds on each core, while running sequential IO, pinned,
|
while sequential and random CPU-pinned I/O is running against multiple cores.
|
||||||
on 3 out of 4 cores and random IO against the last core, is correct.
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of written blocks to cache is less or equal than amount set
|
||||||
with sequential cut-off threshold for three first cores.
|
with sequential cutoff threshold for three first cores.
|
||||||
- Amount of written blocks to cache is equal to io size run against last core.
|
- Amount of written blocks to cache is equal to I/O size run against last core.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step("Partition cache and core devices"):
|
with TestRun.step("Partition cache and core devices"):
|
||||||
@ -198,7 +196,7 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
|
|||||||
)
|
)
|
||||||
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
|
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
|
||||||
|
|
||||||
with TestRun.step(f"Set sequential cut-off parameters for all cores"):
|
with TestRun.step("Set sequential cutoff parameters for all cores"):
|
||||||
writes_before_list = []
|
writes_before_list = []
|
||||||
fio_additional_size = Size(10, Unit.Blocks4096)
|
fio_additional_size = Size(10, Unit.Blocks4096)
|
||||||
thresholds_list = [
|
thresholds_list = [
|
||||||
@ -218,7 +216,9 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
|
|||||||
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
|
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
|
||||||
core.set_seq_cutoff_threshold(threshold)
|
core.set_seq_cutoff_threshold(threshold)
|
||||||
|
|
||||||
with TestRun.step("Prepare sequential IO against first three cores"):
|
with TestRun.step(
|
||||||
|
"Prepare sequential I/O against first three cores and random I/O against the last one"
|
||||||
|
):
|
||||||
fio = (
|
fio = (
|
||||||
Fio()
|
Fio()
|
||||||
.create_command()
|
.create_command()
|
||||||
@ -244,10 +244,10 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
|
|||||||
fio_job.target(core_list[-1].path)
|
fio_job.target(core_list[-1].path)
|
||||||
writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)
|
writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)
|
||||||
|
|
||||||
with TestRun.step("Running IO against all cores"):
|
with TestRun.step("Running I/O against all cores"):
|
||||||
fio.run()
|
fio.run()
|
||||||
|
|
||||||
with TestRun.step("Verifying writes to cache count after IO"):
|
with TestRun.step("Verifying writes to cache count after I/O"):
|
||||||
for core, writes, threshold, io_size in zip(
|
for core, writes, threshold, io_size in zip(
|
||||||
core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
|
core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
|
||||||
):
|
):
|
||||||
@ -282,16 +282,14 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
|
def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
|
||||||
"""
|
"""
|
||||||
title: Sequential cut-off tests for writes and reads for 'never', 'always' and 'full' policies
|
title: Functional test for sequential cutoff threshold parameter
|
||||||
description: |
|
description: |
|
||||||
Testing if amount of data written to cache after sequential writes and reads for different
|
Check if data is cached properly according to sequential cutoff policy and
|
||||||
sequential cut-off policies with cache configured with different cache line size
|
threshold parameter
|
||||||
is valid for sequential cut-off threshold parameter, assuming that cache occupancy
|
|
||||||
doesn't reach 100% during test.
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of blocks written to cache is less than or equal to amount set
|
||||||
with sequential cut-off parameter in case of 'always' policy.
|
with sequential cutoff parameter in case of 'always' policy.
|
||||||
- Amount of written blocks to cache is at least equal io size in case of 'never' and 'full'
|
- Amount of blocks written to cache is at least equal to I/O size in case of 'never' and 'full'
|
||||||
policy.
|
policy.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@ -332,7 +330,7 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
|
|||||||
with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
|
with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
|
||||||
cache.set_seq_cutoff_threshold(threshold)
|
cache.set_seq_cutoff_threshold(threshold)
|
||||||
|
|
||||||
with TestRun.step("Prepare sequential IO against core"):
|
with TestRun.step("Prepare sequential I/O against core"):
|
||||||
sync()
|
sync()
|
||||||
writes_before = core.get_statistics().block_stats.cache.writes
|
writes_before = core.get_statistics().block_stats.cache.writes
|
||||||
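For the 'always' policy the pass criterion is a simple delta check against the threshold; a sketch using the block-stats accessor visible above:

```python
# Sketch: cache writes since the snapshot must not exceed the threshold.
def verify_always_policy(core, writes_before, threshold) -> None:
    writes_after = core.get_statistics().block_stats.cache.writes
    written = writes_after - writes_before
    if written > threshold:
        TestRun.fail(f"Cutoff threshold exceeded: {written} > {threshold}")
```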
fio = (
|
fio = (
|
||||||
@ -364,16 +362,15 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
|
def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
|
||||||
"""
|
"""
|
||||||
title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
|
title: Functional test for sequential cutoff threshold parameter and 'full' policy
|
||||||
description: |
|
description: |
|
||||||
Testing if amount of data written to cache after sequential io against fully occupied
|
Check if data is cached properly according to sequential cutoff 'full' policy and given
|
||||||
cache for 'full' sequential cut-off policy with cache configured with different cache
|
threshold parameter
|
||||||
line sizes is valid for sequential cut-off threshold parameter.
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is big enough to fill cache when 'never' sequential
|
- Amount of written blocks to cache is big enough to fill cache when 'never' sequential
|
||||||
cut-off policy is set
|
cutoff policy is set
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of written blocks to cache is less or equal than amount set
|
||||||
with sequential cut-off parameter in case of 'full' policy.
|
with sequential cutoff parameter in case of 'full' policy.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step("Partition cache and core devices"):
|
with TestRun.step("Partition cache and core devices"):
|
||||||
@ -410,7 +407,7 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
|
|||||||
with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.never}"):
|
with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.never}"):
|
||||||
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
|
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
|
||||||
|
|
||||||
with TestRun.step("Prepare sequential IO against core"):
|
with TestRun.step("Prepare sequential I/O against core"):
|
||||||
sync()
|
sync()
|
||||||
fio = (
|
fio = (
|
||||||
Fio()
|
Fio()
|
||||||
@ -438,7 +435,7 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
|
|||||||
with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
|
with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
|
||||||
cache.set_seq_cutoff_threshold(threshold)
|
cache.set_seq_cutoff_threshold(threshold)
|
||||||
|
|
||||||
with TestRun.step(f"Running sequential IO ({io_dir})"):
|
with TestRun.step(f"Running sequential I/O ({io_dir})"):
|
||||||
sync()
|
sync()
|
||||||
writes_before = core.get_statistics().block_stats.cache.writes
|
writes_before = core.get_statistics().block_stats.cache.writes
|
||||||
fio = (
|
fio = (
|
||||||
|
@ -1,13 +1,13 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from api.cas import casadm
|
from api.cas import casadm
|
||||||
from api.cas.cache_config import CacheMode
|
from api.cas.cache_config import CacheMode, CacheModeTrait
|
||||||
from core.test_run import TestRun
|
from core.test_run import TestRun
|
||||||
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
|
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
|
||||||
from test_tools.udev import Udev
|
from test_tools.udev import Udev
|
||||||
@ -20,19 +20,17 @@ dd_count = 100
|
|||||||
|
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
|
||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WA, CacheMode.WB])
|
@pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertRead))
|
||||||
@pytest.mark.CI()
|
@pytest.mark.CI()
|
||||||
def test_ci_read(cache_mode):
|
def test_ci_read(cache_mode):
|
||||||
"""
|
"""
|
||||||
title: Verification test for write mode: write around
|
title: Verification test for caching reads in various cache modes
|
||||||
description: Verify if write mode: write around, works as expected and cache only reads
|
description: Check if reads are properly cached in various cache modes
|
||||||
and does not cache write
|
|
||||||
pass criteria:
|
pass criteria:
|
||||||
- writes are not cached
|
- Reads are cached
|
||||||
- reads are cached
|
|
||||||
"""
|
"""
|
||||||
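The parametrization change above derives the mode list from cache-mode capabilities instead of hard-coding it. A hypothetical sketch of how such a trait helper can work; the real enum in api.cas.cache_config may differ:

```python
# Hypothetical sketch of trait-based cache mode selection.
from enum import Enum, Flag, auto

class CacheModeTrait(Flag):
    InsertRead = auto()
    InsertWrite = auto()

class CacheMode(Enum):
    WT = "WT"
    WB = "WB"
    WA = "WA"
    PT = "PT"

    @staticmethod
    def with_traits(traits: "CacheModeTrait") -> list:
        table = {
            CacheMode.WT: CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite,
            CacheMode.WB: CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite,
            CacheMode.WA: CacheModeTrait.InsertRead,   # reads cached, writes not
            CacheMode.PT: CacheModeTrait(0),           # pass-through caches nothing
        }
        return [mode for mode, trait in table.items() if traits in trait]
```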
|
|
||||||
with TestRun.step("Prepare partitions"):
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
cache_device = TestRun.disks["cache"]
|
cache_device = TestRun.disks["cache"]
|
||||||
core_device = TestRun.disks["core"]
|
core_device = TestRun.disks["core"]
|
||||||
|
|
||||||
@ -45,7 +43,7 @@ def test_ci_read(cache_mode):
|
|||||||
with TestRun.step("Disable udev"):
|
with TestRun.step("Disable udev"):
|
||||||
Udev.disable()
|
Udev.disable()
|
||||||
|
|
||||||
with TestRun.step(f"Start cache with cache_mode={cache_mode}"):
|
with TestRun.step(f"Start cache in {cache_mode} cache mode"):
|
||||||
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
|
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
|
||||||
cache_mode=cache_mode)
|
cache_mode=cache_mode)
|
||||||
casadm.add_core(cache, core_device)
|
casadm.add_core(cache, core_device)
|
||||||
@ -99,7 +97,14 @@ def test_ci_read(cache_mode):
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
@pytest.mark.CI()
|
@pytest.mark.CI()
|
||||||
def test_ci_write_around_write():
|
def test_ci_write_around_write():
|
||||||
with TestRun.step("Prepare partitions"):
|
"""
|
||||||
|
title: Verification test for writes in Write-Around cache mode
|
||||||
|
description: Validate I/O statistics after writing to exported object in Write-Around cache mode
|
||||||
|
pass criteria:
|
||||||
|
- Writes are not cached
|
||||||
|
- After inserting writes to core, data is read from core and not from cache
|
||||||
|
"""
|
||||||
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
cache_device = TestRun.disks["cache"]
|
cache_device = TestRun.disks["cache"]
|
||||||
core_device = TestRun.disks["core"]
|
core_device = TestRun.disks["core"]
|
||||||
|
|
||||||
@ -112,7 +117,7 @@ def test_ci_write_around_write():
|
|||||||
with TestRun.step("Disable udev"):
|
with TestRun.step("Disable udev"):
|
||||||
Udev.disable()
|
Udev.disable()
|
||||||
|
|
||||||
with TestRun.step("Start CAS Linux in Write Around mode"):
|
with TestRun.step("Start cache in Write-Around mode"):
|
||||||
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
|
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
|
||||||
cache_mode=CacheMode.WA)
|
cache_mode=CacheMode.WA)
|
||||||
casadm.add_core(cache, core_device)
|
casadm.add_core(cache, core_device)
|
||||||
@ -183,14 +188,14 @@ def test_ci_write_around_write():
|
|||||||
else:
|
else:
|
||||||
TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta_1} != 0")
|
TestRun.LOGGER.error(f"Writes to cache: {write_cache_delta_1} != 0")
|
||||||
|
|
||||||
with TestRun.step("Verify that reads propagated to core"):
|
with TestRun.step("Verify that data was read from core"):
|
||||||
read_core_delta_2 = read_core_2 - read_core_1
|
read_core_delta_2 = read_core_2 - read_core_1
|
||||||
if read_core_delta_2 == data_write:
|
if read_core_delta_2 == data_write:
|
||||||
TestRun.LOGGER.info(f"Reads from core: {read_core_delta_2} == {data_write}")
|
TestRun.LOGGER.info(f"Reads from core: {read_core_delta_2} == {data_write}")
|
||||||
else:
|
else:
|
||||||
TestRun.LOGGER.error(f"Reads from core: {read_core_delta_2} != {data_write}")
|
TestRun.LOGGER.error(f"Reads from core: {read_core_delta_2} != {data_write}")
|
||||||
|
|
||||||
with TestRun.step("Verify that reads did not occur on cache"):
|
with TestRun.step("Verify that data was not read from cache"):
|
||||||
read_cache_delta_2 = read_cache_2 - read_cache_1
|
read_cache_delta_2 = read_cache_2 - read_cache_1
|
||||||
if read_cache_delta_2.value == 0:
|
if read_cache_delta_2.value == 0:
|
||||||
TestRun.LOGGER.info(f"Reads from cache: {read_cache_delta_2} == 0")
|
TestRun.LOGGER.info(f"Reads from cache: {read_cache_delta_2} == 0")
|
||||||
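All the statistics checks in this test share one shape: snapshot a counter, run the I/O, snapshot again, compare the delta against the expectation. A sketch of that shape, with the counter accessor passed in as a callable:

```python
# Sketch of the snapshot/compare pattern behind the checks above.
def expect_delta(read_counter, run_io, expected, label) -> None:
    before = read_counter()          # e.g. core read-block statistic
    run_io()                         # the I/O under test
    delta = read_counter() - before
    if delta == expected:
        TestRun.LOGGER.info(f"{label}: {delta} == {expected}")
    else:
        TestRun.LOGGER.error(f"{label}: {delta} != {expected}")
```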
@ -203,7 +208,15 @@ def test_ci_write_around_write():
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
@pytest.mark.CI()
|
@pytest.mark.CI()
|
||||||
def test_ci_write_through_write():
|
def test_ci_write_through_write():
|
||||||
with TestRun.step("Prepare partitions"):
|
"""
|
||||||
|
title: Verification test for Write-Through cache mode
|
||||||
|
description: |
|
||||||
|
Validate if reads and writes are cached properly for cache in Write-Through mode
|
||||||
|
pass criteria:
|
||||||
|
- Writes are inserted to cache and core
|
||||||
|
- Reads are not cached
|
||||||
|
"""
|
||||||
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
cache_device = TestRun.disks["cache"]
|
cache_device = TestRun.disks["cache"]
|
||||||
core_device = TestRun.disks["core"]
|
core_device = TestRun.disks["core"]
|
||||||
|
|
||||||
@ -216,7 +229,7 @@ def test_ci_write_through_write():
|
|||||||
with TestRun.step("Disable udev"):
|
with TestRun.step("Disable udev"):
|
||||||
Udev.disable()
|
Udev.disable()
|
||||||
|
|
||||||
with TestRun.step("Start CAS Linux in Write Through mode"):
|
with TestRun.step("Start cache in Write-Through mode"):
|
||||||
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
|
cache = casadm.start_cache(cache_dev=cache_device, cache_id=1, force=True,
|
||||||
cache_mode=CacheMode.WT)
|
cache_mode=CacheMode.WT)
|
||||||
casadm.add_core(cache, core_device)
|
casadm.add_core(cache, core_device)
|
||||||
|
@ -25,51 +25,51 @@ from test_tools.memory import disable_memory_affecting_functions, get_mem_free,
|
|||||||
@pytest.mark.os_dependent
|
@pytest.mark.os_dependent
|
||||||
def test_insufficient_memory_for_cas_module():
|
def test_insufficient_memory_for_cas_module():
|
||||||
"""
|
"""
|
||||||
title: Negative test for the ability of CAS to load the kernel module with insufficient memory.
|
title: Load CAS kernel module with insufficient memory
|
||||||
description: |
|
description: |
|
||||||
Check that the CAS kernel module won’t be loaded if enough memory is not available
|
Negative test for the ability to load the CAS kernel module with insufficient memory.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- CAS module cannot be loaded with not enough memory.
|
- CAS kernel module cannot be loaded with not enough memory.
|
||||||
- Loading CAS with not enough memory returns error.
|
- Loading CAS kernel module with not enough memory returns error.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
with TestRun.step("Disable caching and memory over-committing"):
|
with TestRun.step("Disable caching and memory over-committing"):
|
||||||
disable_memory_affecting_functions()
|
disable_memory_affecting_functions()
|
||||||
drop_caches()
|
drop_caches()
|
||||||
|
|
||||||
with TestRun.step("Measure memory usage without OpenCAS module"):
|
with TestRun.step("Measure memory usage without CAS kernel module"):
|
||||||
if is_kernel_module_loaded(CasModule.cache.value):
|
if is_kernel_module_loaded(CasModule.cache.value):
|
||||||
unload_kernel_module(CasModule.cache.value)
|
unload_kernel_module(CasModule.cache.value)
|
||||||
available_mem_before_cas = get_mem_free()
|
available_mem_before_cas = get_mem_free()
|
||||||
|
|
||||||
with TestRun.step("Load CAS module"):
|
with TestRun.step("Load CAS kernel module"):
|
||||||
load_kernel_module(CasModule.cache.value)
|
load_kernel_module(CasModule.cache.value)
|
||||||
|
|
||||||
with TestRun.step("Measure memory usage with CAS module"):
|
with TestRun.step("Measure memory usage with CAS kernel module"):
|
||||||
available_mem_with_cas = get_mem_free()
|
available_mem_with_cas = get_mem_free()
|
||||||
memory_used_by_cas = available_mem_before_cas - available_mem_with_cas
|
memory_used_by_cas = available_mem_before_cas - available_mem_with_cas
|
||||||
TestRun.LOGGER.info(
|
TestRun.LOGGER.info(
|
||||||
f"OpenCAS module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
|
f"CAS kernel module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
|
||||||
)
|
)
|
||||||
|
|
||||||
with TestRun.step("Unload CAS module"):
|
with TestRun.step("Unload CAS kernel module"):
|
||||||
unload_kernel_module(CasModule.cache.value)
|
unload_kernel_module(CasModule.cache.value)
|
||||||
|
|
||||||
with TestRun.step("Allocate memory, leaving not enough memory for CAS module"):
|
with TestRun.step("Allocate memory, leaving not enough memory for CAS module"):
|
||||||
memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4))
|
memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4))
|
||||||
allocate_memory(memory_to_leave)
|
allocate_memory(memory_to_leave)
|
||||||
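The allocation above deliberately leaves only three quarters of the module's measured footprint free, so the subsequent load attempt cannot fit. A worked example with illustrative numbers (assumed):

```python
# Worked example of the memory budget (illustrative values, not measured).
free_mib = 8192                         # free RAM before allocating
module_mib = 100                        # measured CAS kernel module footprint
to_allocate = free_mib - module_mib * (3 / 4)
remaining = free_mib - to_allocate
assert remaining == 75.0                # < 100 MiB, so the module cannot load
```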
TestRun.LOGGER.info(
|
TestRun.LOGGER.info(
|
||||||
f"Memory left for OpenCAS module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
|
f"Memory left for CAS kernel module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
|
||||||
)
|
)
|
||||||
|
|
||||||
with TestRun.step(
|
with TestRun.step(
|
||||||
"Try to load OpenCAS module and check if correct error message is printed on failure"
|
"Try to load CAS kernel module and check if correct error message is printed on failure"
|
||||||
):
|
):
|
||||||
output = load_kernel_module(CasModule.cache.value)
|
output = load_kernel_module(CasModule.cache.value)
|
||||||
if output.stderr and output.exit_code != 0:
|
if output.stderr and output.exit_code != 0:
|
||||||
TestRun.LOGGER.info(f"Cannot load OpenCAS module as expected.\n{output.stderr}")
|
TestRun.LOGGER.info(f"Cannot load CAS kernel module as expected.\n{output.stderr}")
|
||||||
else:
|
else:
|
||||||
TestRun.LOGGER.error("Loading OpenCAS module successfully finished, but should fail.")
|
TestRun.LOGGER.error("Loading CAS kernel module succeeded, but it should have failed.")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
|
||||||
@ -118,3 +118,4 @@ def test_attach_cache_min_ram():
|
|||||||
|
|
||||||
with TestRun.step("Unlock RAM memory"):
|
with TestRun.step("Unlock RAM memory"):
|
||||||
unmount_ramfs()
|
unmount_ramfs()
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -23,10 +23,10 @@ from test_tools.udev import Udev
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_cleaning_policy():
|
def test_cleaning_policy():
|
||||||
"""
|
"""
|
||||||
Title: test_cleaning_policy
|
title: Basic test for cleaning policy
|
||||||
description: |
|
description: |
|
||||||
The test is to see if dirty data will be removed from the Cache after changing the
|
Verify cleaning behaviour after changing cleaning policy from NOP
|
||||||
cleaning policy from NOP to one that expects a flush.
|
to one that expects a flush.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Cache is successfully populated with dirty data
|
- Cache is successfully populated with dirty data
|
||||||
- Cleaning policy is changed successfully
|
- Cleaning policy is changed successfully
|
||||||
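The NOP-to-flushing transition under test can be summarized in a short sketch; the dirty-statistics accessor and the CleaningPolicy import path are assumptions, the remaining calls appear in this diff:

```python
# Sketch: dirty data must start draining once the policy leaves NOP.
import time

def check_nop_to_flushing(cache) -> None:
    cache.set_cleaning_policy(cleaning_policy=CleaningPolicy.nop)
    # ... run write I/O here so the cache holds dirty lines ...
    dirty_before = cache.get_statistics().usage_stats.dirty  # assumed accessor
    cache.set_cleaning_policy(cleaning_policy=CleaningPolicy.alru)
    time.sleep(60)  # give the cleaner time to flush
    if cache.get_statistics().usage_stats.dirty >= dirty_before:
        TestRun.fail("Dirty data was not cleaned after leaving NOP policy")
```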
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
|
# Copyright(c) 2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -14,7 +15,7 @@ def test_cli_help_spelling():
|
|||||||
title: Spelling test for 'help' command
|
title: Spelling test for 'help' command
|
||||||
description: Validates spelling of 'help' in CLI
|
description: Validates spelling of 'help' in CLI
|
||||||
pass criteria:
|
pass criteria:
|
||||||
- no spelling mistakes are found
|
- No spelling mistakes are found
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cas_dictionary = os.path.join(TestRun.usr.repo_dir, "test", "functional", "resources")
|
cas_dictionary = os.path.join(TestRun.usr.repo_dir, "test", "functional", "resources")
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2020-2021 Intel Corporation
|
# Copyright(c) 2020-2021 Intel Corporation
|
||||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -20,12 +20,11 @@ from test_tools.dd import Dd
|
|||||||
@pytest.mark.parametrize("purge_target", ["cache", "core"])
|
@pytest.mark.parametrize("purge_target", ["cache", "core"])
|
||||||
def test_purge(purge_target):
|
def test_purge(purge_target):
|
||||||
"""
|
"""
|
||||||
title: Call purge without and with `--script` switch
|
title: Basic test for purge command
|
||||||
description: |
|
description: Check purge command behaviour with and without '--script' flag
|
||||||
Check if purge is called only when `--script` switch is used.
|
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- casadm returns an error when `--script` is missing
|
- Error returned when '--script' is missing
|
||||||
- cache is wiped when purge command is used properly
|
- Cache is wiped when purge command is used properly
|
||||||
"""
|
"""
|
||||||
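A sketch of the two invocations implied by the pass criteria; the `--script` flag spelling is taken from the description above, while the purge subcommand name is an assumption:

```python
# Hypothetical sketch: purge must fail without --script and succeed with it.
def check_purge_guard(cache_id: int) -> None:
    without_script = TestRun.executor.run(f"casadm --purge-cache --cache-id {cache_id}")
    if without_script.exit_code == 0:
        TestRun.LOGGER.error("Purge without --script unexpectedly succeeded")
    with_script = TestRun.executor.run(
        f"casadm --script --purge-cache --cache-id {cache_id}"
    )
    if with_script.exit_code != 0:
        TestRun.LOGGER.error("Purge with --script failed")
```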
with TestRun.step("Prepare devices"):
|
with TestRun.step("Prepare devices"):
|
||||||
cache_device = TestRun.disks["cache"]
|
cache_device = TestRun.disks["cache"]
|
||||||
@ -41,7 +40,7 @@ def test_purge(purge_target):
|
|||||||
cache = casadm.start_cache(cache_device, force=True)
|
cache = casadm.start_cache(cache_device, force=True)
|
||||||
core = casadm.add_core(cache, core_device)
|
core = casadm.add_core(cache, core_device)
|
||||||
|
|
||||||
with TestRun.step("Trigger IO to prepared cache instance"):
|
with TestRun.step("Trigger I/O to prepared cache instance"):
|
||||||
dd = (
|
dd = (
|
||||||
Dd()
|
Dd()
|
||||||
.input("/dev/zero")
|
.input("/dev/zero")
|
||||||
@ -79,8 +78,3 @@ def test_purge(purge_target):
|
|||||||
if cache.get_statistics().usage_stats.occupancy.get_value() != 0:
|
if cache.get_statistics().usage_stats.occupancy.get_value() != 0:
|
||||||
TestRun.fail(f"{cache.get_statistics().usage_stats.occupancy.get_value()}")
|
TestRun.fail(f"{cache.get_statistics().usage_stats.occupancy.get_value()}")
|
||||||
TestRun.fail(f"Purge {purge_target} should invalidate all cache lines!")
|
TestRun.fail(f"Purge {purge_target} should invalidate all cache lines!")
|
||||||
|
|
||||||
with TestRun.step(
|
|
||||||
f"Stop cache"
|
|
||||||
):
|
|
||||||
casadm.stop_all_caches()
|
|
||||||
@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# Copyright(c) 2024-2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

@ -120,11 +120,12 @@ def test_activate_neg_cli_params():
- The execution is unsuccessful for all improper argument combinations
- A proper error message is displayed for unsuccessful executions
"""
cache_id = 1

with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache_id = 1

with TestRun.step("Init standby cache"):
cache_dev = Device(cache_device.path)
@ -201,6 +202,8 @@ def test_standby_neg_cli_management():
- The execution is successful for allowed management commands
- A proper error message is displayed for unsuccessful executions
"""
cache_id = 1

with TestRun.step("Prepare the device for the cache."):
device = TestRun.disks["cache"]
device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
@ -208,7 +211,6 @@ def test_standby_neg_cli_management():
core_device = device.partitions[1]

with TestRun.step("Prepare the standby instance"):
cache_id = 1
cache = casadm.standby_init(
cache_dev=cache_device, cache_id=cache_id,
cache_line_size=CacheLineSize.LINE_32KiB, force=True
@ -278,13 +280,13 @@ def test_start_neg_cli_flags():
- The command execution is unsuccessful for commands with mutually exclusive flags
- A proper error message is displayed
"""
cache_id = 1
cache_line_size = 32

with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache_id = 1
cache_line_size = 32

with TestRun.step("Try to start standby cache with mutually exclusive parameters"):
init_required_params = f' --cache-device {cache_device.path}' \
@ -327,19 +329,19 @@ def test_activate_without_detach():
"""
title: Activate cache without detach command.
description: |
Try activate passive cache without detach command before activation.
Try to activate passive cache without detach command before activation.
pass_criteria:
- The activation is not possible
- The cache remains in Standby state after unsuccessful activation
- The cache exported object is present after an unsuccessful activation
"""
cache_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"

with TestRun.step("Prepare the device for the cache."):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(500, Unit.MebiByte)])
cache_dev = cache_dev.partitions[0]
cache_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"

with TestRun.step("Start cache instance."):
cache = casadm.start_cache(cache_dev=cache_dev, cache_id=cache_id)
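The change repeated across this file is one small refactor: constants such as cache_id move from the middle of a preparation step to the top of the test body, before the first step. A before/after sketch of the pattern, with illustrative test names:

    # Before: the constant is buried inside a step, far from where it is reused
    def test_example_before():
        with TestRun.step("Prepare the device for the cache."):
            cache_device = TestRun.disks["cache"]
            cache_device.create_partitions([Size(500, Unit.MebiByte)])
            cache_device = cache_device.partitions[0]
            cache_id = 1

    # After: values are declared up front; steps contain only actions
    def test_example_after():
        cache_id = 1

        with TestRun.step("Prepare the device for the cache."):
            cache_device = TestRun.disks["cache"]
            cache_device.create_partitions([Size(500, Unit.MebiByte)])
            cache_device = cache_device.partitions[0]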
@ -399,6 +401,9 @@ def test_activate_neg_cache_line_size():
- The cache remains in Standby detached state after an unsuccessful activation
- A proper error message is displayed
"""
cache_id = 1
active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
cache_exp_obj_name = f"cas-cache-{cache_id}"

with TestRun.step("Prepare cache devices"):
active_cache_dev = TestRun.disks["active_cache"]
@ -407,15 +412,11 @@ def test_activate_neg_cache_line_size():
standby_cache_dev = TestRun.disks["standby_cache"]
standby_cache_dev.create_partitions([Size(500, Unit.MebiByte)])
standby_cache_dev = standby_cache_dev.partitions[0]
cache_id = 1
active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
cache_exp_obj_name = f"cas-cache-{cache_id}"

with TestRun.step("Start active cache instance."):
active_cache = casadm.start_cache(cache_dev=active_cache_dev, cache_id=cache_id,
cache_line_size=active_cls)

with TestRun.step("Create dump file with cache metadata"):
with TestRun.step("Get metadata size"):
dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
md_size = dmesg.get_metadata_size_on_device(dmesg_out)
@ -489,17 +490,18 @@ def test_standby_init_with_preexisting_metadata():
- initialize cache without force flag fails and informative error message is printed
- initialize cache with force flag succeeds and passive instance is present in system
"""
cache_line_size = CacheLineSize.LINE_32KiB
cache_id = 1

with TestRun.step("Prepare device for cache"):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(200, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cls = CacheLineSize.LINE_32KiB
cache_id = 1

with TestRun.step("Start standby cache instance"):
cache = casadm.standby_init(
cache_dev=cache_device,
cache_line_size=cls,
cache_line_size=cache_line_size,
cache_id=cache_id,
force=True,
)
@ -512,7 +514,7 @@ def test_standby_init_with_preexisting_metadata():
standby_init_cmd(
cache_dev=cache_device.path,
cache_id=str(cache_id),
cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)),
cache_line_size=str(int(cache_line_size.value.value / Unit.KibiByte.value)),
)
)
if not check_stderr_msg(output, start_cache_with_existing_metadata):
@ -524,7 +526,7 @@ def test_standby_init_with_preexisting_metadata():
with TestRun.step("Try initialize cache with force flag"):
casadm.standby_init(
cache_dev=cache_device,
cache_line_size=cls,
cache_line_size=cache_line_size,
cache_id=cache_id,
force=True,
)
@ -549,12 +551,13 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
- initialize cache without force flag fails and informative error message is printed
- initialize cache with force flag succeeds and passive instance is present in system
"""
cache_line_size = CacheLineSize.LINE_32KiB
cache_id = 1

with TestRun.step("Prepare device for cache"):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(200, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cls = CacheLineSize.LINE_32KiB
cache_id = 1

with TestRun.step("Create filesystem on cache device partition"):
cache_device.create_filesystem(filesystem)
@ -564,7 +567,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
standby_init_cmd(
cache_dev=cache_device.path,
cache_id=str(cache_id),
cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)),
cache_line_size=str(int(cache_line_size.value.value / Unit.KibiByte.value)),
)
)
if not check_stderr_msg(output, standby_init_with_existing_filesystem):
@ -576,7 +579,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
with TestRun.step("Try initialize cache with force flag"):
casadm.standby_init(
cache_dev=cache_device,
cache_line_size=cls,
cache_line_size=cache_line_size,
cache_id=cache_id,
force=True,
)
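A note on the conversion repeated in both tests above: the raw CLI takes the cache line size in KiB, while CacheLineSize.LINE_32KiB wraps a byte-denominated Size, hence the double dereference. A worked example, on the assumption (consistent with the expression in the diff) that .value.value yields the raw byte count and Unit.KibiByte.value is 1024:

    line_size_bytes = 32 * 1024                 # CacheLineSize.LINE_32KiB in bytes
    cli_arg = str(int(line_size_bytes / 1024))  # what standby_init_cmd receives
    assert cli_arg == "32"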
@ -600,6 +603,11 @@ def test_standby_activate_with_corepool():
- During activate metadata on the device match with metadata in DRAM
- Core is in active state after activate
"""
cache_id = 1
core_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
cache_line_size = CacheLineSize.LINE_16KiB

with TestRun.step("Prepare cache and core devices"):
caches_dev = TestRun.disks["caches"]
caches_dev.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
@ -609,13 +617,8 @@ def test_standby_activate_with_corepool():
core_dev.create_partitions([Size(200, Unit.MebiByte)])
core_dev = core_dev.partitions[0]

cache_id = 1
core_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
cls = CacheLineSize.LINE_16KiB

with TestRun.step("Start regular cache instance"):
cache = casadm.start_cache(cache_dev=active_cache_dev, cache_line_size=cls,
cache = casadm.start_cache(cache_dev=active_cache_dev, cache_line_size=cache_line_size,
cache_id=cache_id)

with TestRun.step("Add core to regular cache instance"):
@ -629,7 +632,7 @@ def test_standby_activate_with_corepool():

with TestRun.step("Start standby cache instance."):
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
cache_line_size=cls,
cache_line_size=cache_line_size,
force=True)

with TestRun.step(f"Copy changed metadata to the standby instance"):

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

@ -1,6 +1,6 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

@ -24,13 +24,16 @@ from test_tools.udev import Udev
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cleaning_policy():
"""
Title: test manual casadm flush
title: Test for manual cache and core flushing
description: | The test is to see if dirty data will be removed from the Cache
description: |
or Core after using the casadm command with the corresponding parameter.
The test is to see if dirty data will be removed from the cache
or core after using the casadm command with the corresponding parameter.
pass_criteria:
- Cache and core are filled with dirty data.
- After cache and core flush dirty data are cleared.
"""
cache_id = 1

with TestRun.step("Prepare devices."):
cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
@ -39,7 +42,8 @@ def test_cleaning_policy():
core_disk = TestRun.disks["core"]
core_disk.create_partitions([Size(1, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
cache_id = 1
with TestRun.step("Disable udev"):
Udev.disable()

with TestRun.step("Start cache and set cleaning policy to NOP"):

@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

@ -22,17 +22,28 @@ from type_def.size import Size, Unit
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_default_params():
"""
title: Default sequential cut-off threshold & policy test
title: Default sequential cutoff threshold & policy test
description: Test if proper default threshold and policy is set after cache start
pass_criteria:
- "Full" shall be default sequential cut-off policy
- "Full" shall be default sequential cutoff policy
- There shall be default 1MiB (1024kiB) value for sequential cut-off threshold
- There shall be default 1MiB (1024kiB) value for sequential cutoff threshold
"""
with TestRun.step("Test prepare (start cache and add core)"):
with TestRun.step("Prepare cache and core devices"):
cache, cores = prepare()
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

with TestRun.step("Getting sequential cut-off parameters"):
cache_device.create_partitions([Size(500, Unit.MebiByte)])
params = cores[0].get_seq_cut_off_parameters()
core_device.create_partitions([Size(1, Unit.GibiByte)])

cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]

with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)

with TestRun.step("Getting sequential cutoff parameters"):
params = core.get_seq_cut_off_parameters()

with TestRun.step("Check if proper sequential cutoff policy is set as a default"):
if params.policy != SeqCutOffPolicy.DEFAULT:
@ -50,18 +61,27 @@ def test_seq_cutoff_default_params():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_core(policy):
"""
title: Sequential cut-off policy set/get test for core
title: Sequential cutoff policy set/get test for core
description: |
Test if CAS is setting proper sequential cut-off policy for core and
Verify if it is possible to set and get a sequential cutoff policy per core
returns previously set value
pass_criteria:
- Sequential cut-off policy obtained from get-param command for the first core must be
- Sequential cutoff policy obtained from get-param command for the first core must be
the same as the one used in set-param command
- Sequential cut-off policy obtained from get-param command for the second core must be
- Sequential cutoff policy obtained from get-param command for the second core must be
proper default value
"""
with TestRun.step("Test prepare (start cache and add 2 cores)"):
with TestRun.step("Prepare cache and core devices"):
cache, cores = prepare(cores_count=2)
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * 2)

cache_part = cache_device.partitions[0]

with TestRun.step("Start cache and add cores"):
cache = casadm.start_cache(cache_part, force=True)
cores = [cache.add_core(core_dev=part) for part in core_device.partitions]

with TestRun.step(f"Setting core sequential cutoff policy mode to {policy}"):
cores[0].set_seq_cutoff_policy(policy)
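The inlined setup above also swaps the old helper's append loop for list multiplication wherever several equal partitions are needed; both forms build the same list:

    # Old prepare() style (the helper is deleted at the end of this file):
    partitions = []
    for _ in range(2):
        partitions.append(Size(1, Unit.GibiByte))

    # New inlined style, equivalent result:
    partitions = [Size(1, Unit.GibiByte)] * 2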
@ -85,16 +105,25 @@ def test_seq_cutoff_set_get_policy_core(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_policy_cache(policy):
"""
title: Sequential cut-off policy set/get test for cache
title: Sequential cutoff policy set/get test for cache
description: |
Test if CAS is setting proper sequential cut-off policy for whole cache and
Verify if it is possible to set and get a sequential cutoff policy for the whole cache
returns previously set value
pass_criteria:
- Sequential cut-off policy obtained from get-param command for each of 3 cores must be the
- Sequential cutoff policy obtained from get-param command for each of 3 cores must be the
same as the one used in set-param command for cache
"""
with TestRun.step("Test prepare (start cache and add 3 cores)"):
with TestRun.step("Prepare cache and core devices"):
cache, cores = prepare(cores_count=3)
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * 3)

cache_part = cache_device.partitions[0]

with TestRun.step("Start cache and add cores"):
cache = casadm.start_cache(cache_part, force=True)
cores = [cache.add_core(core_dev=part) for part in core_device.partitions]

with TestRun.step(f"Setting sequential cutoff policy mode {policy} for cache"):
cache.set_seq_cutoff_policy(policy)
@ -111,22 +140,34 @@ def test_seq_cutoff_set_get_policy_cache(policy):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_policy_load():
"""
title: Sequential cut-off policy set/get test with cache load between
title: Sequential cutoff policy set/get test with cache load between
description: |
Set each possible policy for different core, stop cache, test if after cache load
sequential cut-off policy value previously set is being loaded correctly for each core.
sequential cutoff policy value previously set is being loaded correctly for each core.
pass_criteria:
- Sequential cut-off policy obtained from get-param command after cache load
- Sequential cutoff policy obtained from get-param command after cache load
must be the same as the one used in set-param command before cache stop
- Sequential cut-off policy loaded for the last core should be the default one
- Sequential cutoff policy loaded for the last core should be the default one
"""
with TestRun.step(f"Test prepare (start cache and add {len(SeqCutOffPolicy) + 1} cores)"):
# Create as many cores as many possible policies including default one
cache, cores = prepare(cores_count=len(SeqCutOffPolicy) + 1)
policies = [policy for policy in SeqCutOffPolicy]

for i, core in TestRun.iteration(enumerate(cores[:-1]), "Set all possible policies "
with TestRun.step("Prepare cache and core devices"):
"except the default one"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * (len(SeqCutOffPolicy) + 1))

cache_part = cache_device.partitions[0]

with TestRun.step("Start cache and add cores"):
cache = casadm.start_cache(cache_part, force=True)
cores = [cache.add_core(core_dev=part) for part in core_device.partitions]

for i, core in TestRun.iteration(
enumerate(cores[:-1]),
"Set all possible policies except the default one"
):
with TestRun.step(f"Setting cache sequential cutoff policy mode to "
f"{policies[i]}"):
cores[i].set_seq_cutoff_policy(policies[i])
@ -140,16 +181,19 @@ def test_seq_cutoff_policy_load():
with TestRun.step("Getting cores from loaded cache"):
cores = loaded_cache.get_core_devices()

for i, core in TestRun.iteration(enumerate(cores[:-1]), "Check if proper policies have "
for i, core in TestRun.iteration(
"been loaded"):
enumerate(cores[:-1]),
"Check if proper policies have been loaded"
):
with TestRun.step(f"Check if proper sequential cutoff policy was loaded"):
if cores[i].get_seq_cut_off_policy() != policies[i]:
TestRun.fail(f"Wrong sequential cutoff policy loaded: "
f"{cores[i].get_seq_cut_off_policy()} "
f"should be {policies[i]}")

with TestRun.step(f"Check if proper (default) sequential cut off policy was loaded for "
with TestRun.step(
f"last core"):
"Check if proper (default) sequential cutoff policy was loaded for last core"
):
if cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy() != SeqCutOffPolicy.DEFAULT:
TestRun.fail(f"Wrong sequential cutoff policy loaded: "
f"{cores[len(SeqCutOffPolicy)].get_seq_cut_off_policy()} "
@ -163,19 +207,31 @@ def test_seq_cutoff_policy_load():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_invalid_threshold(threshold):
"""
title: Invalid sequential cut-off threshold test
title: Invalid sequential cutoff threshold test
description: Test if CAS is allowing setting invalid sequential cut-off threshold
description: Validate setting invalid sequential cutoff threshold
pass_criteria:
- Setting invalid sequential cut-off threshold should be blocked
- Setting invalid sequential cutoff threshold should be blocked
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte)

with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])

cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]

with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)

with TestRun.step(f"Setting cache sequential cutoff threshold to out of range value: "
f"{_threshold}"):
command = set_param_cutoff_cmd(
cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
cache_id=str(cache.cache_id), core_id=str(core.core_id),
threshold=str(int(_threshold.get_value(Unit.KiloByte))))
output = TestRun.executor.run_expect_fail(command)
if "Invalid sequential cutoff threshold, must be in the range 1-4194181"\
@ -185,7 +241,7 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
with TestRun.step(f"Setting cache sequential cutoff threshold "
f"to value passed as a float"):
command = set_param_cutoff_cmd(
cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
cache_id=str(cache.cache_id), core_id=str(core.core_id),
threshold=str(_threshold.get_value(Unit.KiloByte)))
output = TestRun.executor.run_expect_fail(command)
if "Invalid sequential cutoff threshold, must be a correct unsigned decimal integer"\
@ -199,26 +255,36 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_set_get_threshold(threshold):
"""
title: Sequential cut-off threshold set/get test
title: Sequential cutoff threshold set/get test
description: |
description: Verify setting and getting value of sequential cutoff threshold
Test if CAS is setting proper sequential cut-off threshold and returns
previously set value
pass_criteria:
- Sequential cut-off threshold obtained from get-param command must be the same as
- Sequential cutoff threshold obtained from get-param command must be the same as
the one used in set-param command
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte)

with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])

cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]

with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)

with TestRun.step(f"Setting cache sequential cutoff threshold to "
f"{_threshold}"):
cores[0].set_seq_cutoff_threshold(_threshold)
core.set_seq_cutoff_threshold(_threshold)

with TestRun.step("Check if proper sequential cutoff threshold was set"):
if cores[0].get_seq_cut_off_threshold() != _threshold:
if core.get_seq_cut_off_threshold() != _threshold:
TestRun.fail(f"Wrong sequential cutoff threshold set: "
f"{cores[0].get_seq_cut_off_threshold()} "
f"{core.get_seq_cut_off_threshold()} "
f"should be {_threshold}")

@ -228,22 +294,31 @@ def test_seq_cutoff_set_get_threshold(threshold):
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_seq_cutoff_threshold_load(threshold):
"""
title: Sequential cut-off threshold set/get test with cache load between
title: Sequential cutoff threshold after loading cache
description: |
description: Verify sequential cutoff threshold value after reloading the cache.
Test if after cache load sequential cut-off threshold
value previously set is being loaded correctly. Each of possible sequential cut-off
policies is set for different core.
pass_criteria:
- Sequential cut-off threshold obtained from get-param command after cache load
- Sequential cutoff threshold obtained from get-param command after cache load
must be the same as the one used in set-param command before cache stop
"""
with TestRun.step("Test prepare (start cache and add core)"):
cache, cores = prepare()
_threshold = Size(threshold, Unit.KibiByte)

with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']

cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])

cache_part = cache_device.partitions[0]
core_part = core_device.partitions[0]

with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_part, force=True)
core = cache.add_core(core_dev=core_part)

with TestRun.step(f"Setting cache sequential cutoff threshold to "
f"{_threshold}"):
cores[0].set_seq_cutoff_threshold(_threshold)
core.set_seq_cutoff_threshold(_threshold)

with TestRun.step("Stopping cache"):
cache.stop()
@ -259,23 +334,3 @@ def test_seq_cutoff_threshold_load(threshold):
TestRun.fail(f"Wrong sequential cutoff threshold set: "
f"{cores_load[0].get_seq_cut_off_threshold()} "
f"should be {_threshold}")


def prepare(cores_count=1):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
partitions = []
for x in range(cores_count):
partitions.append(Size(1, Unit.GibiByte))

core_device.create_partitions(partitions)
cache_part = cache_device.partitions[0]
core_parts = core_device.partitions
TestRun.LOGGER.info("Staring cache")
cache = casadm.start_cache(cache_part, force=True)
TestRun.LOGGER.info("Adding core devices")
core_list = []
for core_part in core_parts:
core_list.append(cache.add_core(core_dev=core_part))
return cache, core_list
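The prepare() helper deleted above is the source of the explicit setup now repeated in each test of this file; gathered from the added lines, the inlined equivalent for cores_count cores reads:

    # What each test now does inline instead of calling the removed prepare():
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_count)

    cache_part = cache_device.partitions[0]

    cache = casadm.start_cache(cache_part, force=True)
    cores = [cache.add_core(core_dev=part) for part in core_device.partitions]

The trade-off is some duplication in exchange for the setup appearing as named TestRun steps in each test's report rather than behind a helper and two generic log lines.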
@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

@ -36,80 +36,96 @@ number_of_checks = 10
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_set_get_seqcutoff_params(cache_mode):
def test_set_get_seq_cutoff_params(cache_mode):
"""
title: Test for setting and reading sequential cut-off parameters.
title: Test for setting and reading sequential cutoff parameters.
description: |
Verify that it is possible to set and read all available sequential cut-off
Verify that it is possible to set and read all available sequential cutoff
parameters using casadm --set-param and --get-param options.
pass_criteria:
- All sequential cut-off parameters are set to given values.
- All sequential cutoff parameters are set to given values.
- All sequential cut-off parameters displays proper values.
- All sequential cutoff parameters displays proper values.
"""

with TestRun.step("Partition cache and core devices"):
cache_dev, core_dev = storage_prepare()
cache_dev = TestRun.disks["cache"]
cache_parts = [Size(1, Unit.GibiByte)] * caches_count
cache_dev.create_partitions(cache_parts)

core_dev = TestRun.disks["core"]
core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
core_dev.create_partitions(core_parts)

with TestRun.step(
f"Start {caches_count} caches in {cache_mode} cache mode "
f"and add {cores_per_cache} cores per cache"
):
caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
caches = [
casadm.start_cache(part, cache_mode, force=True) for part in cache_dev.partitions
]

with TestRun.step("Check sequential cut-off default parameters"):
cores = [
default_seqcutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
[
caches[i].add_core(
core_dev.partitions[i * cores_per_cache + j]
) for j in range(cores_per_cache)
] for i in range(caches_count)
]

with TestRun.step("Check sequential cutoff default parameters"):
default_seq_cutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
for i in range(caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)

with TestRun.step(
"Set new random values for sequential cut-off parameters for one core only"
"Set new random values for sequential cutoff parameters for one core only"
):
for check in range(number_of_checks):
random_seqcutoff_params = new_seqcutoff_parameters_random_values()
random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
cores[0][0].set_seq_cutoff_parameters(random_seqcutoff_params)
cores[0][0].set_seq_cutoff_parameters(random_seq_cutoff_params)

# Check changed parameters for first core:
check_seqcutoff_parameters(cores[0][0], random_seqcutoff_params)
check_seq_cutoff_parameters(cores[0][0], random_seq_cutoff_params)

# Check default parameters for other cores:
for j in range(1, cores_per_cache):
check_seqcutoff_parameters(cores[0][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[0][j], default_seq_cutoff_params)
for i in range(1, caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)

with TestRun.step(
"Set new random values for sequential cut-off parameters "
"Set new random values for sequential cutoff parameters "
"for all cores within given cache instance"
):
for check in range(number_of_checks):
random_seqcutoff_params = new_seqcutoff_parameters_random_values()
random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
caches[0].set_seq_cutoff_parameters(random_seqcutoff_params)
caches[0].set_seq_cutoff_parameters(random_seq_cutoff_params)

# Check changed parameters for first cache instance:
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[0][j], random_seqcutoff_params)
check_seq_cutoff_parameters(cores[0][j], random_seq_cutoff_params)

# Check default parameters for other cache instances:
for i in range(1, caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(cores[i][j], default_seqcutoff_params)
check_seq_cutoff_parameters(cores[i][j], default_seq_cutoff_params)

with TestRun.step(
"Set new random values for sequential cut-off parameters for all cores"
"Set new random values for sequential cutoff parameters for all cores"
):
for check in range(number_of_checks):
seqcutoff_params = []
seq_cutoff_params = []
for i in range(caches_count):
for j in range(cores_per_cache):
random_seqcutoff_params = new_seqcutoff_parameters_random_values()
random_seq_cutoff_params = new_seq_cutoff_parameters_random_values()
seqcutoff_params.append(random_seqcutoff_params)
seq_cutoff_params.append(random_seq_cutoff_params)
cores[i][j].set_seq_cutoff_parameters(random_seqcutoff_params)
cores[i][j].set_seq_cutoff_parameters(random_seq_cutoff_params)
for i in range(caches_count):
for j in range(cores_per_cache):
check_seqcutoff_parameters(
check_seq_cutoff_parameters(
cores[i][j], seqcutoff_params[i * cores_per_cache + j]
cores[i][j], seq_cutoff_params[i * cores_per_cache + j]
)

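The comprehension added in the core-setup step builds a caches_count x cores_per_cache grid of cores while the flat partition list is indexed as i * cores_per_cache + j. A runnable illustration of that indexing, with example counts rather than the module's constants:

    caches_count, cores_per_cache = 2, 4  # example values for illustration
    for i in range(caches_count):
        for j in range(cores_per_cache):
            flat_index = i * cores_per_cache + j
            # cache 0 consumes partitions 0..3, cache 1 consumes partitions 4..7
            print(f"cache {i}, core {j} -> core_dev.partitions[{flat_index}]")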
@ -130,13 +146,25 @@ def test_set_get_cleaning_params(cache_mode, cleaning_policy):
"""

with TestRun.step("Partition cache and core devices"):
cache_dev, core_dev = storage_prepare()
cache_dev = TestRun.disks["cache"]
cache_parts = [Size(1, Unit.GibiByte)] * caches_count
cache_dev.create_partitions(cache_parts)

core_dev = TestRun.disks["core"]
core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
core_dev.create_partitions(core_parts)

with TestRun.step(
f"Start {caches_count} caches in {cache_mode} cache mode "
f"and add {cores_per_cache} cores per cache"
):
caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
caches = [
casadm.start_cache(part, cache_mode, force=True) for part in cache_dev.partitions
]

for i in range(caches_count):
for j in range(cores_per_cache):
caches[i].add_core(core_dev.partitions[i * cores_per_cache + j])

with TestRun.step(f"Set cleaning policy to {cleaning_policy}"):
if cleaning_policy != CleaningPolicy.DEFAULT:
@ -205,33 +233,7 @@ def test_set_get_cleaning_params(cache_mode, cleaning_policy):
)


def storage_prepare():
def new_seq_cutoff_parameters_random_values():
cache_dev = TestRun.disks["cache"]
cache_parts = [Size(1, Unit.GibiByte)] * caches_count
cache_dev.create_partitions(cache_parts)
core_dev = TestRun.disks["core"]
core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
core_dev.create_partitions(core_parts)

return cache_dev, core_dev


def cache_prepare(cache_mode, cache_dev, core_dev):
caches = []
for i in range(caches_count):
caches.append(
casadm.start_cache(cache_dev.partitions[i], cache_mode, force=True)
)
cores = [[] for i in range(caches_count)]
for i in range(caches_count):
for j in range(cores_per_cache):
core_partition_nr = i * cores_per_cache + j
cores[i].append(caches[i].add_core(core_dev.partitions[core_partition_nr]))

return caches, cores


def new_seqcutoff_parameters_random_values():
return SeqCutOffParameters(
threshold=Size(random.randrange(1, 1000000), Unit.KibiByte),
policy=random.choice(list(SeqCutOffPolicy)),
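For reference, the random threshold drawn here stays inside the valid range asserted earlier in this PR (the "1-4194181" error message checked in test_seq_cutoff_set_invalid_threshold); a quick self-check:

    import random

    # randrange(1, 1000000) yields 1..999999 KiB, a strict subset of the
    # 1..4194181 KiB range quoted in the CLI validation test.
    assert all(1 <= random.randrange(1, 1000000) <= 4194181 for _ in range(1000))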
@ -275,27 +277,27 @@ def new_cleaning_parameters_random_values(cleaning_policy):
return cleaning_params


def check_seqcutoff_parameters(core, seqcutoff_params):
def check_seq_cutoff_parameters(core, seq_cutoff_params):
current_seqcutoff_params = core.get_seq_cut_off_parameters()
current_seq_cutoff_params = core.get_seq_cut_off_parameters()
failed_params = ""
if current_seqcutoff_params.threshold != seqcutoff_params.threshold:
if current_seq_cutoff_params.threshold != seq_cutoff_params.threshold:
failed_params += (
f"Threshold is {current_seqcutoff_params.threshold}, "
f"Threshold is {current_seq_cutoff_params.threshold}, "
f"should be {seqcutoff_params.threshold}\n"
f"should be {seq_cutoff_params.threshold}\n"
)
if current_seqcutoff_params.policy != seqcutoff_params.policy:
if current_seq_cutoff_params.policy != seq_cutoff_params.policy:
failed_params += (
f"Policy is {current_seqcutoff_params.policy}, "
f"Policy is {current_seq_cutoff_params.policy}, "
f"should be {seqcutoff_params.policy}\n"
f"should be {seq_cutoff_params.policy}\n"
)
if current_seqcutoff_params.promotion_count != seqcutoff_params.promotion_count:
if current_seq_cutoff_params.promotion_count != seq_cutoff_params.promotion_count:
failed_params += (
f"Promotion count is {current_seqcutoff_params.promotion_count}, "
f"Promotion count is {current_seq_cutoff_params.promotion_count}, "
f"should be {seqcutoff_params.promotion_count}\n"
f"should be {seq_cutoff_params.promotion_count}\n"
)
if failed_params:
TestRun.LOGGER.error(
f"Sequential cut-off parameters are not correct "
f"Sequential cutoff parameters are not correct "
f"for {core.path}:\n{failed_params}"
)

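The checker above, like its cleaning-policy counterpart below, accumulates every mismatch into one message rather than failing on the first difference, so a single run reveals all wrong fields. The bare pattern, reduced to a generic sketch:

    def check_fields(actual, expected, fields):
        # Compare all fields first, then report once.
        failed = ""
        for name in fields:
            got, want = getattr(actual, name), getattr(expected, name)
            if got != want:
                failed += f"{name} is {got}, should be {want}\n"
        if failed:
            raise AssertionError(f"Parameters are not correct:\n{failed}")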
@ -306,12 +308,12 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
failed_params = ""
if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_cleaning_params.wake_up_time}, "
f"Wake up time is {current_cleaning_params.wake_up_time}, "
f"should be {cleaning_params.wake_up_time}\n"
)
if current_cleaning_params.staleness_time != cleaning_params.staleness_time:
failed_params += (
f"Staleness Time is {current_cleaning_params.staleness_time}, "
f"Staleness time is {current_cleaning_params.staleness_time}, "
f"should be {cleaning_params.staleness_time}\n"
)
if (
@ -319,7 +321,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.flush_max_buffers
):
failed_params += (
f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, "
f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
f"should be {cleaning_params.flush_max_buffers}\n"
)
if (
@ -327,7 +329,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.activity_threshold
):
failed_params += (
f"Activity Threshold is {current_cleaning_params.activity_threshold}, "
f"Activity threshold is {current_cleaning_params.activity_threshold}, "
f"should be {cleaning_params.activity_threshold}\n"
)
if failed_params:
@ -341,7 +343,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
failed_params = ""
if current_cleaning_params.wake_up_time != cleaning_params.wake_up_time:
failed_params += (
f"Wake Up time is {current_cleaning_params.wake_up_time}, "
f"Wake up time is {current_cleaning_params.wake_up_time}, "
f"should be {cleaning_params.wake_up_time}\n"
)
if (
@ -349,7 +351,7 @@ def check_cleaning_parameters(cache, cleaning_policy, cleaning_params):
!= cleaning_params.flush_max_buffers
):
failed_params += (
f"Flush Max Buffers is {current_cleaning_params.flush_max_buffers}, "
f"Flush max buffers is {current_cleaning_params.flush_max_buffers}, "
f"should be {cleaning_params.flush_max_buffers}\n"
)
if failed_params:
@ -1,6 +1,6 @@
#
# Copyright(c) 2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import time
@ -25,8 +25,7 @@ from type_def.size import Size, Unit
def test_zero_metadata_negative_cases():
"""
title: Test for '--zero-metadata' negative cases.
description: |
description: Test for '--zero-metadata' scenarios with expected failures.
Test for '--zero-metadata' scenarios with expected failures.
pass_criteria:
- Zeroing metadata without '--force' failed when run on cache.
- Zeroing metadata with '--force' failed when run on cache.
@ -34,10 +33,14 @@ def test_zero_metadata_negative_cases():
- Load cache command failed after successfully zeroing metadata on the cache device.
"""
with TestRun.step("Prepare cache and core devices."):
cache_dev, core_dev, cache_disk = prepare_devices()
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(100, Unit.MebiByte)])
cache_dev = cache_disk.partitions[0]
core_disk = TestRun.disks['core']
core_disk.create_partitions([Size(5, Unit.GibiByte)])

with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, force=True)
casadm.start_cache(cache_dev, force=True)

with TestRun.step("Try to zero metadata and validate error message."):
try:
@ -75,7 +78,7 @@ def test_zero_metadata_negative_cases():

with TestRun.step("Load cache."):
try:
cache = casadm.load_cache(cache_dev)
casadm.load_cache(cache_dev)
TestRun.LOGGER.error("Loading cache should fail.")
except CmdException:
TestRun.LOGGER.info("Loading cache failed as expected.")
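A side note on the pattern visible in the hunk above: commands that are expected to fail are wrapped so that success is the error path. Reduced to its skeleton, with CmdException and the logger coming from the test framework as imported in these files:

    try:
        casadm.load_cache(cache_dev)  # must refuse a device with zeroed metadata
        TestRun.LOGGER.error("Loading cache should fail.")
    except CmdException:
        TestRun.LOGGER.info("Loading cache failed as expected.")

The same hunks also drop `cache =` assignments whose result was never read; the calls remain for their side effects only.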
@@ -87,15 +90,18 @@
 def test_zero_metadata_filesystem(filesystem):
     """
     title: Test for '--zero-metadata' and filesystem.
-    description: |
-        Test for '--zero-metadata' on drive with filesystem.
+    description: Test for '--zero-metadata' on drive with filesystem.
     pass_criteria:
       - Zeroing metadata on device with filesystem failed and not removed filesystem.
       - Zeroing metadata on mounted device failed.
     """
     mount_point = "/mnt"
     with TestRun.step("Prepare devices."):
-        cache_dev, core_disk, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])

     with TestRun.step("Create filesystem on core device."):
         core_disk.create_filesystem(filesystem)
@@ -141,7 +147,11 @@ def test_zero_metadata_dirty_data():
       - Cache started successfully after zeroing metadata on cache with dirty data.
     """
     with TestRun.step("Prepare cache and core devices."):
-        cache_dev, core_disk, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])

     with TestRun.step("Start cache."):
         cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
@@ -165,7 +175,7 @@ def test_zero_metadata_dirty_data():

     with TestRun.step("Start cache (expect to fail)."):
         try:
-            cache = casadm.start_cache(cache_dev, CacheMode.WB)
+            casadm.start_cache(cache_dev, CacheMode.WB)
         except CmdException:
             TestRun.LOGGER.info("Start cache failed as expected.")

@@ -186,7 +196,7 @@ def test_zero_metadata_dirty_data():

     with TestRun.step("Start cache without 'force' option."):
         try:
-            cache = casadm.start_cache(cache_dev, CacheMode.WB)
+            casadm.start_cache(cache_dev, CacheMode.WB)
             TestRun.LOGGER.info("Cache started successfully.")
         except CmdException:
             TestRun.LOGGER.error("Start cache failed.")
@@ -206,11 +216,15 @@ def test_zero_metadata_dirty_shutdown():
       - Cache started successfully after dirty shutdown and zeroing metadata on cache.
     """
     with TestRun.step("Prepare cache and core devices."):
-        cache_dev, core_disk, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])

     with TestRun.step("Start cache."):
         cache = casadm.start_cache(cache_dev, CacheMode.WT, force=True)
-        core = cache.add_core(core_disk)
+        cache.add_core(core_disk)

     with TestRun.step("Unplug cache device."):
         cache_disk.unplug()
@@ -227,7 +241,7 @@ def test_zero_metadata_dirty_shutdown():

     with TestRun.step("Start cache (expect to fail)."):
         try:
-            cache = casadm.start_cache(cache_dev, CacheMode.WT)
+            casadm.start_cache(cache_dev, CacheMode.WT)
             TestRun.LOGGER.error("Starting cache should fail!")
         except CmdException:
             TestRun.LOGGER.info("Start cache failed as expected.")
@@ -249,17 +263,7 @@ def test_zero_metadata_dirty_shutdown():

     with TestRun.step("Start cache."):
         try:
-            cache = casadm.start_cache(cache_dev, CacheMode.WT)
+            casadm.start_cache(cache_dev, CacheMode.WT)
             TestRun.LOGGER.info("Cache started successfully.")
         except CmdException:
             TestRun.LOGGER.error("Start cache failed.")
-
-
-def prepare_devices():
-    cache_disk = TestRun.disks['cache']
-    cache_disk.create_partitions([Size(100, Unit.MebiByte)])
-    cache_part = cache_disk.partitions[0]
-    core_disk = TestRun.disks['core']
-    core_disk.create_partitions([Size(5, Unit.GibiByte)])
-
-    return cache_part, core_disk, cache_disk