Minor test description and names refactor
Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -65,10 +65,10 @@ def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy):
         cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
         set_cleaning_policy_params(cache, cleaning_policy)

-    with TestRun.step("Check for running CAS cleaner"):
+    with TestRun.step("Check for running cleaner process"):
         output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
         if output.exit_code != 0:
-            TestRun.fail("CAS cleaner process is not running!")
+            TestRun.fail("Cleaner process is not running!")

     with TestRun.step(f"Add {cores_count} cores to the cache"):
         cores = [cache.add_core(partition) for partition in core_dev.partitions]
@@ -133,10 +133,10 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
         cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
         set_cleaning_policy_params(cache, cleaning_policy)

-    with TestRun.step("Check for running CAS cleaner"):
+    with TestRun.step("Check for running cleaner process"):
         output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
         if output.exit_code != 0:
-            TestRun.fail("CAS cleaner process is not running!")
+            TestRun.fail("Cleaner process is not running!")

     with TestRun.step(f"Add {cores_count} cores to the cache"):
         cores = [cache.add_core(partition) for partition in core_dev.partitions]
@@ -193,12 +193,12 @@ def set_cleaning_policy_params(cache, cleaning_policy):

             if current_acp_params.wake_up_time != acp_params.wake_up_time:
                 failed_params += (
-                    f"Wake Up time is {current_acp_params.wake_up_time}, "
+                    f"Wake up time is {current_acp_params.wake_up_time}, "
                     f"should be {acp_params.wake_up_time}\n"
                 )
             if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
                 failed_params += (
-                    f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
+                    f"Flush max buffers is {current_acp_params.flush_max_buffers}, "
                     f"should be {acp_params.flush_max_buffers}\n"
                 )
             TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
@@ -215,22 +215,22 @@ def set_cleaning_policy_params(cache, cleaning_policy):
             failed_params = ""
             if current_alru_params.wake_up_time != alru_params.wake_up_time:
                 failed_params += (
-                    f"Wake Up time is {current_alru_params.wake_up_time}, "
+                    f"Wake up time is {current_alru_params.wake_up_time}, "
                     f"should be {alru_params.wake_up_time}\n"
                 )
             if current_alru_params.staleness_time != alru_params.staleness_time:
                 failed_params += (
-                    f"Staleness Time is {current_alru_params.staleness_time}, "
+                    f"Staleness time is {current_alru_params.staleness_time}, "
                     f"should be {alru_params.staleness_time}\n"
                 )
             if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
                 failed_params += (
-                    f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
+                    f"Flush max buffers is {current_alru_params.flush_max_buffers}, "
                     f"should be {alru_params.flush_max_buffers}\n"
                 )
             if current_alru_params.activity_threshold != alru_params.activity_threshold:
                 failed_params += (
-                    f"Activity Threshold is {current_alru_params.activity_threshold}, "
+                    f"Activity threshold is {current_alru_params.activity_threshold}, "
                     f"should be {alru_params.activity_threshold}\n"
                 )
             TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
@@ -245,9 +245,9 @@ def check_cleaning_policy_operation(
         case CleaningPolicy.alru:
             if core_writes_before_wait_for_cleaning != Size.zero():
                 TestRun.LOGGER.error(
-                    "CAS cleaner started to clean dirty data right after IO! "
+                    "Cleaner process started to clean dirty data right after I/O! "
                     "According to ALRU parameters set in this test cleaner should "
-                    "wait 10 seconds after IO before cleaning dirty data"
+                    "wait 10 seconds after I/O before cleaning dirty data"
                 )
             if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
                 TestRun.LOGGER.error(
@@ -266,9 +266,9 @@ def check_cleaning_policy_operation(
         case CleaningPolicy.acp:
             if core_writes_before_wait_for_cleaning == Size.zero():
                 TestRun.LOGGER.error(
-                    "CAS cleaner did not start cleaning dirty data right after IO! "
+                    "Cleaner process did not start cleaning dirty data right after I/O! "
                     "According to ACP policy cleaner should start "
-                    "cleaning dirty data right after IO"
+                    "cleaning dirty data right after I/O"
                 )
             if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
                 TestRun.LOGGER.error(

@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -153,7 +153,7 @@ def test_concurrent_caches_flush(cache_mode: CacheMode):
     """
     title: Flush multiple caches simultaneously.
     description: |
-        CAS should successfully flush multiple caches if there is already other flush in progress.
+        Check for flushing multiple caches if there is already other flush in progress.
     pass_criteria:
       - No system crash.
       - Flush for each cache should finish successfully.

@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -46,7 +46,7 @@ def test_cache_stop_and_load(cache_mode):
     """
     title: Test for stopping and loading cache back with dynamic cache mode switching.
     description: |
-        Validate the ability of the CAS to switch cache modes at runtime and
+        Validate the ability to switch cache modes at runtime and
         check if all of them are working properly after switching and
         after stopping and reloading cache back.
         Check also other parameters consistency after reload.
@@ -138,10 +138,8 @@ def test_cache_stop_and_load(cache_mode):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
     """
-    title: Test for dynamic cache mode switching during IO.
-    description: |
-        Validate the ability of CAS to switch cache modes
-        during working IO on CAS device.
+    title: Test for dynamic cache mode switching during I/O.
+    description: Validate the ability to switch cache modes during I/O on exported object.
     pass_criteria:
       - Cache mode is switched without errors.
     """
@@ -182,7 +180,7 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mo
     ):
         cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)

-    with TestRun.step(f"Check if cache mode has switched properly during IO"):
+    with TestRun.step("Check if cache mode has switched properly during I/O"):
         cache_mode_after_switch = cache.get_cache_mode()
         if cache_mode_after_switch != cache_mode_2:
             TestRun.fail(
@@ -229,7 +227,7 @@ def run_io_and_verify(cache, core, io_mode):
             ):
                 TestRun.fail(
                     "Write-Back cache mode is not working properly! "
-                    "There should be some writes to CAS device and none to the core"
+                    "There should be some writes to exported object and none to the core"
                 )
         case CacheMode.PT:
             if (

@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -18,11 +18,11 @@ def test_remove_multilevel_core():
     """
     title: Test of the ability to remove a core used in a multilevel cache.
     description: |
-        Negative test if OpenCAS does not allow to remove a core when the related exported object
+        Negative test for removing a core when the related exported object
         is used as a core device for another cache instance.
     pass_criteria:
      - No system crash.
-      - OpenCAS does not allow removing a core used in a multilevel cache instance.
+      - Removing a core used in a multilevel cache instance is forbidden.
     """

     with TestRun.step("Prepare cache and core devices"):

@@ -57,7 +57,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):
     with TestRun.step("Disable udev"):
         Udev.disable()

-    with TestRun.step(f"Start cache in Write-Back"):
+    with TestRun.step(f"Start cache in Write-Back cache mode"):
         cache_disk = TestRun.disks["cache"]
         core_disk = TestRun.disks["core"]
         cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
@@ -105,7 +105,7 @@ def test_multistream_seq_cutoff_functional(streams_number, threshold):

     with TestRun.step(
         "Write random number of 4k block requests to each stream and check if all "
-        "writes were sent in pass-through mode"
+        "writes were sent in pass-through"
     ):
         core_statistics_before = core.get_statistics([StatsFilter.req, StatsFilter.blk])
         random.shuffle(offsets)
@@ -170,7 +170,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
     with TestRun.step("Reset core statistics counters"):
         core.reset_counters()

-    with TestRun.step("Run FIO on core device"):
+    with TestRun.step("Run fio on core device"):
         stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
         sequential_streams = streams_seq_rand[0]
         random_streams = streams_seq_rand[1]
@@ -216,7 +216,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
       - No system crash
     """

-    with TestRun.step(f"Disable udev"):
+    with TestRun.step("Disable udev"):
         Udev.disable()

     with TestRun.step("Create filesystem on core device"):
@@ -231,7 +231,7 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
     with TestRun.step("Mount core"):
         core.mount(mount_point=mount_point)

-    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB"):
+    with TestRun.step("Set sequential cutoff policy to always and threshold to 20MiB"):
         core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
         core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))

@@ -279,7 +279,7 @@ def run_dd(target_path, count, seek):
     TestRun.LOGGER.info(f"dd command:\n{dd}")
     output = dd.run()
     if output.exit_code != 0:
-        raise CmdException("Error during IO", output)
+        raise CmdException("Error during I/O", output)


 def check_statistics(stats_before, stats_after, expected_pt_writes, expected_writes_to_cache):

@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -40,15 +40,14 @@ class VerifyType(Enum):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
     """
-    title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
+    title: Functional sequential cutoff test with multiple cores
     description: |
-        Testing if amount of data written to cache after sequential writes for different
-        sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4
-        cores and random IO against the last core, is correct.
+        Test checking if data is cached properly with sequential cutoff "always" policy
+        when sequential and random I/O is running to multiple cores.
     pass_criteria:
       - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off threshold for three first cores.
-      - Amount of written blocks to cache is equal to io size run against last core.
+        with sequential cutoff threshold for three first cores.
+      - Amount of written blocks to cache is equal to I/O size run against last core.
     """

     with TestRun.step("Prepare cache and core devices"):
@@ -76,7 +75,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
         )
         core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]

-    with TestRun.step("Set sequential cut-off parameters for all cores"):
+    with TestRun.step("Set sequential cutoff parameters for all cores"):
         writes_before_list = []
         fio_additional_size = Size(10, Unit.Blocks4096)
         thresholds_list = [
@@ -96,7 +95,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
             core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
             core.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Prepare sequential IO against first three cores"):
+    with TestRun.step("Prepare sequential I/O against first three cores"):
         block_size = Size(4, Unit.KibiByte)
         fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)

@@ -107,7 +106,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
             fio_job.target(core.path)
             writes_before_list.append(core.get_statistics().block_stats.cache.writes)

-    with TestRun.step("Prepare random IO against the last core"):
+    with TestRun.step("Prepare random I/O against the last core"):
         fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
         fio_job.size(io_sizes_list[-1])
         fio_job.read_write(io_type_last)
@@ -117,7 +116,7 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
     with TestRun.step("Run fio against all cores"):
         fio.run()

-    with TestRun.step("Verify writes to cache count after IO"):
+    with TestRun.step("Verify writes to cache count after I/O"):
         margins = [
             min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
             for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
@@ -159,17 +158,16 @@ def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_siz
 @pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cache_line_size):
+def test_seq_cutoff_multi_core_cpu_pinned(cache_mode, io_type, io_type_last, cache_line_size):
     """
-    title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
+    title: Functional sequential cutoff test with multiple cores and cpu pinned I/O
     description: |
-        Testing if amount of data written to cache after sequential writes for different
-        sequential cut-off thresholds on each core, while running sequential IO, pinned,
-        on 3 out of 4 cores and random IO against the last core, is correct.
+        Test checking if data is cached properly with sequential cutoff "always" policy
+        when sequential and random cpu pinned I/O is running to multiple cores.
     pass_criteria:
       - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off threshold for three first cores.
-      - Amount of written blocks to cache is equal to io size run against last core.
+        with sequential cutoff threshold for three first cores.
+      - Amount of written blocks to cache is equal to I/O size run against last core.
     """

     with TestRun.step("Partition cache and core devices"):
@@ -198,7 +196,7 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
         )
         core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]

-    with TestRun.step(f"Set sequential cut-off parameters for all cores"):
+    with TestRun.step("Set sequential cutoff parameters for all cores"):
         writes_before_list = []
         fio_additional_size = Size(10, Unit.Blocks4096)
         thresholds_list = [
@@ -218,7 +216,9 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
             core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
             core.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Prepare sequential IO against first three cores"):
+    with TestRun.step(
+        "Prepare sequential I/O against first three cores and random I/O against the last one"
+    ):
         fio = (
             Fio()
             .create_command()
@@ -244,10 +244,10 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
         fio_job.target(core_list[-1].path)
         writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)

-    with TestRun.step("Running IO against all cores"):
+    with TestRun.step("Running I/O against all cores"):
         fio.run()

-    with TestRun.step("Verifying writes to cache count after IO"):
+    with TestRun.step("Verifying writes to cache count after I/O"):
         for core, writes, threshold, io_size in zip(
             core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
         ):
@@ -282,16 +282,14 @@ def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cach
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
     """
-    title: Sequential cut-off tests for writes and reads for 'never', 'always' and 'full' policies
+    title: Functional test for sequential cutoff threshold parameter
     description: |
-        Testing if amount of data written to cache after sequential writes and reads for different
-        sequential cut-off policies with cache configured with different cache line size
-        is valid for sequential cut-off threshold parameter, assuming that cache occupancy
-        doesn't reach 100% during test.
+        Check if data is cached properly according to sequential cutoff policy and
+        threshold parameter
     pass_criteria:
-      - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off parameter in case of 'always' policy.
-      - Amount of written blocks to cache is at least equal io size in case of 'never' and 'full'
+      - Amount of blocks written to cache is less than or equal to amount set
+        with sequential cutoff parameter in case of 'always' policy.
+      - Amount of blocks written to cache is at least equal to io size in case of 'never' and 'full'
         policy.
     """

@@ -326,13 +324,13 @@ def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
         )
         io_size = (threshold + fio_additional_size).align_down(0x1000)

-    with TestRun.step(f"Setting cache sequential cut off policy mode to {policy}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy mode to {policy}"):
         cache.set_seq_cutoff_policy(policy)

-    with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
         cache.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Prepare sequential IO against core"):
+    with TestRun.step("Prepare sequential I/O against core"):
         sync()
         writes_before = core.get_statistics().block_stats.cache.writes
         fio = (
@@ -364,16 +362,15 @@
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
     """
-    title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
+    title: Functional test for sequential cutoff threshold parameter and 'full' policy
     description: |
-        Testing if amount of data written to cache after sequential io against fully occupied
-        cache for 'full' sequential cut-off policy with cache configured with different cache
-        line sizes is valid for sequential cut-off threshold parameter.
+        Check if data is cached properly according to sequential cutoff 'full' policy and given
+        threshold parameter
     pass_criteria:
       - Amount of written blocks to cache is big enough to fill cache when 'never' sequential
-        cut-off policy is set
+        cutoff policy is set
       - Amount of written blocks to cache is less or equal than amount set
-        with sequential cut-off parameter in case of 'full' policy.
+        with sequential cutoff parameter in case of 'full' policy.
     """

     with TestRun.step("Partition cache and core devices"):
@@ -407,10 +404,10 @@
         )
         io_size = (threshold + fio_additional_size).align_down(0x1000)

-    with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.never}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.never}"):
         cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

-    with TestRun.step("Prepare sequential IO against core"):
+    with TestRun.step("Prepare sequential I/O against core"):
         sync()
         fio = (
             Fio()
@@ -432,13 +429,13 @@ def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
             f"Cache occupancy is too small: {occupancy_percentage}, expected at least 95%"
         )

-    with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.full}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy mode to {SeqCutOffPolicy.full}"):
         cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)

-    with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
+    with TestRun.step(f"Setting cache sequential cutoff policy threshold to {threshold}"):
         cache.set_seq_cutoff_threshold(threshold)

-    with TestRun.step(f"Running sequential IO ({io_dir})"):
+    with TestRun.step(f"Running sequential I/O ({io_dir})"):
         sync()
         writes_before = core.get_statistics().block_stats.cache.writes
         fio = (