Add separate steps for preparing devices, fix indent and move constants
Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
parent ba7d907775
commit 476f62b2db
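The same pattern is applied across the touched tests: the shared prepare()/storage_prepare()/prepare_devices() helpers are dropped, constants move to the top of each test, and device preparation becomes its own explicit TestRun.step. A minimal sketch of the target shape, assuming the test-framework API used in the hunks below (the test name, the chosen sizes and the Size/Unit import path are illustrative, not taken from any specific file in this change):

    from core.test_run import TestRun
    from api.cas import casadm
    from test_utils.size import Size, Unit  # assumed import path for Size/Unit


    def test_example():
        # Constants live at the top of the test instead of inside a step
        cache_id = 1

        with TestRun.step("Prepare cache and core devices"):
            cache_device = TestRun.disks["cache"]
            core_device = TestRun.disks["core"]

            cache_device.create_partitions([Size(500, Unit.MebiByte)])
            core_device.create_partitions([Size(1, Unit.GibiByte)])

            cache_part = cache_device.partitions[0]
            core_part = core_device.partitions[0]

        with TestRun.step("Start cache and add core"):
            cache = casadm.start_cache(cache_part, cache_id=cache_id, force=True)
            core = cache.add_core(core_dev=core_part)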
@@ -69,7 +69,6 @@ def test_negative_start_cache():
 
     with TestRun.step("Prepare cache device"):
         cache_dev = TestRun.disks["cache"]
 
         cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 2)
 
         cache_dev_1 = cache_dev.partitions[0]
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
@@ -219,9 +219,11 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mo
     with TestRun.step("Disable udev"):
         Udev.disable()
 
-    with TestRun.step("Create filesystem on core device"):
+    with TestRun.step("Prepare cache and core devices"):
         cache_disk = TestRun.disks["cache"]
         core_disk = TestRun.disks["core"]
 
+    with TestRun.step("Create filesystem on core device"):
         core_disk.create_filesystem(filesystem)
 
     with TestRun.step("Start cache and add core"):
@@ -30,7 +30,7 @@ def test_ci_read(cache_mode):
       - Reads are cached
     """
 
-    with TestRun.step("Prepare partitions"):
+    with TestRun.step("Prepare cache and core devices"):
         cache_device = TestRun.disks["cache"]
         core_device = TestRun.disks["core"]
 
@@ -104,7 +104,7 @@ def test_ci_write_around_write():
       - Writes are not cached
       - After inserting writes to core, data is read from core and not from cache
     """
-    with TestRun.step("Prepare partitions"):
+    with TestRun.step("Prepare cache and core devices"):
         cache_device = TestRun.disks["cache"]
         core_device = TestRun.disks["core"]
 
@@ -216,7 +216,7 @@ def test_ci_write_through_write():
       - Writes are inserted to cache and core
       - Reads are not cached
     """
-    with TestRun.step("Prepare partitions"):
+    with TestRun.step("Prepare cache and core devices"):
         cache_device = TestRun.disks["cache"]
         core_device = TestRun.disks["core"]
 
@@ -1,126 +0,0 @@
-#
-# Copyright(c) 2020-2022 Intel Corporation
-# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-import re
-import pytest
-
-from api.cas import casadm
-from api.cas.casadm_params import OutputFormat
-from api.cas.cli_help_messages import *
-from api.cas.cli_messages import check_stderr_msg, check_stdout_msg
-from core.test_run import TestRun
-
-
-@pytest.mark.parametrize("shortcut", [True, False])
-def test_cli_help(shortcut):
-    """
-    title: Test for 'help' command.
-    description: Test if help for commands displays correct output.
-    pass_criteria:
-      - Proper help displays for every command.
-    """
-    TestRun.LOGGER.info("Run 'help' for every 'casadm' command.")
-    output = casadm.help(shortcut)
-    check_stdout_msg(output, casadm_help)
-
-    output = TestRun.executor.run("casadm" + (" -S" if shortcut else " --start-cache")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, start_cache_help)
-
-    output = TestRun.executor.run("casadm" + (" -T" if shortcut else " --stop-cache")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, stop_cache_help)
-
-    output = TestRun.executor.run("casadm" + (" -X" if shortcut else " --set-param")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, set_params_help)
-
-    output = TestRun.executor.run("casadm" + (" -G" if shortcut else " --get-param")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, get_params_help)
-
-    output = TestRun.executor.run("casadm" + (" -Q" if shortcut else " --set-cache-mode")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, set_cache_mode_help)
-
-    output = TestRun.executor.run("casadm" + (" -A" if shortcut else " --add-core")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, add_core_help)
-
-    output = TestRun.executor.run("casadm" + (" -R" if shortcut else " --remove-core")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, remove_core_help)
-
-    output = TestRun.executor.run("casadm" + " --remove-detached"
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, remove_detached_help)
-
-    output = TestRun.executor.run("casadm" + (" -L" if shortcut else " --list-caches")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, list_caches_help)
-
-    output = TestRun.executor.run("casadm" + (" -P" if shortcut else " --stats")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, stats_help)
-
-    output = TestRun.executor.run("casadm" + (" -Z" if shortcut else " --reset-counters")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, reset_counters_help)
-
-    output = TestRun.executor.run("casadm" + (" -F" if shortcut else " --flush-cache")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, flush_cache_help)
-
-    output = TestRun.executor.run("casadm" + (" -C" if shortcut else " --io-class")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, ioclass_help)
-
-    output = TestRun.executor.run("casadm" + (" -V" if shortcut else " --version")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, version_help)
-
-    output = TestRun.executor.run("casadm" + (" -H" if shortcut else " --help")
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, help_help)
-
-    output = TestRun.executor.run("casadm" + " --standby"
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, standby_help)
-
-    output = TestRun.executor.run("casadm" + " --zero-metadata"
-                                  + (" -H" if shortcut else " --help"))
-    check_stdout_msg(output, zero_metadata_help)
-
-    output = TestRun.executor.run("casadm" + (" -Y" if shortcut else " --yell")
-                                  + (" -H" if shortcut else " --help"))
-    check_stderr_msg(output, unrecognized_stderr)
-    check_stdout_msg(output, unrecognized_stdout)
-
-
-@pytest.mark.parametrize("output_format", OutputFormat)
-@pytest.mark.parametrize("shortcut", [True, False])
-def test_cli_version(shortcut, output_format):
-    """
-    title: Test for 'version' command.
-    description: Test if 'version' command displays correct output.
-    pass_criteria:
-      - Proper component names displayed in table with component versions.
-    """
-    TestRun.LOGGER.info("Check version.")
-    output = casadm.print_version(output_format, shortcut).stdout
-    TestRun.LOGGER.info(output)
-    if not names_in_output(output) or not versions_in_output(output):
-        TestRun.fail("'Version' command failed.")
-
-
-def names_in_output(output):
-    return ("CAS Cache Kernel Module" in output
-            and "CAS CLI Utility" in output)
-
-
-def versions_in_output(output):
-    version_pattern = re.compile(r"(\d){2}\.(\d){2}\.(\d)\.(\d){4}.(\S)")
-    return len(version_pattern.findall(output)) == 2
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2025 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
@@ -120,11 +120,12 @@ def test_activate_neg_cli_params():
      -The execution is unsuccessful for all improper argument combinations
      -A proper error message is displayed for unsuccessful executions
     """
+    cache_id = 1
 
     with TestRun.step("Prepare the device for the cache."):
         cache_device = TestRun.disks["cache"]
         cache_device.create_partitions([Size(500, Unit.MebiByte)])
         cache_device = cache_device.partitions[0]
-        cache_id = 1
 
     with TestRun.step("Init standby cache"):
         cache_dev = Device(cache_device.path)
@@ -201,6 +202,8 @@ def test_standby_neg_cli_management():
       - The execution is successful for allowed management commands
       - A proper error message is displayed for unsuccessful executions
     """
+    cache_id = 1
 
     with TestRun.step("Prepare the device for the cache."):
         device = TestRun.disks["cache"]
         device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
@@ -208,7 +211,6 @@ def test_standby_neg_cli_management():
         core_device = device.partitions[1]
 
     with TestRun.step("Prepare the standby instance"):
-        cache_id = 1
         cache = casadm.standby_init(
             cache_dev=cache_device, cache_id=cache_id,
             cache_line_size=CacheLineSize.LINE_32KiB, force=True
@@ -278,13 +280,13 @@ def test_start_neg_cli_flags():
       - The command execution is unsuccessful for commands with mutually exclusive flags
       - A proper error message is displayed
     """
+    cache_id = 1
+    cache_line_size = 32
 
     with TestRun.step("Prepare the device for the cache."):
         cache_device = TestRun.disks["cache"]
         cache_device.create_partitions([Size(500, Unit.MebiByte)])
         cache_device = cache_device.partitions[0]
-        cache_id = 1
-        cache_line_size = 32
 
     with TestRun.step("Try to start standby cache with mutually exclusive parameters"):
         init_required_params = f' --cache-device {cache_device.path}' \
@@ -333,13 +335,13 @@ def test_activate_without_detach():
       - The cache remains in Standby state after unsuccessful activation
       - The cache exported object is present after an unsuccessful activation
     """
+    cache_id = 1
+    cache_exp_obj_name = f"cas-cache-{cache_id}"
 
     with TestRun.step("Prepare the device for the cache."):
         cache_dev = TestRun.disks["cache"]
         cache_dev.create_partitions([Size(500, Unit.MebiByte)])
         cache_dev = cache_dev.partitions[0]
-        cache_id = 1
-        cache_exp_obj_name = f"cas-cache-{cache_id}"
 
     with TestRun.step("Start cache instance."):
         cache = casadm.start_cache(cache_dev=cache_dev, cache_id=cache_id)
@@ -399,6 +401,9 @@ def test_activate_neg_cache_line_size():
       - The cache remains in Standby detached state after an unsuccessful activation
       - A proper error message is displayed
     """
+    cache_id = 1
+    active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
+    cache_exp_obj_name = f"cas-cache-{cache_id}"
 
     with TestRun.step("Prepare cache devices"):
         active_cache_dev = TestRun.disks["active_cache"]
@@ -407,73 +412,69 @@ def test_activate_neg_cache_line_size():
         standby_cache_dev = TestRun.disks["standby_cache"]
         standby_cache_dev.create_partitions([Size(500, Unit.MebiByte)])
         standby_cache_dev = standby_cache_dev.partitions[0]
-        cache_id = 1
-        active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
-        cache_exp_obj_name = f"cas-cache-{cache_id}"
 
     with TestRun.step("Start active cache instance."):
         active_cache = casadm.start_cache(cache_dev=active_cache_dev, cache_id=cache_id,
                                           cache_line_size=active_cls)
 
-    with TestRun.step("Create dump file with cache metadata"):
-        with TestRun.step("Get metadata size"):
-            dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
-            md_size = dmesg.get_metadata_size_on_device(dmesg_out)
+    with TestRun.step("Get metadata size"):
+        dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
+        md_size = dmesg.get_metadata_size_on_device(dmesg_out)
 
     with TestRun.step("Dump the metadata of the cache"):
         dump_file_path = "/tmp/test_activate_corrupted.dump"
         md_dump = File(dump_file_path)
         md_dump.remove(force=True, ignore_errors=True)
         dd_count = int(md_size / Size(1, Unit.MebiByte)) + 1
         (
             Dd().input(active_cache_dev.path)
             .output(md_dump.full_path)
             .block_size(Size(1, Unit.MebiByte))
            .count(dd_count)
            .run()
         )
         md_dump.refresh_item()
 
     with TestRun.step("Stop cache instance."):
         active_cache.stop()
 
     with TestRun.step("Start standby cache instance."):
         standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
                                             cache_line_size=standby_cls,
                                             force=True)
 
     with TestRun.step("Verify if the cache exported object appeared in the system"):
         output = TestRun.executor.run_expect_success(
             f"ls -la /dev/ | grep {cache_exp_obj_name}"
         )
         if output.stdout[0] != "b":
             TestRun.fail("The cache exported object is not a block device")
 
     with TestRun.step("Detach standby cache instance"):
         standby_cache.standby_detach()
 
     with TestRun.step(f"Copy changed metadata to the standby instance"):
         Dd().input(md_dump.full_path).output(standby_cache_dev.path).run()
         sync()
 
     with TestRun.step("Try to activate cache instance"):
         with pytest.raises(CmdException) as cmdExc:
             output = standby_cache.standby_activate(standby_cache_dev)
         if not check_stderr_msg(output, cache_line_size_mismatch):
             TestRun.LOGGER.error(
                 f'Expected error message in format '
                 f'"{cache_line_size_mismatch[0]}"'
                 f'Got "{output.stderr}" instead.'
             )
         assert "Failed to activate standby cache." in str(cmdExc.value)
 
     with TestRun.step("Verify if cache is in standby detached state after failed activation"):
         cache_status = standby_cache.get_status()
         if cache_status != CacheStatus.standby_detached:
             TestRun.LOGGER.error(
                 f'Expected Cache state: "{CacheStatus.standby.value}" '
                 f'Got "{cache_status.value}" instead.'
             )
 
 
 @pytest.mark.CI
@@ -489,17 +490,18 @@ def test_standby_init_with_preexisting_metadata():
      - initialize cache without force flag fails and informative error message is printed
      - initialize cache with force flag succeeds and passive instance is present in system
     """
+    cache_line_size = CacheLineSize.LINE_32KiB
+    cache_id = 1
 
     with TestRun.step("Prepare device for cache"):
         cache_device = TestRun.disks["cache"]
         cache_device.create_partitions([Size(200, Unit.MebiByte)])
         cache_device = cache_device.partitions[0]
-        cls = CacheLineSize.LINE_32KiB
-        cache_id = 1
 
     with TestRun.step("Start standby cache instance"):
         cache = casadm.standby_init(
             cache_dev=cache_device,
-            cache_line_size=cls,
+            cache_line_size=cache_line_size,
             cache_id=cache_id,
             force=True,
         )
@@ -512,7 +514,7 @@ def test_standby_init_with_preexisting_metadata():
             standby_init_cmd(
                 cache_dev=cache_device.path,
                 cache_id=str(cache_id),
-                cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)),
+                cache_line_size=str(int(cache_line_size.value.value / Unit.KibiByte.value)),
             )
         )
         if not check_stderr_msg(output, start_cache_with_existing_metadata):
@@ -524,7 +526,7 @@ def test_standby_init_with_preexisting_metadata():
     with TestRun.step("Try initialize cache with force flag"):
         casadm.standby_init(
             cache_dev=cache_device,
-            cache_line_size=cls,
+            cache_line_size=cache_line_size,
             cache_id=cache_id,
             force=True,
         )
@@ -549,12 +551,13 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
      - initialize cache without force flag fails and informative error message is printed
      - initialize cache with force flag succeeds and passive instance is present in system
     """
+    cache_line_size = CacheLineSize.LINE_32KiB
+    cache_id = 1
 
     with TestRun.step("Prepare device for cache"):
         cache_device = TestRun.disks["cache"]
         cache_device.create_partitions([Size(200, Unit.MebiByte)])
         cache_device = cache_device.partitions[0]
-        cls = CacheLineSize.LINE_32KiB
-        cache_id = 1
 
     with TestRun.step("Create filesystem on cache device partition"):
         cache_device.create_filesystem(filesystem)
@@ -564,7 +567,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
             standby_init_cmd(
                 cache_dev=cache_device.path,
                 cache_id=str(cache_id),
-                cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)),
+                cache_line_size=str(int(cache_line_size.value.value / Unit.KibiByte.value)),
             )
         )
         if not check_stderr_msg(output, standby_init_with_existing_filesystem):
@@ -576,7 +579,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem):
     with TestRun.step("Try initialize cache with force flag"):
         casadm.standby_init(
             cache_dev=cache_device,
-            cache_line_size=cls,
+            cache_line_size=cache_line_size,
             cache_id=cache_id,
             force=True,
         )
@@ -597,9 +600,14 @@ def test_standby_activate_with_corepool():
     description: |
       Activation of standby cache with core taken from core pool
     pass_criteria:
      - During activate metadata on the device match with metadata in DRAM
      - Core is in active state after activate
     """
+    cache_id = 1
+    core_id = 1
+    cache_exp_obj_name = f"cas-cache-{cache_id}"
+    cache_line_size = CacheLineSize.LINE_16KiB
 
     with TestRun.step("Prepare cache and core devices"):
         caches_dev = TestRun.disks["caches"]
         caches_dev.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
@@ -609,13 +617,8 @@ def test_standby_activate_with_corepool():
         core_dev.create_partitions([Size(200, Unit.MebiByte)])
         core_dev = core_dev.partitions[0]
 
-        cache_id = 1
-        core_id = 1
-        cache_exp_obj_name = f"cas-cache-{cache_id}"
-        cls = CacheLineSize.LINE_16KiB
-
     with TestRun.step("Start regular cache instance"):
-        cache = casadm.start_cache(cache_dev=active_cache_dev, cache_line_size=cls,
+        cache = casadm.start_cache(cache_dev=active_cache_dev, cache_line_size=cache_line_size,
                                    cache_id=cache_id)
 
     with TestRun.step("Add core to regular cache instance"):
|
|||||||
|
|
||||||
with TestRun.step("Start standby cache instance."):
|
with TestRun.step("Start standby cache instance."):
|
||||||
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
|
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
|
||||||
cache_line_size=cls,
|
cache_line_size=cache_line_size,
|
||||||
force=True)
|
force=True)
|
||||||
|
|
||||||
with TestRun.step(f"Copy changed metadata to the standby instance"):
|
with TestRun.step(f"Copy changed metadata to the standby instance"):
|
||||||
|
@@ -32,6 +32,8 @@ def test_cleaning_policy():
       - Cache and core are filled with dirty data.
       - After cache and core flush dirty data are cleared.
     """
+    cache_id = 1
 
     with TestRun.step("Prepare devices."):
         cache_disk = TestRun.disks["cache"]
         cache_disk.create_partitions([Size(1, Unit.GibiByte)])
@@ -40,7 +42,8 @@ def test_cleaning_policy():
         core_disk = TestRun.disks["core"]
         core_disk.create_partitions([Size(1, Unit.GibiByte)])
         core_dev = core_disk.partitions[0]
-        cache_id = 1
+
+    with TestRun.step("Disable udev"):
         Udev.disable()
 
     with TestRun.step("Start cache and set cleaning policy to NOP"):
@@ -28,11 +28,22 @@ def test_seq_cutoff_default_params():
      - "Full" shall be default sequential cutoff policy
      - There shall be default 1MiB (1024kiB) value for sequential cutoff threshold
     """
-    with TestRun.step("Test prepare (start cache and add core)"):
-        cache, cores = prepare()
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)])
+
+        cache_part = cache_device.partitions[0]
+        core_part = core_device.partitions[0]
+
+    with TestRun.step("Start cache and add core"):
+        cache = casadm.start_cache(cache_part, force=True)
+        core = cache.add_core(core_dev=core_part)
 
     with TestRun.step("Getting sequential cutoff parameters"):
-        params = cores[0].get_seq_cut_off_parameters()
+        params = core.get_seq_cut_off_parameters()
 
     with TestRun.step("Check if proper sequential cutoff policy is set as a default"):
         if params.policy != SeqCutOffPolicy.DEFAULT:
@@ -59,8 +70,18 @@ def test_seq_cutoff_set_get_policy_core(policy):
      - Sequential cutoff policy obtained from get-param command for the second core must be
        proper default value
     """
-    with TestRun.step("Test prepare (start cache and add 2 cores)"):
-        cache, cores = prepare(cores_count=2)
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * 2)
+
+        cache_part = cache_device.partitions[0]
+
+    with TestRun.step("Start cache and add cores"):
+        cache = casadm.start_cache(cache_part, force=True)
+        cores = [cache.add_core(core_dev=part) for part in core_device.partitions]
 
     with TestRun.step(f"Setting core sequential cutoff policy mode to {policy}"):
         cores[0].set_seq_cutoff_policy(policy)
@@ -91,8 +112,18 @@ def test_seq_cutoff_set_get_policy_cache(policy):
      - Sequential cutoff policy obtained from get-param command for each of 3 cores must be the
        same as the one used in set-param command for cache
     """
-    with TestRun.step("Test prepare (start cache and add 3 cores)"):
-        cache, cores = prepare(cores_count=3)
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * 3)
+
+        cache_part = cache_device.partitions[0]
+
+    with TestRun.step("Start cache and add cores"):
+        cache = casadm.start_cache(cache_part, force=True)
+        cores = [cache.add_core(core_dev=part) for part in core_device.partitions]
 
     with TestRun.step(f"Setting sequential cutoff policy mode {policy} for cache"):
         cache.set_seq_cutoff_policy(policy)
@@ -117,11 +148,21 @@ def test_seq_cutoff_policy_load():
      - Sequential cutoff policy obtained from get-param command after cache load
        must be the same as the one used in set-param command before cache stop
      - Sequential cutoff policy loaded for the last core should be the default one
     """
-    with TestRun.step(f"Test prepare (start cache and add {len(SeqCutOffPolicy) + 1} cores)"):
-        # Create as many cores as many possible policies including default one
-        cache, cores = prepare(cores_count=len(SeqCutOffPolicy) + 1)
-        policies = [policy for policy in SeqCutOffPolicy]
+    policies = [policy for policy in SeqCutOffPolicy]
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * (len(SeqCutOffPolicy) + 1))
+
+        cache_part = cache_device.partitions[0]
+
+    with TestRun.step("Start cache and add cores"):
+        cache = casadm.start_cache(cache_part, force=True)
+        cores = [cache.add_core(core_dev=part) for part in core_device.partitions]
 
     for i, core in TestRun.iteration(
             enumerate(cores[:-1]),
@@ -171,14 +212,26 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
     pass_criteria:
      - Setting invalid sequential cutoff threshold should be blocked
     """
-    with TestRun.step("Test prepare (start cache and add core)"):
-        cache, cores = prepare()
-        _threshold = Size(threshold, Unit.KibiByte)
+    _threshold = Size(threshold, Unit.KibiByte)
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)])
+
+        cache_part = cache_device.partitions[0]
+        core_part = core_device.partitions[0]
+
+    with TestRun.step("Start cache and add core"):
+        cache = casadm.start_cache(cache_part, force=True)
+        core = cache.add_core(core_dev=core_part)
 
     with TestRun.step(f"Setting cache sequential cutoff threshold to out of range value: "
                       f"{_threshold}"):
         command = set_param_cutoff_cmd(
-            cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
+            cache_id=str(cache.cache_id), core_id=str(core.core_id),
             threshold=str(int(_threshold.get_value(Unit.KiloByte))))
         output = TestRun.executor.run_expect_fail(command)
         if "Invalid sequential cutoff threshold, must be in the range 1-4194181"\
@@ -188,7 +241,7 @@ def test_seq_cutoff_set_invalid_threshold(threshold):
     with TestRun.step(f"Setting cache sequential cutoff threshold "
                       f"to value passed as a float"):
         command = set_param_cutoff_cmd(
-            cache_id=str(cache.cache_id), core_id=str(cores[0].core_id),
+            cache_id=str(cache.cache_id), core_id=str(core.core_id),
             threshold=str(_threshold.get_value(Unit.KiloByte)))
         output = TestRun.executor.run_expect_fail(command)
         if "Invalid sequential cutoff threshold, must be a correct unsigned decimal integer"\
@@ -208,18 +261,30 @@ def test_seq_cutoff_set_get_threshold(threshold):
      - Sequential cutoff threshold obtained from get-param command must be the same as
        the one used in set-param command
     """
-    with TestRun.step("Test prepare (start cache and add core)"):
-        cache, cores = prepare()
-        _threshold = Size(threshold, Unit.KibiByte)
+    _threshold = Size(threshold, Unit.KibiByte)
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)])
+
+        cache_part = cache_device.partitions[0]
+        core_part = core_device.partitions[0]
+
+    with TestRun.step("Start cache and add core"):
+        cache = casadm.start_cache(cache_part, force=True)
+        core = cache.add_core(core_dev=core_part)
 
     with TestRun.step(f"Setting cache sequential cutoff threshold to "
                       f"{_threshold}"):
-        cores[0].set_seq_cutoff_threshold(_threshold)
+        core.set_seq_cutoff_threshold(_threshold)
 
     with TestRun.step("Check if proper sequential cutoff threshold was set"):
-        if cores[0].get_seq_cut_off_threshold() != _threshold:
+        if core.get_seq_cut_off_threshold() != _threshold:
             TestRun.fail(f"Wrong sequential cutoff threshold set: "
-                         f"{cores[0].get_seq_cut_off_threshold()} "
+                         f"{core.get_seq_cut_off_threshold()} "
                          f"should be {_threshold}")
 
 
@@ -235,13 +300,25 @@ def test_seq_cutoff_threshold_load(threshold):
      - Sequential cutoff threshold obtained from get-param command after cache load
        must be the same as the one used in set-param command before cache stop
     """
-    with TestRun.step("Test prepare (start cache and add core)"):
-        cache, cores = prepare()
-        _threshold = Size(threshold, Unit.KibiByte)
+    _threshold = Size(threshold, Unit.KibiByte)
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)])
+
+        cache_part = cache_device.partitions[0]
+        core_part = core_device.partitions[0]
+
+    with TestRun.step("Start cache and add core"):
+        cache = casadm.start_cache(cache_part, force=True)
+        core = cache.add_core(core_dev=core_part)
 
     with TestRun.step(f"Setting cache sequential cutoff threshold to "
                       f"{_threshold}"):
-        cores[0].set_seq_cutoff_threshold(_threshold)
+        core.set_seq_cutoff_threshold(_threshold)
 
     with TestRun.step("Stopping cache"):
         cache.stop()
@@ -257,23 +334,3 @@ def test_seq_cutoff_threshold_load(threshold):
             TestRun.fail(f"Wrong sequential cutoff threshold set: "
                          f"{cores_load[0].get_seq_cut_off_threshold()} "
                          f"should be {_threshold}")
-
-
-def prepare(cores_count=1):
-    cache_device = TestRun.disks['cache']
-    core_device = TestRun.disks['core']
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
-    partitions = []
-    for x in range(cores_count):
-        partitions.append(Size(1, Unit.GibiByte))
-
-    core_device.create_partitions(partitions)
-    cache_part = cache_device.partitions[0]
-    core_parts = core_device.partitions
-    TestRun.LOGGER.info("Staring cache")
-    cache = casadm.start_cache(cache_part, force=True)
-    TestRun.LOGGER.info("Adding core devices")
-    core_list = []
-    for core_part in core_parts:
-        core_list.append(cache.add_core(core_dev=core_part))
-    return cache, core_list
@@ -48,13 +48,29 @@ def test_set_get_seq_cutoff_params(cache_mode):
     """
 
     with TestRun.step("Partition cache and core devices"):
-        cache_dev, core_dev = storage_prepare()
+        cache_dev = TestRun.disks["cache"]
+        cache_parts = [Size(1, Unit.GibiByte)] * caches_count
+        cache_dev.create_partitions(cache_parts)
+
+        core_dev = TestRun.disks["core"]
+        core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
+        core_dev.create_partitions(core_parts)
 
     with TestRun.step(
         f"Start {caches_count} caches in {cache_mode} cache mode "
         f"and add {cores_per_cache} cores per cache"
     ):
-        caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
+        caches = [
+            casadm.start_cache(part, cache_mode, force=True) for part in cache_dev.partitions
+        ]
+
+        cores = [
+            [
+                caches[i].add_core(
+                    core_dev.partitions[i * cores_per_cache + j]
+                ) for j in range(cores_per_cache)
+            ] for i in range(caches_count)
+        ]
 
     with TestRun.step("Check sequential cutoff default parameters"):
         default_seq_cutoff_params = SeqCutOffParameters.default_seq_cut_off_params()
@@ -130,13 +146,25 @@ def test_set_get_cleaning_params(cache_mode, cleaning_policy):
     """
 
     with TestRun.step("Partition cache and core devices"):
-        cache_dev, core_dev = storage_prepare()
+        cache_dev = TestRun.disks["cache"]
+        cache_parts = [Size(1, Unit.GibiByte)] * caches_count
+        cache_dev.create_partitions(cache_parts)
+
+        core_dev = TestRun.disks["core"]
+        core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
+        core_dev.create_partitions(core_parts)
 
     with TestRun.step(
         f"Start {caches_count} caches in {cache_mode} cache mode "
         f"and add {cores_per_cache} cores per cache"
     ):
-        caches, cores = cache_prepare(cache_mode, cache_dev, core_dev)
+        caches = [
+            casadm.start_cache(part, cache_mode, force=True) for part in cache_dev.partitions
+        ]
+
+        for i in range(caches_count):
+            for j in range(cores_per_cache):
+                caches[i].add_core(core_dev.partitions[i * cores_per_cache + j])
 
     with TestRun.step(f"Set cleaning policy to {cleaning_policy}"):
         if cleaning_policy != CleaningPolicy.DEFAULT:
@@ -205,32 +233,6 @@ def test_set_get_cleaning_params(cache_mode, cleaning_policy):
             )
 
 
-def storage_prepare():
-    cache_dev = TestRun.disks["cache"]
-    cache_parts = [Size(1, Unit.GibiByte)] * caches_count
-    cache_dev.create_partitions(cache_parts)
-    core_dev = TestRun.disks["core"]
-    core_parts = [Size(2, Unit.GibiByte)] * cores_per_cache * caches_count
-    core_dev.create_partitions(core_parts)
-
-    return cache_dev, core_dev
-
-
-def cache_prepare(cache_mode, cache_dev, core_dev):
-    caches = []
-    for i in range(caches_count):
-        caches.append(
-            casadm.start_cache(cache_dev.partitions[i], cache_mode, force=True)
-        )
-    cores = [[] for i in range(caches_count)]
-    for i in range(caches_count):
-        for j in range(cores_per_cache):
-            core_partition_nr = i * cores_per_cache + j
-            cores[i].append(caches[i].add_core(core_dev.partitions[core_partition_nr]))
-
-    return caches, cores
-
-
 def new_seq_cutoff_parameters_random_values():
     return SeqCutOffParameters(
         threshold=Size(random.randrange(1, 1000000), Unit.KibiByte),
@@ -33,7 +33,11 @@ def test_zero_metadata_negative_cases():
      - Load cache command failed after successfully zeroing metadata on the cache device.
     """
     with TestRun.step("Prepare cache and core devices."):
-        cache_dev, core_dev, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])
 
     with TestRun.step("Start cache."):
         casadm.start_cache(cache_dev, force=True)
@@ -93,7 +97,11 @@ def test_zero_metadata_filesystem(filesystem):
     """
     mount_point = "/mnt"
     with TestRun.step("Prepare devices."):
-        cache_dev, core_disk, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])
 
     with TestRun.step("Create filesystem on core device."):
         core_disk.create_filesystem(filesystem)
@@ -139,7 +147,11 @@ def test_zero_metadata_dirty_data():
      - Cache started successfully after zeroing metadata on cache with dirty data.
     """
     with TestRun.step("Prepare cache and core devices."):
-        cache_dev, core_disk, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])
 
     with TestRun.step("Start cache."):
         cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
@@ -204,7 +216,11 @@ def test_zero_metadata_dirty_shutdown():
      - Cache started successfully after dirty shutdown and zeroing metadata on cache.
     """
     with TestRun.step("Prepare cache and core devices."):
-        cache_dev, core_disk, cache_disk = prepare_devices()
+        cache_disk = TestRun.disks['cache']
+        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks['core']
+        core_disk.create_partitions([Size(5, Unit.GibiByte)])
 
     with TestRun.step("Start cache."):
         cache = casadm.start_cache(cache_dev, CacheMode.WT, force=True)
@@ -251,13 +267,3 @@ def test_zero_metadata_dirty_shutdown():
             TestRun.LOGGER.info("Cache started successfully.")
         except CmdException:
             TestRun.LOGGER.error("Start cache failed.")
-
-
-def prepare_devices():
-    cache_disk = TestRun.disks['cache']
-    cache_disk.create_partitions([Size(100, Unit.MebiByte)])
-    cache_part = cache_disk.partitions[0]
-    core_disk = TestRun.disks['core']
-    core_disk.create_partitions([Size(5, Unit.GibiByte)])
-
-    return cache_part, core_disk, cache_disk