Merge pull request #1496 from Kamoppl/kamilg/update_tests
tests: update tests
commit de16763bec
test/functional/tests/512b/__init__.py (new file, 4 additions)
@@ -0,0 +1,4 @@
+#
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# SPDX-License-Identifier: BSD-3-Clause
+#
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
@@ -13,14 +14,14 @@ from storage_devices.disk import DiskType, DiskTypeSet
 from api.cas.cache_config import CacheMode
 from test_tools import fs_utils
 from test_tools.disk_utils import Filesystem
-from test_utils.filesystem.directory import Directory
-from test_utils.filesystem.file import File
 from test_utils.size import Size, Unit
 from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 
 
 mountpoint = "/tmp/diff_io_size_support_test"
+opencas_ioclass_conf_path = "/etc/opencas/ioclass-config.csv"
+block_sizes = [1, 2, 4, 5, 8, 16, 32, 64, 128]
 
 
 @pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WT])
@@ -29,65 +30,57 @@ mountpoint = "/tmp/diff_io_size_support_test"
 def test_support_different_io_size(cache_mode):
     """
     title: OpenCAS supports different IO sizes
-    description: OpenCAS supports IO of size in rage from 512b to 128K
+    description: |
+        OpenCAS supports IO of size in rage from 512b to 128K
     pass_criteria:
       - No IO errors
     """
-    with TestRun.step("Prepare devices"):
+
+    with TestRun.step("Prepare cache and core devices"):
         cache_disk = TestRun.disks["cache"]
         core_disk = TestRun.disks["core"]
         cache_disk.create_partitions([Size(1, Unit.GibiByte)])
-        cache_disk = cache_disk.partitions[0]
-        core_disk.create_partitions([Size(50, Unit.GibiByte)])
-        core_disk = core_disk.partitions[0]
+        core_disk.create_partitions([Size(45, Unit.GibiByte)])
 
     with TestRun.step("Start cache"):
-        cache = casadm.start_cache(cache_dev=cache_disk, cache_mode=cache_mode, force=True)
-        core = cache.add_core(core_disk)
+        cache = casadm.start_cache(
+            cache_dev=cache_disk.partitions[0], cache_mode=cache_mode, force=True
+        )
+        core = cache.add_core(core_disk.partitions[0])
 
     with TestRun.step("Load the default ioclass config file"):
-        cache.load_io_class("/etc/opencas/ioclass-config.csv")
+        cache.load_io_class(opencas_ioclass_conf_path)
 
     with TestRun.step("Create a filesystem on the core device and mount it"):
-        TestRun.executor.run(f"rm -rf {mountpoint}")
+        fs_utils.remove(path=mountpoint, force=True, recursive=True, ignore_errors=True)
         fs_utils.create_directory(path=mountpoint)
         core.create_filesystem(Filesystem.xfs)
         core.mount(mountpoint)
 
-    with TestRun.step("Run fio with block sizes: 512, 1k, 4k, 5k, 8k, 16k, 32k, 64 and 128k"):
-        bs_list = [
-            Size(512, Unit.Byte),
-            Size(1, Unit.KibiByte),
-            Size(4, Unit.KibiByte),
-            Size(5, Unit.KibiByte),
-            Size(8, Unit.KibiByte),
-            Size(16, Unit.KibiByte),
-            Size(32, Unit.KibiByte),
-            Size(64, Unit.KibiByte),
-            Size(128, Unit.KibiByte),
-        ]
+    with TestRun.step(f"Run fio"):
+        bs_list = [Size(x, Unit.KibiByte) for x in block_sizes]
 
-        fio = Fio().create_command()
-        fio.io_engine(IoEngine.libaio)
-        fio.time_based()
-        fio.do_verify()
-        fio.direct()
-        fio.read_write(ReadWrite.randwrite)
-        fio.run_time(datetime.timedelta(seconds=1200))
-        fio.io_depth(16)
-        fio.verify_pattern(0xABCD)
+        fio = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .time_based()
+            .do_verify()
+            .direct()
+            .read_write(ReadWrite.randwrite)
+            .run_time(datetime.timedelta(seconds=1200))
+            .io_depth(16)
+            .verify_pattern(0xABCD)
+        )
 
         for i, bs in enumerate(bs_list):
             fio_job = fio.add_job()
             fio_job.target(os.path.join(mountpoint, str(bs.value)))
             fio_job.block_size(bs)
             fio_job.file_size(Size((i + 1) * 200, Unit.MebiByte))
 
         fio_output = fio.run()
 
        fio_errors = fio_output[0].total_errors()
        if fio_errors != 0:
            TestRun.fail(f"fio errors: {fio_errors}, should equal 0")
-
-    with TestRun.step("Cleanup"):
-        core.unmount()
-        TestRun.executor.run(f"rm -rf {mountpoint}")
test/functional/tests/basic/test_basic.py (new file, 170 additions)
@@ -0,0 +1,170 @@
+#
+# Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import pytest
+
+from api.cas.cache_config import CacheMode, CacheLineSize
+from api.cas.casadm_params import OutputFormat
+from api.cas.cli import start_cmd
+from core.test_run import TestRun
+from api.cas import casadm
+from api.cas.cli_messages import (
+    check_stderr_msg,
+    start_cache_on_already_used_dev,
+    start_cache_with_existing_id,
+)
+from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
+from test_tools import fs_utils
+from test_tools.dd import Dd
+from test_tools.disk_utils import Filesystem
+from test_utils.filesystem.file import File
+from test_utils.output import CmdException
+from test_utils.size import Size, Unit
+
+version_file_path = r"/var/lib/opencas/cas_version"
+mountpoint = "/mnt"
+
+
+@pytest.mark.CI
+def test_cas_version():
+    """
+    title: Test for CAS version
+    description:
+        Check if CAS print version cmd returns consistent version with version file
+    pass criteria:
+      - casadm version command succeeds
+      - versions from cmd and file in /var/lib/opencas/cas_version are consistent
+    """
+
+    with TestRun.step("Read cas version using casadm cmd"):
+        output = casadm.print_version(output_format=OutputFormat.csv)
+        cmd_version = output.stdout
+        cmd_cas_versions = [version.split(",")[1] for version in cmd_version.split("\n")[1:]]
+
+    with TestRun.step(f"Read cas version from {version_file_path} location"):
+        file_read = fs_utils.read_file(version_file_path).split("\n")
+        file_cas_version = next(
+            (line.split("=")[1] for line in file_read if "CAS_VERSION=" in line)
+        )
+
+    with TestRun.step("Compare cmd and file versions"):
+        if not all(file_cas_version == cmd_cas_version for cmd_cas_version in cmd_cas_versions):
+            TestRun.LOGGER.error(f"Cmd and file versions doesn`t match")
+
+
+@pytest.mark.CI
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
+def test_negative_start_cache():
+    """
+    title: Test start cache negative on cache device
+    description:
+        Check for negative cache start scenarios
+    pass criteria:
+      - Cache start succeeds
+      - Fails to start cache on the same device with another id
+      - Fails to start cache on another partition with the same id
+    """
+
+    with TestRun.step("Prepare cache device"):
+        cache_dev = TestRun.disks["cache"]
+
+        cache_dev.create_partitions([Size(2, Unit.GibiByte)] * 2)
+
+        cache_dev_1 = cache_dev.partitions[0]
+        cache_dev_2 = cache_dev.partitions[1]
+
+    with TestRun.step("Start cache on cache device"):
+        casadm.start_cache(cache_dev=cache_dev_1, force=True)
+
+    with TestRun.step("Start cache on the same device but with another ID"):
+        try:
+            output = TestRun.executor.run(
+                start_cmd(
+                    cache_dev=cache_dev_1.path,
+                    cache_id="2",
+                    force=True,
+                )
+            )
+            TestRun.fail("Two caches started on same device")
+        except CmdException:
+            if not check_stderr_msg(output, start_cache_on_already_used_dev):
+                TestRun.fail(f"Received unexpected error message: {output.stderr}")
+
+    with TestRun.step("Start cache with the same ID on another cache device"):
+        try:
+            output = TestRun.executor.run(
+                start_cmd(
+                    cache_dev=cache_dev_2.path,
+                    cache_id="1",
+                    force=True,
+                )
+            )
+            TestRun.fail("Two caches started with same ID")
+        except CmdException:
+            if not check_stderr_msg(output, start_cache_with_existing_id):
+                TestRun.fail(f"Received unexpected error message: {output.stderr}")
+
+
+@pytest.mark.CI
+@pytest.mark.parametrizex("filesystem", Filesystem)
+@pytest.mark.parametrizex("cache_mode", CacheMode)
+@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+def test_data_integrity(filesystem, cache_mode, cache_line_size):
+    """
+    title: Basic data integrity test
+    description:
+        Check basic data integrity after stopping the cache
+    pass criteria:
+      - System does not crash.
+      - All operations complete successfully.
+      - Data consistency is preserved.
+    """
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks["cache"]
+        core_device = TestRun.disks["core"]
+
+        cache_device.create_partitions([Size(200, Unit.MebiByte)])
+        core_device.create_partitions([Size(100, Unit.MebiByte)])
+
+        cache_part = cache_device.partitions[0]
+        core_part = core_device.partitions[0]
+
+    with TestRun.step("Start cache and add core device"):
+        cache = casadm.start_cache(
+            cache_dev=cache_part, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
+        )
+        core = cache.add_core(core_dev=core_part)
+
+    with TestRun.step("Create filesystem on CAS device and mount it"):
+        core.create_filesystem(filesystem)
+        core.mount(mountpoint)
+
+    with TestRun.step("Create test file and calculate md5 checksum"):
+        (
+            Dd()
+            .input("/dev/urandom")
+            .output(f"{mountpoint}/test_file")
+            .count(1)
+            .block_size(Size(50, Unit.MebiByte))
+            .run()
+        )
+        test_file = File(f"{mountpoint}/test_file")
+        md5_before = test_file.md5sum()
+
+    with TestRun.step("Unmount core"):
+        core.unmount()
+
+    with TestRun.step("Stop cache"):
+        cache.stop()
+
+    with TestRun.step("Mount the core device and check for file"):
+        core_part.mount(mountpoint)
+        md5_after = test_file.md5sum()
+        if md5_before != md5_after:
+            TestRun.fail("md5 checksum mismatch!")
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
@@ -16,21 +17,22 @@ from test_utils.size import Size, Unit
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_start_cache_add_core():
     """
     title: Basic test for starting cache and adding core.
     description: |
         Test for start cache and add core.
     pass_criteria:
       - Cache started successfully.
       - Core added successfully.
     """
-    with TestRun.step("Prepare cache and core devices."):
+    with TestRun.step("Prepare cache and core devices"):
         cache_dev = TestRun.disks["cache"]
-        cache_dev.create_partitions([Size(500, Unit.MebiByte)])
         core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([Size(500, Unit.MebiByte)])
         core_dev.create_partitions([Size(2, Unit.GibiByte)])
 
-    with TestRun.step("Start cache."):
+    with TestRun.step("Start cache"):
         cache = casadm.start_cache(cache_dev.partitions[0], force=True)
 
-    with TestRun.step("Add core."):
-        core = cache.add_core(core_dev.partitions[0])
+    with TestRun.step("Add core"):
+        cache.add_core(core_dev.partitions[0])
test/functional/tests/cache_ops/__init__.py (new file, 4 additions)
@@ -0,0 +1,4 @@
+#
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# SPDX-License-Identifier: BSD-3-Clause
+#
@@ -1,13 +1,12 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
-
 import time
 import pytest
 
-from datetime import timedelta
 from api.cas import casadm
 from api.cas.cache_config import (
     CacheMode,
@@ -19,7 +18,7 @@ from api.cas.cache_config import (
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from core.test_run import TestRun
 from test_utils.size import Size, Unit
-from test_utils.os_utils import Udev, sync
+from test_utils.os_utils import Udev
 from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 
@@ -39,216 +38,201 @@ cas_cleaner_process_name = "cas_cl_"
 @pytest.mark.parametrize("cleaning_policy", CleaningPolicy)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_cleaning_policies_in_write_back(cleaning_policy):
+def test_cleaning_policies_in_write_back(cleaning_policy: CleaningPolicy):
     """
     title: Test for cleaning policy operation in Write-Back cache mode.
     description: |
         Check if ALRU, NOP and ACP cleaning policies preserve their
         parameters when changed and if they flush dirty data properly
         in Write-Back cache mode.
     pass_criteria:
       - Flush parameters preserve their values when changed.
      - Dirty data is flushed or not according to the policy used.
     """
 
-    with TestRun.step("Partition cache and core devices"):
-        cache_dev, core_dev = storage_prepare()
+    with TestRun.step("Prepare cache and core devices"):
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
+        core_dev.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
+
+    with TestRun.step("Disable udev"):
         Udev.disable()
 
-    with TestRun.step(
-        f"Start cache in Write-Back mode with {cleaning_policy} cleaning policy"
-    ):
+    with TestRun.step(f"Start cache in Write-Back mode with {cleaning_policy} cleaning policy"):
         cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WB, force=True)
-        set_cleaning_policy_and_params(cache, cleaning_policy)
+        cache.set_cleaning_policy(cleaning_policy=cleaning_policy)
+        set_cleaning_policy_params(cache, cleaning_policy)
 
     with TestRun.step("Check for running CAS cleaner"):
-        if TestRun.executor.run(f"pgrep {cas_cleaner_process_name}").exit_code != 0:
+        output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
+        if output.exit_code != 0:
             TestRun.fail("CAS cleaner process is not running!")
 
     with TestRun.step(f"Add {cores_count} cores to the cache"):
-        core = []
-        for i in range(cores_count):
-            core.append(cache.add_core(core_dev.partitions[i]))
+        cores = [cache.add_core(partition) for partition in core_dev.partitions]
+
+    with TestRun.step("Run fio"):
+        fio = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .block_size(Size(4, Unit.KibiByte))
+            .size(io_size)
+            .read_write(ReadWrite.randwrite)
+            .direct(True)
+        )
+        for core in cores:
+            fio.add_job().target(core.path)
 
-    with TestRun.step("Run 'fio'"):
-        fio = fio_prepare()
-        for i in range(cores_count):
-            fio.add_job().target(core[i].path)
         fio.run()
         time.sleep(3)
-        core_writes_before_wait_for_cleaning = (
-            cache.get_statistics().block_stats.core.writes
-        )
+        core_writes_before_wait_for_cleaning = cache.get_statistics().block_stats.core.writes
 
     with TestRun.step(f"Wait {time_to_wait} seconds"):
         time.sleep(time_to_wait)
 
     with TestRun.step("Check write statistics for core device"):
-        core_writes_after_wait_for_cleaning = (
-            cache.get_statistics().block_stats.core.writes
-        )
+        core_writes_after_wait_for_cleaning = cache.get_statistics().block_stats.core.writes
         check_cleaning_policy_operation(
             cleaning_policy,
             core_writes_before_wait_for_cleaning,
             core_writes_after_wait_for_cleaning,
         )
 
-    with TestRun.step("Stop all caches"):
-        casadm.stop_all_caches()
-        Udev.enable()
-
 
 @pytest.mark.parametrize("cleaning_policy", CleaningPolicy)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cleaning_policies_in_write_through(cleaning_policy):
     """
     title: Test for cleaning policy operation in Write-Through cache mode.
     description: |
         Check if ALRU, NOP and ACP cleaning policies preserve their
         parameters when changed and if they flush dirty data properly
         in Write-Through cache mode.
     pass_criteria:
       - Flush parameters preserve their values when changed.
       - Dirty data is flushed or not according to the policy used.
     """
 
-    with TestRun.step("Partition cache and core devices"):
-        cache_dev, core_dev = storage_prepare()
+    with TestRun.step("Prepare cache and core devices"):
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
+        core_dev.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
+
+    with TestRun.step("Disable udev"):
         Udev.disable()
 
-    with TestRun.step(
-        f"Start cache in Write-Through mode with {cleaning_policy} cleaning policy"
-    ):
+    with TestRun.step(f"Start cache in Write-Through mode with {cleaning_policy} cleaning policy"):
         cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WT, force=True)
-        set_cleaning_policy_and_params(cache, cleaning_policy)
+        set_cleaning_policy_params(cache, cleaning_policy)
 
     with TestRun.step("Check for running CAS cleaner"):
-        if TestRun.executor.run(f"pgrep {cas_cleaner_process_name}").exit_code != 0:
+        output = TestRun.executor.run(f"pgrep {cas_cleaner_process_name}")
+        if output.exit_code != 0:
             TestRun.fail("CAS cleaner process is not running!")
 
     with TestRun.step(f"Add {cores_count} cores to the cache"):
-        core = []
-        for i in range(cores_count):
-            core.append(cache.add_core(core_dev.partitions[i]))
+        cores = [cache.add_core(partition) for partition in core_dev.partitions]
 
     with TestRun.step("Change cache mode to Write-Back"):
         cache.set_cache_mode(CacheMode.WB)
 
-    with TestRun.step("Run 'fio'"):
-        fio = fio_prepare()
-        for i in range(cores_count):
-            fio.add_job().target(core[i].path)
+    with TestRun.step("Populate cache with dirty data"):
+        fio = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .block_size(Size(4, Unit.KibiByte))
+            .size(io_size)
+            .read_write(ReadWrite.randwrite)
+            .direct(1)
+        )
+        for core in cores:
+            fio.add_job().target(core.path)
+
         fio.run()
-        time.sleep(3)
 
     with TestRun.step("Change cache mode back to Write-Through"):
         cache.set_cache_mode(CacheMode.WT, flush=False)
-        core_writes_before_wait_for_cleaning = (
-            cache.get_statistics().block_stats.core.writes
-        )
+        core_writes_before_wait_for_cleaning = cache.get_statistics().block_stats.core.writes
 
     with TestRun.step(f"Wait {time_to_wait} seconds"):
         time.sleep(time_to_wait)
 
     with TestRun.step("Check write statistics for core device"):
-        core_writes_after_wait_for_cleaning = (
-            cache.get_statistics().block_stats.core.writes
-        )
+        core_writes_after_wait_for_cleaning = cache.get_statistics().block_stats.core.writes
         check_cleaning_policy_operation(
             cleaning_policy,
             core_writes_before_wait_for_cleaning,
             core_writes_after_wait_for_cleaning,
         )
 
-    with TestRun.step("Stop all caches"):
-        casadm.stop_all_caches()
-        Udev.enable()
-
 
-def storage_prepare():
-    cache_dev = TestRun.disks["cache"]
-    cache_dev.create_partitions([Size(1, Unit.GibiByte)])
-    core_dev = TestRun.disks["core"]
-    parts = [Size(2, Unit.GibiByte)] * cores_count
-    core_dev.create_partitions(parts)
-    return cache_dev, core_dev
-
-
-def set_cleaning_policy_and_params(cache, cleaning_policy):
-    if cleaning_policy != CleaningPolicy.DEFAULT:
-        cache.set_cleaning_policy(cleaning_policy)
+def set_cleaning_policy_params(cache, cleaning_policy):
     current_cleaning_policy = cache.get_cleaning_policy()
     if current_cleaning_policy != cleaning_policy:
         TestRun.LOGGER.error(
-            f"Cleaning policy is {current_cleaning_policy}, "
-            f"should be {cleaning_policy}"
+            f"Cleaning policy is {current_cleaning_policy}, should be {cleaning_policy}"
         )
 
-    if cleaning_policy == CleaningPolicy.alru:
-        alru_params = FlushParametersAlru()
-        alru_params.wake_up_time = Time(seconds=10)
-        alru_params.staleness_time = Time(seconds=2)
-        alru_params.flush_max_buffers = 100
-        alru_params.activity_threshold = Time(milliseconds=1000)
-        cache.set_params_alru(alru_params)
-        current_alru_params = cache.get_flush_parameters_alru()
-        if current_alru_params != alru_params:
-            failed_params = ""
-            if current_alru_params.wake_up_time != alru_params.wake_up_time:
-                failed_params += (
-                    f"Wake Up time is {current_alru_params.wake_up_time}, "
-                    f"should be {alru_params.wake_up_time}\n"
-                )
-            if current_alru_params.staleness_time != alru_params.staleness_time:
-                failed_params += (
-                    f"Staleness Time is {current_alru_params.staleness_time}, "
-                    f"should be {alru_params.staleness_time}\n"
-                )
-            if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
-                failed_params += (
-                    f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
-                    f"should be {alru_params.flush_max_buffers}\n"
-                )
-            if current_alru_params.activity_threshold != alru_params.activity_threshold:
-                failed_params += (
-                    f"Activity Threshold is {current_alru_params.activity_threshold}, "
-                    f"should be {alru_params.activity_threshold}\n"
-                )
-            TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
-
-    if cleaning_policy == CleaningPolicy.acp:
-        acp_params = FlushParametersAcp()
-        acp_params.wake_up_time = Time(milliseconds=100)
-        acp_params.flush_max_buffers = 64
-        cache.set_params_acp(acp_params)
-        current_acp_params = cache.get_flush_parameters_acp()
-        if current_acp_params != acp_params:
-            failed_params = ""
-            if current_acp_params.wake_up_time != acp_params.wake_up_time:
-                failed_params += (
-                    f"Wake Up time is {current_acp_params.wake_up_time}, "
-                    f"should be {acp_params.wake_up_time}\n"
-                )
-            if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
-                failed_params += (
-                    f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
-                    f"should be {acp_params.flush_max_buffers}\n"
-                )
-            TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
-
-
-def fio_prepare():
-    fio = (
-        Fio()
-        .create_command()
-        .io_engine(IoEngine.libaio)
-        .block_size(Size(4, Unit.KibiByte))
-        .size(io_size)
-        .read_write(ReadWrite.randwrite)
-        .direct(1)
-    )
-    return fio
+    match cleaning_policy:
+        case CleaningPolicy.acp:
+            acp_params = FlushParametersAcp()
+            acp_params.wake_up_time = Time(milliseconds=100)
+            acp_params.flush_max_buffers = 64
+            cache.set_params_acp(acp_params)
+            current_acp_params = cache.get_flush_parameters_acp()
+            if current_acp_params != acp_params:
+                failed_params = ""
+                if current_acp_params.wake_up_time != acp_params.wake_up_time:
+                    failed_params += (
+                        f"Wake Up time is {current_acp_params.wake_up_time}, "
+                        f"should be {acp_params.wake_up_time}\n"
+                    )
+                if current_acp_params.flush_max_buffers != acp_params.flush_max_buffers:
+                    failed_params += (
+                        f"Flush Max Buffers is {current_acp_params.flush_max_buffers}, "
+                        f"should be {acp_params.flush_max_buffers}\n"
+                    )
+                TestRun.LOGGER.error(f"ACP parameters did not switch properly:\n{failed_params}")
+
+        case CleaningPolicy.alru:
+            alru_params = FlushParametersAlru()
+            alru_params.wake_up_time = Time(seconds=10)
+            alru_params.staleness_time = Time(seconds=2)
+            alru_params.flush_max_buffers = 100
+            alru_params.activity_threshold = Time(milliseconds=1000)
+            cache.set_params_alru(alru_params)
+            current_alru_params = cache.get_flush_parameters_alru()
+            if current_alru_params != alru_params:
+                failed_params = ""
+                if current_alru_params.wake_up_time != alru_params.wake_up_time:
+                    failed_params += (
+                        f"Wake Up time is {current_alru_params.wake_up_time}, "
+                        f"should be {alru_params.wake_up_time}\n"
+                    )
+                if current_alru_params.staleness_time != alru_params.staleness_time:
+                    failed_params += (
+                        f"Staleness Time is {current_alru_params.staleness_time}, "
+                        f"should be {alru_params.staleness_time}\n"
+                    )
+                if current_alru_params.flush_max_buffers != alru_params.flush_max_buffers:
+                    failed_params += (
+                        f"Flush Max Buffers is {current_alru_params.flush_max_buffers}, "
+                        f"should be {alru_params.flush_max_buffers}\n"
+                    )
+                if current_alru_params.activity_threshold != alru_params.activity_threshold:
+                    failed_params += (
+                        f"Activity Threshold is {current_alru_params.activity_threshold}, "
+                        f"should be {alru_params.activity_threshold}\n"
+                    )
+                TestRun.LOGGER.error(f"ALRU parameters did not switch properly:\n{failed_params}")
 
 
 def check_cleaning_policy_operation(
@@ -256,36 +240,37 @@ def check_cleaning_policy_operation(
     core_writes_before_wait_for_cleaning,
     core_writes_after_wait_for_cleaning,
 ):
-    if cleaning_policy == CleaningPolicy.alru:
-        if core_writes_before_wait_for_cleaning.value != 0:
-            TestRun.LOGGER.error(
-                "CAS cleaner started to clean dirty data right after IO! "
-                "According to ALRU parameters set in this test cleaner should "
-                "wait 10 seconds after IO before cleaning dirty data."
-            )
-        if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
-            TestRun.LOGGER.error(
-                "ALRU cleaning policy is not working properly! "
-                "Core writes should increase in time while cleaning dirty data."
-            )
-    if cleaning_policy == CleaningPolicy.nop:
-        if (
-            core_writes_after_wait_for_cleaning.value != 0
-            or core_writes_before_wait_for_cleaning.value != 0
-        ):
-            TestRun.LOGGER.error(
-                "NOP cleaning policy is not working properly! "
-                "There should be no core writes as there is no cleaning of dirty data."
-            )
-    if cleaning_policy == CleaningPolicy.acp:
-        if core_writes_before_wait_for_cleaning.value == 0:
-            TestRun.LOGGER.error(
-                "CAS cleaner did not start cleaning dirty data right after IO! "
-                "According to ACP policy cleaner should start "
-                "cleaning dirty data right after IO."
-            )
-        if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
-            TestRun.LOGGER.error(
-                "ACP cleaning policy is not working properly! "
-                "Core writes should increase in time while cleaning dirty data."
-            )
+    match cleaning_policy:
+        case CleaningPolicy.alru:
+            if core_writes_before_wait_for_cleaning != Size.zero():
+                TestRun.LOGGER.error(
+                    "CAS cleaner started to clean dirty data right after IO! "
+                    "According to ALRU parameters set in this test cleaner should "
+                    "wait 10 seconds after IO before cleaning dirty data"
+                )
+            if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
+                TestRun.LOGGER.error(
+                    "ALRU cleaning policy is not working properly! "
+                    "Core writes should increase in time while cleaning dirty data"
+                )
+        case CleaningPolicy.nop:
+            if (
+                core_writes_after_wait_for_cleaning != Size.zero()
+                or core_writes_before_wait_for_cleaning.value != Size.zero()
+            ):
+                TestRun.LOGGER.error(
+                    "NOP cleaning policy is not working properly! "
+                    "There should be no core writes as there is no cleaning of dirty data"
+                )
+        case CleaningPolicy.acp:
+            if core_writes_before_wait_for_cleaning == Size.zero():
+                TestRun.LOGGER.error(
+                    "CAS cleaner did not start cleaning dirty data right after IO! "
+                    "According to ACP policy cleaner should start "
+                    "cleaning dirty data right after IO"
+                )
+            if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
+                TestRun.LOGGER.error(
+                    "ACP cleaning policy is not working properly! "
+                    "Core writes should increase in time while cleaning dirty data"
+                )
@@ -1,195 +1,221 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
-from time import sleep
-
 import pytest
 
+from time import sleep
+
 from api.cas import casadm, casadm_parser, cli
 from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait, SeqCutOffPolicy
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
-from test_tools.dd import Dd
+from test_tools.fio.fio import Fio
+from test_tools.fio.fio_param import IoEngine, ReadWrite
 from test_utils.output import CmdException
 from test_utils.size import Size, Unit
 
-cache_size = Size(2, Unit.GibiByte)
-caches_number = 3
-
 
 @pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites))
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.hdd4k]))
-def test_concurrent_cores_flush(cache_mode):
+def test_concurrent_cores_flush(cache_mode: CacheMode):
     """
     title: Fail to flush two cores simultaneously.
     description: |
         CAS should return an error on attempt to flush second core if there is already
         one flush in progress.
     pass_criteria:
       - No system crash.
      - First core flushing should finish successfully.
      - It should not be possible to run flushing command on cores within
        the same cache simultaneously.
     """
-    with TestRun.step("Prepare cache and core."):
-        cache_dev = TestRun.disks['cache']
-        cache_dev.create_partitions([cache_size])
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([Size(2, Unit.GibiByte)])
+        core_dev.create_partitions([Size(5, Unit.GibiByte)] * 2)
+
         cache_part = cache_dev.partitions[0]
-        core_dev = TestRun.disks['core']
-        core_dev.create_partitions([cache_size * 2] * 2)
         core_part1 = core_dev.partitions[0]
         core_part2 = core_dev.partitions[1]
 
-    with TestRun.step("Start cache."):
+    with TestRun.step("Start cache"):
         cache = casadm.start_cache(cache_part, cache_mode, force=True)
 
-    with TestRun.step("Disable cleaning and sequential cutoff."):
-        cache.set_cleaning_policy(CleaningPolicy.nop)
-        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
-
-    with TestRun.step(f"Add both core devices to cache."):
+    with TestRun.step(f"Add both core devices to cache"):
         core1 = cache.add_core(core_part1)
         core2 = cache.add_core(core_part2)
 
-    with TestRun.step("Run workload on concurrent cores."):
-        block_size = Size(4, Unit.MebiByte)
-        count = int(cache_size.value / 2 / block_size.value)
+    with TestRun.step("Disable cleaning and sequential cutoff"):
+        cache.set_cleaning_policy(CleaningPolicy.nop)
+        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
 
-        dd_pid = Dd().output(core1.path) \
-            .input("/dev/urandom") \
-            .block_size(block_size) \
-            .count(count) \
-            .run_in_background()
+    with TestRun.step("Run concurrent fio on both cores"):
+        fio_pids = []
+        for core in [core1, core2]:
+            fio = (
+                Fio()
+                .create_command()
+                .io_engine(IoEngine.libaio)
+                .target(core.path)
+                .size(core.size)
+                .block_size(Size(4, Unit.MebiByte))
+                .read_write(ReadWrite.write)
+                .direct(1)
+            )
+            fio_pid = fio.run_in_background()
+            fio_pids.append(fio_pid)
 
-        Dd().output(core2.path) \
-            .input("/dev/urandom") \
-            .block_size(block_size) \
-            .count(count) \
-            .run()
+        for fio_pid in fio_pids:
+            if not TestRun.executor.check_if_process_exists(fio_pid):
+                TestRun.fail("Fio failed to start")
 
-    with TestRun.step("Check if both DD operations finished."):
-        while TestRun.executor.run(f"ls /proc/{dd_pid}").exit_code == 0:
-            sleep(1)
+    with TestRun.step("Wait for fio to finish"):
+        for fio_pid in fio_pids:
+            while TestRun.executor.check_if_process_exists(fio_pid):
+                sleep(1)
 
-    with TestRun.step("Check if both cores contain dirty blocks."):
-        if int(core1.get_dirty_blocks()) == 0:
-            TestRun.fail("The first core does not contain dirty blocks.")
-        if int(core2.get_dirty_blocks()) == 0:
-            TestRun.fail("The second core does not contain dirty blocks.")
-        core2_dirty_blocks_before = int(core2.get_dirty_blocks())
+    with TestRun.step("Check if both cores contain dirty blocks"):
+        if core1.get_dirty_blocks() == Size.zero():
+            TestRun.fail("The first core does not contain dirty blocks")
+        if core2.get_dirty_blocks() == Size.zero():
+            TestRun.fail("The second core does not contain dirty blocks")
+        core2_dirty_blocks_before = core2.get_dirty_blocks()
 
-    with TestRun.step("Start flushing the first core."):
-        TestRun.executor.run_in_background(
+    with TestRun.step("Start flushing the first core in background"):
+        output_pid = TestRun.executor.run_in_background(
             cli.flush_core_cmd(str(cache.cache_id), str(core1.core_id))
         )
+        if not TestRun.executor.check_if_process_exists(output_pid):
+            TestRun.fail("Failed to start core flush in background")
 
-    with TestRun.step("Wait some time and start flushing the second core."):
-        sleep(2)
-        percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
+    with TestRun.step("Wait until flush starts"):
+        while TestRun.executor.check_if_process_exists(output_pid):
+            try:
+                casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
+                break
+            except CmdException:
+                pass
+
+    with TestRun.step(
+        "Wait until first core reach 40% flush and start flush operation on the second core"
+    ):
+        percentage = 0
         while percentage < 40:
             percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
 
         try:
             core2.flush_core()
-            TestRun.fail("The first core is flushing right now so flush attempt of the second core "
-                         "should fail.")
+            TestRun.fail(
+                "The first core is flushing right now so flush attempt of the second core "
+                "should fail"
+            )
         except CmdException:
-            TestRun.LOGGER.info("The first core is flushing right now so the second core's flush "
-                                "fails as expected.")
+            TestRun.LOGGER.info(
+                "The first core is flushing right now so the second core's flush "
+                "fails as expected"
+            )
 
-    with TestRun.step("Wait for the first core to finish flushing."):
+    with TestRun.step("Wait for the first core to finish flushing"):
         try:
-            percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
+            percentage = 0
             while percentage < 100:
                 percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
+                sleep(1)
         except CmdException:
-            TestRun.LOGGER.info("The first core is not flushing dirty data anymore.")
+            TestRun.LOGGER.info("The first core is not flushing dirty data anymore")
 
-    with TestRun.step("Check number of dirty data on both cores."):
-        if int(core1.get_dirty_blocks()) > 0:
-            TestRun.LOGGER.error("The quantity of dirty cache lines on the first core "
-                                 "after completed flush should be zero.")
+    with TestRun.step("Check number of dirty data on both cores"):
+        if core1.get_dirty_blocks() > Size.zero():
+            TestRun.LOGGER.error(
+                "The quantity of dirty cache lines on the first core "
+                "after completed flush should be zero"
+            )
 
-        core2_dirty_blocks_after = int(core2.get_dirty_blocks())
+        core2_dirty_blocks_after = core2.get_dirty_blocks()
         if core2_dirty_blocks_before != core2_dirty_blocks_after:
-            TestRun.LOGGER.error("The quantity of dirty cache lines on the second core "
-                                 "after failed flush should not change.")
-
-    with TestRun.step("Stop cache."):
-        cache.stop()
+            TestRun.LOGGER.error(
+                "The quantity of dirty cache lines on the second core "
+                "after failed flush should not change"
+            )
 
 
 @pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites))
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_concurrent_caches_flush(cache_mode):
+def test_concurrent_caches_flush(cache_mode: CacheMode):
     """
     title: Success to flush two caches simultaneously.
     description: |
         CAS should successfully flush multiple caches if there is already other flush in progress.
     pass_criteria:
       - No system crash.
      - Flush for each cache should finish successfully.
     """
-    with TestRun.step("Prepare caches and cores."):
-        cache_dev = TestRun.disks['cache']
-        cache_dev.create_partitions([cache_size] * caches_number)
-        core_dev = TestRun.disks['core']
-        core_dev.create_partitions([cache_size * 2] * caches_number)
+    caches_number = 3
 
-    with TestRun.step(f"Start {caches_number} caches."):
-        caches = []
-        for part in cache_dev.partitions:
-            caches.append(casadm.start_cache(part, cache_mode, force=True))
+    with TestRun.step("Prepare cache and core devices"):
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
 
-    with TestRun.step("Disable cleaning and sequential cutoff."):
+        cache_dev.create_partitions([Size(2, Unit.GibiByte)] * caches_number)
+        core_dev.create_partitions([Size(2, Unit.GibiByte) * 2] * caches_number)
+
+    with TestRun.step(f"Start {caches_number} caches"):
+        caches = [
+            casadm.start_cache(cache_dev=part, cache_mode=cache_mode, force=True)
+            for part in cache_dev.partitions
+        ]
+
+    with TestRun.step("Disable cleaning and sequential cutoff"):
         for cache in caches:
             cache.set_cleaning_policy(CleaningPolicy.nop)
             cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
 
-    with TestRun.step(f"Add core devices to caches."):
-        cores = []
-        for i, cache in enumerate(caches):
-            cores.append(cache.add_core(core_dev.partitions[i]))
+    with TestRun.step(f"Add core devices to caches"):
+        cores = [cache.add_core(core_dev=core_dev.partitions[i]) for i, cache in enumerate(caches)]
 
-    with TestRun.step("Run workload on each OpenCAS device."):
-        # Each cache has one core fully saturated with dirty blocks.
-        block_size = Size(4, Unit.MebiByte)
-        count = int(cache_size.value / block_size.value)
-        total_saturation = block_size * count
+    with TestRun.step("Run fio on all cores"):
+        fio_pids = []
         for core in cores:
-            Dd().output(core.path) \
-                .input("/dev/urandom") \
-                .block_size(block_size) \
-                .count(count) \
-                .run()
+            fio = (
+                Fio()
+                .create_command()
+                .target(core)
+                .io_engine(IoEngine.libaio)
+                .block_size(Size(4, Unit.MebiByte))
+                .size(core.size)
+                .read_write(ReadWrite.write)
+                .direct(1)
+            )
+            fio_pids.append(fio.run_in_background())
 
-    with TestRun.step("Check if each cache is full of dirty blocks."):
-        for cache in caches:
-            if not int(cache.get_dirty_blocks()) != total_saturation.get_value(Unit.Blocks4096):
-                TestRun.fail(f"The cache {cache.cache_id} does not contain dirty blocks.")
-
-    with TestRun.step("Start flushing all caches simultaneously."):
-        flush_pids = []
+    with TestRun.step("Check if each cache is full of dirty blocks"):
         for cache in caches:
-            flush_pids.append(
-                TestRun.executor.run_in_background(cli.flush_cache_cmd(str(cache.cache_id)))
-            )
+            if not cache.get_dirty_blocks() != core.size:
+                TestRun.fail(f"The cache {cache.cache_id} does not contain dirty blocks")
 
-    with TestRun.step("Wait for all caches to finish flushing."):
-        is_flushing = [True] * len(flush_pids)
-        while any(is_flushing):
-            for i, pid in enumerate(flush_pids):
-                is_flushing[i] = (TestRun.executor.run(f"ls /proc/{pid}").exit_code == 0)
+    with TestRun.step("Start flush operation on all caches simultaneously"):
+        flush_pids = [
+            TestRun.executor.run_in_background(cli.flush_cache_cmd(str(cache.cache_id)))
+            for cache in caches
+        ]
 
-    with TestRun.step("Check number of dirty data on each cache."):
+    with TestRun.step("Wait for all caches to finish flushing"):
+        for flush_pid in flush_pids:
+            while TestRun.executor.check_if_process_exists(flush_pid):
+                sleep(1)
+
+    with TestRun.step("Check number of dirty data on each cache"):
         for cache in caches:
-            if int(cache.get_dirty_blocks()) > 0:
-                TestRun.LOGGER.error(f"The quantity of dirty cache lines on the cache "
-                                     f"{str(cache.cache_id)} after complete flush should be zero.")
-
-    with TestRun.step("Stop all caches."):
-        casadm.stop_all_caches()
+            if cache.get_dirty_blocks() > Size.zero():
+                TestRun.LOGGER.error(
+                    f"The quantity of dirty cache lines on the cache "
+                    f"{str(cache.cache_id)} after complete flush should be zero"
+                )
@@ -1,7 +1,9 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
 import random
+
 import pytest
@@ -21,35 +23,36 @@ cores_amount = 3
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_remove_core_when_other_mounted_auto_numeration():
     """
-    title: |
-        Test for removing one core from the cache when the other core is mounted.
-        Cores are numerated automatically.
+    title: Remove one core when other are mounted - auto-numerated.
     description: |
         Test of the ability to remove the unmounted core from the cache when the other core
         is mounted and its ID starts with a different digit.
     pass_criteria:
       - No system crash.
       - Removing unmounted core finished with success.
     """
-    with TestRun.step("Prepare devices."):
-        cache_device = TestRun.disks['cache']
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks["cache"]
+        core_device = TestRun.disks["core"]
+
         cache_device.create_partitions([Size(50, Unit.MebiByte)])
-        cache_part = cache_device.partitions[0]
-        core_device = TestRun.disks['core']
         core_device.create_partitions([Size(200, Unit.MebiByte)] * cores_amount)
 
-    with TestRun.step("Start cache."):
-        cache = casadm.start_cache(cache_part, force=True)
+    with TestRun.step("Start cache"):
+        cache = casadm.start_cache(cache_device.partitions[0], force=True)
 
-    with TestRun.step("Add cores to cache and mount them except the first one."):
+    with TestRun.step(f"Add {cores_amount} cores to cache and mount them except the first one"):
         free_core = cache.add_core(core_device.partitions[0])
         mounted_cores = []
         for i, part in enumerate(core_device.partitions[1:]):
             part.create_filesystem(Filesystem.xfs)
             mounted_cores.append(cache.add_core(part))
-            mounted_cores[i].mount(f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}")
+            mounted_cores[i].mount(
+                mount_point=f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}"
+            )
 
-    with TestRun.step("Remove the unmounted core."):
+    with TestRun.step("Remove the unmounted core"):
         try:
             cache.remove_core(free_core.core_id)
         except CmdException as exc:
@@ -60,38 +63,42 @@ def test_remove_core_when_other_mounted_auto_numeration():
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_remove_core_when_other_mounted_custom_numeration():
     """
-    title: |
-        Test for removing one core from the cache when the other core is mounted.
-        Cores have custom numeration, starting with the same digit.
+    title: Remove one core when other are mounted - custom numeration.
     description: |
         Test of the ability to remove the unmounted core from the cache when the other core
         is mounted and its ID starts with the same digit.
     pass_criteria:
       - No system crash.
       - Removing unmounted core finished with success.
     """
-    with TestRun.step("Prepare devices."):
-        cache_device = TestRun.disks['cache']
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks["cache"]
+        core_device = TestRun.disks["core"]
+
         cache_device.create_partitions([Size(50, Unit.MebiByte)])
-        cache_part = cache_device.partitions[0]
-        core_device = TestRun.disks['core']
-        core_device.create_partitions([Size(200, Unit.MebiByte)] * cores_amount)
+        core_device.create_partitions([Size(200, Unit.MebiByte)] * 3)

-    with TestRun.step("Start cache."):
-        cache = casadm.start_cache(cache_part, force=True)
+    with TestRun.step("Start cache"):
+        cache = casadm.start_cache(cache_device.partitions[0], force=True)

-    with TestRun.step("Add cores to cache and mount them except the first one."):
+    with TestRun.step(f"Add {cores_amount} cores to cache and mount them except the first one"):
         random_prefix = random.randint(1, 9)
         random_interfix = random.randint(1, 9)

-        free_core = cache.add_core(core_device.partitions[0], random_prefix)
+        free_core = cache.add_core(core_dev=core_device.partitions[0], core_id=random_prefix)

         mounted_cores = []
         for i, part in enumerate(core_device.partitions[1:]):
             part.create_filesystem(Filesystem.xfs)
-            mounted_cores.append(cache.add_core(part, f"{random_prefix}{random_interfix}{i}"))
-            mounted_cores[i].mount(f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}")
+            mounted_cores.append(
+                cache.add_core(core_dev=part, core_id=int(f"{random_prefix}{random_interfix}{i}"))
+            )
+            mounted_cores[i].mount(
+                mount_point=f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}"
+            )

-    with TestRun.step("Remove the unmounted core."):
+    with TestRun.step("Remove the unmounted core"):
         try:
             cache.remove_core(free_core.core_id)
         except CmdException as exc:
@@ -1,14 +1,14 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #


 import time
-from datetime import timedelta

 import pytest

+from datetime import timedelta
+
 from api.cas import casadm
 from api.cas.cache_config import CacheMode
 from core.test_run import TestRun
@@ -25,12 +25,12 @@ io_size = Size(10000, Unit.Blocks4096)
     "cache_mode",
     [
         (CacheMode.WT, CacheMode.WB),
-        (CacheMode.WB, CacheMode.PT),
-        (CacheMode.WB, CacheMode.WT),
-        (CacheMode.PT, CacheMode.WT),
         (CacheMode.WT, CacheMode.WA),
         (CacheMode.WT, CacheMode.WO),
+        (CacheMode.WB, CacheMode.PT),
+        (CacheMode.WB, CacheMode.WT),
         (CacheMode.WB, CacheMode.WO),
+        (CacheMode.PT, CacheMode.WT),
         (CacheMode.PT, CacheMode.WO),
         (CacheMode.WA, CacheMode.WO),
         (CacheMode.WO, CacheMode.WT),
@@ -43,26 +43,31 @@ io_size = Size(10000, Unit.Blocks4096)
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cache_stop_and_load(cache_mode):
     """
     title: Test for stopping and loading cache back with dynamic cache mode switching.
     description: |
       Validate the ability of the CAS to switch cache modes at runtime and
       check if all of them are working properly after switching and
       after stopping and reloading cache back.
       Check also other parameters consistency after reload.
     pass_criteria:
       - In all cache modes data reads and writes are handled properly before and after reload.
       - All cache parameters preserve their values after reload.
     """

     with TestRun.step("Partition cache and core devices"):
-        cache_dev, core_dev = storage_prepare()
+        cache_dev = TestRun.disks["cache"]
+        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
+        core_dev = TestRun.disks["core"]
+        core_dev.create_partitions([Size(2, Unit.GibiByte)])
+
+    with TestRun.step(f"Disable udev"):
+        Udev.disable()

     with TestRun.step(f"Start cache in {cache_mode[0]} mode"):
-        cache = casadm.start_cache(cache_dev, cache_mode[0], force=True)
-        Udev.disable()
+        cache = casadm.start_cache(cache_dev.partitions[0], cache_mode[0], force=True)

     with TestRun.step("Add core to the cache"):
-        core = cache.add_core(core_dev)
+        core = cache.add_core(core_dev.partitions[0])

     with TestRun.step(f"Change cache mode to {cache_mode[1]}"):
         cache.set_cache_mode(cache_mode[1], flush=True)
@@ -73,7 +78,7 @@ def test_cache_stop_and_load(cache_mode):

     with TestRun.step("Stop and load cache back"):
         cache.stop()
-        cache = casadm.load_cache(cache_dev)
+        cache = casadm.load_cache(cache_dev.partitions[0])

     with TestRun.step("Check parameters consistency"):
         if check_cache_config != cache.get_cache_config():
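The consistency check above compares a cache configuration snapshot taken before stopping against the one read back after load. A rough, self-contained sketch of that comparison style, with a hypothetical trimmed-down CacheConfig standing in for the framework's real config object:

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class CacheConfig:  # hypothetical stand-in for the framework's config object
        cache_mode: str
        cache_line_size_kib: int
        cleaning_policy: str

    # Dataclass equality compares every field, so one check covers them all.
    before = CacheConfig("WT", 4, "alru")
    after = CacheConfig("WT", 4, "alru")
    assert before == after, "Parameters do not match after reload"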
@@ -95,18 +100,12 @@ def test_cache_stop_and_load(cache_mode):
             )
             TestRun.fail(f"Parameters do not match after reload:\n{failed_params}")

-    with TestRun.step(
-        f"Check if {cache_mode[1]} cache mode works properly after reload"
-    ):
+    with TestRun.step(f"Check if {cache_mode[1]} cache mode works properly after reload"):
         if cache_mode[1] == CacheMode.WA or cache_mode[1] == CacheMode.WO:
             check_separated_read_write_after_reload(cache, core, cache_mode[1], io_size)
         else:
             check_cache_mode_operation(cache, core, cache_mode[1])

-    with TestRun.step("Stop all caches"):
-        casadm.stop_all_caches()
-        Udev.enable()
-

 @pytest.mark.parametrize(
     "cache_mode_1,cache_mode_2,flush",
@@ -138,26 +137,38 @@ def test_cache_stop_and_load(cache_mode):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
     """
     title: Test for dynamic cache mode switching during IO.
     description: |
       Validate the ability of CAS to switch cache modes
       during working IO on CAS device.
     pass_criteria:
       - Cache mode is switched without errors.
     """

-    with TestRun.step("Partition cache and core devices"):
-        cache_dev, core_dev = storage_prepare()
+    with TestRun.step("Prepare cache and core devices"):
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
+        core_dev.create_partitions([Size(2, Unit.GibiByte)])

     with TestRun.step(f"Start cache in {cache_mode_1} mode"):
-        cache = casadm.start_cache(cache_dev, cache_mode_1, force=True)
+        cache = casadm.start_cache(
+            cache_dev=cache_dev.partitions[0], cache_mode=cache_mode_1, force=True
+        )

     with TestRun.step("Add core to the cache"):
-        core = cache.add_core(core_dev)
+        core = cache.add_core(core_dev.partitions[0])

-    with TestRun.step("Run 'fio'"):
+    with TestRun.step("Run fio in background"):
         fio = (
-            fio_prepare(core, io_mode)
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .size(io_size)
+            .read_write(io_mode)
+            .target(core.path)
+            .direct(1)
             .verify(VerifyMethod.sha1)
             .run_time(timedelta(minutes=4))
             .time_based()
@@ -168,9 +179,7 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
     with TestRun.step(
         f"Change cache mode to {cache_mode_2} with flush cache option set to: {flush}"
     ):
-        cache_mode_switch_output = cache.set_cache_mode(cache_mode_2, flush)
-        if cache_mode_switch_output.exit_code != 0:
-            TestRun.fail("Cache mode switch failed!")
+        cache.set_cache_mode(cache_mode=cache_mode_2, flush=flush)

     with TestRun.step(f"Check if cache mode has switched properly during IO"):
         cache_mode_after_switch = cache.get_cache_mode()
@@ -188,162 +197,147 @@ def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
         casadm.stop_all_caches()


-def storage_prepare():
-    cache_dev = TestRun.disks["cache"]
-    cache_dev.create_partitions([Size(1, Unit.GibiByte)])
-    core_dev = TestRun.disks["core"]
-    core_dev.create_partitions([Size(2, Unit.GibiByte)])
-    return cache_dev.partitions[0], core_dev.partitions[0]
-
-
 def check_cache_mode_operation(cache, core, cache_mode):
     cache.reset_counters()
-    if cache_mode == CacheMode.WT:
-        io_mode = ReadWrite.randwrite
-        run_io_and_verify(cache, core, io_mode)
-    if cache_mode == CacheMode.WB or cache_mode == CacheMode.PT:
-        io_mode = ReadWrite.randrw
-        run_io_and_verify(cache, core, io_mode)
-    if cache_mode == CacheMode.WA or cache_mode == CacheMode.WO:
-        io_mode = ReadWrite.randread
-        run_io_and_verify(cache, core, io_mode)
-        cache.reset_counters()
-        io_mode = ReadWrite.randwrite
-        run_io_and_verify(cache, core, io_mode)
+    match cache_mode:
+        case CacheMode.WT:
+            io_mode = ReadWrite.randwrite
+            run_io_and_verify(cache, core, io_mode)
+        case CacheMode.WB | CacheMode.PT:
+            io_mode = ReadWrite.randrw
+            run_io_and_verify(cache, core, io_mode)
+        case CacheMode.WA | CacheMode.WO:
+            io_mode = ReadWrite.randread
+            run_io_and_verify(cache, core, io_mode)
+            cache.reset_counters()
+            io_mode = ReadWrite.randwrite
+            run_io_and_verify(cache, core, io_mode)


 def run_io_and_verify(cache, core, io_mode):
     fio_prepare(core, io_mode).run()
     sync()
     cache_mode = cache.get_cache_mode()
-    cache_stats = cache.get_statistics()
-    core_stats = core.get_statistics()
-    if cache_mode == CacheMode.WB:
-        if (
-            core_stats.block_stats.core.writes.value != 0
-            or core_stats.block_stats.exp_obj.writes.value <= 0
-        ):
-            TestRun.fail(
-                "Write-Back cache mode is not working properly! "
-                "There should be some writes to CAS device and none to the core."
-            )
-    if cache_mode == CacheMode.PT:
-        if (
-            cache_stats.block_stats.cache.writes.value != 0
-            or cache_stats.block_stats.cache.reads.value != 0
-        ):
-            TestRun.fail(
-                "Pass-Through cache mode is not working properly! "
-                "There should be no reads or writes from/to cache."
-            )
-    if cache_mode == CacheMode.WT:
-        if cache_stats.block_stats.cache != cache_stats.block_stats.core:
-            TestRun.fail(
-                "Write-Through cache mode is not working properly! "
-                "'cache writes' and 'core writes' counts should be the same."
-            )
-    if cache_mode == CacheMode.WA:
-        if io_mode == ReadWrite.randread:
-            if (
-                cache_stats.block_stats.cache.writes != io_size
-                or cache_stats.block_stats.core.reads != io_size
-            ):
-                TestRun.fail(
-                    "Write-Around cache mode is not working properly for data reads! "
-                    "'cache writes' and 'core reads' should equal total data reads."
-                )
-        if io_mode == ReadWrite.randwrite:
-            if cache_stats.block_stats.cache.writes != io_size:
-                TestRun.fail(
-                    "Write-Around cache mode is not working properly for data writes! "
-                    "There should be no writes to cache since previous read operation."
-                )
-    if cache_mode == CacheMode.WO:
-        if io_mode == ReadWrite.randread:
-            if (
-                cache_stats.block_stats.cache.writes.value != 0
-                or cache_stats.block_stats.cache.reads.value != 0
-            ):
-                TestRun.fail(
-                    "Write-Only cache mode is not working properly for data reads! "
-                    "There should be no reads or writes from/to cache."
-                )
-        if io_mode == ReadWrite.randwrite:
-            if (
-                core_stats.block_stats.core.writes.value != 0
-                or core_stats.block_stats.exp_obj.writes != io_size
-            ):
-                TestRun.fail(
-                    "Write-Only cache mode is not working properly for data writes! "
-                    "All writes should be passed to CAS device and none to the core."
-                )
+    cache_block_stats = cache.get_statistics().block_stats
+    core_block_stats = core.get_statistics().block_stats
+    match cache_mode:
+        case CacheMode.WB:
+            if (
+                cache_block_stats.core.writes.value != 0
+                or cache_block_stats.exp_obj.writes.value <= 0
+            ):
+                TestRun.fail(
+                    "Write-Back cache mode is not working properly! "
+                    "There should be some writes to CAS device and none to the core"
+                )
+        case CacheMode.PT:
+            if (
+                cache_block_stats.cache.writes.value != 0
+                or cache_block_stats.cache.reads.value != 0
+            ):
+                TestRun.fail(
+                    "Pass-Through cache mode is not working properly! "
+                    "There should be no reads or writes from/to cache"
+                )
+        case CacheMode.WT:
+            if cache_block_stats.cache != cache_block_stats.core:
+                TestRun.fail(
+                    "Write-Through cache mode is not working properly! "
+                    "'cache writes' and 'core writes' counts should be the same"
+                )
+        case CacheMode.WA:
+            if io_mode == ReadWrite.randread:
+                if (
+                    cache_block_stats.cache.writes != io_size
+                    or cache_block_stats.core.reads != io_size
+                ):
+                    TestRun.fail(
+                        "Write-Around cache mode is not working properly for data reads! "
+                        "'cache writes' and 'core reads' should equal total data reads"
+                    )
+            if io_mode == ReadWrite.randwrite:
+                if cache_block_stats.cache.writes != io_size:
+                    TestRun.fail(
+                        "Write-Around cache mode is not working properly for data writes! "
+                        "There should be no writes to cache since previous read operation"
+                    )
+        case CacheMode.WO:
+            if io_mode == ReadWrite.randread:
+                if (
+                    cache_block_stats.cache.writes.value != 0
+                    or cache_block_stats.cache.reads.value != 0
+                ):
+                    TestRun.fail(
+                        "Write-Only cache mode is not working properly for data reads! "
+                        "There should be no reads or writes from/to cache"
+                    )
+            if io_mode == ReadWrite.randwrite:
+                if (
+                    core_block_stats.core.writes.value != 0
+                    or core_block_stats.exp_obj.writes != io_size
+                ):
+                    TestRun.fail(
+                        "Write-Only cache mode is not working properly for data writes! "
+                        "All writes should be passed to CAS device and none to the core"
+                    )


 def check_separated_read_write_after_reload(cache, core, cache_mode, io_size):
     # io_size_after_reload should be set to a greater value then global io_size value
     io_size_after_reload = Size(12000, Unit.Blocks4096)
     if io_size_after_reload <= io_size:
-        TestRun.fail(
-            "io_size_after_reload value is not greater then global io_size value!"
-        )
+        TestRun.fail("io_size_after_reload value is not greater then global io_size value!")

     io_mode = ReadWrite.randread
     fio_prepare(core, io_mode, io_size_after_reload).run()
     sync()
-    cache_stats = cache.get_statistics()
+    cache_block_stats = cache.get_statistics().block_stats
     io_new_data = io_size_after_reload - io_size

     if cache_mode == CacheMode.WA:
         if (
-            cache_stats.block_stats.cache.writes != io_new_data
-            or cache_stats.block_stats.core.reads != io_new_data
+            cache_block_stats.cache.writes != io_new_data
+            or cache_block_stats.core.reads != io_new_data
         ):
             TestRun.fail(
                 "Write-Around cache mode is not working properly for data reads after reload! "
                 "'cache writes' and 'core reads' should equal "
-                "the difference from previous data reads."
+                "the difference from previous data reads"
             )
     if cache_mode == CacheMode.WO:
         if (
-            cache_stats.block_stats.cache.writes.value != 0
-            or cache_stats.block_stats.cache.reads != io_size
+            cache_block_stats.cache.writes != Size.zero()
+            or cache_block_stats.cache.reads != io_size
         ):
             TestRun.fail(
                 "Write-Only cache mode is not working properly for data reads after reload! "
                 "There should be no writes to cache and reads "
-                "from cache should equal previous writes to it."
+                "from cache should equal previous writes to it"
             )

     cache.reset_counters()
     io_mode = ReadWrite.randwrite
     fio_prepare(core, io_mode, io_size_after_reload).run()
     sync()
-    cache_stats = cache.get_statistics()
-    core_stats = core.get_statistics()
+    cache_block_stats = cache.get_statistics().block_stats
+    core_block_stats = core.get_statistics().block_stats

-    if cache_mode == CacheMode.WA:
-        if cache_stats.block_stats.cache.writes != io_size_after_reload:
-            TestRun.fail(
-                "Write-Around cache mode is not working properly for data writes after reload! "
-                "There should be no writes to cache since previous read operation."
-            )
-    if cache_mode == CacheMode.WO:
-        if (
-            core_stats.block_stats.core.writes.value != 0
-            or core_stats.block_stats.exp_obj.writes != io_size_after_reload
-        ):
-            TestRun.fail(
-                "Write-Only cache mode is not working properly for data writes after reload! "
-                "All writes should be passed to CAS device and none to the core."
-            )
+    match cache_mode:
+        case CacheMode.WA:
+            if cache_block_stats.cache.writes != io_size_after_reload:
+                TestRun.fail(
+                    "Write-Around cache mode is not working properly for data writes after reload! "
+                    "There should be no writes to cache since previous read operation"
+                )
+        case CacheMode.WO:
+            if (
+                core_block_stats.core.writes != Size.zero()
+                or core_block_stats.exp_obj.writes != io_size_after_reload
+            ):
+                TestRun.fail(
+                    "Write-Only cache mode is not working properly for data writes after reload! "
+                    "All writes should be passed to CAS device and none to the core"
+                )


 def fio_prepare(core, io_mode, io_size=io_size):
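The helper refactor in this hunk replaces chained if cache_mode == ... tests with Python 3.10 structural pattern matching: a dotted name such as CacheMode.WB in a case is a value pattern, and | combines alternatives so one arm covers several modes. A self-contained sketch of the same dispatch shape, with a stand-in enum rather than the one from api.cas.cache_config:

    from enum import Enum, auto

    class CacheMode(Enum):  # stand-in for api.cas.cache_config.CacheMode
        WT = auto()
        WB = auto()
        PT = auto()
        WA = auto()
        WO = auto()

    def io_modes_for(cache_mode: CacheMode) -> list[str]:
        # One `case` arm can cover several modes via the or-pattern.
        match cache_mode:
            case CacheMode.WT:
                return ["randwrite"]
            case CacheMode.WB | CacheMode.PT:
                return ["randrw"]
            case CacheMode.WA | CacheMode.WO:
                return ["randread", "randwrite"]
        return []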
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -15,41 +16,40 @@ from test_utils.size import Size, Unit
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_remove_multilevel_core():
     """
     title: Test of the ability to remove a core used in a multilevel cache.
     description: |
       Negative test if OpenCAS does not allow to remove a core when the related exported object
       is used as a core device for another cache instance.
     pass_criteria:
       - No system crash.
       - OpenCAS does not allow removing a core used in a multilevel cache instance.
     """
-    with TestRun.step("Prepare two devices for cache and one for core."):
-        cache_dev = TestRun.disks['cache']
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
+
         cache_dev.create_partitions([Size(512, Unit.MebiByte)] * 2)
-        cache_part1 = cache_dev.partitions[0]
-        cache_part2 = cache_dev.partitions[1]
-        core_dev = TestRun.disks['core']
         core_dev.create_partitions([Size(1, Unit.GibiByte)])
-        core_dev = core_dev.partitions[0]

     with TestRun.step("Start the first cache instance"):
-        cache1 = casadm.start_cache(cache_part1, force=True)
+        cache1 = casadm.start_cache(cache_dev.partitions[0], force=True)

     with TestRun.step("Add a core device to the first cache instance."):
-        core1 = cache1.add_core(core_dev)
+        core1 = cache1.add_core(core_dev.partitions[0])

     with TestRun.step("Start the second cache instance"):
-        cache2 = casadm.start_cache(cache_part2, force=True)
+        cache2 = casadm.start_cache(cache_dev.partitions[1], force=True)

-    with TestRun.step("Add the first cache's exported object as a core "
-                      "to the second cache instance."):
+    with TestRun.step(
+        "Add the first cache's exported object as a core to the second cache instance."
+    ):
         cache2.add_core(core1)

     with TestRun.step("Try to remove core from the first level cache."):
-        output = TestRun.executor.run_expect_fail(cli.remove_core_cmd(cache_id=str(cache1.cache_id),
-                                                                      core_id=str(core1.core_id),
-                                                                      force=True))
+        output = TestRun.executor.run_expect_fail(
+            cli.remove_core_cmd(
+                cache_id=str(cache1.cache_id), core_id=str(core1.core_id), force=True
+            )
+        )
         cli_messages.check_stderr_msg(output, cli_messages.remove_multilevel_core)
-
-    with TestRun.step("Stop cache."):
-        casadm.stop_all_caches()
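The negative test above runs the removal expecting a non-zero exit code and then matches stderr against a known message. A generic sketch of that expect-fail idiom, assuming nothing about casadm syntax (the command is whatever the caller passes in):

    import subprocess

    def run_expect_fail(cmd: list[str]) -> str:
        # The call must fail; a zero exit code means the negative test failed.
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode == 0:
            raise AssertionError(f"{cmd} succeeded but was expected to fail")
        return result.stderr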
@@ -1,15 +1,21 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
-import os
-import random
-from time import sleep

+import posixpath
+import random
 import pytest

+from time import sleep
+
 from api.cas import casadm
-from api.cas.cache_config import CacheMode, SeqCutOffPolicy, CacheModeTrait
+from api.cas.cache_config import (
+    CacheMode,
+    SeqCutOffPolicy,
+    CacheModeTrait,
+)
 from core.test_run_utils import TestRun
 from storage_devices.disk import DiskTypeSet, DiskTypeLowerThan, DiskType
 from test_tools.dd import Dd
@@ -19,45 +25,54 @@ from test_tools.fio.fio_param import IoEngine, ReadWrite
 from test_utils.os_utils import Udev
 from test_utils.size import Size, Unit

-random_thresholds = random.sample(range(1028, 1024 ** 2, 4), 3)
+random_thresholds = random.sample(range(1028, 1024**2, 4), 3)
 random_stream_numbers = random.sample(range(2, 128), 3)
+mount_point = "/mnt"


 @pytest.mark.os_dependent
 @pytest.mark.parametrizex("streams_number", [1, 128] + random_stream_numbers)
-@pytest.mark.parametrizex("threshold",
-                          [Size(1, Unit.MebiByte), Size(1, Unit.GibiByte)]
-                          + [Size(x, Unit.KibiByte) for x in random_thresholds])
+@pytest.mark.parametrizex(
+    "threshold",
+    [Size(1, Unit.MebiByte), Size(1, Unit.GibiByte)]
+    + [Size(x, Unit.KibiByte) for x in random_thresholds],
+)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_multistream_seq_cutoff_functional(threshold, streams_number):
+def test_multistream_seq_cutoff_functional(streams_number, threshold):
     """
     title: Functional test for multistream sequential cutoff
     description: |
         Testing if amount of data written to cache and core is correct after running sequential
         writes from multiple streams with different sequential cut-off thresholds.
     pass_criteria:
       - Amount of data written to cache is equal to amount set with sequential cutoff threshold
       - Amount of data written in pass-through is equal to io size run after reaching the
         sequential cutoff threshold
     """
-    with TestRun.step("Start cache and add core device."):
-        cache_disk = TestRun.disks['cache']
-        core_disk = TestRun.disks['core']

-        cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
+    with TestRun.step("Disable udev"):
         Udev.disable()
+
+    with TestRun.step(f"Start cache in Write-Back"):
+        cache_disk = TestRun.disks["cache"]
+        core_disk = TestRun.disks["core"]
+        cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
         core = cache.add_core(core_disk)

-    with TestRun.step(f"Set seq-cutoff policy to always, threshold to {threshold} "
-                      f"and reset statistics counters."):
+    with TestRun.step(
+        f"Set seq-cutoff policy to always, threshold to {threshold} "
+        f"and reset statistics counters"
+    ):
         core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
         core.set_seq_cutoff_threshold(threshold)
         core.set_seq_cutoff_promotion_count(1)
         core.reset_counters()

-    with TestRun.step(f"Run {streams_number} I/O streams with amount of sequential writes equal to "
-                      f"seq-cutoff threshold value minus one 4k block."):
+    with TestRun.step(
+        f"Run {streams_number} I/O streams with amount of sequential writes equal to "
+        f"seq-cutoff threshold value minus one 4k block"
+    ):
         kib_between_streams = 100
         range_step = int(threshold.get_value(Unit.KibiByte)) + kib_between_streams
         max_range_offset = streams_number * range_step
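In the step above each stream gets a disjoint LBA range: the stride is the threshold plus a 100 KiB guard gap, and each stream writes one 4 KiB block less than the threshold so the cutoff never triggers during the first pass. A worked example with hypothetical numbers:

    # Hypothetical values: 1 MiB threshold, 4 streams, 100 KiB guard gap.
    threshold_kib = 1024
    kib_between_streams = 100
    streams_number = 4

    range_step = threshold_kib + kib_between_streams          # 1124 KiB stride
    offsets = [i * range_step for i in range(streams_number)]
    assert offsets == [0, 1124, 2248, 3372]

    # Each stream writes (threshold - one 4 KiB block) sequentially at its
    # offset, so it reaches neither its threshold nor its neighbour's range.
    write_kib_per_stream = threshold_kib - 4                  # 1020 KiB
    assert write_kib_per_stream + 4 <= range_step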
@@ -69,68 +84,50 @@ def test_multistream_seq_cutoff_functional(threshold, streams_number):
         TestRun.LOGGER.info(f"Statistics before I/O:\n{core_statistics_before}")

         offset = Size(offsets[i], Unit.KibiByte)
-        run_dd(core.path, count=int(threshold.get_value(Unit.Blocks4096) - 1),
-               seek=int(offset.get_value(Unit.Blocks4096)))
+        run_dd(
+            core.path,
+            count=int(threshold.get_value(Unit.Blocks4096) - 1),
+            seek=int(offset.get_value(Unit.Blocks4096)),
+        )

         core_statistics_after = core.get_statistics()
-        check_statistics(core_statistics_before,
-                         core_statistics_after,
-                         expected_pt=0,
-                         expected_writes_to_cache=threshold - Size(1, Unit.Blocks4096))
+        check_statistics(
+            core_statistics_before,
+            core_statistics_after,
+            expected_pt=0,
+            expected_writes_to_cache=threshold - Size(1, Unit.Blocks4096),
+        )
         core_statistics_before = core_statistics_after

-    with TestRun.step("Write random number of 4k block requests to each stream and check if all "
-                      "writes were sent in pass-through mode."):
+    with TestRun.step(
+        "Write random number of 4k block requests to each stream and check if all "
+        "writes were sent in pass-through mode"
+    ):
         core_statistics_before = core.get_statistics()
         random.shuffle(offsets)

         for i in TestRun.iteration(range(0, len(offsets))):
             TestRun.LOGGER.info(f"Statistics before second I/O:\n{core_statistics_before}")
-            additional_4k_blocks_writes = random.randint(1, kib_between_streams / 4)
+            additional_4k_blocks_writes = random.randint(1, kib_between_streams // 4)
             offset = Size(offsets[i], Unit.KibiByte)
             run_dd(
-                core.path, count=additional_4k_blocks_writes,
-                seek=int(offset.get_value(Unit.Blocks4096)
-                         + threshold.get_value(Unit.Blocks4096) - 1))
+                core.path,
+                count=additional_4k_blocks_writes,
+                seek=int(
+                    offset.get_value(Unit.Blocks4096) + threshold.get_value(Unit.Blocks4096) - 1
+                ),
+            )

             core_statistics_after = core.get_statistics()
-            check_statistics(core_statistics_before,
-                             core_statistics_after,
-                             expected_pt=additional_4k_blocks_writes,
-                             expected_writes_to_cache=Size.zero())
+            check_statistics(
+                core_statistics_before,
+                core_statistics_after,
+                expected_pt=additional_4k_blocks_writes,
+                expected_writes_to_cache=Size.zero(),
+            )
             core_statistics_before = core_statistics_after


-def check_statistics(stats_before, stats_after, expected_pt, expected_writes_to_cache):
-    TestRun.LOGGER.info(f"Statistics after I/O:\n{stats_after}")
-    writes_to_cache_before = stats_before.block_stats.cache.writes
-    writes_to_cache_after = stats_after.block_stats.cache.writes
-    pt_writes_before = stats_before.request_stats.pass_through_writes
-    pt_writes_after = stats_after.request_stats.pass_through_writes
-
-    actual_pt = pt_writes_after - pt_writes_before
-    actual_writes_to_cache = writes_to_cache_after - writes_to_cache_before
-    if actual_pt != expected_pt:
-        TestRun.LOGGER.error(f"Expected pass-through writes: {expected_pt}\n"
-                             f"Actual pass-through writes: {actual_pt}")
-    if actual_writes_to_cache != expected_writes_to_cache:
-        TestRun.LOGGER.error(
-            f"Expected writes to cache: {expected_writes_to_cache}\n"
-            f"Actual writes to cache: {actual_writes_to_cache}")
-
-
-def run_dd(target_path, count, seek):
-    dd = Dd() \
-        .input("/dev/zero") \
-        .output(target_path) \
-        .block_size(Size(1, Unit.Blocks4096)) \
-        .count(count) \
-        .oflag("direct") \
-        .seek(seek)
-    dd.run()
-    TestRun.LOGGER.info(f"dd command:\n{dd}")
-
-
 @pytest.mark.os_dependent
 @pytest.mark.parametrizex("streams_seq_rand", [(64, 64), (64, 192)])
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -146,31 +143,41 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
     pass_criteria:
       - No system crash
     """
-    with TestRun.step("Start cache and add core device."):
-        cache_disk = TestRun.disks['cache']
-        core_disk = TestRun.disks['core']
+    with TestRun.step("Prepare cache and core devices"):
+        cache_disk = TestRun.disks["cache"]
+        core_disk = TestRun.disks["core"]

         cache_disk.create_partitions([Size(1.5, Unit.GibiByte)])
-        cache_dev = cache_disk.partitions[0]
-        cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
+
+    with TestRun.step(f"Disable udev"):
         Udev.disable()
+
+    with TestRun.step(f"Start cache in Write-Back mode and add core"):
+        cache = casadm.start_cache(
+            cache_dev=cache_disk.partitions[0], cache_mode=CacheMode.WB, force=True
+        )
         core = cache.add_core(core_disk)

-    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 512KiB."):
+    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 512KiB"):
         core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
         core.set_seq_cutoff_threshold(Size(512, Unit.KibiByte))

-    with TestRun.step("Reset core statistics counters."):
+    with TestRun.step("Reset core statistics counters"):
         core.reset_counters()

-    with TestRun.step("Run I/O"):
+    with TestRun.step("Run FIO on core device"):
         stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
         sequential_streams = streams_seq_rand[0]
         random_streams = streams_seq_rand[1]
-        fio = (Fio().create_command()
-               .io_engine(IoEngine.libaio)
-               .block_size(Size(1, Unit.Blocks4096))
-               .direct()
-               .offset_increment(stream_size))
+        fio = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .block_size(Size(1, Unit.Blocks4096))
+            .direct()
+            .offset_increment(stream_size)
+        )

         for i in range(0, sequential_streams + random_streams):
             fio_job = fio.add_job(job_name=f"stream_{i}")
@@ -181,8 +188,8 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
             else:
                 fio_job.read_write(ReadWrite.randwrite)

-        pid = fio.run_in_background()
-        while TestRun.executor.check_if_process_exists(pid):
+        fio_pid = fio.run_in_background()
+        while TestRun.executor.check_if_process_exists(fio_pid):
             sleep(5)
         TestRun.LOGGER.info(f"{core.get_statistics()}")
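The stress tests build a single fio command with one numbered job per stream; offset_increment shifts each job by a fixed amount so the streams stay disjoint on the device. A rough sketch of the argv such a builder could emit, assuming standard fio option spellings (values are illustrative):

    def build_fio_argv(seq_jobs: int, rand_jobs: int, stream_size: str) -> list[str]:
        # Options before the first --name are global; offset_increment
        # shifts job N by N * stream_size so the streams never overlap.
        argv = [
            "fio",
            "--ioengine=libaio",
            "--bs=4096",
            "--direct=1",
            f"--offset_increment={stream_size}",
        ]
        for i in range(seq_jobs + rand_jobs):
            rw = "write" if i < seq_jobs else "randwrite"
            argv += [f"--name=stream_{i}", f"--rw={rw}", f"--size={stream_size}"]
        return argv

    # e.g. build_fio_argv(64, 64, "16m") mirrors the (64, 64) parametrization.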
@@ -199,46 +206,51 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mode):
     description: |
         Testing the stability of a system when there are multiple sequential and random I/O streams
         running against the exported object with a filesystem when the sequential cutoff policy is
-        set to always and the sequential cutoff threshold is set to a value which is able
-        to be reached by sequential I/O streams.
+        set to always and the sequential cutoff threshold is configured to a value that can be
+        achieved by sequential I/O streams.
     pass_criteria:
       - No system crash
     """
-    mount_point = "/mnt"
-    with TestRun.step("Prepare devices. Create filesystem on core device."):
-        cache_disk = TestRun.disks['cache']
-        core_disk = TestRun.disks['core']
+
+    with TestRun.step(f"Disable udev"):
+        Udev.disable()
+
+    with TestRun.step("Create filesystem on core device"):
+        cache_disk = TestRun.disks["cache"]
+        core_disk = TestRun.disks["core"]
         core_disk.create_filesystem(filesystem)

-    with TestRun.step("Start cache and add core."):
-        cache = casadm.start_cache(cache_disk, cache_mode, force=True)
-        Udev.disable()
-        core = cache.add_core(core_disk)
+    with TestRun.step("Start cache and add core"):
+        cache = casadm.start_cache(cache_dev=cache_disk, cache_mode=cache_mode, force=True)
+        core = cache.add_core(core_dev=core_disk)

-    with TestRun.step("Mount core."):
-        core.mount(mount_point)
+    with TestRun.step("Mount core"):
+        core.mount(mount_point=mount_point)

-    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB."):
-        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
-        core.set_seq_cutoff_threshold(Size(20, Unit.MebiByte))
+    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB"):
+        core.set_seq_cutoff_policy(policy=SeqCutOffPolicy.always)
+        core.set_seq_cutoff_threshold(threshold=Size(20, Unit.MebiByte))

-    with TestRun.step("Reset core statistics counters."):
+    with TestRun.step("Reset core statistic counters"):
         core.reset_counters()

-    with TestRun.step("Run I/O"):
+    with TestRun.step("Run fio on exported object"):
         sequential_streams = streams_seq_rand[0]
         random_streams = streams_seq_rand[1]
         stream_size = core_disk.size / 256
-        fio = (Fio().create_command()
-               .io_engine(IoEngine.libaio)
-               .block_size(Size(1, Unit.Blocks4096))
-               .direct()
-               .offset_increment(stream_size))
+        fio = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .block_size(Size(1, Unit.Blocks4096))
+            .direct()
+            .offset_increment(stream_size)
+        )

         for i in range(0, sequential_streams + random_streams):
             fio_job = fio.add_job(job_name=f"stream_{i}")
             fio_job.size(stream_size)
-            fio_job.target(os.path.join(mount_point, f"file_{i}"))
+            fio_job.target(posixpath.join(mount_point, f"file_{i}"))
             if i < sequential_streams:
                 fio_job.read_write(ReadWrite.write)
             else:
@@ -248,3 +260,38 @@ def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mode):
         while TestRun.executor.check_if_process_exists(pid):
             sleep(5)
         TestRun.LOGGER.info(f"{core.get_statistics()}")
+
+
+def run_dd(target_path, count, seek):
+    dd = (
+        Dd()
+        .input("/dev/zero")
+        .output(target_path)
+        .block_size(Size(1, Unit.Blocks4096))
+        .count(count)
+        .oflag("direct")
+        .seek(seek)
+    )
+    dd.run()
+    TestRun.LOGGER.info(f"dd command:\n{dd}")
+
+
+def check_statistics(stats_before, stats_after, expected_pt, expected_writes_to_cache):
+    TestRun.LOGGER.info(f"Statistics after I/O:\n{stats_after}")
+    writes_to_cache_before = stats_before.block_stats.cache.writes
+    writes_to_cache_after = stats_after.block_stats.cache.writes
+    pt_writes_before = stats_before.request_stats.pass_through_writes
+    pt_writes_after = stats_after.request_stats.pass_through_writes
+
+    actual_pt = pt_writes_after - pt_writes_before
+    actual_writes_to_cache = writes_to_cache_after - writes_to_cache_before
+    if actual_pt != expected_pt:
+        TestRun.LOGGER.error(
+            f"Expected pass-through writes: {expected_pt}\n"
+            f"Actual pass-through writes: {actual_pt}"
+        )
+    if actual_writes_to_cache != expected_writes_to_cache:
+        TestRun.LOGGER.error(
+            f"Expected writes to cache: {expected_writes_to_cache}\n"
+            f"Actual writes to cache: {actual_writes_to_cache}"
+        )
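The relocated run_dd() helper wraps a plain dd invocation behind the framework's builder. Assuming the builder maps each method one-to-one onto a dd option, an equivalent stdlib-only call would look roughly like this:

    import subprocess

    def run_dd_plain(target_path: str, count: int, seek: int) -> None:
        # Write `count` 4 KiB blocks of zeros, skipping `seek` blocks into
        # the target, with O_DIRECT so requests bypass the page cache.
        subprocess.run(
            [
                "dd",
                "if=/dev/zero",
                f"of={target_path}",
                "bs=4096",
                f"count={count}",
                f"seek={seek}",
                "oflag=direct",
            ],
            check=True,
        )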
@@ -1,14 +1,13 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #


-import random
-from enum import Enum, auto
-
 import pytest

+from enum import Enum, auto
+
 from api.cas import casadm
 from api.cas.cache_config import SeqCutOffPolicy, CacheMode, CacheLineSize
 from api.cas.core import SEQ_CUTOFF_THRESHOLD_MAX
@@ -26,21 +25,19 @@ class VerifyType(Enum):
     EQUAL = auto()


-@pytest.mark.parametrize("thresholds_list", [[
-    random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
-    random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
-    random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
-    random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
-]])
-@pytest.mark.parametrize("cache_mode, io_type, io_type_last", [
-    (CacheMode.WB, ReadWrite.write, ReadWrite.randwrite),
-    (CacheMode.WT, ReadWrite.write, ReadWrite.randwrite),
-    (CacheMode.WA, ReadWrite.read, ReadWrite.randread),
-    (CacheMode.WO, ReadWrite.write, ReadWrite.randwrite)])
-@pytest.mark.parametrizex("cls", CacheLineSize)
+@pytest.mark.parametrize(
+    "cache_mode, io_type, io_type_last",
+    [
+        (CacheMode.WB, ReadWrite.write, ReadWrite.randwrite),
+        (CacheMode.WT, ReadWrite.write, ReadWrite.randwrite),
+        (CacheMode.WO, ReadWrite.write, ReadWrite.randwrite),
+        (CacheMode.WA, ReadWrite.read, ReadWrite.randread),
+    ],
+)
+@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_seq_cutoff_multi_core(thresholds_list, cache_mode, io_type, io_type_last, cls):
+def test_seq_cutoff_multi_core(cache_mode, io_type, io_type_last, cache_line_size):
     """
     title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
     description: |
@ -48,87 +45,120 @@ def test_seq_cutoff_multi_core(thresholds_list, cache_mode, io_type, io_type_las
|
|||||||
sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4
|
sequential cut-off thresholds on each core, while running sequential IO on 3 out of 4
|
||||||
cores and random IO against the last core, is correct.
|
cores and random IO against the last core, is correct.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of written blocks to cache is less or equal than amount set
|
||||||
with sequential cut-off threshold for three first cores.
|
with sequential cut-off threshold for three first cores.
|
||||||
- Amount of written blocks to cache is equal to io size run against last core.
|
- Amount of written blocks to cache is equal to io size run against last core.
|
||||||
"""
|
"""
|
||||||
with TestRun.step(f"Test prepare (start cache (cache line size: {cls}) and add cores)"):
|
|
||||||
cache, cores = prepare(cores_count=len(thresholds_list), cache_line_size=cls)
|
with TestRun.step("Prepare cache and core devices"):
|
||||||
writes_before = []
|
cache_device = TestRun.disks["cache"]
|
||||||
io_sizes = []
|
core_device = TestRun.disks["core"]
|
||||||
thresholds = []
|
|
||||||
|
cache_device.create_partitions(
|
||||||
|
[(SEQ_CUTOFF_THRESHOLD_MAX * 4 + Size(value=5, unit=Unit.GibiByte))]
|
||||||
|
)
|
||||||
|
core_device.create_partitions(
|
||||||
|
+            [(SEQ_CUTOFF_THRESHOLD_MAX + Size(value=10, unit=Unit.GibiByte))] * 4
+        )
+        cache_part = cache_device.partitions[0]
+        core_parts = core_device.partitions
+
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
+    with TestRun.step(
+        f"Start cache in {cache_mode} mode and add {len(core_parts)} cores to the cache"
+    ):
+        cache = casadm.start_cache(
+            cache_dev=cache_part, cache_mode=cache_mode, force=True, cache_line_size=cache_line_size
+        )
+        core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
+
+    with TestRun.step("Set sequential cut-off parameters for all cores"):
+        writes_before_list = []
         fio_additional_size = Size(10, Unit.Blocks4096)
-        for i in range(len(thresholds_list)):
-            thresholds.append(Size(thresholds_list[i], Unit.KibiByte))
-            io_sizes.append((thresholds[i] + fio_additional_size).align_down(0x1000))
+        thresholds_list = [
+            Size.generate_random_size(
+                min_size=1,
+                max_size=SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte),
+                unit=Unit.KibiByte,
+            )
+            for _ in core_list
+        ]
+        io_sizes_list = [
+            (threshold_size + fio_additional_size).align_down(0x1000)
+            for threshold_size in thresholds_list
+        ]

-    with TestRun.step(f"Setting cache mode to {cache_mode}"):
-        cache.set_cache_mode(cache_mode)
-
-    for i, core in TestRun.iteration(enumerate(cores), "Set sequential cut-off parameters for "
-                                                       "all cores"):
-        with TestRun.step(f"Setting core sequential cut off policy to {SeqCutOffPolicy.always}"):
+        for core, threshold in zip(core_list, thresholds_list):
             core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
-
-        with TestRun.step(f"Setting core sequential cut off threshold to {thresholds[i]}"):
-            core.set_seq_cutoff_threshold(thresholds[i])
+            core.set_seq_cutoff_threshold(threshold)

-    with TestRun.step("Creating FIO command (one job per core)"):
+    with TestRun.step("Prepare sequential IO against first three cores"):
         block_size = Size(4, Unit.KibiByte)
-        fio = (Fio().create_command()
-               .io_engine(IoEngine.libaio)
-               .block_size(block_size)
-               .direct())
+        fio = Fio().create_command().io_engine(IoEngine.libaio).block_size(block_size).direct(True)

-        # Run sequential IO against first three cores
-        for i, core in enumerate(cores[:-1]):
-            fio_job = fio.add_job(job_name=f"core_{core.core_id}")
-            fio_job.size(io_sizes[i])
+        for core, io_size in zip(core_list[:-1], io_sizes_list[:-1]):
+            fio_job = fio.add_job(f"core_{core.core_id}")
+            fio_job.size(io_size)
             fio_job.read_write(io_type)
             fio_job.target(core.path)
-            writes_before.append(core.get_statistics().block_stats.cache.writes)
+            writes_before_list.append(core.get_statistics().block_stats.cache.writes)

-        # Run random IO against the last core
-        fio_job = fio.add_job(job_name=f"core_{cores[-1].core_id}")
-        fio_job.size(io_sizes[-1])
+    with TestRun.step("Prepare random IO against the last core"):
+        fio_job = fio.add_job(f"core_{core_list[-1].core_id}")
+        fio_job.size(io_sizes_list[-1])
         fio_job.read_write(io_type_last)
-        fio_job.target(cores[-1].path)
-        writes_before.append(cores[-1].get_statistics().block_stats.cache.writes)
+        fio_job.target(core_list[-1].path)
+        writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)

-    with TestRun.step("Running IO against all cores"):
+    with TestRun.step("Run fio against all cores"):
         fio.run()

-    with TestRun.step("Verifying writes to cache count after IO"):
-        margins = []
-        for i, core in enumerate(cores[:-1]):
-            promotion_count = core.get_seq_cut_off_parameters().promotion_count
-            cutoff_threshold = thresholds[i]
-            margins.append(min(block_size * (promotion_count - 1), cutoff_threshold))
-        margin = sum(margins)
+    with TestRun.step("Verify writes to cache count after IO"):
+        margins = [
+            min(block_size * (core.get_seq_cut_off_parameters().promotion_count - 1), threshold)
+            for core, threshold in zip(core_list[:-1], thresholds_list[:-1])
+        ]
+        margin = Size.zero()
+        for size in margins:
+            margin += size

-        for i, core in enumerate(cores[:-1]):
-            verify_writes_count(core, writes_before[i], thresholds[i], io_sizes[i],
-                                VerifyType.POSITIVE, io_margin=margin)
+        for core, writes, threshold, io_size in zip(
+            core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
+        ):
+            verify_writes_count(
+                core=core,
+                writes_before=writes,
+                threshold=threshold,
+                io_size=io_size,
+                ver_type=VerifyType.POSITIVE,
+                io_margin=margin,
+            )

-        verify_writes_count(cores[-1], writes_before[-1], thresholds[-1], io_sizes[-1],
-                            VerifyType.EQUAL)
+        verify_writes_count(
+            core=core_list[-1],
+            writes_before=writes_before_list[-1],
+            threshold=thresholds_list[-1],
+            io_size=io_sizes_list[-1],
+            ver_type=VerifyType.EQUAL,
+        )
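The io_margin passed to the POSITIVE checks above exists because, under the 'always' policy, each sequential stream may still promote up to promotion_count - 1 blocks into the cache before the cut-off engages. A minimal standalone sketch of that arithmetic, with illustrative integer values in KiB standing in for the framework's Size objects:

# Illustrative arithmetic behind the io_margin above; the values are made up.
block_size_kib = 4                  # fio block size used by the test (4 KiB)
promotion_count = 8                 # example value; the test reads it per core
thresholds_kib = [512, 1024, 2048]  # example per-core cut-off thresholds

# Each stream can put at most (promotion_count - 1) blocks, capped by its
# own threshold, into the cache before cut-off kicks in.
margins = [min(block_size_kib * (promotion_count - 1), t) for t in thresholds_kib]
margin_kib = sum(margins)
print(margins, margin_kib)  # [28, 28, 28] 84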
@pytest.mark.parametrize("thresholds_list", [[
|
@pytest.mark.parametrize(
|
||||||
random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
|
"cache_mode, io_type, io_type_last",
|
||||||
random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
|
[
|
||||||
random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
|
(CacheMode.WB, ReadWrite.write, ReadWrite.randwrite),
|
||||||
random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte))),
|
(CacheMode.WT, ReadWrite.write, ReadWrite.randwrite),
|
||||||
]])
|
(CacheMode.WA, ReadWrite.read, ReadWrite.randread),
|
||||||
@pytest.mark.parametrize("cache_mode, io_type, io_type_last", [
|
(CacheMode.WO, ReadWrite.write, ReadWrite.randwrite),
|
||||||
(CacheMode.WB, ReadWrite.write, ReadWrite.randwrite),
|
],
|
||||||
(CacheMode.WT, ReadWrite.write, ReadWrite.randwrite),
|
)
|
||||||
(CacheMode.WA, ReadWrite.read, ReadWrite.randread),
|
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
|
||||||
(CacheMode.WO, ReadWrite.write, ReadWrite.randwrite)])
|
|
||||||
@pytest.mark.parametrizex("cls", CacheLineSize)
|
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_seq_cutoff_multi_core_io_pinned(thresholds_list, cache_mode, io_type, io_type_last, cls):
|
def test_seq_cutoff_multi_core_io_pinned(cache_mode, io_type, io_type_last, cache_line_size):
|
||||||
"""
|
"""
|
||||||
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
|
title: Sequential cut-off tests during sequential and random IO 'always' policy with 4 cores
|
||||||
description: |
|
description: |
|
||||||
@ -136,77 +166,120 @@ def test_seq_cutoff_multi_core_io_pinned(thresholds_list, cache_mode, io_type, i
|
|||||||
sequential cut-off thresholds on each core, while running sequential IO, pinned,
|
sequential cut-off thresholds on each core, while running sequential IO, pinned,
|
||||||
on 3 out of 4 cores and random IO against the last core, is correct.
|
on 3 out of 4 cores and random IO against the last core, is correct.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of written blocks to cache is less or equal than amount set
|
||||||
with sequential cut-off threshold for three first cores.
|
with sequential cut-off threshold for three first cores.
|
||||||
- Amount of written blocks to cache is equal to io size run against last core.
|
- Amount of written blocks to cache is equal to io size run against last core.
|
||||||
"""
|
"""
|
||||||
with TestRun.step(f"Test prepare (start cache (cache line size: {cls}) and add cores)"):
|
|
||||||
cache, cores = prepare(cores_count=len(thresholds_list), cache_line_size=cls)
|
with TestRun.step("Partition cache and core devices"):
|
||||||
writes_before = []
|
cache_device = TestRun.disks["cache"]
|
||||||
io_sizes = []
|
core_device = TestRun.disks["core"]
|
||||||
thresholds = []
|
cache_device.create_partitions(
|
||||||
|
[(SEQ_CUTOFF_THRESHOLD_MAX * 4 + Size(value=5, unit=Unit.GibiByte))]
|
||||||
|
)
|
||||||
|
core_device.create_partitions(
|
||||||
|
[(SEQ_CUTOFF_THRESHOLD_MAX + Size(value=10, unit=Unit.GibiByte))] * 4
|
||||||
|
)
|
||||||
|
cache_part = cache_device.partitions[0]
|
||||||
|
core_parts = core_device.partitions
|
||||||
|
|
||||||
|
with TestRun.step("Disable udev"):
|
||||||
|
Udev.disable()
|
||||||
|
|
||||||
|
with TestRun.step(
|
||||||
|
f"Start cache in {cache_mode} mode and add {len(core_parts)} cores to the cache"
|
||||||
|
):
|
||||||
|
cache = casadm.start_cache(
|
||||||
|
cache_dev=cache_part,
|
||||||
|
cache_mode=cache_mode,
|
||||||
|
force=True,
|
||||||
|
cache_line_size=cache_line_size,
|
||||||
|
)
|
||||||
|
core_list = [cache.add_core(core_dev=core_part) for core_part in core_parts]
|
||||||
|
|
||||||
|
with TestRun.step(f"Set sequential cut-off parameters for all cores"):
|
||||||
|
writes_before_list = []
|
||||||
fio_additional_size = Size(10, Unit.Blocks4096)
|
fio_additional_size = Size(10, Unit.Blocks4096)
|
||||||
for i in range(len(thresholds_list)):
|
thresholds_list = [
|
||||||
thresholds.append(Size(thresholds_list[i], Unit.KibiByte))
|
Size.generate_random_size(
|
||||||
io_sizes.append((thresholds[i] + fio_additional_size).align_down(0x1000))
|
min_size=1,
|
||||||
|
max_size=SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte),
|
||||||
|
unit=Unit.KibiByte,
|
||||||
|
)
|
||||||
|
for _ in core_list
|
||||||
|
]
|
||||||
|
io_sizes_list = [
|
||||||
|
(threshold_size + fio_additional_size).align_down(0x1000)
|
||||||
|
for threshold_size in thresholds_list
|
||||||
|
]
|
||||||
|
|
||||||
with TestRun.step(f"Setting cache mode to {cache_mode}"):
|
for core, threshold in zip(core_list, thresholds_list):
|
||||||
cache.set_cache_mode(cache_mode)
|
|
||||||
|
|
||||||
for i, core in TestRun.iteration(enumerate(cores), "Set sequential cut-off parameters for "
|
|
||||||
"all cores"):
|
|
||||||
with TestRun.step(f"Setting core sequential cut off policy to {SeqCutOffPolicy.always}"):
|
|
||||||
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
|
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
|
||||||
|
core.set_seq_cutoff_threshold(threshold)
|
||||||
|
|
||||||
with TestRun.step(f"Setting core sequential cut off threshold to {thresholds[i]}"):
|
with TestRun.step("Prepare sequential IO against first three cores"):
|
||||||
core.set_seq_cutoff_threshold(thresholds[i])
|
fio = (
|
||||||
|
Fio()
|
||||||
with TestRun.step("Creating FIO command (one job per core)"):
|
.create_command()
|
||||||
fio = (Fio().create_command()
|
.io_engine(IoEngine.libaio)
|
||||||
.io_engine(IoEngine.libaio)
|
.block_size(Size(1, Unit.Blocks4096))
|
||||||
.block_size(Size(1, Unit.Blocks4096))
|
.direct(True)
|
||||||
.direct()
|
.cpus_allowed(get_dut_cpu_physical_cores())
|
||||||
.cpus_allowed(get_dut_cpu_physical_cores())
|
.cpus_allowed_policy(CpusAllowedPolicy.split)
|
||||||
.cpus_allowed_policy(CpusAllowedPolicy.split))
|
)
|
||||||
|
|
||||||
# Run sequential IO against first three cores
|
# Run sequential IO against first three cores
|
||||||
for i, core in enumerate(cores[:-1]):
|
for core, io_size in zip(core_list[:-1], io_sizes_list[:-1]):
|
||||||
fio_job = fio.add_job(job_name=f"core_{core.core_id}")
|
fio_job = fio.add_job(job_name=f"core_{core.core_id}")
|
||||||
fio_job.size(io_sizes[i])
|
fio_job.size(io_size)
|
||||||
fio_job.read_write(io_type)
|
fio_job.read_write(io_type)
|
||||||
fio_job.target(core.path)
|
fio_job.target(core.path)
|
||||||
writes_before.append(core.get_statistics().block_stats.cache.writes)
|
writes_before_list.append(core.get_statistics().block_stats.cache.writes)
|
||||||
|
|
||||||
# Run random IO against the last core
|
# Run random IO against the last core
|
||||||
fio_job = fio.add_job(job_name=f"core_{cores[-1].core_id}")
|
fio_job = fio.add_job(job_name=f"core_{core_list[-1].core_id}")
|
||||||
fio_job.size(io_sizes[-1])
|
fio_job.size(io_sizes_list[-1])
|
||||||
fio_job.read_write(io_type_last)
|
fio_job.read_write(io_type_last)
|
||||||
fio_job.target(cores[-1].path)
|
fio_job.target(core_list[-1].path)
|
||||||
writes_before.append(cores[-1].get_statistics().block_stats.cache.writes)
|
writes_before_list.append(core_list[-1].get_statistics().block_stats.cache.writes)
|
||||||
|
|
||||||
with TestRun.step("Running IO against all cores"):
|
with TestRun.step("Running IO against all cores"):
|
||||||
fio.run()
|
fio.run()
|
||||||
|
|
||||||
with TestRun.step("Verifying writes to cache count after IO"):
|
with TestRun.step("Verifying writes to cache count after IO"):
|
||||||
for i, core in enumerate(cores[:-1]):
|
for core, writes, threshold, io_size in zip(
|
||||||
verify_writes_count(core, writes_before[i], thresholds[i], io_sizes[i],
|
core_list[:-1], writes_before_list[:-1], thresholds_list[:-1], io_sizes_list[:-1]
|
||||||
VerifyType.POSITIVE)
|
):
|
||||||
|
verify_writes_count(
|
||||||
|
core=core,
|
||||||
|
writes_before=writes,
|
||||||
|
threshold=threshold,
|
||||||
|
io_size=io_size,
|
||||||
|
ver_type=VerifyType.POSITIVE,
|
||||||
|
)
|
||||||
|
|
||||||
verify_writes_count(cores[-1], writes_before[-1], thresholds[-1], io_sizes[-1],
|
verify_writes_count(
|
||||||
VerifyType.EQUAL)
|
core=core_list[-1],
|
||||||
|
writes_before=writes_before_list[-1],
|
||||||
|
threshold=thresholds_list[-1],
|
||||||
|
io_size=io_sizes_list[-1],
|
||||||
|
ver_type=VerifyType.EQUAL,
|
||||||
|
)
|
||||||
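The pinned variant above differs mainly in the fio command: cpus_allowed combined with cpus_allowed_policy=split asks fio to divide the allowed CPU set between the jobs, so each core's sequential stream runs on its own CPU. A rough standalone sketch of the split idea (a conceptual illustration, not fio's exact assignment algorithm):

# Conceptual sketch of cpus_allowed_policy=split: one CPU bucket per job.
def split_cpus(cpus: list[int], job_count: int) -> list[list[int]]:
    buckets = [[] for _ in range(job_count)]
    for i, cpu in enumerate(cpus):
        buckets[i % job_count].append(cpu)  # round-robin distribution
    return buckets

print(split_cpus([0, 1, 2, 3, 4, 5, 6, 7], 4))  # [[0, 4], [1, 5], [2, 6], [3, 7]]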
-@pytest.mark.parametrize("threshold_param", [
-    random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte)))
-])
-@pytest.mark.parametrize("policy, verify_type", [(SeqCutOffPolicy.never, VerifyType.NEGATIVE),
-                                                 (SeqCutOffPolicy.always, VerifyType.POSITIVE),
-                                                 (SeqCutOffPolicy.full, VerifyType.NEGATIVE)])
-@pytest.mark.parametrizex("cls", CacheLineSize)
+@pytest.mark.parametrize(
+    "policy, verify_type",
+    [
+        (SeqCutOffPolicy.never, VerifyType.NEGATIVE),
+        (SeqCutOffPolicy.always, VerifyType.POSITIVE),
+        (SeqCutOffPolicy.full, VerifyType.NEGATIVE),
+    ],
+)
+@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
 @pytest.mark.parametrizex("io_dir", [ReadWrite.write, ReadWrite.read])
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_seq_cutoff_thresh(threshold_param, cls, io_dir, policy, verify_type):
+def test_seq_cutoff_thresh(cache_line_size, io_dir, policy, verify_type):
     """
     title: Sequential cut-off tests for writes and reads for 'never', 'always' and 'full' policies
     description: |
@@ -215,47 +288,80 @@ def test_seq_cutoff_thresh(threshold_param, cls, io_dir, policy, verify_type):
       is valid for sequential cut-off threshold parameter, assuming that cache occupancy
       doesn't reach 100% during test.
     pass_criteria:
       - Amount of written blocks to cache is less or equal than amount set
        with sequential cut-off parameter in case of 'always' policy.
       - Amount of written blocks to cache is at least equal io size in case of 'never' and 'full'
        policy.
     """
-    with TestRun.step(f"Test prepare (start cache (cache line size: {cls}) and add cores)"):
-        cache, cores = prepare(cores_count=1, cache_line_size=cls)
+    with TestRun.step("Partition cache and core devices"):
+        cache_device = TestRun.disks["cache"]
+        core_device = TestRun.disks["core"]
+        cache_device.create_partitions(
+            [(SEQ_CUTOFF_THRESHOLD_MAX * 4 + Size(value=5, unit=Unit.GibiByte))]
+        )
+        core_device.create_partitions(
+            [(SEQ_CUTOFF_THRESHOLD_MAX + Size(value=10, unit=Unit.GibiByte))]
+        )
+        cache_part = cache_device.partitions[0]
+        core_part = core_device.partitions[0]
+
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
+    with TestRun.step(f"Start cache and add core"):
+        cache = casadm.start_cache(
+            cache_dev=cache_part,
+            force=True,
+            cache_line_size=cache_line_size,
+        )
+        core = cache.add_core(core_dev=core_part)

     fio_additional_size = Size(10, Unit.Blocks4096)
-    threshold = Size(threshold_param, Unit.KibiByte)
+    threshold = Size.generate_random_size(
+        min_size=1,
+        max_size=SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte),
+        unit=Unit.KibiByte,
+    )
     io_size = (threshold + fio_additional_size).align_down(0x1000)

     with TestRun.step(f"Setting cache sequential cut off policy mode to {policy}"):
         cache.set_seq_cutoff_policy(policy)

-    with TestRun.step(f"Setting cache sequential cut off policy threshold to "
-                      f"{threshold}"):
+    with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
         cache.set_seq_cutoff_threshold(threshold)

-    with TestRun.step(f"Running sequential IO ({io_dir})"):
+    with TestRun.step("Prepare sequential IO against core"):
         sync()
-        writes_before = cores[0].get_statistics().block_stats.cache.writes
-        (Fio().create_command()
-         .io_engine(IoEngine.libaio)
-         .size(io_size)
-         .read_write(io_dir)
-         .target(f"{cores[0].path}")
-         .direct()
-         ).run()
+        writes_before = core.get_statistics().block_stats.cache.writes
+        fio = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .size(io_size)
+            .read_write(io_dir)
+            .target(f"{core.path}")
+            .direct()
+        )
+
+    with TestRun.step("Run fio"):
+        fio.run()

     with TestRun.step("Verify writes to cache count"):
-        verify_writes_count(cores[0], writes_before, threshold, io_size, verify_type)
+        verify_writes_count(
+            core=core,
+            writes_before=writes_before,
+            threshold=threshold,
+            io_size=io_size,
+            ver_type=verify_type,
+        )
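To make the policy-to-verification mapping above concrete: 'always' should cap cache writes near the threshold (POSITIVE), while 'never', and 'full' on a cache that is not yet full, should pass the whole IO through to cache (NEGATIVE). A standalone sketch with illustrative KiB values:

# Illustrative check mirroring the two verification modes; the values are made up.
threshold = 1024          # random cut-off threshold (KiB)
io_size = 1064            # threshold + fio_additional_size, 4 KiB-aligned
io_margin = 8             # default slack used by verify_writes_count

writes_never = 1064       # 'never'/'full' before fill: everything should land in cache
assert writes_never >= io_size                    # NEGATIVE expectation

writes_always = 1028      # 'always': bounded by threshold plus slack
assert writes_always < threshold + io_margin      # POSITIVE expectation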
@pytest.mark.parametrize("threshold_param", [
|
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
|
||||||
random.randint(1, int(SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte)))
|
|
||||||
])
|
|
||||||
@pytest.mark.parametrizex("cls", CacheLineSize)
|
|
||||||
@pytest.mark.parametrizex("io_dir", [ReadWrite.write, ReadWrite.read])
|
@pytest.mark.parametrizex("io_dir", [ReadWrite.write, ReadWrite.read])
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
|
def test_seq_cutoff_thresh_fill(cache_line_size, io_dir):
|
||||||
"""
|
"""
|
||||||
title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
|
title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
|
||||||
description: |
|
description: |
|
||||||
@ -263,93 +369,116 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
|
|||||||
cache for 'full' sequential cut-off policy with cache configured with different cache
|
cache for 'full' sequential cut-off policy with cache configured with different cache
|
||||||
line sizes is valid for sequential cut-off threshold parameter.
|
line sizes is valid for sequential cut-off threshold parameter.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Amount of written blocks to cache is big enough to fill cache when 'never' sequential
|
- Amount of written blocks to cache is big enough to fill cache when 'never' sequential
|
||||||
cut-off policy is set
|
cut-off policy is set
|
||||||
- Amount of written blocks to cache is less or equal than amount set
|
- Amount of written blocks to cache is less or equal than amount set
|
||||||
with sequential cut-off parameter in case of 'full' policy.
|
with sequential cut-off parameter in case of 'full' policy.
|
||||||
"""
|
"""
|
||||||
with TestRun.step(f"Test prepare (start cache (cache line size: {cls}) and add cores)"):
|
|
||||||
cache, cores = prepare(cores_count=1, cache_line_size=cls)
|
with TestRun.step("Partition cache and core devices"):
|
||||||
|
cache_device = TestRun.disks["cache"]
|
||||||
|
core_device = TestRun.disks["core"]
|
||||||
|
cache_device.create_partitions(
|
||||||
|
[(SEQ_CUTOFF_THRESHOLD_MAX + Size(value=5, unit=Unit.GibiByte))]
|
||||||
|
)
|
||||||
|
core_device.create_partitions(
|
||||||
|
[(SEQ_CUTOFF_THRESHOLD_MAX + Size(value=10, unit=Unit.GibiByte))]
|
||||||
|
)
|
||||||
|
cache_part = cache_device.partitions[0]
|
||||||
|
core_part = core_device.partitions[0]
|
||||||
|
|
||||||
|
with TestRun.step("Disable udev"):
|
||||||
|
Udev.disable()
|
||||||
|
|
||||||
|
with TestRun.step(f"Start cache and add core"):
|
||||||
|
cache = casadm.start_cache(
|
||||||
|
cache_dev=cache_part,
|
||||||
|
force=True,
|
||||||
|
cache_line_size=cache_line_size,
|
||||||
|
)
|
||||||
|
core = cache.add_core(core_dev=core_part)
|
||||||
|
|
||||||
fio_additional_size = Size(10, Unit.Blocks4096)
|
fio_additional_size = Size(10, Unit.Blocks4096)
|
||||||
threshold = Size(threshold_param, Unit.KibiByte)
|
threshold = Size.generate_random_size(
|
||||||
|
min_size=1,
|
||||||
|
max_size=SEQ_CUTOFF_THRESHOLD_MAX.get_value(Unit.KibiByte),
|
||||||
|
unit=Unit.KibiByte,
|
||||||
|
)
|
||||||
io_size = (threshold + fio_additional_size).align_down(0x1000)
|
io_size = (threshold + fio_additional_size).align_down(0x1000)
|
||||||
|
|
||||||
with TestRun.step(f"Setting cache sequential cut off policy mode to "
|
with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.never}"):
|
||||||
f"{SeqCutOffPolicy.never}"):
|
|
||||||
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
|
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
|
||||||
|
|
||||||
with TestRun.step("Filling cache (sequential writes IO with size of cache device)"):
|
with TestRun.step("Prepare sequential IO against core"):
|
||||||
sync()
|
sync()
|
||||||
(Fio().create_command()
|
fio = (
|
||||||
.io_engine(IoEngine.libaio)
|
Fio()
|
||||||
.size(cache.cache_device.size)
|
.create_command()
|
||||||
.read_write(io_dir)
|
.io_engine(IoEngine.libaio)
|
||||||
.target(f"{cores[0].path}")
|
.size(cache.size)
|
||||||
.direct()
|
.read_write(io_dir)
|
||||||
).run()
|
.target(f"{core.path}")
|
||||||
|
.direct()
|
||||||
|
)
|
||||||
|
|
||||||
with TestRun.step("Check if cache is filled enough (expecting occupancy not less than "
|
with TestRun.step("Run fio"):
|
||||||
"95%)"):
|
fio.run()
|
||||||
occupancy = cache.get_statistics(percentage_val=True).usage_stats.occupancy
|
|
||||||
if occupancy < 95:
|
|
||||||
TestRun.fail(f"Cache occupancy is too small: {occupancy}, expected at least 95%")
|
|
||||||
|
|
||||||
with TestRun.step(f"Setting cache sequential cut off policy mode to "
|
with TestRun.step("Check if cache is filled enough (expecting occupancy not less than 95%)"):
|
||||||
f"{SeqCutOffPolicy.full}"):
|
occupancy_percentage = cache.get_statistics(percentage_val=True).usage_stats.occupancy
|
||||||
|
if occupancy_percentage < 95:
|
||||||
|
TestRun.fail(
|
||||||
|
f"Cache occupancy is too small: {occupancy_percentage}, expected at least 95%"
|
||||||
|
)
|
||||||
|
|
||||||
|
with TestRun.step(f"Setting cache sequential cut off policy mode to {SeqCutOffPolicy.full}"):
|
||||||
cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)
|
cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)
|
||||||
with TestRun.step(f"Setting cache sequential cut off policy threshold to "
|
|
||||||
f"{threshold}"):
|
with TestRun.step(f"Setting cache sequential cut off policy threshold to {threshold}"):
|
||||||
cache.set_seq_cutoff_threshold(threshold)
|
cache.set_seq_cutoff_threshold(threshold)
|
||||||
|
|
||||||
with TestRun.step(f"Running sequential IO ({io_dir})"):
|
with TestRun.step(f"Running sequential IO ({io_dir})"):
|
||||||
sync()
|
sync()
|
||||||
writes_before = cores[0].get_statistics().block_stats.cache.writes
|
writes_before = core.get_statistics().block_stats.cache.writes
|
||||||
(Fio().create_command()
|
fio = (
|
||||||
.io_engine(IoEngine.libaio)
|
Fio()
|
||||||
.size(io_size)
|
.create_command()
|
||||||
.read_write(io_dir)
|
.io_engine(IoEngine.libaio)
|
||||||
.target(f"{cores[0].path}")
|
.size(io_size)
|
||||||
.direct()
|
.read_write(io_dir)
|
||||||
).run()
|
.target(f"{core.path}")
|
||||||
|
.direct()
|
||||||
|
)
|
||||||
|
|
||||||
|
with TestRun.step("Run fio"):
|
||||||
|
fio.run()
|
||||||
|
|
||||||
with TestRun.step("Verify writes to cache count"):
|
with TestRun.step("Verify writes to cache count"):
|
||||||
verify_writes_count(cores[0], writes_before, threshold, io_size, VerifyType.POSITIVE)
|
verify_writes_count(core, writes_before, threshold, io_size, VerifyType.POSITIVE)
|
||||||
|
|
||||||
|
|
||||||
-def verify_writes_count(core, writes_before, threshold, io_size, ver_type=VerifyType.NEGATIVE,
-                        io_margin=Size(8, Unit.KibiByte)):
+def verify_writes_count(
+    core,
+    writes_before,
+    threshold,
+    io_size,
+    ver_type=VerifyType.NEGATIVE,
+    io_margin=Size(8, Unit.KibiByte),
+):
     writes_after = core.get_statistics().block_stats.cache.writes
     writes_difference = writes_after - writes_before
-    if ver_type is VerifyType.NEGATIVE:
-        if writes_difference < io_size:
-            TestRun.fail(f"Wrong writes count: {writes_difference} , expected at least "
-                         f"{io_size}")
-    elif ver_type is VerifyType.POSITIVE:
-        if writes_difference >= threshold + io_margin:
-            TestRun.fail(f"Wrong writes count: {writes_difference} , expected at most "
-                         f"{threshold + io_margin}")
-    elif ver_type is VerifyType.EQUAL:
-        if writes_difference != io_size:
-            TestRun.fail(f"Wrong writes count: {writes_difference} , expected {io_size}")
-
-
-def prepare(cores_count=1, cache_line_size: CacheLineSize = None):
-    cache_device = TestRun.disks['cache']
-    core_device = TestRun.disks['core']
-    cache_device.create_partitions(
-        [(SEQ_CUTOFF_THRESHOLD_MAX * cores_count + Size(5, Unit.GibiByte)).align_down(0x1000)])
-    partitions = \
-        [(SEQ_CUTOFF_THRESHOLD_MAX + Size(10, Unit.GibiByte)).align_down(0x1000)] * cores_count
-    core_device.create_partitions(partitions)
-    cache_part = cache_device.partitions[0]
-    core_parts = core_device.partitions
-    TestRun.LOGGER.info("Starting cache")
-
-    cache = casadm.start_cache(cache_part, force=True, cache_line_size=cache_line_size)
-    Udev.disable()
-    TestRun.LOGGER.info("Adding core devices")
-    core_list = []
-    for core_part in core_parts:
-        core_list.append(cache.add_core(core_dev=core_part))
-    return cache, core_list
+    match ver_type:
+        case VerifyType.NEGATIVE:
+            if writes_difference < io_size:
+                TestRun.fail(
+                    f"Wrong writes count: {writes_difference} , expected at least {io_size}"
+                )
+        case VerifyType.POSITIVE:
+            if writes_difference >= threshold + io_margin:
+                TestRun.fail(
+                    f"Wrong writes count: {writes_difference} , expected at most "
+                    f"{threshold + io_margin}"
+                )
+        case VerifyType.EQUAL:
+            if writes_difference != io_size:
+                TestRun.fail(f"Wrong writes count: {writes_difference} , expected {io_size}")
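One implementation detail worth flagging in the hunk above: the if/elif chain over VerifyType becomes a match statement with value patterns, which dispatches identically here but requires Python 3.10 or newer. A minimal self-contained sketch of the same dispatch shape:

from enum import Enum, auto

class VerifyType(Enum):  # stand-in for the framework's VerifyType
    NEGATIVE = auto()
    POSITIVE = auto()
    EQUAL = auto()

def expectation(ver_type: VerifyType) -> str:
    match ver_type:  # dotted names act as value patterns, not capture patterns
        case VerifyType.NEGATIVE:
            return "at least io_size written to cache"
        case VerifyType.POSITIVE:
            return "less than threshold + io_margin written to cache"
        case VerifyType.EQUAL:
            return "exactly io_size written to cache"

print(expectation(VerifyType.EQUAL))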
@@ -1,142 +0,0 @@
-#
-# Copyright(c) 2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-import pytest
-
-from api.cas.cache_config import CacheMode, CacheLineSize
-from core.test_run import TestRun
-from api.cas import cli, casadm
-from api.cas.cli_messages import (
-    check_stderr_msg,
-    start_cache_on_already_used_dev,
-    start_cache_with_existing_id
-)
-from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
-from test_tools.dd import Dd
-from test_tools.disk_utils import Filesystem
-from test_utils.filesystem.file import File
-from test_utils.size import Size, Unit
-
-
-def test_cas_version():
-    """
-    title: Check if CAS is installed
-    description:
-        Check if CAS is installed with --version flag and later
-        checks if components version is consistent with version file
-    pass criteria:
-    - casadm command succeeds
-    - Versions are matched from cmd and file in /var/lib/opencas/cas_version
-    """
-    cmd = f"casadm --version -o csv"
-    output = TestRun.executor.run_expect_success(cmd).stdout
-    cmd_cas_versions = output.split("\n")[1:]
-
-    version_file_path = r"/var/lib/opencas/cas_version"
-    file_read_cmd = f"cat {version_file_path} | grep CAS_VERSION="
-    file_cas_version_str = TestRun.executor.run_expect_success(file_read_cmd).stdout
-    file_cas_version = file_cas_version_str.split('=')[1]
-
-    for version in cmd_cas_versions:
-        splitted_version = version.split(",")
-        if splitted_version[1] != file_cas_version:
-            TestRun.LOGGER.error(f"""Version of {splitted_version[0]} from cmd doesn't match
-            with file. Expected: {file_cas_version} Actual: {splitted_version[1]}""")
-
-
-@pytest.mark.CI
-@pytest.mark.require_disk("cache_1", DiskTypeSet([DiskType.nand, DiskType.optane]))
-def test_negative_start_cache():
-    """
-    title: Test start cache negative on cache device
-    description:
-        Check for negative cache start scenarios
-    pass criteria:
-    - Cache start succeeds
-    - Fails to start cache on the same device with another id
-    - Fails to start cache on another partition with the same id
-    """
-    with TestRun.step("Set up device"):
-        cache_dev = TestRun.disks["cache_1"]
-        cache_dev.create_partitions([Size(2000, Unit.MebiByte)] * 2)
-        cache_dev_1 = cache_dev.partitions[0]
-        cache_dev_2 = cache_dev.partitions[1]
-
-    with TestRun.step("Start cache on cache device"):
-        TestRun.executor.run_expect_success(
-            cli.start_cmd(cache_dev_1.path, cache_id="1", force=True)
-        )
-
-    with TestRun.step("Start cache on the same device but with another ID"):
-        output = TestRun.executor.run_expect_fail(
-            cli.start_cmd(cache_dev_1.path, cache_id="2", force=True)
-        )
-        if not check_stderr_msg(output, start_cache_on_already_used_dev):
-            TestRun.fail(f"Received unexpected error message: {output.stderr}")
-
-    with TestRun.step("Start cache with the same ID on another cache device"):
-        output = TestRun.executor.run_expect_fail(
-            cli.start_cmd(cache_dev_2.path, cache_id="1", force=True)
-        )
-        if not check_stderr_msg(output, start_cache_with_existing_id):
-            TestRun.fail(f"Received unexpected error message: {output.stderr}")
-
-
-@pytest.mark.CI
-@pytest.mark.parametrizex("filesystem", Filesystem)
-@pytest.mark.parametrizex("cache_mode", CacheMode)
-@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_data_integrity(cache_mode, cache_line_size, filesystem):
-    """
-    title: Check basic data integrity after stopping the cache
-    pass criteria:
-    - System does not crash.
-    - All operations complete successfully.
-    - Data consistency is preserved.
-    """
-    cache_id = core_id = 1
-    mountpoint = "/mnt"
-    filepath = f"{mountpoint}/file"
-
-    with TestRun.step("Prepare partitions for cache (200MiB) and for core (100MiB)"):
-        cache_device = TestRun.disks["cache"]
-        cache_device.create_partitions([Size(200, Unit.MebiByte)])
-        cache_part = cache_device.partitions[0]
-
-        core_device = TestRun.disks["core"]
-        core_device.create_partitions([Size(100, Unit.MebiByte)])
-        core_part = core_device.partitions[0]
-
-    with TestRun.step("Start cache and add core device"):
-        cache = casadm.start_cache(cache_part, cache_mode, cache_line_size, cache_id, True)
-        core = cache.add_core(core_part, core_id)
-
-    with TestRun.step("Create filesystem on CAS device and mount it"):
-        core.create_filesystem(filesystem)
-        core.mount(mountpoint)
-
-    with TestRun.step("Create test file and calculate md5 checksum"):
-        (
-            Dd()
-            .input("/dev/urandom")
-            .output(filepath)
-            .count(1)
-            .block_size(Size(50, Unit.MebiByte))
-            .run()
-        )
-        test_file = File(filepath)
-        md5_before = test_file.md5sum()
-
-    with TestRun.step("Unmount and stop the cache"):
-        core.unmount()
-        cache.flush_cache()
-        cache.stop()
-
-    with TestRun.step("Mount the core device and check for file"):
-        core_part.mount(mountpoint)
-        md5_after = test_file.md5sum()
-        if md5_before != md5_after:
-            TestRun.fail("md5 checksum mismatch!")
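The deleted version check above parses casadm's CSV output by hand, comparing the second column of every row against the version recorded in /var/lib/opencas/cas_version. A standalone sketch of that parsing, using a hypothetical payload (the real casadm --version -o csv column layout may differ):

# Hypothetical CSV payload; the real `casadm --version -o csv` output may differ.
output = (
    "Name,Version\n"
    "CAS Cache Kernel Module,22.6.0.0790\n"
    "CAS CLI Utility,22.6.0.0790"
)
file_cas_version = "22.6.0.0790"  # value read from the version file

for row in output.split("\n")[1:]:  # skip the CSV header, as the test did
    name, version = row.split(",")
    assert version == file_cas_version, f"{name}: {version} != {file_cas_version}"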
@@ -1,124 +0,0 @@
-#
-# Copyright(c) 2019-2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-#
-from time import sleep
-
-import pytest
-
-from api.cas import casadm
-from api.cas.cache_config import CacheStatus
-from api.cas.core import CoreStatus
-from core.test_run import TestRun
-from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType
-from test_utils.size import Size, Unit
-
-
-@pytest.mark.CI
-@pytest.mark.os_dependent
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_incremental_load_basic():
-    """
-    title: Incremental load test basic
-    description: |
-        Test incremental load and core pool functionality
-    pass_criteria:
-    - cores after start and load should be in active state and cache in running state
-    - cores after adding to core pool are in inactive state and cache in incomplete state
-    """
-    with TestRun.step("Prepare devices."):
-        cache_disk = TestRun.disks["cache"]
-        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
-        cache_dev = cache_disk.partitions[0]
-        core_disk = TestRun.disks["core"]
-        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 3)
-        core_devs = core_disk.partitions
-        cache_id = 1
-        core_ids = [1, 2, 3]
-
-    with TestRun.step("Start cache."):
-        cache = casadm.start_cache(cache_dev, cache_id=cache_id)
-        if cache.get_status() is not CacheStatus.running:
-            TestRun.fail(f"Cache {cache.core_id} should be running but is {cache.get_status()}.")
-
-    with TestRun.step("Add cores."):
-        for core_dev in core_devs:
-            core = cache.add_core(core_dev)
-            if core.get_status() is not CoreStatus.active:
-                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
-
-    with TestRun.step("Stop cache."):
-        cache.stop()
-
-    with TestRun.step("Add cores to core pool."):
-        cores = []
-        for core_dev, core_id in zip(core_devs, core_ids):
-            core = casadm.try_add(core_device=core_dev, cache_id=cache_id, core_id=core_id)
-            cores.append(core)
-            if core.get_status() is not CoreStatus.detached:
-                TestRun.fail(f"Core {core.core_id} should be detached but is {core.get_status()}.")
-
-    with TestRun.step("Load cache"):
-        cache = casadm.load_cache(cache_dev)
-        if cache.get_status() is not CacheStatus.running:
-            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")
-        for core in cores:
-            if core.get_status() is not CoreStatus.active:
-                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
-
-
-@pytest.mark.CI
-@pytest.mark.os_dependent
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_incremental_load_hidden_core():
-    """
-    title: Incremental load test with hidden core
-    description: |
-        Test incremental load and core pool functionality with hidden core partition
-    pass_criteria:
-    - cores after adding to core pool are in detached state
-    - visible cores after start and load should be in active state
-    - hidden core after load should be in detached state
-    """
-    with TestRun.step("Prepare devices."):
-        cache_disk = TestRun.disks["cache"]
-        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
-        cache_dev = cache_disk.partitions[0]
-        core_disk = TestRun.disks["core"]
-        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 3)
-        core_devs = core_disk.partitions
-        cache_id = 1
-
-    with TestRun.step("Start cache."):
-        cache = casadm.start_cache(cache_dev, cache_id=cache_id)
-        if cache.get_status() is not CacheStatus.running:
-            TestRun.fail(f"Cache {cache.core_id} should be running but is {cache.get_status()}.")
-
-    with TestRun.step("Add cores."):
-        for core_dev in core_devs:
-            core = cache.add_core(core_dev)
-            if core.get_status() is not CoreStatus.active:
-                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
-        hidden_core = cache.get_core_devices()[2]
-
-    with TestRun.step("Stop cache."):
-        cache.stop()
-
-    with TestRun.step("Hide core part form from being loaded"):
-        core_disk.remove_partitions()
-        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 2)
-
-    with TestRun.step("Load cache"):
-        cache = casadm.load_cache(cache_dev)
-        if cache.get_status() is not CacheStatus.incomplete:
-            TestRun.fail(
-                f"Cache {cache.cache_id} should be incomplete but is "
-                f"{cache.get_status()}."
-            )
-        for core in cache.get_core_devices():
-            if core.get_status() is not CoreStatus.active:
-                TestRun.fail(f"Core {core.core_id} should be Active but is {core.get_status()}.")
-        if hidden_core.get_status() is not CoreStatus.inactive:
-            TestRun.fail(f"Hidden core should be Inactive but is {hidden_core.get_status()}.")
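As a reading aid for the two deleted incremental-load tests, these are the status expectations they asserted at each stage (summarized from the code above, not from framework documentation):

# Status expectations asserted by the deleted incremental-load tests above.
expected_states = {
    "cache started, cores added":           ("running", "active"),
    "cores re-added via try_add, no cache": (None, "detached"),
    "cache loaded, all core parts present": ("running", "active"),
    "cache loaded, core partition hidden":  ("incomplete", "inactive"),
}
for stage, (cache_state, core_state) in expected_states.items():
    print(f"{stage}: cache={cache_state}, core={core_state}")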
@@ -1,211 +0,0 @@
-#
-# Copyright(c) 2019-2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-import pytest
-
-from api.cas import casadm
-from api.cas.cache_config import CacheMode
-from api.cas.cli import casadm_bin
-from api.cas.cli_messages import check_stderr_msg, stop_cache_errors
-from core.test_run import TestRun
-from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType, Disk
-from test_tools.dd import Dd
-from test_tools.disk_utils import Filesystem, unmount, mount
-from test_tools.fs_utils import check_if_file_exists
-from test_utils.filesystem.file import File
-from test_utils.os_utils import sync
-from test_utils.size import Size, Unit
-
-mount_point = "/mnt/cas"
-
-
-@pytest.mark.CI
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_recover_cache_verify_core():
-    """
-    title: Recovery after turning off/on devices
-    description: |
-        Test data integrity after turning off cache device.
-    pass_criteria:
-    - Cache devices successfully loaded with metadata after turning devices off/on
-    - md5sums before and after all operations match each other
-    - creation, mount, unmount of filesystems on the core device succeeds
-    """
-    filesystems = [Filesystem.xfs, Filesystem.ext3, Filesystem.ext4]
-    cache_cnt = len(filesystems)
-
-    with TestRun.step("Prepare devices."):
-        cache_disk = TestRun.disks["cache"]
-        cache_disk.create_partitions([Size(2, Unit.GibiByte)] * cache_cnt)
-        cache_devs = cache_disk.partitions
-        core_disk = TestRun.disks["core"]
-        core_disk.create_partitions([Size(4, Unit.GibiByte)] * cache_cnt)
-        core_devs = core_disk.partitions
-
-    with TestRun.step("Start caches and add cores."):
-        caches, cores = [], []
-        for (cache_dev, core_dev) in zip(cache_devs, core_devs):
-            cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
-            core = cache.add_core(core_dev)
-            caches.append(cache)
-            cores.append(core)
-
-    with TestRun.step("Create filesystem on core devices."):
-        for (core, filesystem) in zip(cores, filesystems):
-            core.create_filesystem(filesystem)
-
-    with TestRun.step("Mount cache devices."):
-        for (cache, core) in zip(caches, cores):
-            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
-            core.mount(core_mnt_point)
-
-    with TestRun.step("Run IO"):
-        dd = (
-            Dd()
-            .input("/dev/urandom")
-            .output(f"{core_mnt_point}/test")
-            .count(1)
-            .block_size(Size(50, Unit.MegaByte))
-        )
-        dd.run()
-
-    with TestRun.step("Calculate cache md5sums before unplug."):
-        core_mnt_md5s_before = [File(f"{core.mount_point}/test").md5sum() for core in cores]
-
-    with TestRun.step("Umount core devices."):
-        for core in cores:
-            core.unmount()
-
-    with TestRun.step("Dirty stop"):
-        dirty_stop(cache_disk, caches)
-
-    with TestRun.step("Start caches with load metadata and later stop them."):
-        for cache_dev in cache_devs:
-            cache = casadm.load_cache(cache_dev)
-            cache.stop()
-
-    with TestRun.step("Mount core devices."):
-        for core, cache in zip(cores, caches):
-            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
-            mount(core.core_device, core_mnt_point)
-            core.mount_point = core_mnt_point
-            if not check_if_file_exists(f"{core_mnt_point}/test"):
-                TestRun.LOGGER.error(f"Mounting core device {core_mnt_point} failed.")
-
-    with TestRun.step("Calculate cache md5sums after recovery."):
-        core_mnt_md5s_after = [File(f"{core.mount_point}/test").md5sum() for core in cores]
-
-    with TestRun.step("Compare md5 sums for cores and core devices"):
-        if core_mnt_md5s_before != core_mnt_md5s_after:
-            TestRun.fail(
-                f"MD5 sums of core before and after does not match."
-                f"Expected: {core_mnt_md5s_before}, Actual: {core_mnt_md5s_after}"
-            )
-
-    with TestRun.step("Umount core devices."):
-        for core_dev in core_devs:
-            unmount(core_dev)
-
-
-@pytest.mark.CI
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_recover_cache_verify_exp_obj():
-    """
-    title: Recovery after turning off/on devices
-    description: |
-        Test data integrity after turning off cache device.
-    pass_criteria:
-    - Cache devices successfully loaded with metadata after turning devices off/on
-    - md5sums before and after all operations match each other
-    - creation, mount, unmount of filesystems succeeds on core exported object
-    """
-    with TestRun.step("Prepare devices."):
-        cache_disk = TestRun.disks["cache"]
-        cache_disk.create_partitions([Size(2, Unit.GibiByte)] * 3)
-        cache_devs = cache_disk.partitions
-        core_disk = TestRun.disks["core"]
-        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 3)
-        core_devs = core_disk.partitions
-
-    with TestRun.step("Start caches and add cores."):
-        caches, cores = [], []
-        for (cache_dev, core_dev) in zip(cache_devs, core_devs):
-            cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
-            core = cache.add_core(core_dev)
-            caches.append(cache)
-            cores.append(core)
-
-    with TestRun.step("Create filesystem on core devices."):
-        filesystems = [Filesystem.xfs, Filesystem.ext3, Filesystem.ext4]
-        for (core, filesystem) in zip(cores, filesystems):
-            core.create_filesystem(filesystem)
-
-    with TestRun.step("Mount cache devices."):
-        for (cache, core) in zip(caches, cores):
-            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
-            core.mount(core_mnt_point)
-
-    with TestRun.step("Run IO"):
-        dd = (
-            Dd()
-            .input("/dev/urandom")
-            .output(f"{core_mnt_point}/test")
-            .count(1)
-            .block_size(Size(50, Unit.MegaByte))
-        )
-        dd.run()
-        sync()
-
-    with TestRun.step("Calculate cache md5sums before unplug."):
-        core_mnt_md5s_before = [File(f"{core.mount_point}/test").md5sum() for core in cores]
-
-    with TestRun.step("Umount core devices."):
-        for core in cores:
-            core.unmount()
-
-    with TestRun.step("Dirty stop"):
-        dirty_stop(cache_disk, caches)
-
-    with TestRun.step("Load caches with metadata."):
-        for cache_dev in cache_devs:
-            casadm.load_cache(cache_dev)
-
-    with TestRun.step("Mount core devices."):
-        for core, cache in zip(cores, caches):
-            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
-            core.mount(core_mnt_point)
-            if not check_if_file_exists(f"{core_mnt_point}/test"):
-                TestRun.LOGGER.error(f"Mounting core device {core_mnt_point} failed.")
-
-    with TestRun.step("Calculate cache md5sums after recovery."):
-        core_mnt_md5s_after = [File(f"{core.mount_point}/test").md5sum() for core in cores]
-
-    with TestRun.step("Compare md5 sums for cores and core devices"):
-        if core_mnt_md5s_before != core_mnt_md5s_after:
-            TestRun.fail(
-                f"MD5 sums of core before and after does not match."
-                f"Expected: {core_mnt_md5s_before}, Actual: {core_mnt_md5s_after}"
-            )
-
-    with TestRun.step("Umount core devices."):
-        for core in cores:
-            core.unmount()
-
-
-def dirty_stop(cache_disk, caches: list):
-    with TestRun.step("Turn off cache devices."):
-        cache_disk.unplug()
-
-    with TestRun.step("Stop caches without flushing."):
-        for cache in caches:
-            cmd = f"{casadm_bin} --stop-cache --cache-id {cache.cache_id} --no-data-flush"
-            output = TestRun.executor.run(cmd)
-            if not check_stderr_msg(output, stop_cache_errors):
-                TestRun.fail(f"Cache {cache.cache_id} stopping should fail.")
-
-    with TestRun.step("Turn on devices."):
-        Disk.plug_all_disks()
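Note the inverted assertion in the deleted dirty_stop helper above: with the cache disk unplugged, --stop-cache --no-data-flush is expected to fail, and the helper only verifies that the failure message is one of the known stop_cache_errors. A compact standalone sketch of that pattern (the error strings here are hypothetical placeholders, not real casadm output):

# Sketch of an inverted assertion: the command must fail with a known message.
known_errors = ["example stop error A", "example stop error B"]  # hypothetical patterns

def check_stderr(stderr: str, patterns: list[str]) -> bool:
    return any(p in stderr for p in patterns)

stderr = "example stop error A: cache device missing"  # hypothetical command output
if not check_stderr(stderr, known_errors):
    raise AssertionError("Cache stop should have failed with a recognized error")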
@@ -1,54 +0,0 @@
-#
-# Copyright(c) 2022 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause
-#
-from time import sleep
-
-import pytest
-
-from api.cas import casadm_parser
-from api.cas.cache_config import CacheStatus
-from api.cas.core import CoreStatus
-from api.cas.init_config import InitConfig
-from core.test_run import TestRun
-from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType
-from test_utils.size import Size, Unit
-
-
-@pytest.mark.CI
-@pytest.mark.os_dependent
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_simulation_startup_from_config():
-    with TestRun.step("Prepare devices."):
-        cache_disk = TestRun.disks["cache"]
-        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
-        cache_dev = cache_disk.partitions[0]
-        core_disk = TestRun.disks["core"]
-        core_disk.create_partitions([Size(4, Unit.GibiByte)])
-        core_dev = core_disk.partitions[0]
-        cache_id, core_id = 1, 1
-
-    with TestRun.step("prepare CAS config."):
-        cache_config = InitConfig()
-        cache_config.add_cache(cache_id, cache_dev)
-        cache_config.add_core(cache_id, core_id, core_dev)
-        cache_config.save_config_file()
-
-    with TestRun.step("Initialize CAS from config."):
-        TestRun.executor.run_expect_success(f"casctl init")
-
-    with TestRun.step("Stop all CAS instances."):
-        TestRun.executor.run_expect_success(f"casctl stop")
-
-    with TestRun.step("Simulate boot process."):
-        TestRun.executor.run_expect_success(f"udevadm trigger")
-        sleep(1)
-
-    with TestRun.step("Verify if cache is up and working."):
-        cache = casadm_parser.get_caches()[0]
-        if cache.get_status() is not CacheStatus.running:
-            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")
-        core = cache.get_core_devices()[0]
-        if core.get_status() is not CoreStatus.active:
-            TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
@@ -10,69 +10,60 @@ from api.cas.cas_module import CasModule
 from core.test_run import TestRun
 from test_utils.size import Unit
 from test_utils.os_utils import (allocate_memory,
-                                 defaultize_memory_affecting_functions,
                                  disable_memory_affecting_functions,
                                  drop_caches,
                                  get_mem_free,
                                  is_kernel_module_loaded,
                                  load_kernel_module,
                                  unload_kernel_module,
-                                 unmount_ramfs)
+                                 )


 @pytest.mark.os_dependent
 def test_insufficient_memory_for_cas_module():
     """
-    title: Negative test of ability to load OpenCAS kernel module with insufficient memory.
+    title: Negative test for the ability of CAS to load the kernel module with insufficient memory.
     description: |
-      Check that OpenCAS kernel module won’t be loaded in case not enough memory is available.
+      Check that the CAS kernel module won’t be loaded if enough memory is not available
     pass_criteria:
-      - Loading OpenCAS kernel module returns error.
+      - CAS module cannot be loaded with not enough memory.
+      - Loading CAS with not enough memory returns error.
     """
-    with TestRun.step("Disable caching and memory over-committing."):
+    with TestRun.step("Disable caching and memory over-committing"):
         disable_memory_affecting_functions()
         drop_caches()

-    with TestRun.step("Measure memory usage without OpenCAS module."):
+    with TestRun.step("Measure memory usage without OpenCAS module"):
         if is_kernel_module_loaded(CasModule.cache.value):
             unload_kernel_module(CasModule.cache.value)
         available_mem_before_cas = get_mem_free()

-    with TestRun.step("Load OpenCAS module"):
-        output = load_kernel_module(CasModule.cache.value)
-        if output.exit_code != 0:
-            TestRun.fail("Cannot load OpenCAS module!")
+    with TestRun.step("Load CAS module"):
+        load_kernel_module(CasModule.cache.value)

-    with TestRun.step("Measure memory usage with OpenCAS module."):
+    with TestRun.step("Measure memory usage with CAS module"):
         available_mem_with_cas = get_mem_free()
         memory_used_by_cas = available_mem_before_cas - available_mem_with_cas
         TestRun.LOGGER.info(
             f"OpenCAS module uses {memory_used_by_cas.get_value(Unit.MiB):.2f} MiB of DRAM."
         )

-    with TestRun.step("Unload OpenCAS module."):
+    with TestRun.step("Unload CAS module"):
         unload_kernel_module(CasModule.cache.value)

-    with TestRun.step("Allocate memory leaving not enough memory for OpenCAS module."):
-        memory_to_leave = memory_used_by_cas * (3 / 4)
-        try:
-            allocate_memory(get_mem_free() - memory_to_leave)
-        except Exception as ex:
-            TestRun.LOGGER.error(f"{ex}")
+    with TestRun.step("Allocate memory, leaving not enough memory for CAS module"):
+        memory_to_leave = get_mem_free() - (memory_used_by_cas * (3 / 4))
+        allocate_memory(memory_to_leave)
+        TestRun.LOGGER.info(
+            f"Memory left for OpenCAS module: {get_mem_free().get_value(Unit.MiB):0.2f} MiB."
+        )

     with TestRun.step(
-        "Try to load OpenCAS module and check if error message is printed on failure."
+        "Try to load OpenCAS module and check if correct error message is printed on failure"
     ):
         output = load_kernel_module(CasModule.cache.value)
         if output.stderr and output.exit_code != 0:
-            memory_left = get_mem_free()
-            TestRun.LOGGER.info(
-                f"Memory left for OpenCAS module: {memory_left.get_value(Unit.MiB):0.2f} MiB."
-            )
             TestRun.LOGGER.info(f"Cannot load OpenCAS module as expected.\n{output.stderr}")
         else:
             TestRun.LOGGER.error("Loading OpenCAS module successfully finished, but should fail.")
-
-    with TestRun.step("Set memory options to default"):
-        unmount_ramfs()
-        defaultize_memory_affecting_functions()
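The rewritten allocation step above is easier to follow with numbers: if the module consumed M of DRAM on its first load, the test pins down all free memory except 3/4 x M, so the second load cannot succeed. A standalone sketch with illustrative MiB values:

# Illustrative arithmetic behind the "not enough memory" setup; values are made up.
mem_free_mib = 16000        # free RAM measured after unloading the module
module_usage_mib = 400      # DRAM the module consumed on its first load

memory_to_allocate = mem_free_mib - (module_usage_mib * 3 / 4)  # pin down this much
print(f"allocate {memory_to_allocate} MiB, leave {mem_free_mib - memory_to_allocate} MiB free")
# allocate 15700.0 MiB, leave 300.0 MiB free -> the next module load should fail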
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -16,8 +17,6 @@ from api.cas import casadm
|
|||||||
from api.cas.cache_config import CacheMode, CleaningPolicy
|
from api.cas.cache_config import CacheMode, CleaningPolicy
|
||||||
from test_utils.os_utils import Udev
|
from test_utils.os_utils import Udev
|
||||||
|
|
||||||
wait_time_s = 30
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.CI
|
@pytest.mark.CI
|
||||||
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
||||||
@ -25,53 +24,63 @@ wait_time_s = 30
|
|||||||
def test_cleaning_policy():
|
def test_cleaning_policy():
|
||||||
"""
|
"""
|
||||||
Title: test_cleaning_policy
|
Title: test_cleaning_policy
|
||||||
description: | The test is to see if dirty data will be removed from the Cache
|
description: |
|
||||||
after changing the cleaning policy from NOP to one that expects a flush.
|
The test is to see if dirty data will be removed from the Cache after changing the
|
||||||
|
cleaning policy from NOP to one that expects a flush.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Cache is successfully populated with dirty data
|
- Cache is successfully populated with dirty data
|
||||||
- Cleaning policy is changed successfully
|
- Cleaning policy is changed successfully
|
||||||
- There is no dirty data after the policy change
|
- There is no dirty data after the policy change
|
||||||
"""
|
"""
|
||||||
with TestRun.step("Prepare devices."):
|
wait_time = 60
|
||||||
cache_disk = TestRun.disks["cache"]
|
|
||||||
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
|
|
||||||
cache_dev = cache_disk.partitions[0]
|
|
||||||
|
|
||||||
core_disk = TestRun.disks["core"]
|
with TestRun.step("Partition cache and core devices"):
|
||||||
core_disk.create_partitions([Size(1, Unit.GibiByte)])
|
cache_device = TestRun.disks["cache"]
|
||||||
core_dev = core_disk.partitions[0]
|
core_device = TestRun.disks["core"]
|
||||||
|
|
||||||
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
|
cache_device.create_partitions([Size(1, Unit.GibiByte)])
|
||||||
cache.set_cleaning_policy(CleaningPolicy.nop)
|
core_device.create_partitions([Size(2, Unit.GibiByte)])
|
||||||
core = cache.add_core(core_dev)
|
|
||||||
|
cache_dev = cache_device.partitions[0]
|
||||||
|
core_dev = core_device.partitions[0]
|
||||||
|
|
||||||
|
with TestRun.step("Disable udev"):
|
||||||
Udev.disable()
|
Udev.disable()
|
||||||
|
|
||||||
with TestRun.step("Populate cache with dirty data."):
|
with TestRun.step(f"Start cache in Write-Back mode and set cleaning policy to NOP"):
|
||||||
|
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
|
||||||
|
cache.set_cleaning_policy(CleaningPolicy.nop)
|
||||||
|
|
||||||
|
with TestRun.step("Add core"):
|
||||||
|
core = cache.add_core(core_dev)
|
||||||
|
|
||||||
|
with TestRun.step("Populate cache with dirty data"):
|
||||||
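        # Write the full cache size with 4 KiB random writes so that, under the
        # NOP policy, the cache fills up with dirty data.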
        fio = (
            Fio()
            .create_command()
            .size(cache.size)
            .read_write(ReadWrite.randwrite)
            .io_engine(IoEngine.libaio)
            .block_size(Size(1, Unit.Blocks4096))
            .direct()
            .target(core.path)
        )
        fio.run()

        if cache.get_dirty_blocks() <= Size.zero():
            TestRun.fail("Cache does not contain dirty data")

    with TestRun.step("Change cleaning policy"):
        cache.set_cleaning_policy(CleaningPolicy.acp)
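        # Poll the dirty statistics for up to wait_time seconds; ACP is expected
        # to flush the dirty data on its own, without an explicit flush command.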
        t_end = time.time() + wait_time
        while time.time() < t_end:
            time.sleep(1)
            if cache.get_dirty_blocks() == Size.zero():
                TestRun.LOGGER.info(
                    f"Cache flushed after {round(time.time() - (t_end - wait_time), 2)} seconds."
                )
                break

    with TestRun.step("Check if cache contains dirty data"):
        if cache.get_dirty_blocks() != Size.zero():
            TestRun.fail("There is dirty data on cache after changing cleaning policy")
@ -1,16 +1,23 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

import pytest
import time

from random import shuffle

from api.cas import casadm, cli, cli_messages
from api.cas.cache_config import (
    CacheStatus,
    SeqCutOffPolicy,
    CacheModeTrait,
    CacheMode,
    CleaningPolicy,
    FlushParametersAlru,
)
from api.cas.casadm_params import OutputFormat
from api.cas.core import CoreStatus
from api.cas.init_config import InitConfig
@ -28,20 +35,135 @@ from test_utils.size import Size, Unit
from test_utils.time import Time


@pytest.mark.CI
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_incremental_load_basic():
    """
    title: Incremental load test basic
    description: |
      Test incremental load and core pool functionality
    pass_criteria:
      - after start and load cores should be in active state and cache in running state
      - after adding to core pool cores should be in detached state
    """
    core_count = 3

    with TestRun.step("Partition cache and core devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(2, Unit.GibiByte)])
        core_device.create_partitions([Size(4, Unit.GibiByte)] * core_count)

        cache_dev = cache_device.partitions[0]
        core_dev_list = core_device.partitions

    with TestRun.step("Start cache and add cores"):
        cache = casadm.start_cache(cache_dev=cache_dev, force=True)
        core_list = [cache.add_core(core_dev=core_dev) for core_dev in core_dev_list]

    with TestRun.step("Check if all cores have active status"):
        for core in core_list:
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")

    with TestRun.step("Stop cache"):
        cache.stop()

    with TestRun.step("Add cores to core pool"):
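        # try_add() re-registers each device in the core pool under its previous
        # core id (1..core_count); the cores stay detached until the cache is loaded.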
        core_pool_list = [
            casadm.try_add(core_device=core_dev, cache_id=1, core_id=core_id)
            for core_dev, core_id in zip(core_dev_list, range(1, core_count + 1))
        ]

    with TestRun.step("Check if all cores in core pool have detached status"):
        for core in core_pool_list:
            if core.get_status() is not CoreStatus.detached:
                TestRun.fail(f"Core {core.core_id} should be detached but is {core.get_status()}.")

    with TestRun.step("Load cache"):
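        # Loading the cache re-attaches the core pool devices, so their status
        # should flip from detached to active.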
        cache = casadm.load_cache(cache_dev)
        if cache.get_status() is not CacheStatus.running:
            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")

        for core in core_pool_list:
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")


@pytest.mark.CI
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_incremental_load_missing_core_device():
    """
    title: Incremental load test with missing core device
    description: |
      Test incremental load and core pool functionality with missing core partition
    pass_criteria:
      - cores are in detached state after adding to core pool
      - visible cores should be in active state after start and load
      - core with missing device should be in inactive state after load
    """

    with TestRun.step("Partition cache and core devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(2, Unit.GibiByte)])
        core_device.create_partitions([Size(4, Unit.GibiByte)] * 3)

        cache_dev = cache_device.partitions[0]
        core_dev_list = core_device.partitions

    with TestRun.step("Start cache and add cores"):
        cache = casadm.start_cache(cache_dev=cache_dev, force=True)
        core_list = [cache.add_core(core_dev=core_dev) for core_dev in core_dev_list]

    with TestRun.step("Check if all cores have active status"):
        for core in core_list:
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")

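        # Remember the last core; its backing partition is removed before the load.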
        core_with_missing_device = cache.get_core_devices()[-1]

    with TestRun.step("Stop cache"):
        cache.stop()

    with TestRun.step("Remove last core partition"):
        core_device.remove_partition(core_device.partitions[-1])

    with TestRun.step("Load cache"):
        cache = casadm.load_cache(cache_dev)
        if cache.get_status() is not CacheStatus.incomplete:
            TestRun.fail(
                f"Cache {cache.cache_id} should be incomplete but is {cache.get_status()}."
            )
        for core in cache.get_core_devices():
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
        if core_with_missing_device.get_status() is not CoreStatus.inactive:
            TestRun.fail(
                f"Core without backend should be inactive but is "
                f"{core_with_missing_device.get_status()}."
            )


@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_attach_core_to_incomplete_cache_volume():
    """
    title: Test for attaching device to inactive cache volume.
    description: |
        Try to attach core device to inactive cache volume and check if it is visible in OS
        properly.
    pass_criteria:
      - No kernel error
      - Core status changes properly
      - Cache loads with inactive core device
      - Cache status changes properly
      - Exported object is present only for active core
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
@ -98,13 +220,13 @@ def test_attach_core_to_incomplete_cache_volume():
@pytest.mark.require_disk("core2", DiskTypeLowerThan("cache"))
def test_flush_inactive_devices():
    """
    title: Negative test for flushing inactive CAS devices.
    description: Validate that CAS prevents flushing dirty data from inactive CAS devices.
    pass_criteria:
      - No kernel error
      - Exported object appears after plugging core device
      - Flushing inactive CAS devices is possible neither by cleaning thread,
        nor by calling cleaning methods
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core1", 1), ("core2", 1)])
@ -116,10 +238,13 @@ def test_flush_inactive_devices():
    with TestRun.step("Start cache in WB mode and set alru cleaning policy."):
        cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
        cache.set_cleaning_policy(CleaningPolicy.alru)
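        # Aggressive alru parameters so the cleaning thread kicks in well within
        # the 30-second observation window used later in the test.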
        cache.set_params_alru(
            FlushParametersAlru(
                staleness_time=Time(seconds=10),
                wake_up_time=Time(seconds=1),
                activity_threshold=Time(milliseconds=500),
            )
        )

    with TestRun.step("Add two cores."):
        first_core = cache.add_core(first_core_dev)
@ -140,31 +265,43 @@ def test_flush_inactive_devices():
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

    with TestRun.step(
        "Wait longer than required for alru cleaning thread to start and verify "
        "that dirty data is flushed only from active device."
    ):
        dirty_lines_before = {
            first_core: first_core.get_dirty_blocks(),
            second_core: second_core.get_dirty_blocks(),
        }
        time.sleep(30)
        check_amount_of_dirty_data(dirty_lines_before)

    with TestRun.step("Try to call 'flush cache' command."):
        dirty_lines_before = {
            first_core: first_core.get_dirty_blocks(),
            second_core: second_core.get_dirty_blocks(),
        }
        try:
            cache.flush_cache()
            TestRun.fail(
                "Flush cache operation should be blocked due to inactive cache devices, "
                "but it executed successfully."
            )
        except Exception as e:
            TestRun.LOGGER.info(f"Flush cache operation is blocked as expected.\n{str(e)}")
        check_amount_of_dirty_data(dirty_lines_before)

    with TestRun.step("Try to call 'flush core' command for inactive core."):
        dirty_lines_before = {
            first_core: first_core.get_dirty_blocks(),
            second_core: second_core.get_dirty_blocks(),
        }
        try:
            first_core.flush_core()
            TestRun.fail(
                "Flush core operation should be blocked for inactive CAS devices, "
                "but it executed successfully."
            )
        except Exception as e:
            TestRun.LOGGER.info(f"Flush core operation is blocked as expected.\n{str(e)}")
        check_amount_of_dirty_data(dirty_lines_before)
@ -175,8 +312,10 @@ def test_flush_inactive_devices():
        first_core.wait_for_status_change(CoreStatus.active)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache did not change status to 'running' after plugging core device. "
                f"Actual state: {cache_status}."
            )

    with TestRun.step("Stop cache."):
        cache.stop()
@ -186,12 +325,12 @@ def test_flush_inactive_devices():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_list_cache_and_cache_volumes():
    """
    title: List cache with cache volumes and check their status.
    description: |
        Check if casadm command correctly lists caches and cache volumes with their statuses.
    pass_criteria:
      - No kernel error
      - Output of list command should be correct in each case (as described in test steps)
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
@ -203,8 +342,10 @@ def test_list_cache_and_cache_volumes():
        cache = casadm.start_cache(cache_dev, force=True)
        core = cache.add_core(core_dev)

    with TestRun.step(
        "Check if list caches command shows proper output (cache should have status "
        "Running and cache volume should be Active)."
    ):
        core_status = core.get_status()
        if core_status != CoreStatus.active:
            TestRun.fail(f"Core should be in active state. Actual state: {core_status}.")
@ -224,8 +365,10 @@ def test_list_cache_and_cache_volumes():
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

    with TestRun.step(
        "Check if list cache command shows proper output (cache should have status "
        "Incomplete and cache volume should be Inactive)."
    ):
        core_status = core.get_status()
        if core_status != CoreStatus.inactive:
            TestRun.fail(f"Core should be in inactive state. Actual state: {core_status}.")
@ -239,8 +382,10 @@ def test_list_cache_and_cache_volumes():
        core.wait_for_status_change(CoreStatus.active)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache did not change status to 'running' after plugging core device. "
                f"Actual state: {cache_status}"
            )
        cache.stop()


@ -248,13 +393,13 @@ def test_list_cache_and_cache_volumes():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_load_cache_with_inactive_core():
    """
    title: Load cache with unavailable core devices.
    description: Check if it is possible to load cache with unavailable core devices.
    pass_criteria:
      - No kernel error
      - It is possible to perform cache load operation with unavailable devices.
      - Warning message about not available core device should appear.
      - Cache status should change to active after plugging missing core device.
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
@ -285,8 +430,10 @@ def test_load_cache_with_inactive_core():
        core.wait_for_status_change(CoreStatus.active)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache did not change status to 'running' after plugging core device. "
                f"Actual state: {cache_status}."
            )
        cache.stop()


@ -294,12 +441,12 @@ def test_load_cache_with_inactive_core():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_preserve_data_for_inactive_device():
    """
    title: Validate preserving data for inactive CAS devices.
    description: Validate that cached data for inactive CAS devices is preserved.
    pass_criteria:
      - No kernel error
      - File md5 checksums match in every iteration.
      - Cache read hits increase after reads (md5 checksum) from CAS device with attached core.
    """
    mount_dir = "/mnt/test"
    with TestRun.step("Prepare devices."):
@ -324,10 +471,13 @@ def test_preserve_data_for_inactive_device():
    with TestRun.step("Create a test file with random writes on mount point and count its md5."):
        file_path = f"{mount_dir}/test_file"
        test_file = File.create_file(file_path)
        dd = (
            Dd()
            .input("/dev/random")
            .output(file_path)
            .count(100)
            .block_size(Size(1, Unit.Blocks512))
        )
        dd.run()
        sync()
        md5_after_create = test_file.md5sum()
@ -353,19 +503,24 @@ def test_preserve_data_for_inactive_device():
            or core_stats_before_stop.usage_stats.clean != core_stats_after_load.usage_stats.clean
            or core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty
        ):
            TestRun.fail(
                f"Statistics after counting md5 are different than after cache load.\n"
                f"Cache stats before: {cache_stats_before_stop}\n"
                f"Cache stats after: {cache_stats_after_load}\n"
                f"Core stats before: {core_stats_before_stop}\n"
                f"Core stats after: {core_stats_after_load}"
            )

    with TestRun.step(
        "Plug core disk using sysfs and verify this change is reflected on the cache list."
    ):
        plug_device.plug()
        time.sleep(1)
        if cache.get_status() != CacheStatus.running or core.get_status() != CoreStatus.active:
            TestRun.fail(
                f"Expected cache status is running (actual - {cache.get_status()}).\n"
                f"Expected core status is active (actual - {core.get_status()})."
            )

    with TestRun.step("Mount CAS device"):
        core.mount(mount_dir)
@ -374,18 +529,24 @@ def test_preserve_data_for_inactive_device():
        cache_read_hits_before_md5 = cache.get_statistics().request_stats.read.hits
        md5_after_cache_load = test_file.md5sum()
        if md5_after_create != md5_after_cache_load:
            TestRun.fail(
                "Md5 checksum after cache load operation is different than before "
                "stopping cache."
            )
        else:
            TestRun.LOGGER.info(
                "Md5 checksum is identical before and after cache load operation "
                "with inactive CAS device."
            )

    with TestRun.step("Verify that cache read hits increased after counting md5 checksum."):
        cache_read_hits_after_md5 = cache.get_statistics().request_stats.read.hits
        if cache_read_hits_after_md5 - cache_read_hits_before_md5 < 0:
            TestRun.fail(
                f"Cache read hits did not increase after counting md5 checksum. "
                f"Before: {cache_read_hits_before_md5}. "
                f"After: {cache_read_hits_after_md5}."
            )
        else:
            TestRun.LOGGER.info("Cache read hits increased as expected.")

@ -400,14 +561,14 @@ def test_preserve_data_for_inactive_device():
@pytest.mark.require_disk("core2", DiskTypeSet([DiskType.sata, DiskType.hdd, DiskType.hdd4k]))
def test_print_statistics_inactive(cache_mode):
    """
    title: Print statistics for cache with inactive cache volumes.
    description: |
        Check if statistics are displayed properly when there is one or more
        inactive cache volumes added to cache.
    pass_criteria:
      - No kernel error
      - All statistics should contain appropriate information depending on situation of
        cache and core devices (as described in test steps)
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core1", 1), ("core2", 1)])
@ -452,8 +613,9 @@ def test_print_statistics_inactive(cache_mode):
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

    with TestRun.step(
        "Check if inactive devices section appeared and contains appropriate information."
    ):
        inactive_stats_before = cache.get_statistics()
        check_if_inactive_section_exists(inactive_stats_before)
        check_number_of_inactive_devices(inactive_stats_before, 2)
@ -463,8 +625,10 @@ def test_print_statistics_inactive(cache_mode):
        time.sleep(1)
        first_core_status = first_core.get_status()
        if first_core_status != CoreStatus.active:
            TestRun.fail(
                f"Core {first_core.path} should be in active state but it is not. "
                f"Actual state: {first_core_status}."
            )

    with TestRun.step("Check cache statistics section of inactive devices."):
        inactive_stats_after = cache.get_statistics()
@ -473,42 +637,56 @@ def test_print_statistics_inactive(cache_mode):
        # criteria for checks below
        insert_write_traits = CacheModeTrait.InsertWrite in cache_mode_traits
        lazy_write_traits = CacheModeTrait.LazyWrites in cache_mode_traits
        lazy_writes_or_no_insert_write_traits = not insert_write_traits or lazy_write_traits
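        # Expected inactive usage depends on the cache mode: occupancy is non-zero
        # only for modes that insert writes, and the clean/dirty split follows the
        # lazy-writes trait (e.g. write-back keeps dirty lines).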

        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_occupancy,
            inactive_stats_after.inactive_usage_stats.inactive_occupancy,
            "inactive occupancy",
            not insert_write_traits,
        )
        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_clean,
            inactive_stats_after.inactive_usage_stats.inactive_clean,
            "inactive clean",
            lazy_writes_or_no_insert_write_traits,
        )
        check_inactive_usage_stats(
            inactive_stats_before.inactive_usage_stats.inactive_dirty,
            inactive_stats_after.inactive_usage_stats.inactive_dirty,
            "inactive dirty",
            not lazy_write_traits,
        )

    with TestRun.step("Check statistics per inactive core."):
        inactive_core_stats = second_core.get_statistics()
        if (
            inactive_stats_after.inactive_usage_stats.inactive_occupancy
            == inactive_core_stats.usage_stats.occupancy
        ):
            TestRun.LOGGER.info(
                "Inactive occupancy in cache statistics is equal to inactive core occupancy."
            )
        else:
            TestRun.fail(
                f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) "
                f"should be the same as cache inactive occupancy "
                f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy})."
            )

    with TestRun.step("Remove inactive core from cache and check if cache is in running state."):
        cache.remove_inactive_core(second_core.core_id, force=True)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache is not in 'running' status after removing inactive core device. "
                f"Actual status: {cache_status}."
            )

with TestRun.step("Check if there is no inactive devices statistics section and if cache has "
|
with TestRun.step(
|
||||||
"Running status."):
|
"Check if there is no inactive devices statistics section and if cache has "
|
||||||
|
"Running status."
|
||||||
|
):
|
||||||
cache_stats = cache.get_statistics()
|
cache_stats = cache.get_statistics()
|
||||||
check_if_inactive_section_exists(cache_stats, False)
|
check_if_inactive_section_exists(cache_stats, False)
|
||||||
check_number_of_inactive_devices(cache_stats, 0)
|
check_number_of_inactive_devices(cache_stats, 0)
|
||||||
@ -523,12 +701,12 @@ def test_print_statistics_inactive(cache_mode):
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_remove_detached_cores():
|
def test_remove_detached_cores():
|
||||||
"""
|
"""
|
||||||
title: Validate removing core devices from core pool.
|
title: Validate removing core devices from core pool.
|
||||||
description: Validate that it is possible to remove core devices from core pool.
|
description: Validate that it is possible to remove core devices from core pool.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- No kernel error
|
- No kernel error
|
||||||
- All core devices are correctly added after plugging core disk.
|
- All core devices are correctly added after plugging core disk.
|
||||||
- All cores are successfully removed.
|
- All cores are successfully removed.
|
||||||
"""
|
"""
|
||||||
with TestRun.step("Prepare devices."):
|
with TestRun.step("Prepare devices."):
|
||||||
devices = prepare_devices([("cache", 1), ("core", 4)])
|
devices = prepare_devices([("cache", 1), ("core", 4)])
|
||||||
@ -548,8 +726,9 @@ def test_remove_detached_cores():
|
|||||||
with TestRun.step("Run random writes to all CAS devices."):
|
with TestRun.step("Run random writes to all CAS devices."):
|
||||||
run_fio([c.path for c in cores])
|
run_fio([c.path for c in cores])
|
||||||
|
|
||||||
with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
|
with TestRun.step(
|
||||||
"dirty data."):
|
"Flush dirty data from two CAS devices and verify than other two contain " "dirty data."
|
||||||
|
):
|
||||||
for core in cores:
|
for core in cores:
|
||||||
if core.core_id % 2 == 0:
|
if core.core_id % 2 == 0:
|
||||||
core.flush_core()
|
core.flush_core()
|
||||||
@ -567,12 +746,15 @@ def test_remove_detached_cores():
|
|||||||
plug_device.plug()
|
plug_device.plug()
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
with TestRun.step("Verify that all cores from plugged core device are listed with "
|
with TestRun.step(
|
||||||
"proper status."):
|
"Verify that all cores from plugged core device are listed with " "proper status."
|
||||||
|
):
|
||||||
for core in cores:
|
for core in cores:
|
||||||
if core.get_status() != CoreStatus.detached:
|
if core.get_status() != CoreStatus.detached:
|
||||||
TestRun.fail(f"Each core should be in detached state. "
|
TestRun.fail(
|
||||||
f"Actual states: {casadm.list_caches().stdout}")
|
f"Each core should be in detached state. "
|
||||||
|
f"Actual states: {casadm.list_caches().stdout}"
|
||||||
|
)
|
||||||
|
|
||||||
with TestRun.step("Remove CAS devices from core pool."):
|
with TestRun.step("Remove CAS devices from core pool."):
|
||||||
casadm.remove_all_detached_cores()
|
casadm.remove_all_detached_cores()
|
||||||
@ -588,16 +770,16 @@ def test_remove_detached_cores():
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_remove_inactive_devices():
|
def test_remove_inactive_devices():
|
||||||
"""
|
"""
|
||||||
title: Validate removing inactive CAS devices.
|
title: Validate removing inactive CAS devices.
|
||||||
description: |
|
description: |
|
||||||
Validate that it is possible to remove inactive CAS devices when there are no dirty
|
Validate that it is possible to remove inactive CAS devices when there are no dirty
|
||||||
cache lines associated with them and that removing CAS devices is prevented otherwise
|
cache lines associated with them and that removing CAS devices is prevented otherwise
|
||||||
(unless ‘force’ option is used).
|
(unless ‘force’ option is used).
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- No kernel error
|
- No kernel error
|
||||||
- Removing CAS devices without dirty data is successful.
|
- Removing CAS devices without dirty data is successful.
|
||||||
- Removing CAS devices with dirty data without ‘force’ option is blocked.
|
- Removing CAS devices with dirty data without ‘force’ option is blocked.
|
||||||
- Removing CAS devices with dirty data with ‘force’ option is successful.
|
- Removing CAS devices with dirty data with ‘force’ option is successful.
|
||||||
"""
|
"""
|
||||||
with TestRun.step("Prepare devices."):
|
with TestRun.step("Prepare devices."):
|
||||||
devices = prepare_devices([("cache", 1), ("core", 4)])
|
devices = prepare_devices([("cache", 1), ("core", 4)])
|
||||||
@ -617,8 +799,9 @@ def test_remove_inactive_devices():
|
|||||||
with TestRun.step("Run random writes to all CAS devices."):
|
with TestRun.step("Run random writes to all CAS devices."):
|
||||||
run_fio([c.path for c in cores])
|
run_fio([c.path for c in cores])
|
||||||
|
|
||||||
with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
|
with TestRun.step(
|
||||||
"contain dirty data."):
|
"Flush dirty data from two CAS devices and verify than other two " "contain dirty data."
|
||||||
|
):
|
||||||
for core in cores:
|
for core in cores:
|
||||||
if core.core_id % 2 == 0:
|
if core.core_id % 2 == 0:
|
||||||
core.flush_core()
|
core.flush_core()
|
||||||
@ -636,56 +819,75 @@ def test_remove_inactive_devices():
|
|||||||
with TestRun.step("Load cache."):
|
with TestRun.step("Load cache."):
|
||||||
casadm.load_cache(cache_dev)
|
casadm.load_cache(cache_dev)
|
||||||
|
|
||||||
with TestRun.step("Verify that all previously created CAS devices are listed with "
|
with TestRun.step(
|
||||||
"proper status."):
|
"Verify that all previously created CAS devices are listed with " "proper status."
|
||||||
|
):
|
||||||
for core in cores:
|
for core in cores:
|
||||||
if core.get_status() != CoreStatus.inactive:
|
if core.get_status() != CoreStatus.inactive:
|
||||||
TestRun.fail(f"Each core should be in inactive state. "
|
TestRun.fail(
|
||||||
f"Actual states:\n{casadm.list_caches().stdout}")
|
f"Each core should be in inactive state. "
|
||||||
|
f"Actual states:\n{casadm.list_caches().stdout}"
|
||||||
|
)
|
||||||
|
|
||||||
with TestRun.step("Try removing CAS devices using remove command. "
|
with TestRun.step(
|
||||||
"Operation should be blocked and proper message displayed."):
|
"Try removing CAS devices using remove command. "
|
||||||
|
"Operation should be blocked and proper message displayed."
|
||||||
|
):
|
||||||
shuffle(cores)
|
shuffle(cores)
|
||||||
for force in [False, True]:
|
for force in [False, True]:
|
||||||
for core in cores:
|
for core in cores:
|
||||||
try:
|
try:
|
||||||
core.remove_core(force)
|
core.remove_core(force)
|
||||||
TestRun.fail(f"Removing inactive CAS device should be possible by "
|
TestRun.fail(
|
||||||
f"'remove-inactive' command only but it worked with 'remove' "
|
f"Removing inactive CAS device should be possible by "
|
||||||
f"command with force option set to {force}.")
|
f"'remove-inactive' command only but it worked with 'remove' "
|
||||||
|
f"command with force option set to {force}."
|
||||||
|
)
|
||||||
except CmdException as e:
|
except CmdException as e:
|
||||||
TestRun.LOGGER.info(f"Remove core operation is blocked for inactive CAS device "
|
TestRun.LOGGER.info(
|
||||||
f"as expected. Force option set to: {force}")
|
f"Remove core operation is blocked for inactive CAS device "
|
||||||
|
f"as expected. Force option set to: {force}"
|
||||||
|
)
|
||||||
cli_messages.check_stderr_msg(
|
cli_messages.check_stderr_msg(
|
||||||
e.output, cli_messages.remove_inactive_core_with_remove_command)
|
e.output, cli_messages.remove_inactive_core_with_remove_command
|
||||||
|
)
|
||||||
|
|
||||||
output = casadm.list_caches(output_format=OutputFormat.csv).stdout
|
output = casadm.list_caches(output_format=OutputFormat.csv).stdout
|
||||||
if core.core_device.path not in output:
|
if core.core_device.path not in output:
|
||||||
TestRun.fail(
|
TestRun.fail(
|
||||||
f"CAS device is not listed in casadm list output but it should be."
|
f"CAS device is not listed in casadm list output but it should be."
|
||||||
f"\n{output}")
|
f"\n{output}"
|
||||||
|
)
|
||||||
|
|
||||||
with TestRun.step("Try removing CAS devices using remove-inactive command without ‘force’ "
|
with TestRun.step(
|
||||||
"option. Verify that for dirty CAS devices operation is blocked, proper "
|
"Try removing CAS devices using remove-inactive command without ‘force’ "
|
||||||
"message is displayed and device is still listed."):
|
"option. Verify that for dirty CAS devices operation is blocked, proper "
|
||||||
|
"message is displayed and device is still listed."
|
||||||
|
):
|
||||||
shuffle(cores)
|
shuffle(cores)
|
||||||
for core in cores:
|
for core in cores:
|
||||||
try:
|
try:
|
||||||
dirty_blocks = core.get_dirty_blocks()
|
dirty_blocks = core.get_dirty_blocks()
|
||||||
core.remove_inactive()
|
core.remove_inactive()
|
||||||
if dirty_blocks != Size.zero():
|
if dirty_blocks != Size.zero():
|
||||||
TestRun.fail("Removing dirty inactive CAS device should be impossible without "
|
TestRun.fail(
|
||||||
"force option but remove-inactive command executed without "
|
"Removing dirty inactive CAS device should be impossible without "
|
||||||
"any error.")
|
"force option but remove-inactive command executed without "
|
||||||
|
"any error."
|
||||||
|
)
|
||||||
TestRun.LOGGER.info("Removing core with force option skipped for clean CAS device.")
|
TestRun.LOGGER.info("Removing core with force option skipped for clean CAS device.")
|
||||||
except CmdException as e:
|
except CmdException as e:
|
||||||
TestRun.LOGGER.info("Remove-inactive operation without force option is blocked for "
|
TestRun.LOGGER.info(
|
||||||
"dirty CAS device as expected.")
|
"Remove-inactive operation without force option is blocked for "
|
||||||
|
"dirty CAS device as expected."
|
||||||
|
)
|
||||||
cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_dirty_core)
|
cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_dirty_core)
|
||||||
output = casadm.list_caches(output_format=OutputFormat.csv).stdout
|
output = casadm.list_caches(output_format=OutputFormat.csv).stdout
|
||||||
if core.core_device.path not in output:
|
if core.core_device.path not in output:
|
||||||
TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
|
TestRun.fail(
|
||||||
f"\n{output}")
|
f"CAS device is not listed in casadm list output but it should be."
|
||||||
|
f"\n{output}"
|
||||||
|
)
|
||||||
core.remove_inactive(force=True)
|
core.remove_inactive(force=True)
|
||||||
|
|
||||||
with TestRun.step("Plug missing disk and stop cache."):
|
with TestRun.step("Plug missing disk and stop cache."):
|
||||||
@ -698,14 +900,14 @@ def test_remove_inactive_devices():
|
|||||||
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
def test_stop_cache_with_inactive_devices():
|
def test_stop_cache_with_inactive_devices():
|
||||||
"""
|
"""
|
||||||
title: Validate stopping cache with inactive CAS devices.
|
title: Validate stopping cache with inactive CAS devices.
|
||||||
description: |
|
description: |
|
||||||
Validate that cache with inactive CAS devices cannot be stopped
|
Validate that cache with inactive CAS devices cannot be stopped
|
||||||
unless ‘force’ option is used.
|
unless ‘force’ option is used.
|
||||||
pass_criteria:
|
pass_criteria:
|
||||||
- No kernel error
|
- No kernel error
|
||||||
- Stopping cache with inactive CAS devices without ‘force’ option is blocked.
|
- Stopping cache with inactive CAS devices without ‘force’ option is blocked.
|
||||||
- Stopping cache with inactive CAS devices with ‘force’ option is successful.
|
- Stopping cache with inactive CAS devices with ‘force’ option is successful.
|
||||||
"""
|
"""
|
||||||
with TestRun.step("Prepare devices."):
|
with TestRun.step("Prepare devices."):
|
||||||
devices = prepare_devices([("cache", 1), ("core", 1)])
|
devices = prepare_devices([("cache", 1), ("core", 1)])
|
||||||
@ -739,8 +941,10 @@ def test_stop_cache_with_inactive_devices():
|
|||||||
if core_status != CoreStatus.inactive:
|
if core_status != CoreStatus.inactive:
|
||||||
TestRun.fail(f"CAS device should be in inactive state. Actual status: {core_status}.")
|
TestRun.fail(f"CAS device should be in inactive state. Actual status: {core_status}.")
|
||||||
|
|
||||||
with TestRun.step("Try stopping cache without ‘no data flush’ option, verify that operation "
|
with TestRun.step(
|
||||||
"was blocked and proper message is displayed."):
|
"Try stopping cache without ‘no data flush’ option, verify that operation "
|
||||||
|
"was blocked and proper message is displayed."
|
||||||
|
):
|
||||||
try_stop_incomplete_cache(cache)
|
try_stop_incomplete_cache(cache)
|
||||||
|
|
||||||
with TestRun.step("Stop cache with force option."):
|
with TestRun.step("Stop cache with force option."):
|
||||||
@ -765,8 +969,10 @@ def test_stop_cache_with_inactive_devices():
|
|||||||
if core_status != CoreStatus.inactive:
|
if core_status != CoreStatus.inactive:
|
||||||
TestRun.fail(f"CAS device should be in inactive state. Actual state: {core_status}.")
|
TestRun.fail(f"CAS device should be in inactive state. Actual state: {core_status}.")
|
||||||
|
|
||||||
with TestRun.step("Try stopping cache without ‘no data flush’ option, verify that "
|
with TestRun.step(
|
||||||
"operation was blocked and proper message is displayed."):
|
"Try stopping cache without ‘no data flush’ option, verify that "
|
||||||
|
"operation was blocked and proper message is displayed."
|
||||||
|
):
|
||||||
try_stop_incomplete_cache(cache)
|
try_stop_incomplete_cache(cache)
|
||||||
|
|
||||||
with TestRun.step("Stop cache with 'no data flush' option and plug missing core device."):
|
with TestRun.step("Stop cache with 'no data flush' option and plug missing core device."):
|
||||||
@ -789,15 +995,18 @@ def check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_z
|
|||||||
elif not should_be_zero and stats_after < stats_before:
|
elif not should_be_zero and stats_after < stats_before:
|
||||||
TestRun.LOGGER.info(f"{stat_name} is lower than before as expected.")
|
TestRun.LOGGER.info(f"{stat_name} is lower than before as expected.")
|
||||||
else:
|
else:
|
||||||
TestRun.LOGGER.error(f"{stat_name} ({stats_after}) is not lower than before "
|
TestRun.LOGGER.error(
|
||||||
f"({stats_before}).")
|
f"{stat_name} ({stats_after}) is not lower than before " f"({stats_before})."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def check_number_of_inactive_devices(stats: CacheStats, expected_num):
|
def check_number_of_inactive_devices(stats: CacheStats, expected_num):
|
||||||
inactive_core_num = stats.config_stats.inactive_core_dev
|
inactive_core_num = stats.config_stats.inactive_core_dev
|
||||||
if inactive_core_num != expected_num:
|
if inactive_core_num != expected_num:
|
||||||
TestRun.fail(f"There is wrong number of inactive core devices in cache statistics. "
|
TestRun.fail(
|
||||||
f"(Expected: {expected_num}, actual: {inactive_core_num}")
|
f"There is wrong number of inactive core devices in cache statistics. "
|
||||||
|
f"(Expected: {expected_num}, actual: {inactive_core_num}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def check_if_inactive_section_exists(stats, should_exist: bool = True):
|
def check_if_inactive_section_exists(stats, should_exist: bool = True):
|
||||||
@ -812,8 +1021,10 @@ def check_amount_of_dirty_data(devices_dirty_lines_before):
|
|||||||
for dev in devices_dirty_lines_before:
|
for dev in devices_dirty_lines_before:
|
||||||
if dev.get_status() == CoreStatus.active and dev.get_dirty_blocks() != Size.zero():
|
if dev.get_status() == CoreStatus.active and dev.get_dirty_blocks() != Size.zero():
|
||||||
TestRun.fail("Amount of dirty data is not 0.")
|
TestRun.fail("Amount of dirty data is not 0.")
|
||||||
if dev.get_status() == CoreStatus.inactive and \
|
if (
|
||||||
dev.get_dirty_blocks() != devices_dirty_lines_before[dev]:
|
dev.get_status() == CoreStatus.inactive
|
||||||
|
and dev.get_dirty_blocks() != devices_dirty_lines_before[dev]
|
||||||
|
):
|
||||||
TestRun.fail("Data from inactive cache is flushed.")
|
TestRun.fail("Data from inactive cache is flushed.")
|
||||||
|
|
||||||
|
|
||||||
@ -829,13 +1040,15 @@ def prepare_devices(devices):
|
|||||||
|
|
||||||
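# Helper used by the tests above: dirty each exported object with small
# synchronous random writes so per-core flush behavior can be observed.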
def run_fio(targets):
    for target in targets:
        fio = (
            Fio()
            .create_command()
            .io_engine(IoEngine.libaio)
            .read_write(ReadWrite.randwrite)
            .direct(1)
            .size(Size(100, Unit.MebiByte))
            .sync()
            .io_depth(32)
            .target(f"{target}")
        )
        fio.run()

193
test/functional/tests/initialize/test_recovery.py
Normal file
@ -0,0 +1,193 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

import pytest

from api.cas import casadm
from api.cas.cache_config import CacheMode
from api.cas.casadm_parser import get_caches
from api.cas.cli import stop_cmd
from api.cas.cli_messages import check_stderr_msg, stop_cache_errors
from core.test_run import TestRun
from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem, mount
from test_tools.fs_utils import check_if_file_exists
from test_utils.filesystem.file import File
from test_utils.os_utils import sync
from test_utils.size import Size, Unit

mount_point = "/mnt/cas"


@pytest.mark.CI
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_recover_cache_verify_core_device(filesystem):
    """
    title: Recovery after unplug/plug cache device
    description: |
        Test data integrity after unplug cache device.
    pass_criteria:
      - Cache devices successfully loaded with metadata after unplug/plug device
      - md5sums before and after all operations match each other
      - creation, mount, unmount of filesystems on the core device succeeds
      - correct error warning after cache stop
    """

    with TestRun.step("Partition cache and core devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(2, Unit.GibiByte)])
        core_device.create_partitions([Size(4, Unit.GibiByte)])

        cache_dev = cache_device.partitions[0]
        core_dev = core_device.partitions[0]

    with TestRun.step("Start cache and add core"):
        cache = casadm.start_cache(cache_dev=cache_dev, cache_mode=CacheMode.WB)
        core = cache.add_core(core_dev=core_dev)

    with TestRun.step("Create filesystem on core"):
        core.create_filesystem(filesystem)

    with TestRun.step("Mount exported object"):
        core.mount(mount_point)

    with TestRun.step("Run IO"):
        dd = (
            Dd()
            .input("/dev/urandom")
            .output(f"{mount_point}/test")
            .count(1)
            .block_size(Size(50, Unit.MegaByte))
        )
        dd.run()
        sync()

    with TestRun.step("Calculate test file md5sums before unplug"):
        core_mnt_md5s_before = File(f"{mount_point}/test").md5sum()

    with TestRun.step("Unmount exported object"):
        core.unmount()

    with TestRun.step("Unplug cache device"):
        cache_device.unplug()

|
with TestRun.step("Stop cache without flushing and check error message"):
|
||||||
|
output = TestRun.executor.run(stop_cmd(cache_id=str(cache.cache_id), no_data_flush=True))
|
||||||
|
if len(get_caches()) > 0:
|
||||||
|
TestRun.fail("CAS failed to stop cache")
|
||||||
|
if not check_stderr_msg(output, stop_cache_errors):
|
||||||
|
TestRun.fail(f"Wrong error message during cache stop")
|
||||||
|
|
||||||
|
with TestRun.step("Plug cache device"):
|
||||||
|
cache_device.plug()
|
||||||
|
|
||||||
|
with TestRun.step("Load cache"):
|
||||||
|
cache = casadm.load_cache(cache_dev)
|
||||||
|
|
||||||
|
with TestRun.step("Stop cache"):
|
||||||
|
cache.stop()
|
||||||
|
|
||||||
|
with TestRun.step("Mount core device"):
|
||||||
|
core.core_device.mount(mount_point)
|
||||||
|
|
||||||
|
with TestRun.step("Calculate test file md5sums after recovery"):
|
||||||
|
core_mnt_md5s_after = File(f"{mount_point}/test").md5sum()
|
||||||
|
|
||||||
|
with TestRun.step("Compare test file md5 sums"):
|
||||||
|
if core_mnt_md5s_before != core_mnt_md5s_after:
|
||||||
|
TestRun.fail(
|
||||||
|
f"MD5 sums do not match\n"
|
||||||
|
f"Expected: {core_mnt_md5s_before}, Actual: {core_mnt_md5s_after}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.CI
|
||||||
|
@pytest.mark.parametrizex("filesystem", Filesystem)
|
||||||
|
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
|
||||||
|
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
|
||||||
|
def test_recover_cache_verify_exp_obj(filesystem):
|
||||||
|
"""
|
||||||
|
title: Recovery after unplug/plug cache device
|
||||||
|
description: |
|
||||||
|
Test data integrity after unplug cache device.
|
||||||
|
pass_criteria:
|
||||||
|
- Cache device successfully loaded with metadata after unplug/plug cache device
|
||||||
|
- md5sums before and after all operations match each other
|
||||||
|
- creation, mount, unmount of filesystems succeeds on core exported object
|
||||||
|
"""
|
||||||
|
|
||||||
|
with TestRun.step("Partition cache and core device"):
|
||||||
|
cache_device = TestRun.disks["cache"]
|
||||||
|
core_device = TestRun.disks["core"]
|
||||||
|
|
||||||
|
cache_device.create_partitions([Size(2, Unit.GibiByte)])
|
||||||
|
core_device.create_partitions([Size(4, Unit.GibiByte)])
|
||||||
|
|
||||||
|
cache_dev = cache_device.partitions[0]
|
||||||
|
core_dev = core_device.partitions[0]
|
||||||
|
|
||||||
|
with TestRun.step("Start cache and add core"):
|
||||||
|
cache = casadm.start_cache(cache_dev=cache_dev, cache_mode=CacheMode.WB)
|
||||||
|
core = cache.add_core(core_dev=core_dev)
|
||||||
|
|
||||||
|
with TestRun.step("Create filesystem on core"):
|
||||||
|
core.create_filesystem(filesystem)
|
||||||
|
|
||||||
|
with TestRun.step("Mount exported object"):
|
||||||
|
core.mount(mount_point)
|
||||||
|
|
||||||
|
with TestRun.step("Run IO"):
|
||||||
|
dd = (
|
||||||
|
Dd()
|
||||||
|
.input("/dev/urandom")
|
||||||
|
.output(f"{mount_point}/test")
|
||||||
|
.count(1)
|
||||||
|
.block_size(Size(50, Unit.MegaByte))
|
||||||
|
)
|
||||||
|
dd.run()
|
||||||
|
sync()
|
||||||
|
|
||||||
|
with TestRun.step("Calculate test file md5sums before unplug"):
|
||||||
|
core_mnt_md5s_before = File(f"{mount_point}/test").md5sum()
|
||||||
|
|
||||||
|
with TestRun.step("Unmount exported object"):
|
||||||
|
core.unmount()
|
||||||
|
|
||||||
|
with TestRun.step("Unplug cache device"):
|
||||||
|
cache_device.unplug()
|
||||||
|
|
||||||
|
with TestRun.step("Stop cache without flushing and check error message"):
|
||||||
|
output = TestRun.executor.run(stop_cmd(cache_id=str(cache.cache_id), no_data_flush=True))
|
||||||
|
if len(get_caches()) > 0:
|
||||||
|
TestRun.fail("CAS failed to stop cache")
|
||||||
|
if not check_stderr_msg(output, stop_cache_errors):
|
||||||
|
TestRun.fail(f"Wrong error message during cache stop")
|
||||||
|
|
||||||
|
with TestRun.step("Plug cache device"):
|
||||||
|
cache_device.plug()
|
||||||
|
|
||||||
|
with TestRun.step("Load cache"):
|
||||||
|
casadm.load_cache(cache_dev)
|
||||||
|
|
||||||
|
with TestRun.step("Mount exported object"):
|
||||||
|
core.mount(mount_point)
|
||||||
|
if not check_if_file_exists(core.mount_point):
|
||||||
|
TestRun.LOGGER.error(f"Mounting exported object {mount_point} failed")
|
||||||
|
|
||||||
|
with TestRun.step("Calculate test file md5sums after recovery"):
|
||||||
|
core_mnt_md5s_after = File(f"{core.mount_point}/test").md5sum()
|
||||||
|
|
||||||
|
with TestRun.step("Compare test file md5 sums"):
|
||||||
|
if core_mnt_md5s_before != core_mnt_md5s_after:
|
||||||
|
TestRun.fail(
|
||||||
|
f"MD5 sums do not match\n"
|
||||||
|
f"Expected: {core_mnt_md5s_before}, Actual: {core_mnt_md5s_after}"
|
||||||
|
)
|
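Both recovery tests end with the same md5 comparison; a minimal sketch of factoring it into a helper, using only the `File` and `TestRun` APIs imported above (an editorial illustration, not code from this commit):

# Sketch: compare a file's current md5sum against a recorded value.
def verify_md5(path, md5_before):
    md5_after = File(path).md5sum()
    if md5_before != md5_after:
        TestRun.fail(
            f"MD5 sums do not match\n"
            f"Expected: {md5_before}, Actual: {md5_after}"
        )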
90
test/functional/tests/initialize/test_simulation_startup.py
Normal file
@ -0,0 +1,90 @@
#
# Copyright(c) 2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

import pytest

from api.cas import casadm_parser
from api.cas.cache_config import CacheStatus
from api.cas.cli import ctl_init, ctl_stop
from api.cas.core import CoreStatus
from api.cas.init_config import InitConfig
from core.test_run import TestRun
from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType
from test_utils.size import Size, Unit


@pytest.mark.CI
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_simulation_startup_from_config():
    """
    title: Test for CAS initialization from a configuration file
    description: |
        Verify that CAS can be properly initialized from a configuration file and subsequently
        started correctly after udev trigger.
    pass_criteria:
      - Cache initialization from configuration file works properly
      - Cache is working after udev trigger
      - Core is working after udev trigger
    """

    with TestRun.step("Partition cache and core devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(2, Unit.GibiByte)])
        core_device.create_partitions([Size(4, Unit.GibiByte)])

        cache_dev = cache_device.partitions[0]
        core_dev = core_device.partitions[0]

        cache_id, core_id = 1, 1

    with TestRun.step("Prepare CAS config"):
        cache_config = InitConfig()
        cache_config.add_cache(cache_id, cache_dev)
        cache_config.add_core(cache_id, core_id, core_dev)
        cache_config.save_config_file()

    with TestRun.step("Initialize cache from config"):
        TestRun.executor.run_expect_success(ctl_init())

    with TestRun.step("Verify if cache is working"):
        cache = casadm_parser.get_caches()[0]
        if cache.get_status() is not CacheStatus.running:
            TestRun.fail(
                f"Cache {cache.cache_id} should be running but is in {cache.get_status()} state."
            )

    with TestRun.step("Verify if core is working"):
        core = cache.get_core_devices()[0]
        if core.get_status() is not CoreStatus.active:
            TestRun.fail(
                f"Core {core.core_id} should be active but is in {core.get_status()} state."
            )

    with TestRun.step("Stop cache instance using casctl"):
        TestRun.executor.run_expect_success(ctl_stop())

    with TestRun.step("Trigger udev"):
        TestRun.executor.run_expect_success("udevadm trigger")

    with TestRun.step("Verify if cache is working"):
        cache = casadm_parser.get_caches()[0]
        if cache.get_status() is not CacheStatus.running:
            TestRun.fail(
                f"Cache {cache.cache_id} should be running but is in {cache.get_status()} state."
            )

    with TestRun.step("Verify if core is working"):
        core = cache.get_core_devices()[0]
        if core.get_status() is not CoreStatus.active:
            TestRun.fail(
                f"Core {core.core_id} should be active but is in {core.get_status()} state."
            )
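`InitConfig.save_config_file()` writes the cache/core topology that `casctl init` and the udev rules later consume. Assuming the stock Open CAS configuration format, the generated file (conventionally /etc/opencas/opencas.conf) would look roughly like this, with placeholder device paths:

[caches]
## Cache ID    Cache device      Cache mode    Extra fields (optional)
1              /dev/nvme0n1p1    WT

[cores]
## Cache ID    Core ID    Core device
1              1          /dev/sdb1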
@ -1,12 +1,13 @@
#
# Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#


import os

-from api.cas.git import get_repo_files
+from test_utils.git import get_repo_files
from api.cas.installer import (
    clean_opencas_repo,
    build_opencas,
@ -0,0 +1,137 @@
#
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

import posixpath
import random
import pytest

from api.cas import casadm
from api.cas.cache_config import (
    CacheMode,
    CleaningPolicy,
)
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from core.test_run import TestRun
from test_tools.disk_utils import Filesystem
from test_utils.size import Size, Unit
from test_utils.os_utils import Udev
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine


@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_change_cleaning_policy_during_io_raw():
    """
    title: Test for changing the cleaning policy during background IO on raw device.
    description: |
        Stress test to change the cleaning policy during background IO operations on raw exported
        object in Write-Back cache mode.
    pass_criteria:
      - No system crash
      - Successful change of cleaning policy
    """

    with TestRun.step("Prepare cache and core devices"):
        cache_dev = TestRun.disks["cache"]
        core_dev = TestRun.disks["core"]

        cache_dev.create_partitions([Size(500, Unit.MebiByte)])
        core_dev.create_partitions([Size(1, Unit.GibiByte)])

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step("Start cache in Write-Back mode"):
        cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WB, force=True)

    with TestRun.step("Add core to the cache"):
        core = cache.add_core(core_dev)

    with TestRun.step("Run I/O in background"):
        fio = (
            Fio()
            .create_command()
            .target(core.path)
            .size(core.size)
            .read_write(ReadWrite.randrw)
            .io_engine(IoEngine.sync)
            .block_size(Size(1, Unit.Blocks4096))
        )

        fio_pid = fio.run_in_background()

    with TestRun.step("Start changing the cleaning policy during I/O operations"):
        current_policy = cache.get_cleaning_policy()
        while TestRun.executor.check_if_process_exists(fio_pid):
            random_policy = [policy for policy in list(CleaningPolicy) if policy != current_policy]
            policy_to_change = random.choice(random_policy)
            cache.set_cleaning_policy(policy_to_change)
            cache_policy = cache.get_cleaning_policy()
            if cache_policy != policy_to_change:
                TestRun.fail("Wrong cleaning policy after changing it during I/O")
            current_policy = cache_policy


@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_change_cleaning_policy_during_io_fs(filesystem):
    """
    title: Test for changing the cleaning policy during IO on exported object.
    description: |
        Stress test for changing the cleaning policy during IO operations on CAS device with a
        filesystem in Write-Back cache mode.
    pass_criteria:
      - No system crash
      - Successful change of cleaning policy
    """
    mount_point = "/mnt"

    with TestRun.step("Prepare cache and core devices"):
        cache_dev = TestRun.disks["cache"]
        core_dev = TestRun.disks["core"]

        cache_dev.create_partitions([Size(500, Unit.MebiByte)])
        core_dev.create_partitions([Size(1, Unit.GibiByte)])

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step("Start cache in Write-Back mode"):
        cache = casadm.start_cache(cache_dev.partitions[0], CacheMode.WB, force=True)

    with TestRun.step("Create filesystem on core device"):
        core_dev.create_filesystem(filesystem)

    with TestRun.step("Add core to the cache"):
        core = cache.add_core(core_dev)

    with TestRun.step("Mount exported object"):
        core.mount(mount_point=mount_point)

    with TestRun.step("Run I/O in background on exported object"):
        fio = (
            Fio()
            .create_command()
            .size(core.size)
            .target(posixpath.join(mount_point, "test_file"))
            .read_write(ReadWrite.randrw)
            .io_engine(IoEngine.sync)
            .block_size(Size(1, Unit.Blocks4096))
        )

        fio_pid = fio.run_in_background()

    with TestRun.step("Start changing the cleaning policy during I/O operations"):
        current_policy = cache.get_cleaning_policy()
        while TestRun.executor.check_if_process_exists(fio_pid):
            random_policy = [policy for policy in list(CleaningPolicy) if policy != current_policy]
            policy_to_change = random.choice(random_policy)
            cache.set_cleaning_policy(policy_to_change)
            cache_policy = cache.get_cleaning_policy()
            if cache_policy != policy_to_change:
                TestRun.fail("Wrong cleaning policy after changing it during I/O")
            current_policy = cache_policy
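For reference, a deterministic variant of the policy-cycling loop above, visiting each cleaning policy once instead of choosing randomly while fio runs; it uses only the framework calls already imported in this file and is illustrative rather than part of the commit:

# Sketch: apply every cleaning policy once and verify it sticks.
for policy in list(CleaningPolicy):
    cache.set_cleaning_policy(policy)
    if cache.get_cleaning_policy() != policy:
        TestRun.fail(f"Cleaning policy {policy} was not applied")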