Introduce tests for cache attach/detach feature

Signed-off-by: Katarzyna Treder <katarzyna.treder@h-partners.com>
Katarzyna Treder 2025-02-26 13:08:03 +01:00
parent 3893fc2aa7
commit d973b3850e
7 changed files with 663 additions and 6 deletions


@@ -1,6 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

@@ -125,6 +125,7 @@ class CacheStatus(Enum):
    incomplete = "incomplete"
    standby = "standby"
    standby_detached = "standby detached"
    detached = "detached"

    def __str__(self):
        return self.value


@@ -9,6 +9,20 @@ import re
from connection.utils.output import Output
from core.test_run import TestRun


attach_not_enough_memory = [
    r"Not enough free RAM\.\nYou need at least \d+.\d+GB to attach a device to cache "
    r"with cache line size equal \d+kB.\n"
    r"Try with greater cache line size\."
]

attach_with_existing_metadata = [
    r"Error inserting cache \d+",
    r"Old metadata found on device",
    r"Please attach another device or use --force to discard on-disk metadata",
    r" and attach this device to cache instance\."
]

load_inactive_core_missing = [
    r"WARNING: Can not resolve path to core \d+ from cache \d+\. By-id path will be shown for that "
    r"core\.",


@@ -0,0 +1,262 @@
#
# Copyright(c) 2023-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
import random
import time
import pytest
from api.cas import casadm_parser, casadm
from api.cas.cache_config import CacheLineSize, CacheMode
from api.cas.cli import attach_cache_cmd
from api.cas.cli_messages import check_stderr_msg, attach_with_existing_metadata
from connection.utils.output import CmdException
from core.test_run import TestRun
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from storage_devices.nullblk import NullBlk
from test_tools.dmesg import clear_dmesg
from test_tools.fs_tools import Filesystem, create_directory, create_random_test_file, \
check_if_directory_exists, remove
from type_def.size import Size, Unit
mountpoint = "/mnt/cas"
test_file_path = f"{mountpoint}/test_file"
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("cache2", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.require_disk("core2", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_attach_device_with_existing_metadata(cache_mode, cache_line_size):
"""
title: Test attaching cache with valid and relevant metadata.
description: |
Attach disk with valid and relevant metadata and verify whether the running configuration
wasn't affected by the values from the old metadata.
pass_criteria:
- no cache crash during attach and detach.
- old metadata doesn't affect running cache.
- no kernel panic
"""
with TestRun.step("Prepare random cache line size and cache mode (different than tested)"):
random_cache_mode = _get_random_uniq_cache_mode(cache_mode)
cache_mode1, cache_mode2 = cache_mode, random_cache_mode
random_cache_line_size = _get_random_uniq_cache_line_size(cache_line_size)
cache_line_size1, cache_line_size2 = cache_line_size, random_cache_line_size
with TestRun.step("Clear dmesg log"):
clear_dmesg()
with TestRun.step("Prepare devices for caches and cores"):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(2, Unit.GibiByte)])
cache_dev = cache_dev.partitions[0]
cache_dev2 = TestRun.disks["cache2"]
cache_dev2.create_partitions([Size(2, Unit.GibiByte)])
cache_dev2 = cache_dev2.partitions[0]
core_dev1 = TestRun.disks["core"]
core_dev2 = TestRun.disks["core2"]
core_dev1.create_partitions([Size(2, Unit.GibiByte)] * 2)
core_dev2.create_partitions([Size(2, Unit.GibiByte)] * 2)
with TestRun.step("Start 2 caches with different parameters and add core to each"):
cache1 = casadm.start_cache(
cache_dev, force=True, cache_line_size=cache_line_size1
)
if cache1.has_volatile_metadata():
pytest.skip("Non-volatile metadata needed to run this test")
for core in core_dev1.partitions:
cache1.add_core(core)
cache2 = casadm.start_cache(
cache_dev2, force=True, cache_line_size=cache_line_size2
)
for core in core_dev2.partitions:
cache2.add_core(core)
cores_in_cache1_before = {
core.core_device.path for core in casadm_parser.get_cores(cache_id=cache1.cache_id)
}
with TestRun.step(f"Set cache modes for caches to {cache_mode1} and {cache_mode2}"):
cache1.set_cache_mode(cache_mode1)
cache2.set_cache_mode(cache_mode2)
with TestRun.step("Stop second cache"):
cache2.stop()
with TestRun.step("Detach first cache device"):
cache1.detach()
with TestRun.step("Try to attach the other cache device to first cache without force flag"):
try:
cache1.attach(device=cache_dev2)
TestRun.fail("Cache attached successfully"
"Expected: cache fail to attach")
except CmdException as exc:
check_stderr_msg(exc.output, attach_with_existing_metadata)
TestRun.LOGGER.info("Cache attach failed as expected")
with TestRun.step("Attach the other cache device to first cache with force flag"):
cache1.attach(device=cache_dev2, force=True)
cores_after_attach = casadm_parser.get_cores(cache_id=cache1.cache_id)
with TestRun.step("Verify if old configuration doesn`t affect new cache"):
cores_in_cache1 = {core.core_device.path for core in cores_after_attach}
if cores_in_cache1 != cores_in_cache1_before:
TestRun.fail(
f"After attaching cache device, core list has changed:"
f"\nUsed {cores_in_cache1}"
f"\nShould use {cores_in_cache1_before}."
)
if cache1.get_cache_line_size() == cache_line_size2:
TestRun.fail(
f"After attaching cache device, cache line size changed:"
f"\nUsed {cache_line_size2}"
f"\nShould use {cache_line_size1}."
)
if cache1.get_cache_mode() != cache_mode1:
TestRun.fail(
f"After attaching cache device, cache mode changed:"
f"\nUsed {cache1.get_cache_mode()}"
f"\nShould use {cache_mode1}."
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("cache2", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WT])
def test_attach_detach_md5sum(cache_mode):
"""
title: Test for md5sum of file after attach/detach operation.
description: |
Test data integrity after detach/attach operations
pass_criteria:
- CAS doesn't crash during attach and detach.
- md5sums before and after operations match each other
"""
with TestRun.step("Prepare cache and core devices"):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(2, Unit.GibiByte)])
cache_dev = cache_dev.partitions[0]
cache_dev2 = TestRun.disks["cache2"]
cache_dev2.create_partitions([Size(3, Unit.GibiByte)])
cache_dev2 = cache_dev2.partitions[0]
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(6, Unit.GibiByte)])
core_dev = core_dev.partitions[0]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_dev, force=True, cache_mode=cache_mode)
core = cache.add_core(core_dev)
with TestRun.step(f"Change cache mode to {cache_mode}"):
cache.set_cache_mode(cache_mode)
with TestRun.step("Create a filesystem on the core device and mount it"):
if check_if_directory_exists(mountpoint):
remove(mountpoint, force=True, recursive=True)
create_directory(path=mountpoint)
core.create_filesystem(Filesystem.xfs)
core.mount(mountpoint)
with TestRun.step("Write data to the exported object"):
test_file_main = create_random_test_file(
target_file_path=posixpath.join(mountpoint, "test_file"),
file_size=Size(5, Unit.GibiByte),
)
with TestRun.step("Calculate test file md5sums before detach"):
test_file_md5sum_before = test_file_main.md5sum()
with TestRun.step("Detach cache device"):
cache.detach()
with TestRun.step("Attach different cache device"):
cache.attach(device=cache_dev2, force=True)
with TestRun.step("Calculate cache test file md5sums after cache attach"):
test_file_md5sum_after = test_file_main.md5sum()
with TestRun.step("Compare test file md5sums"):
if test_file_md5sum_before != test_file_md5sum_after:
TestRun.fail(
f"MD5 sums of core before and after do not match."
f"Expected: {test_file_md5sum_before}"
f"Actual: {test_file_md5sum_after}"
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
def test_stop_cache_during_attach(cache_mode):
"""
title: Test cache stop during attach.
description: Test for handling concurrent cache attach and stop.
pass_criteria:
- No system crash.
- Stop operation completed successfully.
"""
with TestRun.step("Create null_blk device for cache"):
nullblk = NullBlk.create(size_gb=1500)
with TestRun.step("Prepare cache and core devices"):
cache_dev = nullblk[0]
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(2, Unit.GibiByte)])
core_dev = core_dev.partitions[0]
with TestRun.step(f"Start cache and add core"):
cache = casadm.start_cache(cache_dev, force=True, cache_mode=cache_mode)
cache.add_core(core_dev)
with TestRun.step(f"Change cache mode to {cache_mode}"):
cache.set_cache_mode(cache_mode)
with TestRun.step("Detach cache"):
cache.detach()
with TestRun.step("Start cache re-attach in background"):
TestRun.executor.run_in_background(
attach_cache_cmd(str(cache.cache_id), cache_dev.path)
)
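        # Give the background attach command a moment to start before stopping the cache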
time.sleep(1)
with TestRun.step("Stop cache"):
cache.stop()
with TestRun.step("Verify if cache stopped"):
caches = casadm_parser.get_caches()
if caches:
TestRun.fail(
"Cache is still running despite stop operation"
"expected behaviour: Cache stopped"
"actual behaviour: Cache running"
)
def _get_random_uniq_cache_line_size(cache_line_size) -> CacheLineSize:
return random.choice([c for c in list(CacheLineSize) if c is not cache_line_size])
def _get_random_uniq_cache_mode(cache_mode) -> CacheMode:
return random.choice([c for c in list(CacheMode) if c is not cache_mode])


@@ -1,20 +1,25 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2023-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

import pytest

from api.cas import casadm
from api.cas.cas_module import CasModule
from api.cas.cli_messages import check_stderr_msg, attach_not_enough_memory
from connection.utils.output import CmdException
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from type_def.size import Unit, Size
from test_tools.os_tools import (drop_caches,
                                 is_kernel_module_loaded,
                                 load_kernel_module,
                                 unload_kernel_module,
                                 )
from test_tools.memory import disable_memory_affecting_functions, get_mem_free, allocate_memory, \
    get_mem_available, unmount_ramfs


@pytest.mark.os_dependent

@@ -65,3 +70,51 @@ def test_insufficient_memory_for_cas_module():
        TestRun.LOGGER.info(f"Cannot load OpenCAS module as expected.\n{output.stderr}")
    else:
        TestRun.LOGGER.error("Loading OpenCAS module successfully finished, but should fail.")

@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("cache2", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_attach_cache_min_ram():
"""
title: Test attach cache with insufficient memory.
description: |
Check for valid message when attaching cache with insufficient memory.
pass_criteria:
      - CAS attach operation fails due to insufficient RAM.
- No system crash.
"""
with TestRun.step("Prepare devices"):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(2, Unit.GibiByte)])
cache_dev = cache_dev.partitions[0]
cache_dev2 = TestRun.disks["cache2"]
core_dev = TestRun.disks["core"]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_dev, force=True)
cache.add_core(core_dev)
with TestRun.step("Detach cache"):
cache.detach()
with TestRun.step("Set RAM workload"):
disable_memory_affecting_functions()
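        # Allocate almost all available RAM, leaving only ~100 MB free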
allocate_memory(get_mem_available() - Size(100, Unit.MegaByte))
with TestRun.step("Try to attach cache"):
try:
TestRun.LOGGER.info(
f"There is {get_mem_available().unit.MebiByte.value} available memory left"
)
cache.attach(device=cache_dev2, force=True)
TestRun.LOGGER.error(
f"Cache attached not as expected."
f"{get_mem_available()} is enough memory to complete operation")
except CmdException as exc:
check_stderr_msg(exc.output, attach_not_enough_memory)
with TestRun.step("Unlock RAM memory"):
unmount_ramfs()


@@ -1,17 +1,25 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# Copyright(c) 2023-2025 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#

import posixpath
import pytest

from datetime import timedelta

from api.cas import casadm, casadm_parser, cli
from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait
from api.cas.casadm_parser import wait_for_flushing
from api.cas.cli import attach_cache_cmd
from connection.utils.output import CmdException
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from storage_devices.nullblk import NullBlk
from test_tools.dd import Dd
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_tools.fs_tools import Filesystem, create_random_test_file
from test_tools.os_tools import DropCachesMode, sync, drop_caches
from test_tools.udev import Udev

@@ -446,7 +454,7 @@ def test_interrupt_cache_stop(cache_mode, filesystem):
        core.mount(mount_point)

    with TestRun.step(f"Create test file in mount point of exported object."):
        create_test_file()

    with TestRun.step("Get number of dirty data on exported object before interruption."):
        sync()

@@ -487,6 +495,144 @@ def test_interrupt_cache_stop(cache_mode, filesystem):
        core_part.unmount()

@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
def test_interrupt_attach(cache_mode):
"""
title: Test for attach interruption.
description: Validate handling interruption of cache attach.
pass_criteria:
- No crash during attach interruption.
- Cache attach completed successfully.
- No system crash.
"""
with TestRun.step("Prepare cache and core devices"):
nullblk = NullBlk.create(size_gb=1500)
cache_dev = nullblk[0]
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(2, Unit.GibiByte)])
core_dev = core_dev.partitions[0]
with TestRun.step("Start cache and add core"):
cache = casadm.start_cache(cache_dev, force=True, cache_mode=cache_mode)
cache.add_core(core_dev)
with TestRun.step(f"Change cache mode to {cache_mode}"):
cache.set_cache_mode(cache_mode)
with TestRun.step("Detach cache"):
cache.detach()
with TestRun.step("Start attaching cache in background"):
cache_attach_pid = TestRun.executor.run_in_background(
attach_cache_cmd(
cache_id=str(cache.cache_id),
cache_dev=cache_dev.path
)
)
with TestRun.step("Try to interrupt cache attaching"):
TestRun.executor.kill_process(cache_attach_pid)
with TestRun.step("Wait for cache attach to end"):
TestRun.executor.wait_cmd_finish(
cache_attach_pid, timeout=timedelta(minutes=10)
)
with TestRun.step("Verify if cache attach ended successfully"):
caches = casadm_parser.get_caches()
if len(caches) != 1:
TestRun.fail(f"Wrong amount of caches: {len(caches)}, expected: 1")
if caches[0].cache_device.path == cache_dev.path:
TestRun.LOGGER.info("Operation ended successfully")
else:
TestRun.fail(
"Cache attaching failed"
"expected behaviour: attach completed successfully"
"actual behaviour: attach interrupted"
)
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites))
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_detach_interrupt_cache_flush(filesystem, cache_mode):
"""
title: Test for flush interruption using cache detach operation.
description: Validate handling detach during cache flush.
pass_criteria:
- No system crash.
- Detach operation doesn't stop cache flush.
"""
with TestRun.step("Prepare cache and core devices"):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(2, Unit.GibiByte)])
cache_dev = cache_dev.partitions[0]
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(5, Unit.GibiByte)])
core_dev = core_dev.partitions[0]
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start cache"):
cache = casadm.start_cache(cache_dev, force=True, cache_mode=cache_mode)
with TestRun.step(f"Change cache mode to {cache_mode}"):
cache.set_cache_mode(cache_mode)
with TestRun.step(f"Add core device with {filesystem} filesystem and mount it"):
core_dev.create_filesystem(filesystem)
core = cache.add_core(core_dev)
core.mount(mount_point)
with TestRun.step("Populate cache with dirty data"):
fio = (
Fio()
.create_command()
.size(Size(4, Unit.GibiByte))
.read_write(ReadWrite.randrw)
.io_engine(IoEngine.libaio)
.block_size(Size(1, Unit.Blocks4096))
.target(posixpath.join(mount_point, "test_file"))
)
fio.run()
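        # Make sure the workload actually produced dirty data so there is something to flush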
if cache.get_dirty_blocks() <= Size.zero():
TestRun.fail("Failed to populate cache with dirty data")
if core.get_dirty_blocks() <= Size.zero():
TestRun.fail("There is no dirty data on core")
with TestRun.step("Start flushing cache"):
flush_pid = TestRun.executor.run_in_background(
cli.flush_cache_cmd(str(cache.cache_id))
)
with TestRun.step("Interrupt cache flushing by cache detach"):
wait_for_flushing(cache, core)
percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id)
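        # Poll the flushing progress until at least half of the dirty data has been flushed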
while percentage < 50:
percentage = casadm_parser.get_flushing_progress(
cache.cache_id, core.core_id
)
with TestRun.step("Detach cache"):
try:
cache.detach()
TestRun.fail("Cache detach during flush succeed, expected: fail")
except CmdException:
TestRun.LOGGER.info(
"Cache detach during flush failed, as expected"
)
TestRun.executor.wait_cmd_finish(flush_pid)
cache.detach()

def prepare():
    cache_dev = TestRun.disks["cache"]
    cache_dev.create_partitions([cache_size])


@@ -0,0 +1,103 @@
#
# Copyright(c) 2023-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import random
import time
import pytest
from datetime import timedelta
from api.cas.cache_config import CacheMode
from api.cas.casadm import start_cache
from core.test_run import TestRun
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from type_def.size import Size, Unit
mountpoint = "/mnt/cas"
test_file_path = f"{mountpoint}/test_file"
iterations_per_config = 10
cache_size = Size(16, Unit.GibiByte)
start_size = Size(512, Unit.Byte).get_value()
stop_size = Size(32, Unit.MegaByte).get_value()
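# fio block size range boundaries for the random workload: 512 B to 32 MB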
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("cache2", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
def test_attach_detach_during_io(cache_mode):
"""
title: Test for attach/detach cache during I/O.
description: |
        Validate that attach and detach operations do not interrupt
        I/O on the exported object.
pass_criteria:
- No crash during attach and detach.
- Detaching cache doesn't stop I/O on exported object.
- Cache can be stopped after operations.
"""
with TestRun.step("Prepare cache and core devices"):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(40, Unit.MebiByte)])
cache_dev = cache_dev.partitions[0]
cache_dev2 = TestRun.disks["cache2"]
cache_dev2.create_partitions([Size(60, Unit.MebiByte), Size(100, Unit.MebiByte),
Size(50, Unit.MebiByte), Size(80, Unit.MebiByte)])
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(1, Unit.GibiByte)])
core_dev = core_dev.partitions[0]
with TestRun.step("Start cache and add core"):
cache = start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step(f"Change cache mode to {cache_mode}"):
cache.set_cache_mode(cache_mode)
with TestRun.step("Run random mixed read and write workload"):
fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.randrw)
.run_time(timedelta(minutes=20))
.time_based()
.target(core.path)
.blocksize_range([(start_size, stop_size)])
)
fio_pid = fio.run_in_background()
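        # Let fio ramp up before the first detach/attach cycle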
time.sleep(5)
with TestRun.step("Randomly detach and attach cache during I/O"):
while TestRun.executor.check_if_process_exists(fio_pid):
time.sleep(random.randint(2, 10))
cache.detach()
if cache.get_statistics().error_stats.cache.total != 0.0:
                TestRun.LOGGER.error("Cache error(s) occurred after cache detach")
time.sleep(5)
cache_to_attach = random.choice(cache_dev2.partitions)
cache.attach(device=cache_to_attach, force=True)
if cache.get_statistics().error_stats.cache.total != 0.0:
TestRun.LOGGER.error(
f"Cache error(s) occurred after "
f"{cache_to_attach} attach"
)
with TestRun.step("Check fio result after I/O finish."):
TestRun.executor.wait_cmd_finish(fio_pid)
fio_output = TestRun.executor.run(f"cat {fio.fio.fio_file}")
fio_errors = fio.get_results(fio_output.stdout)[0].total_errors()
if fio_output.exit_code != 0 and fio_errors != 0:
TestRun.fail("Fio error(s) occurred!")


@@ -0,0 +1,78 @@
#
# Copyright(c) 2023-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import math
import pytest
from api.cas.casadm import start_cache
from core.test_run import TestRun
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.memory import get_mem_free
from test_tools.os_tools import sync, drop_caches
from test_tools.udev import Udev
from type_def.size import Size, Unit
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_detach_memory_release():
"""
title: Test for detecting if memory was released after detach operation.
description: |
        Validate that ctx memory is released after the detach operation.
pass_criteria:
- Memory used by cache device is released after detach operation.
- No system crash.
"""
with TestRun.step("Prepare disks"):
cache_dev = TestRun.disks["cache"]
if cache_dev.size < Size(100, Unit.GibiByte):
TestRun.LOGGER.warning(
f"To avoid false-positive scenarios it is better to use "
f"cache disk greater than 100GiB. "
f"Current cache device size: {cache_dev.size.get_value(Unit.GibiByte)}GiB"
)
cache_dev.create_partitions([cache_dev.size - Size(1, Unit.GibiByte)])
else:
cache_dev.create_partitions([Size(100, Unit.GibiByte)])
cache_dev = cache_dev.partitions[0]
core_dev = TestRun.disks["core"]
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Get RAM size before cache start"):
sync()
drop_caches()
memory_before_cache_start = get_mem_free()
with TestRun.step("Start cache and add core"):
cache = start_cache(cache_dev, force=True)
cache.add_core(core_dev)
with TestRun.step("Detach cache"):
cache.detach()
sync()
drop_caches()
memory_after_detach = get_mem_free()
with TestRun.step("Calculate memory usage"):
metadata_released = math.isclose(
memory_after_detach.get_value(),
memory_before_cache_start.get_value(),
rel_tol=0.1
)
if not metadata_released:
TestRun.fail(
f"Memory kept by ctx after detach operation\n"
f"Memory before cache start: {memory_before_cache_start}\n"
f"Memory after detach: {memory_after_detach}"
)
TestRun.LOGGER.info("Memory released successfully")