tests: update tests

Signed-off-by: Kamil Gierszewski <kamil.gierszewski@huawei.com>
This commit is contained in:
Kamil Gierszewski
2024-08-29 12:04:26 +02:00
parent ec0e03fb39
commit e8bdcdae4f
23 changed files with 2129 additions and 1665 deletions

View File

@@ -1,142 +0,0 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas.cache_config import CacheMode, CacheLineSize
from core.test_run import TestRun
from api.cas import cli, casadm
from api.cas.cli_messages import (
check_stderr_msg,
start_cache_on_already_used_dev,
start_cache_with_existing_id
)
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.size import Size, Unit
def test_cas_version():
    """
    title: Check if CAS is installed
    description:
      Check if CAS is installed with --version flag and later
      checks if components version is consistent with version file
    pass criteria:
      - casadm command succeeds
      - Versions are matched from cmd and file in /var/lib/opencas/cas_version
    """
    cmd = "casadm --version -o csv"
    output = TestRun.executor.run_expect_success(cmd).stdout
    cmd_cas_versions = output.split("\n")[1:]

    version_file_path = r"/var/lib/opencas/cas_version"
    file_read_cmd = f"cat {version_file_path} | grep CAS_VERSION="
    file_cas_version_str = TestRun.executor.run_expect_success(file_read_cmd).stdout
    # Strip the trailing newline from the shell output - otherwise the
    # comparison below would never match even for identical versions.
    file_cas_version = file_cas_version_str.split('=')[1].strip()

    for version in cmd_cas_versions:
        splitted_version = version.split(",")
        # Skip blank/malformed lines (e.g. a trailing empty line in the csv
        # output) to avoid an IndexError on splitted_version[1].
        if len(splitted_version) < 2:
            continue
        if splitted_version[1] != file_cas_version:
            TestRun.LOGGER.error(f"""Version of {splitted_version[0]} from cmd doesn't match
            with file. Expected: {file_cas_version} Actual: {splitted_version[1]}""")
@pytest.mark.CI
@pytest.mark.require_disk("cache_1", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_negative_start_cache():
    """
    title: Test start cache negative on cache device
    description:
        Check for negative cache start scenarios
    pass criteria:
      - Cache start succeeds
      - Fails to start cache on the same device with another id
      - Fails to start cache on another partition with the same id
    """
    with TestRun.step("Set up device"):
        cache_disk = TestRun.disks["cache_1"]
        cache_disk.create_partitions([Size(2000, Unit.MebiByte)] * 2)
        first_part, second_part = cache_disk.partitions[:2]

    with TestRun.step("Start cache on cache device"):
        TestRun.executor.run_expect_success(
            cli.start_cmd(first_part.path, cache_id="1", force=True)
        )

    with TestRun.step("Start cache on the same device but with another ID"):
        # Starting a second cache instance on an already-used device must fail
        # with the dedicated "device already used" error.
        output = TestRun.executor.run_expect_fail(
            cli.start_cmd(first_part.path, cache_id="2", force=True)
        )
        if not check_stderr_msg(output, start_cache_on_already_used_dev):
            TestRun.fail(f"Received unexpected error message: {output.stderr}")

    with TestRun.step("Start cache with the same ID on another cache device"):
        # Reusing an existing cache ID on a different partition must fail
        # with the dedicated "ID already exists" error.
        output = TestRun.executor.run_expect_fail(
            cli.start_cmd(second_part.path, cache_id="1", force=True)
        )
        if not check_stderr_msg(output, start_cache_with_existing_id):
            TestRun.fail(f"Received unexpected error message: {output.stderr}")
@pytest.mark.CI
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_data_integrity(cache_mode, cache_line_size, filesystem):
    """
    title: Check basic data integrity after stopping the cache
    pass criteria:
      - System does not crash.
      - All operations complete successfully.
      - Data consistency is preserved.
    """
    cache_id = core_id = 1
    mountpoint = "/mnt"
    filepath = f"{mountpoint}/file"

    with TestRun.step("Prepare partitions for cache (200MiB) and for core (100MiB)"):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(200, Unit.MebiByte)])
        cache_part = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(100, Unit.MebiByte)])
        core_part = core_disk.partitions[0]

    with TestRun.step("Start cache and add core device"):
        cache = casadm.start_cache(cache_part, cache_mode, cache_line_size, cache_id, True)
        core = cache.add_core(core_part, core_id)

    with TestRun.step("Create filesystem on CAS device and mount it"):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)

    with TestRun.step("Create test file and calculate md5 checksum"):
        dd = (
            Dd()
            .input("/dev/urandom")
            .output(filepath)
            .count(1)
            .block_size(Size(50, Unit.MebiByte))
        )
        dd.run()
        test_file = File(filepath)
        md5_before = test_file.md5sum()

    with TestRun.step("Unmount and stop the cache"):
        core.unmount()
        cache.flush_cache()
        cache.stop()

    with TestRun.step("Mount the core device and check for file"):
        # After the cache is stopped, the file written through the CAS device
        # must be readable - unchanged - directly from the core partition.
        core_part.mount(mountpoint)
        md5_after = test_file.md5sum()
        if md5_before != md5_after:
            TestRun.fail("md5 checksum mismatch!")

View File

@@ -1,124 +0,0 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from time import sleep
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheStatus
from api.cas.core import CoreStatus
from core.test_run import TestRun
from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType
from test_utils.size import Size, Unit
@pytest.mark.CI
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_incremental_load_basic():
    """
    title: Incremental load test basic
    description: |
        Test incremental load and core pool functionality
    pass_criteria:
      - cores after start and load should be in active state and cache in running state
      - cores after adding to core pool are in inactive state and cache in incomplete state
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 3)
        core_devs = core_disk.partitions
        cache_id = 1
        core_ids = [1, 2, 3]

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, cache_id=cache_id)
        if cache.get_status() is not CacheStatus.running:
            # Fixed: report the cache's own id (cache has no core_id attribute).
            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")

    with TestRun.step("Add cores."):
        for core_dev in core_devs:
            core = cache.add_core(core_dev)
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Add cores to core pool."):
        cores = []
        for core_dev, core_id in zip(core_devs, core_ids):
            core = casadm.try_add(core_device=core_dev, cache_id=cache_id, core_id=core_id)
            cores.append(core)
            if core.get_status() is not CoreStatus.detached:
                TestRun.fail(f"Core {core.core_id} should be detached but is {core.get_status()}.")

    with TestRun.step("Load cache"):
        cache = casadm.load_cache(cache_dev)
        if cache.get_status() is not CacheStatus.running:
            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")
        # Cores from the pool should have been picked up by the loaded cache.
        for core in cores:
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
@pytest.mark.CI
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_incremental_load_hidden_core():
    """
    title: Incremental load test with hidden core
    description: |
        Test incremental load and core pool functionality with hidden core partition
    pass_criteria:
      - cores after adding to core pool are in detached state
      - visible cores after start and load should be in active state
      - hidden core after load should be in inactive state
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 3)
        core_devs = core_disk.partitions
        cache_id = 1

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, cache_id=cache_id)
        if cache.get_status() is not CacheStatus.running:
            # Fixed: report the cache's own id (cache has no core_id attribute).
            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")

    with TestRun.step("Add cores."):
        for core_dev in core_devs:
            core = cache.add_core(core_dev)
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")
        # Keep a handle to the third core - its partition will be removed below
        # so it cannot be found during load.
        hidden_core = cache.get_core_devices()[2]

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Hide core part form from being loaded"):
        core_disk.remove_partitions()
        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 2)

    with TestRun.step("Load cache"):
        cache = casadm.load_cache(cache_dev)
        # One core device is gone, so the cache must come up incomplete.
        if cache.get_status() is not CacheStatus.incomplete:
            TestRun.fail(
                f"Cache {cache.cache_id} should be incomplete but is "
                f"{cache.get_status()}."
            )
        for core in cache.get_core_devices():
            if core.get_status() is not CoreStatus.active:
                TestRun.fail(f"Core {core.core_id} should be Active but is {core.get_status()}.")
        if hidden_core.get_status() is not CoreStatus.inactive:
            TestRun.fail(f"Hidden core should be Inactive but is {hidden_core.get_status()}.")

View File

@@ -1,211 +0,0 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode
from api.cas.cli import casadm_bin
from api.cas.cli_messages import check_stderr_msg, stop_cache_errors
from core.test_run import TestRun
from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType, Disk
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem, unmount, mount
from test_tools.fs_utils import check_if_file_exists
from test_utils.filesystem.file import File
from test_utils.os_utils import sync
from test_utils.size import Size, Unit
mount_point = "/mnt/cas"
@pytest.mark.CI
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_recover_cache_verify_core():
    """
    title: Recovery after turning off/on devices
    description: |
        Test data integrity after turning off cache device.
    pass_criteria:
      - Cache devices successfully loaded with metadata after turning devices off/on
      - md5sums before and after all operations match each other
      - creation, mount, unmount of filesystems on the core device succeeds
    """
    filesystems = [Filesystem.xfs, Filesystem.ext3, Filesystem.ext4]
    cache_cnt = len(filesystems)

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(2, Unit.GibiByte)] * cache_cnt)
        cache_devs = cache_disk.partitions
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(4, Unit.GibiByte)] * cache_cnt)
        core_devs = core_disk.partitions

    with TestRun.step("Start caches and add cores."):
        caches, cores = [], []
        for (cache_dev, core_dev) in zip(cache_devs, core_devs):
            cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
            core = cache.add_core(core_dev)
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Create filesystem on core devices."):
        for (core, filesystem) in zip(cores, filesystems):
            core.create_filesystem(filesystem)

    with TestRun.step("Mount cache devices."):
        for (cache, core) in zip(caches, cores):
            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
            core.mount(core_mnt_point)

    with TestRun.step("Run IO"):
        # Fixed: write a test file on EVERY core, not only the one whose mount
        # point leaked from the previous loop - the md5 step below reads the
        # file from every core's mount point.
        for core in cores:
            dd = (
                Dd()
                .input("/dev/urandom")
                .output(f"{core.mount_point}/test")
                .count(1)
                .block_size(Size(50, Unit.MegaByte))
            )
            dd.run()
        # Flush page cache so the data reaches the CAS devices before unplug.
        sync()

    with TestRun.step("Calculate cache md5sums before unplug."):
        core_mnt_md5s_before = [File(f"{core.mount_point}/test").md5sum() for core in cores]

    with TestRun.step("Umount core devices."):
        for core in cores:
            core.unmount()

    with TestRun.step("Dirty stop"):
        dirty_stop(cache_disk, caches)

    with TestRun.step("Start caches with load metadata and later stop them."):
        for cache_dev in cache_devs:
            cache = casadm.load_cache(cache_dev)
            cache.stop()

    with TestRun.step("Mount core devices."):
        for core, cache in zip(cores, caches):
            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
            mount(core.core_device, core_mnt_point)
            core.mount_point = core_mnt_point
            if not check_if_file_exists(f"{core_mnt_point}/test"):
                TestRun.LOGGER.error(f"Mounting core device {core_mnt_point} failed.")

    with TestRun.step("Calculate cache md5sums after recovery."):
        core_mnt_md5s_after = [File(f"{core.mount_point}/test").md5sum() for core in cores]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        if core_mnt_md5s_before != core_mnt_md5s_after:
            TestRun.fail(
                f"MD5 sums of core before and after does not match."
                f"Expected: {core_mnt_md5s_before}, Actual: {core_mnt_md5s_after}"
            )

    with TestRun.step("Umount core devices."):
        for core_dev in core_devs:
            unmount(core_dev)
@pytest.mark.CI
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_recover_cache_verify_exp_obj():
    """
    title: Recovery after turning off/on devices
    description: |
        Test data integrity after turning off cache device.
    pass_criteria:
      - Cache devices successfully loaded with metadata after turning devices off/on
      - md5sums before and after all operations match each other
      - creation, mount, unmount of filesystems succeeds on core exported object
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(2, Unit.GibiByte)] * 3)
        cache_devs = cache_disk.partitions
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(4, Unit.GibiByte)] * 3)
        core_devs = core_disk.partitions

    with TestRun.step("Start caches and add cores."):
        caches, cores = [], []
        for (cache_dev, core_dev) in zip(cache_devs, core_devs):
            cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
            core = cache.add_core(core_dev)
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Create filesystem on core devices."):
        filesystems = [Filesystem.xfs, Filesystem.ext3, Filesystem.ext4]
        for (core, filesystem) in zip(cores, filesystems):
            core.create_filesystem(filesystem)

    with TestRun.step("Mount cache devices."):
        for (cache, core) in zip(caches, cores):
            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
            core.mount(core_mnt_point)

    with TestRun.step("Run IO"):
        # Fixed: write a test file on EVERY core, not only the one whose mount
        # point leaked from the previous loop - the md5 step below reads the
        # file from every core's mount point.
        for core in cores:
            dd = (
                Dd()
                .input("/dev/urandom")
                .output(f"{core.mount_point}/test")
                .count(1)
                .block_size(Size(50, Unit.MegaByte))
            )
            dd.run()
        sync()

    with TestRun.step("Calculate cache md5sums before unplug."):
        core_mnt_md5s_before = [File(f"{core.mount_point}/test").md5sum() for core in cores]

    with TestRun.step("Umount core devices."):
        for core in cores:
            core.unmount()

    with TestRun.step("Dirty stop"):
        dirty_stop(cache_disk, caches)

    with TestRun.step("Load caches with metadata."):
        for cache_dev in cache_devs:
            casadm.load_cache(cache_dev)

    with TestRun.step("Mount core devices."):
        for core, cache in zip(cores, caches):
            core_mnt_point = f"{mount_point}-{cache.cache_id}-{core.core_id}"
            core.mount(core_mnt_point)
            if not check_if_file_exists(f"{core_mnt_point}/test"):
                TestRun.LOGGER.error(f"Mounting core device {core_mnt_point} failed.")

    with TestRun.step("Calculate cache md5sums after recovery."):
        core_mnt_md5s_after = [File(f"{core.mount_point}/test").md5sum() for core in cores]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        if core_mnt_md5s_before != core_mnt_md5s_after:
            TestRun.fail(
                f"MD5 sums of core before and after does not match."
                f"Expected: {core_mnt_md5s_before}, Actual: {core_mnt_md5s_after}"
            )

    with TestRun.step("Umount core devices."):
        for core in cores:
            core.unmount()
def dirty_stop(cache_disk, caches: list):
    """Unplug the cache disk, stop every cache without flushing (each stop is
    expected to report an error), then plug all disks back in."""
    with TestRun.step("Turn off cache devices."):
        cache_disk.unplug()

    with TestRun.step("Stop caches without flushing."):
        for cache in caches:
            stop_cmd = f"{casadm_bin} --stop-cache --cache-id {cache.cache_id} --no-data-flush"
            result = TestRun.executor.run(stop_cmd)
            # With the device gone, stopping must emit one of the known errors.
            if not check_stderr_msg(result, stop_cache_errors):
                TestRun.fail(f"Cache {cache.cache_id} stopping should fail.")

    with TestRun.step("Turn on devices."):
        Disk.plug_all_disks()

View File

@@ -1,54 +0,0 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from time import sleep
import pytest
from api.cas import casadm_parser
from api.cas.cache_config import CacheStatus
from api.cas.core import CoreStatus
from api.cas.init_config import InitConfig
from core.test_run import TestRun
from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType
from test_utils.size import Size, Unit
@pytest.mark.CI
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_simulation_startup_from_config():
    """
    title: Simulated boot startup from config
    description: |
        Initialize CAS from the init config file, stop it, then simulate the
        boot process with a udev trigger and verify the instance comes back up.
    pass_criteria:
      - cache is in running state after simulated boot
      - core is in active state after simulated boot
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(4, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]
        cache_id, core_id = 1, 1

    with TestRun.step("prepare CAS config."):
        cache_config = InitConfig()
        cache_config.add_cache(cache_id, cache_dev)
        cache_config.add_core(cache_id, core_id, core_dev)
        cache_config.save_config_file()

    with TestRun.step("Initialize CAS from config."):
        TestRun.executor.run_expect_success("casctl init")

    with TestRun.step("Stop all CAS instances."):
        TestRun.executor.run_expect_success("casctl stop")

    with TestRun.step("Simulate boot process."):
        TestRun.executor.run_expect_success("udevadm trigger")
        # Give udev a moment to process the trigger before checking state.
        sleep(1)

    with TestRun.step("Verify if cache is up and working."):
        cache = casadm_parser.get_caches()[0]
        if cache.get_status() is not CacheStatus.running:
            TestRun.fail(f"Cache {cache.cache_id} should be running but is {cache.get_status()}.")
        core = cache.get_core_devices()[0]
        if core.get_status() is not CoreStatus.active:
            TestRun.fail(f"Core {core.core_id} should be active but is {core.get_status()}.")