diff --git a/test/functional/tests/conftest.py b/test/functional/tests/conftest.py
index a103f15..34523fb 100644
--- a/test/functional/tests/conftest.py
+++ b/test/functional/tests/conftest.py
@@ -5,11 +5,11 @@
 
 import os
 import sys
+import traceback
 from datetime import timedelta
 
 import pytest
 import yaml
-import traceback
 
 sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
 
@@ -17,9 +17,11 @@ from core.test_run_utils import TestRun
 from api.cas import installer
 from api.cas import casadm
 from api.cas import git
+from storage_devices.raid import Raid
 from test_utils.os_utils import Udev, kill_all_io
 from test_tools.disk_utils import PartitionTable, create_partition_table
 from test_tools.device_mapper import DeviceMapper
+from test_tools.mdadm import Mdadm
 from log.logger import create_log, Log
 from test_utils.singleton import Singleton
 
@@ -178,8 +180,20 @@ def base_prepare(item):
             except Exception:
                 pass  # TODO: Reboot DUT if test is executed remotely
 
+        raids = Raid.discover()
+        for raid in raids:
+            # stop only those RAIDs, which are comprised of test disks
+            if all(map(
+                    lambda d: d.system_path in [bd.system_path for bd in TestRun.dut.disks],
+                    raid.array_devices
+            )):
+                raid.umount_all_partitions()
+                raid.remove_partitions()
+                raid.stop()
+
         for disk in TestRun.dut.disks:
             disk.umount_all_partitions()
+            Mdadm.zero_superblock(disk.system_path)
             disk.remove_partitions()
             create_partition_table(disk, PartitionTable.gpt)
 
diff --git a/test/functional/tests/example/example_test.py b/test/functional/tests/example/example_test.py
index 050774d..5d550e8 100644
--- a/test/functional/tests/example/example_test.py
+++ b/test/functional/tests/example/example_test.py
@@ -4,13 +4,15 @@
 #
 
 import pytest
-from test_tools.disk_utils import Filesystem
-from test_utils.size import Size, Unit
+
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet
-from test_utils.filesystem.file import File
-from test_utils.filesystem.directory import Directory
+from storage_devices.raid import Raid, RaidConfiguration, MetadataVariant, Level
 from test_tools import fs_utils
+from test_tools.disk_utils import Filesystem
+from test_utils.filesystem.directory import Directory
+from test_utils.filesystem.file import File
+from test_utils.size import Size, Unit
 
 
 def setup_module():
@@ -30,7 +32,7 @@ def test_create_example_partitions():
     test_disk = TestRun.disks['cache']
 
     with TestRun.group("Repartition disk"):
-        with TestRun.step("Genetare partitions table"):
+        with TestRun.step("Generate partitions table"):
             part_sizes = []
             for i in range(1, 6):
                 part_sizes.append(Size(10 * i + 100, Unit.MebiByte))
@@ -41,11 +43,41 @@
             test_disk.partitions[i].create_filesystem(Filesystem.ext3)
 
 
+@pytest.mark.require_disk("cache1", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("cache2", DiskTypeSet([DiskType.optane, DiskType.nand]))
+def test_raid_example():
+    """
+        title: Example test using RAID API.
+        description: Create and discover RAID volumes.
+        pass_criteria:
+          - RAID created.
+          - RAID discovered.
+    """
+    with TestRun.step("Prepare"):
+        test_disk_1 = TestRun.disks['cache1']
+        test_disk_2 = TestRun.disks['cache2']
+
+    with TestRun.step("Create RAID"):
+        config = RaidConfiguration(
+            level=Level.Raid1,
+            metadata=MetadataVariant.Imsm,
+            number_of_devices=2,
+            size=Size(20, Unit.GiB)
+        )
+        raid = Raid.create(config, [test_disk_1, test_disk_2])
+
+    with TestRun.group("Discover RAIDs"):
+        raids = Raid.discover()
+
+    with TestRun.group("Check if created RAID was discovered"):
+        if raid not in raids:
+            TestRun.LOGGER.error("Created RAID not discovered in system!")
+
 
 def test_create_example_files():
     """
         title: Example test manipulating on filesystem.
-        description: Perform various operaations on filesystem.
+        description: Perform various operations on filesystem.
         pass_criteria:
           - System does not crash.
           - All operations complete successfully.
@@ -84,4 +116,3 @@
             TestRun.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
     with TestRun.step("Remove file"):
         fs_utils.remove(file1.full_path, True)
-
diff --git a/test/functional/tests/incremental_load/test_udev.py b/test/functional/tests/incremental_load/test_udev.py
index 45082d5..3e23589 100644
--- a/test/functional/tests/incremental_load/test_udev.py
+++ b/test/functional/tests/incremental_load/test_udev.py
@@ -11,6 +11,7 @@ from api.cas.core import CoreStatus, CacheMode, CacheStatus
 from api.cas.init_config import InitConfig
 from core.test_run import TestRun
 from storage_devices.disk import DiskTypeSet, DiskTypeLowerThan, DiskType
+from storage_devices.raid import RaidConfiguration, Raid, Level, MetadataVariant
 from test_utils.size import Size, Unit
 
 
@@ -99,6 +100,51 @@ def test_udev_core():
         TestRun.fail(f"Core status is {core.get_status()} instead of active.")
 
 
+@pytest.mark.os_dependent
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.hdd4k, DiskType.sata]))
+@pytest.mark.require_disk("core2", DiskTypeSet([DiskType.hdd, DiskType.hdd4k, DiskType.sata]))
+def test_udev_raid_core():
+    """
+        title: CAS udev rule execution for core after recreating RAID device existing in
+               configuration file as core.
+        description: |
+          Verify if CAS udev rule is executed for RAID volume recreated after soft reboot.
+        pass_criteria:
+          - No kernel error
+          - After reboot, the RAID volume is added to the cache instance and is in 'active' state
+    """
+    with TestRun.step("Test prepare."):
+        cache_disk = TestRun.disks["cache"]
+        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
+        cache_dev = cache_disk.partitions[0]
+        core_disk = TestRun.disks["core"]
+        core_disk2 = TestRun.disks["core2"]
+
+    with TestRun.step("Create RAID0 volume."):
+        config = RaidConfiguration(
+            level=Level.Raid0,
+            metadata=MetadataVariant.Legacy,
+            number_of_devices=2
+        )
+        core_dev = Raid.create(config, [core_disk, core_disk2])
+
+    with TestRun.step("Start cache and add core."):
+        cache = casadm.start_cache(cache_dev, force=True)
+        core = cache.add_core(core_dev)
+
+    with TestRun.step("Create init config from running CAS configuration."):
+        InitConfig.create_init_config_from_running_configuration()
+
+    with TestRun.step("Reboot system."):
+        TestRun.executor.reboot()
+
+    with TestRun.step("Check if core device is active and not in the core pool."):
+        check_if_dev_in_core_pool(core_dev, False)
+        if core.get_status() != CoreStatus.active:
+            TestRun.fail(f"Core status is {core.get_status()} instead of active.")
+
+
 @pytest.mark.os_dependent
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.parametrizex("cache_mode", CacheMode)