diff --git a/test/functional/tests/conftest.py b/test/functional/tests/conftest.py index 2b68485..ff7b3aa 100644 --- a/test/functional/tests/conftest.py +++ b/test/functional/tests/conftest.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -19,6 +19,7 @@ from api.cas import installer from api.cas import casadm from api.cas import git from storage_devices.raid import Raid +from storage_devices.ramdisk import RamDisk from test_utils.os_utils import Udev, kill_all_io from test_utils.disk_finder import get_disk_serial_number from test_tools.disk_utils import PartitionTable, create_partition_table @@ -134,12 +135,23 @@ def pytest_runtest_teardown(): Udev.enable() kill_all_io() unmount_cas_devices() + + from storage_devices.drbd import Drbd + if installer.check_if_installed() and Drbd.is_installed(): + try: + casadm.stop_all_caches() + finally: + __drbd_cleanup() + elif Drbd.is_installed(): + Drbd.down_all() + if installer.check_if_installed(): casadm.remove_all_detached_cores() casadm.stop_all_caches() from api.cas.init_config import InitConfig InitConfig.create_default_init_config() DeviceMapper.remove_all() + RamDisk.remove_all() except Exception as ex: TestRun.LOGGER.warning(f"Exception occurred during platform cleanup.\n" f"{str(ex)}\n{traceback.format_exc()}") @@ -197,6 +209,15 @@ def get_force_param(item): return item.config.getoption("--force-reinstall") +def __drbd_cleanup(): + from storage_devices.drbd import Drbd + Drbd.down_all() + # If drbd instance had been configured on top of the CAS, the previous attempt to stop + # failed. As drbd has been stopped, try to stop CAS one more time. 
+ if installer.check_if_installed(): + casadm.stop_all_caches() + + def base_prepare(item): with TestRun.LOGGER.step("Cleanup before test"): TestRun.executor.run("pkill --signal=SIGKILL fsck") @@ -214,6 +235,10 @@ def base_prepare(item): except Exception: pass # TODO: Reboot DUT if test is executed remotely + from storage_devices.drbd import Drbd + if Drbd.is_installed(): + __drbd_cleanup() + raids = Raid.discover() for raid in raids: # stop only those RAIDs, which are comprised of test disks @@ -229,6 +254,8 @@ def base_prepare(item): Mdadm.zero_superblock(os.path.join('/dev', device.get_device_id())) Udev.settle() + RamDisk.remove_all() + for disk in TestRun.dut.disks: disk_serial = get_disk_serial_number(disk.path) if disk.serial_number != disk_serial: diff --git a/test/functional/tests/example/example_test.py b/test/functional/tests/example/example_test.py index 2b5af32..4cf06cc 100644 --- a/test/functional/tests/example/example_test.py +++ b/test/functional/tests/example/example_test.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -8,6 +8,10 @@ import pytest from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet from storage_devices.raid import Raid, RaidConfiguration, MetadataVariant, Level +from storage_devices.ramdisk import RamDisk +from test_utils.drbd import Resource, Node +from storage_devices.drbd import Drbd +from test_tools.drbdadm import Drbdadm from test_tools import fs_utils from test_tools.disk_utils import Filesystem from test_utils.filesystem.directory import Directory @@ -147,3 +151,65 @@ def test_example_multidut(): TestRun.LOGGER.info(dut1_ex.run_expect_success("which casctl").stdout) for name, disk in TestRun.disks.items(): TestRun.LOGGER.info(f"{name}: {disk.path}") + + +@pytest.mark.require_disk("drbd_device", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.multidut(2) +def 
test_drbd_example(): + """ + title: Example test using DRBD API. + description: Create primary and secondary resources on two DUTs using drbd. + pass_criteria: + - primary drbd resource created. + - secondary drbd resource created. + """ + with TestRun.step("Check if DRBD is installed"): + for dut in TestRun.duts: + with TestRun.use_dut(dut): + if not Drbd.is_installed(): + TestRun.fail(f"DRBD is not installed on DUT {dut.ip}") + + with TestRun.step("Prepare DUTs"): + dut1, dut2 = TestRun.duts + + nodes = [] + for dut in TestRun.duts: + with TestRun.use_dut(dut): + TestRun.dut.hostname = TestRun.executor.run_expect_success("uname -n").stdout + drbd_dev = TestRun.disks["drbd_device"] + drbd_md_dev = RamDisk.create(Size(100, Unit.MebiByte), 1)[0] + drbd_dev.create_partitions([Size(200, Unit.MebiByte)]) + drbd_dev = drbd_dev.partitions[0] + + nodes.append( + Node(TestRun.dut.hostname, drbd_dev.path, drbd_md_dev.path, dut.ip, "7790") + ) + + caches = Resource(name="caches", device="/dev/drbd0", nodes=nodes) + + with TestRun.step("Create DRBD config file on both DUTs"): + for dut in TestRun.duts: + with TestRun.use_dut(dut): + TestRun.LOGGER.info(f"Saving config file on dut {dut.ip}") + caches.save() + + with TestRun.use_dut(dut1), TestRun.step(f"Create a DRBD instance on {dut1}"): + primary = Drbd(caches) + primary.create_metadata() + primary.up() + + with TestRun.use_dut(dut2), TestRun.step(f"Create a DRBD instance on {dut2}"): + secondary = Drbd(caches) + secondary.create_metadata() + secondary.up() + + with TestRun.use_dut(dut1), TestRun.step(f"Set {dut1} as primary node"): + primary.set_primary(force=True) + + with TestRun.use_dut(dut1), TestRun.step("Wait for drbd to sync"): + primary.wait_for_sync() + + with TestRun.step("Test cleanup"): + for dut in TestRun.duts: + with TestRun.use_dut(dut): + Drbdadm.down(caches.name)