Merge pull request #1576 from Kamoppl/kamilg/speed_up_TF

speed up tests/conftest
Katarzyna Treder 2024-11-25 14:23:08 +01:00 committed by GitHub
commit c3bb599f0e
2 changed files with 144 additions and 140 deletions


@@ -8,12 +8,12 @@ import os
import posixpath
import sys
import traceback
from datetime import timedelta
import paramiko
import pytest
import yaml
from datetime import timedelta
sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
from core.test_run import Blocked
@@ -35,12 +35,23 @@ from test_utils.singleton import Singleton
from storage_devices.lvm import Lvm, LvmConfiguration
class Opencas(metaclass=Singleton):
def __init__(self, repo_dir, working_dir):
self.repo_dir = repo_dir
self.working_dir = working_dir
self.already_updated = False
self.fuzzy_iter_count = 1000
def pytest_addoption(parser):
TestRun.addoption(parser)
parser.addoption("--dut-config", action="append", type=str)
parser.addoption(
"--log-path",
action="store",
default=f"{os.path.join(os.path.dirname(__file__), '../results')}",
)
parser.addoption("--fuzzy-iter-count", action="store")
def pytest_configure(config):
TestRun.configure(config)
def pytest_generate_tests(metafunc):
TestRun.generate_tests(metafunc)
def pytest_collection_modifyitems(config, items):
@@ -63,15 +74,16 @@ def pytest_runtest_setup(item):
# The user can also provide their own test wrapper, which runs test prepare, cleanup, etc.
# In that case it should be placed in the plugins package
test_name = item.name.split('[')[0]
TestRun.LOGGER = create_log(item.config.getoption('--log-path'), test_name)
test_name = item.name.split("[")[0]
TestRun.LOGGER = create_log(item.config.getoption("--log-path"), test_name)
duts = item.config.getoption('--dut-config')
duts = item.config.getoption("--dut-config")
required_duts = next(item.iter_markers(name="multidut"), None)
required_duts = required_duts.args[0] if required_duts is not None else 1
if required_duts > len(duts):
raise Exception(f"Test requires {required_duts} DUTs, only {len(duts)} DUT configs "
f"provided")
raise Exception(
f"Test requires {required_duts} DUTs, only {len(duts)} DUT configs provided"
)
else:
duts = duts[:required_duts]
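# Illustrative sketch (not taken from this diff) of how a test opts into multiple
# DUTs via the "multidut" marker checked above; the test name is hypothetical.
import pytest

@pytest.mark.multidut(2)
def test_example_on_two_duts():
    ...

# Such a test then needs at least as many configs on the command line:
#   pytest --dut-config=dut1.yml --dut-config=dut2.yml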
@@ -81,12 +93,13 @@ def pytest_runtest_setup(item):
with open(dut) as cfg:
dut_config = yaml.safe_load(cfg)
except Exception as ex:
raise Exception(f"{ex}\n"
f"You need to specify DUT config. See the example_dut_config.py file")
raise Exception(
f"{ex}\nYou need to specify DUT config. See the example_dut_config.py file"
)
dut_config['plugins_dir'] = os.path.join(os.path.dirname(__file__), "../lib")
dut_config['opt_plugins'] = {"test_wrapper": {}, "serial_log": {}, "power_control": {}}
dut_config['extra_logs'] = {"cas": "/var/log/opencas.log"}
dut_config["plugins_dir"] = os.path.join(os.path.dirname(__file__), "../lib")
dut_config["opt_plugins"] = {"test_wrapper": {}, "serial_log": {}, "power_control": {}}
dut_config["extra_logs"] = {"cas": "/var/log/opencas.log"}
try:
TestRun.prepare(item, dut_config)
@@ -98,20 +111,22 @@ def pytest_runtest_setup(item):
raise
except Exception:
try:
TestRun.plugin_manager.get_plugin('power_control').power_cycle()
TestRun.plugin_manager.get_plugin("power_control").power_cycle()
TestRun.executor.wait_for_connection()
except Exception:
raise Exception("Failed to connect to DUT.")
TestRun.setup()
except Exception as ex:
raise Exception(f"Exception occurred during test setup:\n"
f"{str(ex)}\n{traceback.format_exc()}")
raise Exception(
f"Exception occurred during test setup:\n{str(ex)}\n{traceback.format_exc()}"
)
TestRun.usr = Opencas(
repo_dir=os.path.join(os.path.dirname(__file__), "../../.."),
working_dir=dut_config['working_dir'])
if item.config.getoption('--fuzzy-iter-count'):
TestRun.usr.fuzzy_iter_count = int(item.config.getoption('--fuzzy-iter-count'))
working_dir=dut_config["working_dir"],
)
if item.config.getoption("--fuzzy-iter-count"):
TestRun.usr.fuzzy_iter_count = int(item.config.getoption("--fuzzy-iter-count"))
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
TestRun.dut.plugin_manager = TestRun.plugin_manager
@@ -123,6 +138,69 @@ def pytest_runtest_setup(item):
TestRun.LOGGER.start_group("Test body")
def base_prepare(item):
with TestRun.LOGGER.step("Cleanup before test"):
TestRun.executor.run("pkill --signal=SIGKILL fsck")
Udev.enable()
kill_all_io(graceful=False)
DeviceMapper.remove_all()
if installer.check_if_installed():
try:
from api.cas.init_config import InitConfig
InitConfig.create_default_init_config()
unmount_cas_devices()
casadm.stop_all_caches()
casadm.remove_all_detached_cores()
except Exception:
pass # TODO: Reboot DUT if test is executed remotely
remove(str(opencas_drop_in_directory), recursive=True, ignore_errors=True)
from storage_devices.drbd import Drbd
if Drbd.is_installed():
__drbd_cleanup()
lvms = Lvm.discover()
if lvms:
Lvm.remove_all()
LvmConfiguration.remove_filters_from_config()
raids = Raid.discover()
if len(TestRun.disks):
test_run_disk_ids = {dev.device_id for dev in TestRun.disks.values()}
for raid in raids:
# stop only those RAIDs which consist of test disks
if all(dev.device_id in test_run_disk_ids for dev in raid.array_devices):
raid.remove_partitions()
raid.unmount()
raid.stop()
for device in raid.array_devices:
Mdadm.zero_superblock(posixpath.join("/dev", device.get_device_id()))
Udev.settle()
RamDisk.remove_all()
for disk in TestRun.disks.values():
disk_serial = get_disk_serial_number(disk.path)
if disk.serial_number and disk.serial_number != disk_serial:
raise Exception(
f"Serial for {disk.path} doesn't match the one from the config."
f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
)
disk.remove_partitions()
disk.unmount()
Mdadm.zero_superblock(posixpath.join("/dev", disk.get_device_id()))
create_partition_table(disk, PartitionTable.gpt)
TestRun.usr.already_updated = True
TestRun.LOGGER.add_build_info(f"Commit hash:")
TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
TestRun.LOGGER.add_build_info(f"Commit message:")
TestRun.LOGGER.add_build_info(f"{git.get_current_commit_message()}")
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
res = (yield).get_result()
@@ -142,16 +220,18 @@ def pytest_runtest_teardown():
if not TestRun.executor.is_active():
TestRun.executor.wait_for_connection()
Udev.enable()
kill_all_io()
kill_all_io(graceful=False)
unmount_cas_devices()
if installer.check_if_installed():
casadm.remove_all_detached_cores()
casadm.stop_all_caches()
from api.cas.init_config import InitConfig
InitConfig.create_default_init_config()
from storage_devices.drbd import Drbd
if installer.check_if_installed() and Drbd.is_installed():
try:
casadm.stop_all_caches()
@@ -163,38 +243,28 @@ def pytest_runtest_teardown():
DeviceMapper.remove_all()
RamDisk.remove_all()
except Exception as ex:
TestRun.LOGGER.warning(f"Exception occurred during platform cleanup.\n"
f"{str(ex)}\n{traceback.format_exc()}")
TestRun.LOGGER.warning(
f"Exception occurred during platform cleanup.\n"
f"{str(ex)}\n{traceback.format_exc()}"
)
TestRun.LOGGER.end()
for dut in TestRun.duts:
with TestRun.use_dut(dut):
if TestRun.executor:
os.makedirs(os.path.join(TestRun.LOGGER.base_dir, "dut_info",
dut.ip if dut.ip is not None
else dut.config.get("host")),
exist_ok=True)
os.makedirs(
os.path.join(
TestRun.LOGGER.base_dir,
"dut_info",
dut.ip if dut.ip is not None else dut.config.get("host"),
),
exist_ok=True,
)
TestRun.LOGGER.get_additional_logs()
Log.destroy()
TestRun.teardown()
def pytest_configure(config):
TestRun.configure(config)
def pytest_generate_tests(metafunc):
TestRun.generate_tests(metafunc)
def pytest_addoption(parser):
TestRun.addoption(parser)
parser.addoption("--dut-config", action="append", type=str)
parser.addoption("--log-path", action="store",
default=f"{os.path.join(os.path.dirname(__file__), '../results')}")
parser.addoption("--fuzzy-iter-count", action="store")
def unmount_cas_devices():
output = TestRun.executor.run("cat /proc/mounts | grep cas")
# If the exit code is 1 and stdout is empty, there are no mounted CAS devices
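# Standalone sketch of the exit-code convention described above, using plain
# subprocess instead of the framework executor (assumption for illustration:
# grep exits with 1 when it matches nothing and with a code > 1 on real errors).
import subprocess

result = subprocess.run("cat /proc/mounts | grep cas", shell=True,
                        capture_output=True, text=True)
if result.returncode == 1 and not result.stdout:
    pass  # nothing CAS-related is mounted
elif result.returncode == 0:
    mount_points = [line.split()[1] for line in result.stdout.splitlines()]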
@@ -219,6 +289,7 @@ def unmount_cas_devices():
def __drbd_cleanup():
from storage_devices.drbd import Drbd
Drbd.down_all()
# If a DRBD instance had been configured on top of CAS, the previous attempt to stop
# the caches failed. Now that DRBD has been stopped, try to stop CAS one more time.
@@ -226,64 +297,9 @@ def __drbd_cleanup():
casadm.stop_all_caches()
def base_prepare(item):
with TestRun.LOGGER.step("Cleanup before test"):
TestRun.executor.run("pkill --signal=SIGKILL fsck")
Udev.enable()
kill_all_io()
DeviceMapper.remove_all()
if installer.check_if_installed():
try:
from api.cas.init_config import InitConfig
InitConfig.create_default_init_config()
unmount_cas_devices()
casadm.stop_all_caches()
casadm.remove_all_detached_cores()
except Exception:
pass # TODO: Reboot DUT if test is executed remotely
remove(str(opencas_drop_in_directory), recursive=True, ignore_errors=True)
from storage_devices.drbd import Drbd
if Drbd.is_installed():
__drbd_cleanup()
lvms = Lvm.discover()
if lvms:
Lvm.remove_all()
LvmConfiguration.remove_filters_from_config()
raids = Raid.discover()
for raid in raids:
# stop only those RAIDs, which are comprised of test disks
if all(map(lambda device:
any(map(lambda disk_path:
disk_path in device.get_device_id(),
[bd.get_device_id() for bd in TestRun.dut.disks])),
raid.array_devices)):
raid.remove_partitions()
raid.unmount()
raid.stop()
for device in raid.array_devices:
Mdadm.zero_superblock(posixpath.join('/dev', device.get_device_id()))
Udev.settle()
RamDisk.remove_all()
for disk in TestRun.dut.disks:
disk_serial = get_disk_serial_number(disk.path)
if disk.serial_number and disk.serial_number != disk_serial:
raise Exception(
f"Serial for {disk.path} doesn't match the one from the config."
f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
)
disk.remove_partitions()
disk.unmount()
Mdadm.zero_superblock(posixpath.join('/dev', disk.get_device_id()))
create_partition_table(disk, PartitionTable.gpt)
TestRun.usr.already_updated = True
TestRun.LOGGER.add_build_info(f'Commit hash:')
TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
TestRun.LOGGER.add_build_info(f'Commit message:')
TestRun.LOGGER.add_build_info(f'{git.get_current_commit_message()}')
class Opencas(metaclass=Singleton):
def __init__(self, repo_dir, working_dir):
self.repo_dir = repo_dir
self.working_dir = working_dir
self.already_updated = False
self.fuzzy_iter_count = 1000


@@ -11,6 +11,7 @@ import pytest
from api.cas import casadm, cli_messages
from api.cas.cache_config import CacheLineSize
from core.test_run import TestRun
from storage_devices.device import Device
from storage_devices.disk import DiskTypeSet, DiskType
from storage_devices.partition import Partition
from test_tools import disk_utils, fs_utils
@@ -24,20 +25,17 @@ from test_utils.size import Size, Unit
@pytest.mark.require_plugin("scsi_debug")
def test_device_capabilities():
"""
title: Test whether CAS device capabilities are properly set.
description: |
Test if CAS device takes into consideration differences between devices which are used to
create it.
pass_criteria:
- CAS device starts successfully using differently configured devices.
- CAS device capabilities are as expected.
title: Test whether CAS device capabilities are properly set.
description: |
Test if CAS device takes into consideration differences between devices which are used to
create it.
pass_criteria:
- CAS device starts successfully using differently configured devices.
- CAS device capabilities are as expected.
"""
core_device = TestRun.disks['core']
max_io_size_path = posixpath.join(disk_utils.get_sysfs_path(core_device.get_device_id()),
'queue/max_sectors_kb')
default_max_io_size = fs_utils.read_file(max_io_size_path)
core_device = TestRun.disks["core"]
default_max_io_size = core_device.get_max_io_size()
iteration_settings = [
{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 1024},
@@ -48,7 +46,7 @@ def test_device_capabilities():
{"device": "SCSI-debug module",
"dev_size_mb": 2048, "logical_block_size": 2048, "max_sectors_kb": 1024},
{"device": "standard core device",
"max_sectors_kb": int(default_max_io_size)},
"max_sectors_kb": int(default_max_io_size.get_value(Unit.KibiByte))},
{"device": "standard core device", "max_sectors_kb": 128}
]
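# Illustrative sketch: the SCSI-debug entries above map onto module parameters
# roughly like this (dev_size_mb and sector_size are real scsi_debug parameters;
# treating "logical_block_size" as sector_size is an assumption based on
# create_scsi_debug_device below, and max_sectors_kb is applied separately via sysfs).
setting = {"device": "SCSI-debug module",
           "dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 1024}
modprobe_cmd = (f"modprobe scsi_debug dev_size_mb={setting['dev_size_mb']} "
                f"sector_size={setting['logical_block_size']}")
# e.g. run on the DUT: TestRun.executor.run(modprobe_cmd)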
@@ -106,8 +104,8 @@ def create_scsi_debug_device(sector_size: int, physblk_exp: int, dev_size_mb=102
def prepare_cas_device(cache_device, core_device):
cache = casadm.start_cache(cache_device, cache_line_size=CacheLineSize.LINE_64KiB, force=True)
try:
cache_dev_bs = disk_utils.get_block_size(cache_device.get_device_id())
core_dev_bs = disk_utils.get_block_size(core_device.get_device_id())
cache_dev_bs = disk_utils.get_block_size(cache_device.device_id)
core_dev_bs = disk_utils.get_block_size(core_device.device_id)
core = cache.add_core(core_device)
if cache_dev_bs > core_dev_bs:
TestRun.LOGGER.error(
@@ -145,19 +143,9 @@ capabilities = {"logical_block_size": max,
"write_same_max_bytes": min}
def measure_capabilities(dev):
dev_capabilities = {}
dev_id = dev.parent_device.get_device_id() if isinstance(dev, Partition) \
else dev.get_device_id()
for c in capabilities:
path = posixpath.join(disk_utils.get_sysfs_path(dev_id), 'queue', c)
command = f"cat {path}"
output = TestRun.executor.run(command)
if output.exit_code == 0:
val = int(output.stdout)
dev_capabilities.update({c: val})
else:
TestRun.LOGGER.info(f"Could not measure capability: {c} for {dev_id}")
def measure_capabilities(dev: Device) -> dict:
dev_capabilities = {capability: int(dev.get_sysfs_property(capability))
for capability in capabilities}
return dev_capabilities
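# Plain-Python sketch of the same lookup for reference; assumes the standard
# sysfs layout /sys/block/<device>/queue/<attribute> on the DUT.
def read_queue_attribute(device_id: str, attribute: str) -> int:
    with open(f"/sys/block/{device_id}/queue/{attribute}") as f:
        return int(f.read().strip())

# e.g. read_queue_attribute("sdb", "logical_block_size") -> 512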
@@ -167,10 +155,10 @@ def compare_capabilities(cache_device, core_device, cache, core, msg):
cli_messages.try_add_core_sector_size_mismatch)
else:
core_dev_sectors_num = \
disk_utils.get_size(core_device.get_device_id()) / disk_utils.get_block_size(
core_device.get_device_id())
core_sectors_num = disk_utils.get_size(core.get_device_id()) / disk_utils.get_block_size(
core.get_device_id())
disk_utils.get_size(core_device.device_id) / disk_utils.get_block_size(
core_device.device_id)
core_sectors_num = disk_utils.get_size(core.device_id) / disk_utils.get_block_size(
core.device_id)
if core_dev_sectors_num != core_sectors_num:
TestRun.LOGGER.error(
"Number of sectors in CAS device and attached core device is different.")