Move test-framework to its own repository

Signed-off-by: Robert Baldyga <baldyga.r@gmail.com>
Author: Robert Baldyga
Date: 2023-05-01 18:55:34 +02:00
Commit: 40f08a369a
89 changed files with 9914 additions and 0 deletions

4
test_utils/__init__.py Normal file

@@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#


@@ -0,0 +1,18 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import concurrent.futures
def start_async_func(func, *args):
"""
Starts an asynchronous task and returns a Future object, which in turn returns the
actual result after calling its result() method.
- result() waits for the task to complete.
- done() returns True when the task has finished (has a result or raised an exception),
otherwise returns False.
"""
executor = concurrent.futures.ThreadPoolExecutor()
return executor.submit(func, *args)
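
A brief usage sketch of the helper above; the worker function is hypothetical:

def slow_square(x):  # hypothetical worker used only for illustration
    return x * x

future = start_async_func(slow_square, 7)
# ... do other work; future.done() reports completion without blocking ...
result = future.result()  # blocks until the task finishes, returns 49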

190
test_utils/disk_finder.py Normal file

@@ -0,0 +1,190 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from core.test_run import TestRun
from test_tools import disk_utils
from test_tools.fs_utils import check_if_file_exists, readlink
from test_utils import os_utils
from test_utils.output import CmdException
def find_disks():
devices_result = []
TestRun.LOGGER.info("Finding platform's disks.")
# TODO: intelmas should be implemented as a separate tool in the future.
# An intelmas installer will be added for cases when it is not installed
output = TestRun.executor.run('intelmas')
if output.exit_code != 0:
raise Exception(f"Error while executing command: 'intelmas'.\n"
f"stdout: {output.stdout}\nstderr: {output.stderr}")
block_devices = get_block_devices_list()
try:
discover_ssd_devices(block_devices, devices_result)
discover_hdd_devices(block_devices, devices_result)
except Exception as e:
raise Exception(f"Exception occurred while looking for disks: {str(e)}")
return devices_result
def get_block_devices_list():
devices = TestRun.executor.run_expect_success("ls /sys/block -1").stdout.splitlines()
os_disks = get_system_disks()
block_devices = []
for dev in devices:
if ('sd' in dev or 'nvme' in dev) and dev not in os_disks:
block_devices.append(dev)
return block_devices
def discover_hdd_devices(block_devices, devices_res):
for dev in block_devices:
if TestRun.executor.run_expect_success(f"cat /sys/block/{dev}/removable").stdout == "1":
continue # skip removable drives
block_size = disk_utils.get_block_size(dev)
if int(block_size) == 4096:
disk_type = 'hdd4k'
else:
disk_type = 'hdd'
devices_res.append({
"type": disk_type,
"path": f"{resolve_to_by_id_link(dev)}",
"serial": TestRun.executor.run_expect_success(
f"sg_inq /dev/{dev} | grep -i 'serial number'"
).stdout.split(': ')[1].strip(),
"blocksize": block_size,
"size": disk_utils.get_size(dev)})
block_devices.clear()
# This method discovers only Intel SSD devices
def discover_ssd_devices(block_devices, devices_res):
ssd_count = int(TestRun.executor.run_expect_success(
'intelmas show -intelssd | grep DevicePath | wc -l').stdout)
for i in range(0, ssd_count):
# Workaround for an intelmas bug that lists all devices (non-Intel included)
# with -intelssd flag
if TestRun.executor.run(
f"intelmas show -display index -intelssd {i} | grep -w Intel").exit_code == 0:
device_path = TestRun.executor.run_expect_success(
f"intelmas show -intelssd {i} | grep DevicePath").stdout.split()[2]
dev = device_path.replace("/dev/", "")
if "sg" in dev:
sata_dev = TestRun.executor.run_expect_success(
f"sg_map | grep {dev}").stdout.split()[1]
dev = sata_dev.replace("/dev/", "")
if dev not in block_devices:
continue
serial_number = TestRun.executor.run_expect_success(
f"intelmas show -intelssd {i} | grep SerialNumber").stdout.split()[2].strip()
if 'nvme' not in device_path:
disk_type = 'sata'
device_path = dev
elif TestRun.executor.run(
f"intelmas show -intelssd {i} | grep Optane").exit_code == 0:
disk_type = 'optane'
else:
disk_type = 'nand'
devices_res.append({
"type": disk_type,
"path": resolve_to_by_id_link(device_path),
"serial": serial_number,
"blocksize": disk_utils.get_block_size(dev),
"size": disk_utils.get_size(dev)})
block_devices.remove(dev)
def get_disk_serial_number(dev_path):
commands = [
f"(udevadm info --query=all --name={dev_path} | grep 'SCSI.*_SERIAL' || "
f"udevadm info --query=all --name={dev_path} | grep 'ID_SERIAL_SHORT') | "
"awk --field-separator '=' '{print $NF}'",
f"sg_inq {dev_path} 2> /dev/null | grep '[Ss]erial number:' | "
"awk '{print $NF}'",
f"udevadm info --query=all --name={dev_path} | grep 'ID_SERIAL' | "
"awk --field-separator '=' '{print $NF}'"
]
for command in commands:
serial = TestRun.executor.run(command).stdout
if serial:
return serial.split('\n')[0]
return None
def get_all_serial_numbers():
serial_numbers = {}
block_devices = get_block_devices_list()
for dev in block_devices:
serial = get_disk_serial_number(dev)
try:
path = resolve_to_by_id_link(dev)
except Exception:
continue
if serial:
serial_numbers[serial] = path
else:
TestRun.LOGGER.warning(f"Device {path} ({dev}) does not have a serial number.")
serial_numbers[path] = path
return serial_numbers
def get_system_disks():
system_device = TestRun.executor.run_expect_success('mount | grep " / "').stdout.split()[0]
readlink_output = readlink(system_device)
device_name = readlink_output.split('/')[-1]
sys_block_path = os_utils.get_sys_block_path()
used_device_names = __get_slaves(device_name)
if not used_device_names:
used_device_names = [device_name]
disk_names = []
for device_name in used_device_names:
if check_if_file_exists(f'{sys_block_path}/{device_name}/partition'):
parent_device = readlink(f'{sys_block_path}/{device_name}/..').split('/')[-1]
disk_names.append(parent_device)
else:
disk_names.append(device_name)
return disk_names
def __get_slaves(device_name: str):
try:
device_names = TestRun.executor.run_expect_success(
f'ls {os_utils.get_sys_block_path()}/{device_name}/slaves').stdout.splitlines()
except CmdException as e:
if "No such file or directory" not in e.output.stderr:
raise
return None
device_list = []
for device_name in device_names:
slaves = __get_slaves(device_name)
if slaves:
for slave in slaves:
device_list.append(slave)
else:
device_list.append(device_name)
return device_list
def resolve_to_by_id_link(path):
by_id_paths = TestRun.executor.run_expect_success("ls /dev/disk/by-id -1").stdout.splitlines()
dev_full_paths = [posixpath.join("/dev/disk/by-id", by_id_path) for by_id_path in by_id_paths]
for full_path in dev_full_paths:
# handle exception for broken links
try:
if readlink(full_path) == readlink(posixpath.join("/dev", path)):
return full_path
except CmdException:
continue
raise ValueError(f'By-id device link not found for device {path}')
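
A usage sketch of the discovery helpers above, assuming a configured TestRun executor on the test machine:

disks = find_disks()
for disk in disks:
    # each entry is a dict with 'type', 'path', 'serial', 'blocksize' and 'size'
    print(disk["type"], disk["path"], disk["serial"])
serial_to_path = get_all_serial_numbers()  # maps serial number -> /dev/disk/by-id path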

61
test_utils/drbd.py Normal file

@@ -0,0 +1,61 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
from test_utils.filesystem.file import File
class Resource:
def __init__(self, name, device, nodes, options=None):
self.name = name
self.device = device
self.nodes = nodes
self.options = options
def __str__(self):
output = (
f"resource {self.name} {{ \n"
f" device {self.device}; \n"
f"{''.join([str(node) for node in self.nodes])}"
)
if self.options:
output += f" options {{\n"
for (k, v) in self.options.items():
output += f" {k} {v};\n"
output += f" }}\n"
output += f"}}"
return output
def __repr__(self):
return str(self)
def save(self, path="/etc/drbd.d/", filename=None):
filename = filename if filename else f"{self.name}.res"
file = File(path + filename)
file.write(str(self))
class Node:
def __init__(self, name, disk, meta_disk, ip, port):
self.name = name
self.disk = disk
self.meta_disk = meta_disk
self.ip = ip
self.port = port
def __str__(self):
return (
f" on {self.name} {{ \n"
f" disk {self.disk};\n"
f" meta-disk {self.meta_disk};\n"
f" address {self.ip}:{self.port};\n"
f" }} \n"
)
def __repr__(self):
return str(self)
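
A sketch of building and saving a two-node resource with the classes above; node names, devices and addresses are made up, and save() assumes a working TestRun executor:

nodes = [
    Node("node1", "/dev/vg/data", "internal", "192.168.0.1", 7790),
    Node("node2", "/dev/vg/data", "internal", "192.168.0.2", 7790),
]
resource = Resource(name="r0", device="/dev/drbd0", nodes=nodes,
                    options={"auto-promote": "yes"})
print(resource)   # rendered drbd.d configuration text
resource.save()   # writes /etc/drbd.d/r0.res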

43
test_utils/dut.py Normal file

@@ -0,0 +1,43 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from storage_devices.disk import Disk, DiskType
class Dut:
def __init__(self, dut_info):
self.config = dut_info
self.disks = []
for disk_info in dut_info.get('disks', []):
self.disks.append(Disk.create_disk(disk_info['path'],
DiskType[disk_info['type']],
disk_info['serial'],
disk_info['blocksize']))
self.disks.sort(key=lambda disk: disk.disk_type, reverse=True)
self.ipmi = dut_info['ipmi'] if 'ipmi' in dut_info else None
self.spider = dut_info['spider'] if 'spider' in dut_info else None
self.wps = dut_info['wps'] if 'wps' in dut_info else None
self.env = dut_info['env'] if 'env' in dut_info else None
self.ip = dut_info['ip'] if 'ip' in dut_info else "127.0.0.1"
def __str__(self):
dut_str = f'ip: {self.ip}\n'
dut_str += f'ipmi: {self.ipmi["ip"]}\n' if self.ipmi is not None else ''
dut_str += f'spider: {self.spider["ip"]}\n' if self.spider is not None else ''
dut_str += f'wps: {self.wps["ip"]} port: {self.wps["port"]}\n' \
if self.wps is not None else ''
dut_str += f'disks:\n'
for disk in self.disks:
dut_str += f"\t{disk}"
dut_str += "\n"
return dut_str
def get_disks_of_type(self, disk_type: DiskType):
ret_list = []
for d in self.disks:
if d.disk_type == disk_type:
ret_list.append(d)
return ret_list
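
A sketch of the dictionary shape the constructor above expects; all values are illustrative, and the 'nand' disk type is assumed to be a DiskType member based on the discovery code earlier in this commit:

dut_info = {
    'ip': '192.168.0.10',
    'disks': [
        {'path': '/dev/disk/by-id/example-nvme', 'type': 'nand',
         'serial': 'SN123', 'blocksize': 512},
    ],
}
dut = Dut(dut_info)
nand_disks = dut.get_disks_of_type(DiskType.nand)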


@@ -0,0 +1,113 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from textwrap import dedent
from string import Template
from pathlib import Path
from .systemd import enable_service, reload_daemon, systemd_service_directory, disable_service
from test_tools.fs_utils import (
create_file,
write_file,
remove,
)
class EmergencyEscape:
escape_marker = "EMERGENCY_ESCAPE"
escape_service = Path("emergency-escape.service")
escape_service_template = Template(
dedent(
f"""
[Unit]
After=emergency.target
IgnoreOnIsolate=true
DefaultDependencies=no
[Service]
Type=oneshot
ExecStart=/bin/sh -c '/usr/bin/echo "{escape_marker}" > /dev/kmsg'
$user_method
ExecStart=/usr/bin/systemctl daemon-reload
ExecStart=/usr/bin/systemctl default --no-block
[Install]
WantedBy=emergency.target
"""
).strip()
)
cleanup_service = Path("emergency-escape-cleanup.service")
cleanup_service_template = Template(
dedent(
"""
[Unit]
After=emergency-escape.service
IgnoreOnIsolate=true
DefaultDependencies=no
[Service]
Type=oneshot
$user_method
ExecStart=/usr/bin/systemctl disable emergency-escape.service
ExecStart=/usr/bin/rm -f /usr/lib/systemd/system/emergency-escape.service
ExecStart=/usr/bin/systemctl daemon-reload
[Install]
WantedBy=emergency-escape.service
"""
).strip()
)
def __init__(self):
self.escape_method = []
self.cleanup_method = []
def arm(self):
escape_path = str(systemd_service_directory / EmergencyEscape.escape_service)
cleanup_path = str(systemd_service_directory / EmergencyEscape.cleanup_service)
create_file(escape_path)
create_file(cleanup_path)
user_escape = "\n".join([f"ExecStart={method}" for method in self.escape_method])
user_cleanup = "\n".join([f"ExecStart={method}" for method in self.cleanup_method])
escape_contents = EmergencyEscape.escape_service_template.substitute(
user_method=user_escape
)
cleanup_contents = EmergencyEscape.cleanup_service_template.substitute(
user_method=user_cleanup
)
write_file(escape_path, escape_contents)
write_file(cleanup_path, cleanup_contents)
enable_service(EmergencyEscape.escape_service)
enable_service(EmergencyEscape.cleanup_service)
def cleanup(self):
remove(str(systemd_service_directory / EmergencyEscape.cleanup_service), ignore_errors=True)
remove(str(systemd_service_directory / EmergencyEscape.escape_service), ignore_errors=True)
reload_daemon()
@classmethod
def verify_trigger_in_log(cls, log_list):
for l in log_list:
if cls.escape_marker in l:
return True
return False
def add_escape_method_command(self, method):
self.escape_method.append(method)
def add_cleanup_method_command(self, method):
self.cleanup_method.append(method)
def __enter__(self):
self.arm()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.cleanup()
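
Since __enter__()/__exit__() call arm() and cleanup(), the class above can wrap a risky reboot scenario as a context manager; the escape command and the kernel log list below are illustrative:

escape = EmergencyEscape()
escape.add_escape_method_command("/usr/bin/touch /root/escaped")  # hypothetical recovery step
with escape:
    # power-cycle the DUT here; if it drops into emergency.target, the generated
    # service logs EMERGENCY_ESCAPE to /dev/kmsg and boots the default target
    pass
# after reboot, check the kernel log (a list of lines) for the marker:
triggered = EmergencyEscape.verify_trigger_in_log(kernel_log_lines)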


@@ -0,0 +1,31 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.fs_utils import check_if_directory_exists
from test_utils.filesystem.fs_item import FsItem
class Directory(FsItem):
def __init__(self, full_path):
FsItem.__init__(self, full_path)
def ls(self):
output = fs_utils.ls(f"{self.full_path}")
return fs_utils.parse_ls_output(output, self.full_path)
@staticmethod
def create_directory(path: str, parents: bool = False):
fs_utils.create_directory(path, parents)
output = fs_utils.ls_item(path)
return fs_utils.parse_ls_output(output)[0]
@staticmethod
def create_temp_directory(parent_dir_path: str = "/tmp"):
command = f"mktemp --directory --tmpdir={parent_dir_path}"
output = TestRun.executor.run_expect_success(command)
if not check_if_directory_exists(output.stdout):
TestRun.LOGGER.exception("'mktemp' succeeded, but the created directory does not exist")
return Directory(output.stdout)
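
A short usage sketch, assuming a working TestRun executor:

tmp_dir = Directory.create_temp_directory()  # mktemp --directory under /tmp
print(tmp_dir.full_path)
for item in tmp_dir.ls():                    # parsed `ls` entries of the new directory
    print(item.full_path)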

83
test_utils/filesystem/file.py Normal file

@@ -0,0 +1,83 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.dd import Dd
from test_utils.filesystem.fs_item import FsItem
from test_utils.size import Size
class File(FsItem):
def __init__(self, full_path):
FsItem.__init__(self, full_path)
def compare(self, other_file):
return fs_utils.compare(str(self), str(other_file))
def diff(self, other_file):
return fs_utils.diff(str(self), str(other_file))
def md5sum(self, binary=True):
output = TestRun.executor.run(
f"md5sum {'-b' if binary else ''} {self.full_path}")
if output.exit_code != 0:
raise Exception(f"Md5sum command execution failed! {output.stdout}\n{output.stderr}")
return output.stdout.split()[0]
def read(self):
return fs_utils.read_file(str(self))
def write(self, content, overwrite: bool = True):
fs_utils.write_file(str(self), content, overwrite)
self.refresh_item()
def get_properties(self):
return FileProperties(self)
@staticmethod
def create_file(path: str):
fs_utils.create_file(path)
output = fs_utils.ls_item(path)
return fs_utils.parse_ls_output(output)[0]
def padding(self, size: Size):
dd = Dd().input("/dev/zero").output(self).count(1).block_size(size)
dd.run()
self.refresh_item()
def remove(self, force: bool = False, ignore_errors: bool = False):
fs_utils.remove(str(self), force=force, ignore_errors=ignore_errors)
def copy(self,
destination,
force: bool = False,
recursive: bool = False,
dereference: bool = False):
fs_utils.copy(str(self), destination, force, recursive, dereference)
if fs_utils.check_if_directory_exists(destination):
path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
else:
path = destination
output = fs_utils.ls_item(path)
return fs_utils.parse_ls_output(output)[0]
class FileProperties:
def __init__(self, file):
file = fs_utils.parse_ls_output(fs_utils.ls_item(file.full_path))[0]
self.full_path = file.full_path
self.parent_dir = FsItem.get_parent_dir(self.full_path)
self.name = FsItem.get_name(self.full_path)
self.modification_time = file.modification_time
self.owner = file.owner
self.group = file.group
self.permissions = file.permissions
self.size = file.size
def __eq__(self, other):
return (self.permissions == other.permissions and self.size == other.size
and self.owner == other.owner and self.group == other.group
and self.name == other.name)
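
A short usage sketch of the File API above; the path is illustrative:

log = File.create_file("/tmp/example.txt")
log.write("hello\n")            # overwrites by default and refreshes metadata
checksum = log.md5sum()
props = log.get_properties()    # owner, group, permissions, size, modification time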

102
test_utils/filesystem/fs_item.py Normal file

@@ -0,0 +1,102 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from test_tools import fs_utils
class FsItem:
def __init__(self, full_path):
self.full_path = full_path
# all below values must be refreshed in refresh_item()
self.parent_dir = self.get_parent_dir(self.full_path)
self.name = self.get_name(self.full_path)
self.modification_time = None
self.owner = None
self.group = None
self.permissions = FsPermissions()
self.size = None
@staticmethod
def get_name(path):
head, tail = posixpath.split(path)
return tail or posixpath.basename(head)
@staticmethod
def get_parent_dir(path):
head, tail = posixpath.split(path)
if tail:
return head
else:
head, tail = posixpath.split(head)
return head
def __str__(self):
return self.full_path
def chmod_numerical(self, permissions: int, recursive: bool = False):
fs_utils.chmod_numerical(self.full_path, permissions, recursive)
self.refresh_item()
def chmod(self,
permissions: fs_utils.Permissions,
users: fs_utils.PermissionsUsers,
sign: fs_utils.PermissionSign = fs_utils.PermissionSign.set,
recursive: bool = False):
fs_utils.chmod(self.full_path, permissions, users, sign=sign, recursive=recursive)
self.refresh_item()
def chown(self, owner, group, recursive: bool = False):
fs_utils.chown(self.full_path, owner, group, recursive)
self.refresh_item()
def copy(self,
destination,
force: bool = False,
recursive: bool = False,
dereference: bool = False):
target_dir_exists = fs_utils.check_if_directory_exists(destination)
fs_utils.copy(str(self), destination, force, recursive, dereference)
if target_dir_exists:
path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
else:
path = destination
output = fs_utils.ls_item(f"{path}")
return fs_utils.parse_ls_output(output)[0]
def move(self,
destination,
force: bool = False):
target_dir_exists = fs_utils.check_if_directory_exists(destination)
fs_utils.move(str(self), destination, force)
if target_dir_exists:
self.full_path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
else:
self.full_path = destination
self.refresh_item()
return self
def refresh_item(self):
updated_file = fs_utils.parse_ls_output(fs_utils.ls_item(self.full_path))[0]
# keep order the same as in __init__()
self.parent_dir = updated_file.parent_dir
self.name = updated_file.name
self.modification_time = updated_file.modification_time
self.owner = updated_file.owner
self.group = updated_file.group
self.permissions = updated_file.permissions
self.size = updated_file.size
return self
class FsPermissions:
def __init__(self, user=None, group=None, other=None):
self.user = user
self.group = group
self.other = other
def __eq__(self, other):
return self.user == other.user and self.group == other.group and self.other == other.other
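
FsItem is normally used through its File and Directory subclasses; a brief sketch of the shared helpers, with illustrative paths:

item = File.create_file("/tmp/fs_item_demo")
item.chmod_numerical(644)
item.chown("root", "root")
item = item.move("/tmp/fs_item_demo_moved")  # full_path and metadata are refreshed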


@@ -0,0 +1,91 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from test_tools.fs_utils import (
readlink,
create_directory,
check_if_symlink_exists,
check_if_directory_exists,
)
from test_utils.filesystem.file import File
class Symlink(File):
def __init__(self, full_path):
File.__init__(self, full_path)
def md5sum(self, binary=True):
output = TestRun.executor.run_expect_success(
f"md5sum {'-b' if binary else ''} {self.get_target()}"
)
return output.stdout.split()[0]
def get_target(self):
return readlink(self.full_path)
def get_symlink_path(self):
return self.full_path
def remove_symlink(self):
path = self.get_symlink_path()
TestRun.executor.run_expect_success(f"rm -f {path}")
@classmethod
def create_symlink(cls, link_path: str, target: str, force: bool = False):
"""
Creates a new symlink, or overwrites an existing one if the force parameter is True
:param link_path: path where the symlink should be created
:param target: path of the object that the requested symlink points to
:param force: determines if an existing symlink with the same name should be overridden
:return: Symlink object located under link_path
"""
cmd = f"ln --symbolic {target} {link_path}"
is_dir = check_if_directory_exists(link_path)
parent_dir = cls.get_parent_dir(link_path)
if is_dir:
raise IsADirectoryError(f"'{link_path}' is an existing directory.")
if force:
if not check_if_directory_exists(parent_dir):
create_directory(parent_dir, True)
TestRun.executor.run_expect_success(f"rm -f {link_path}")
TestRun.executor.run_expect_success(cmd)
return cls(link_path)
@classmethod
def get_symlink(cls, link_path: str, target: str = None, create: bool = False):
"""
Request a Symlink (create new or identify existing)
:param link_path: full path of the requested Symlink
:param target: path of an object that the requested Symlink points to
(required if create is True)
:param create: determines if the requested Symlink should be created if it does not exist
:return: Symlink object located under link_path
"""
if create and not target:
raise AttributeError("Target is required for symlink creation.")
is_symlink = check_if_symlink_exists(link_path)
if is_symlink:
if not target or readlink(link_path) == readlink(target):
return cls(link_path)
else:
raise FileExistsError("Existing symlink points to a different target.")
elif not create:
raise FileNotFoundError("Requested symlink does not exist.")
is_dir = check_if_directory_exists(link_path)
if is_dir:
raise IsADirectoryError(
f"'{link_path}' is an existing directory." "\nUse a full path for symlink creation."
)
parent_dir = cls.get_parent_dir(link_path)
if not check_if_directory_exists(parent_dir):
create_directory(parent_dir, True)
cmd = f"ln --symbolic {target} {link_path}"
TestRun.executor.run_expect_success(cmd)
return cls(link_path)
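
A usage sketch of the two class methods above; paths are illustrative:

target_file = File.create_file("/tmp/symlink_target")
link = Symlink.get_symlink("/tmp/symlink_demo", target=target_file.full_path, create=True)
print(link.get_target())   # resolved target path
link.remove_symlink()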

20
test_utils/fstab.py Normal file

@@ -0,0 +1,20 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from test_tools import fs_utils
from test_utils import systemd
def add_mountpoint(device, mount_point, fs_type, mount_now=True):
fs_utils.append_line("/etc/fstab",
f"{device.path} {mount_point} {fs_type.name} defaults 0 0")
systemd.reload_daemon()
if mount_now:
systemd.restart_service("local-fs.target")
def remove_mountpoint(device):
fs_utils.remove_lines("/etc/fstab", device.path)
systemd.reload_daemon()
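
A usage sketch; 'device' stands for any object with a .path attribute (e.g. a disk from the DUT config) and 'fs_type' for an enum member whose .name is a valid fstab filesystem type - both are assumptions here:

add_mountpoint(device=device, mount_point="/mnt/test", fs_type=fs_type)
# ... run the test ...
remove_mountpoint(device=device)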

11
test_utils/generator.py Normal file

@@ -0,0 +1,11 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import random
import string
def random_string(length: int, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for i in range(length))

112
test_utils/io_stats.py Normal file

@@ -0,0 +1,112 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from core.test_run import TestRun
from test_utils.output import CmdException
SYSFS_LINE_FORMAT = r"^(\d+\s+){10,}\d+$"
PROCFS_LINE_FORMAT = r"^\d+\s+\d+\s+\w+\s+" + SYSFS_LINE_FORMAT[1:]
# This class represents block device I/O statistics.
# For more information see:
# https://www.kernel.org/doc/Documentation/admin-guide/iostats.rst
class IoStats:
def __init__(self):
self.reads = None # field 0
self.reads_merged = None # field 1
self.sectors_read = None # field 2
self.read_time_ms = None # field 3
self.writes = None # field 4
self.writes_merged = None # field 5
self.sectors_written = None # field 6
self.write_time_ms = None # field 7
self.ios_in_progress = None # field 8
self.io_time_ms = None # field 9
self.io_time_weighed_ms = None # field 10
# only in kernels 4.18+
self.discards = None # field 11
self.discards_merged = None # field 12
self.sectors_discarded = None # field 13
self.discard_time_ms = None # field 14
# only in kernels 5.5+
self.flushes = None # field 15
self.flush_time_ms = None # field 16
def __sub__(self, other):
if self.reads < other.reads:
raise Exception("Cannot subtract Reads")
if self.writes < other.writes:
raise Exception("Cannot subtract Writes")
stats = IoStats()
stats.reads = self.reads - other.reads
stats.reads_merged = self.reads_merged - other.reads_merged
stats.sectors_read = self.sectors_read - other.sectors_read
stats.read_time_ms = self.read_time_ms - other.read_time_ms
stats.writes = self.writes - other.writes
stats.writes_merged = self.writes_merged - other.writes_merged
stats.sectors_written = self.sectors_written - other.sectors_written
stats.write_time_ms = self.write_time_ms - other.write_time_ms
stats.ios_in_progress = 0
stats.io_time_ms = self.io_time_ms - other.io_time_ms
stats.io_time_weighed_ms = self.io_time_weighed_ms - other.io_time_weighed_ms
# discard/flush fields exist only on newer kernels, so subtract them only when
# both snapshots provide them
if self.discards is not None and other.discards is not None:
stats.discards = self.discards - other.discards
if self.discards_merged is not None and other.discards_merged is not None:
stats.discards_merged = self.discards_merged - other.discards_merged
if self.sectors_discarded is not None and other.sectors_discarded is not None:
stats.sectors_discarded = self.sectors_discarded - other.sectors_discarded
if self.discard_time_ms is not None and other.discard_time_ms is not None:
stats.discard_time_ms = self.discard_time_ms - other.discard_time_ms
if self.flushes is not None and other.flushes is not None:
stats.flushes = self.flushes - other.flushes
if self.flush_time_ms is not None and other.flush_time_ms is not None:
stats.flush_time_ms = self.flush_time_ms - other.flush_time_ms
return stats
@staticmethod
def parse(stats_line: str):
stats_line = stats_line.strip()
if re.match(SYSFS_LINE_FORMAT, stats_line):
fields = stats_line.split()
elif re.match(PROCFS_LINE_FORMAT, stats_line):
fields = stats_line.split()[3:]
else:
raise Exception(f"Wrong input format for diskstat parser")
values = [int(f) for f in fields]
stats = IoStats()
stats.reads = values[0]
stats.reads_merged = values[1]
stats.sectors_read = values[2]
stats.read_time_ms = values[3]
stats.writes = values[4]
stats.writes_merged = values[5]
stats.sectors_written = values[6]
stats.write_time_ms = values[7]
stats.ios_in_progress = values[8]
stats.io_time_ms = values[9]
stats.io_time_weighed_ms = values[10]
if len(values) > 11:
stats.discards = values[11]
stats.discards_merged = values[12]
stats.sectors_discarded = values[13]
stats.discard_time_ms = values[14]
if len(values) > 15:
stats.flushes = values[15]
stats.flush_time_ms = values[16]
return stats
@staticmethod
def get_io_stats(device_id):
stats_output = TestRun.executor.run_expect_success(
f"cat /proc/diskstats | grep '{device_id} '")
if not stats_output.stdout.strip():
raise CmdException("Failed to get statistics for device " + device_id, stats_output)
return IoStats.parse(stats_line=stats_output.stdout.splitlines()[0])
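
A sketch of measuring I/O issued to a device during a test step; the device id is illustrative:

before = IoStats.get_io_stats("sda")
# ... run the workload under test ...
after = IoStats.get_io_stats("sda")
delta = after - before
print(delta.reads, delta.writes, delta.sectors_written)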


@@ -0,0 +1,79 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from collections import defaultdict
class LinuxCommand:
def __init__(self, command_executor, command_name):
self.command_executor = command_executor
self.command_param = defaultdict(list)
self.command_flags = []
self.command_name = command_name
self.param_name_prefix = ''
self.param_separator = ' '
self.param_value_prefix = '='
self.param_value_list_separator = ','
self.command_env_var = defaultdict(list)
self.env_var_separator = ' '
self.env_var_value_prefix = '='
def run(self):
return self.command_executor.run(str(self))
def run_in_background(self):
return self.command_executor.run_in_background(str(self))
def set_flags(self, *flag):
for f in flag:
self.command_flags.append(f)
return self
def remove_flag(self, flag):
if flag in self.command_flags:
self.command_flags.remove(flag)
return self
def set_param(self, key, *values):
self.remove_param(key)
for val in values:
self.command_param[key].append(str(val))
return self
def remove_param(self, key):
if key in self.command_param:
del self.command_param[key]
return self
def set_env_var(self, key, *values):
self.remove_env_var(key)
for val in values:
self.command_env_var[key].append(str(val))
return self
def remove_env_var(self, key):
if key in self.command_env_var:
del self.command_env_var[key]
return self
def get_parameter_value(self, param_name):
if param_name in self.command_param.keys():
return self.command_param[param_name]
return None
def __str__(self):
command = ''
for key, value in self.command_env_var.items():
command += f'{key}{self.env_var_value_prefix}{",".join(value)}' \
f'{self.env_var_separator}'
command += self.command_name
for key, value in self.command_param.items():
command += f'{self.param_separator}{self.param_name_prefix}' \
f'{key}{self.param_value_prefix}{",".join(value)}'
for flag in self.command_flags:
command += f'{self.param_separator}{self.param_name_prefix}{flag}'
return command
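
A sketch of how the builder above renders a command; with the default empty parameter prefix and '=' value prefix it produces plain key=value arguments:

cmd = LinuxCommand(TestRun.executor, "dd")
cmd.set_param("if", "/dev/zero").set_param("of", "/tmp/out").set_param("bs", "4k")
print(str(cmd))   # -> dd if=/dev/zero of=/tmp/out bs=4k
cmd.run()         # executes through the supplied executor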

462
test_utils/os_utils.py Normal file

@@ -0,0 +1,462 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import math
import posixpath
import re
import time
from datetime import timedelta, datetime
from aenum import IntFlag, Enum, IntEnum
from packaging import version
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.dd import Dd
from test_tools.disk_utils import get_sysfs_path
from test_tools.fs_utils import check_if_directory_exists, create_directory, check_if_file_exists
from test_utils.filesystem.file import File
from test_utils.output import CmdException
from test_utils.retry import Retry
from test_utils.size import Size, Unit
DEBUGFS_MOUNT_POINT = "/sys/kernel/debug"
MEMORY_MOUNT_POINT = "/mnt/memspace"
class DropCachesMode(IntFlag):
PAGECACHE = 1
SLAB = 2
ALL = PAGECACHE | SLAB
class OvercommitMemoryMode(Enum):
DEFAULT = 0
ALWAYS = 1
NEVER = 2
class Runlevel(IntEnum):
"""
Halt the system.
SysV Runlevel: 0
systemd Target: runlevel0.target, poweroff.target
"""
runlevel0 = 0
poweroff = runlevel0
"""
Single user mode.
SysV Runlevel: 1, s, single
systemd Target: runlevel1.target, rescue.target
"""
runlevel1 = 1
rescue = runlevel1
"""
User-defined/Site-specific runlevels. By default, identical to 3.
SysV Runlevel: 2, 4
systemd Target: runlevel2.target, runlevel4.target, multi-user.target
"""
runlevel2 = 2
"""
Multi-user, non-graphical. Users can usually login via multiple consoles or via the network.
SysV Runlevel: 3
systemd Target: runlevel3.target, multi-user.target
"""
runlevel3 = 3
multi_user = runlevel3
"""
Multi-user, graphical. Usually has all the services of runlevel 3 plus a graphical login.
SysV Runlevel: 5
systemd Target: runlevel5.target, graphical.target
"""
runlevel5 = 5
graphical = runlevel5
"""
Reboot
SysV Runlevel: 6
systemd Target: runlevel6.target, reboot.target
"""
runlevel6 = 6
reboot = runlevel6
"""
Emergency shell
SysV Runlevel: emergency
systemd Target: emergency.target
"""
runlevel7 = 7
emergency = runlevel7
class SystemManagerType(Enum):
sysv = 0
systemd = 1
def get_system_manager():
output = TestRun.executor.run_expect_success("ps -p 1").stdout
type = output.split('\n')[1].split()[3]
if type == "init":
return SystemManagerType.sysv
elif type == "systemd":
return SystemManagerType.systemd
raise Exception(f"Unknown system manager type ({type}).")
def change_runlevel(runlevel: Runlevel):
if runlevel == get_runlevel():
return
if Runlevel.runlevel0 < runlevel < Runlevel.runlevel6:
system_manager = get_system_manager()
if system_manager == SystemManagerType.systemd:
TestRun.executor.run_expect_success(f"systemctl set-default {runlevel.name}.target")
else:
TestRun.executor.run_expect_success(
f"sed -i 's/^.*id:.*$/id:{runlevel.value}:initdefault: /' /etc/inittab")
TestRun.executor.run_expect_success(f"init {runlevel.value}")
def get_runlevel():
system_manager = get_system_manager()
if system_manager == SystemManagerType.systemd:
result = TestRun.executor.run_expect_success("systemctl get-default")
try:
name = result.stdout.split(".")[0].replace("-", "_")
return Runlevel[name]
except Exception:
raise Exception(f"Cannot parse '{result.stdout}' to runlevel.")
else:
result = TestRun.executor.run_expect_success("runlevel")
try:
split_output = result.stdout.split()
runlevel = Runlevel(int(split_output[1]))
return runlevel
except Exception:
raise Exception(f"Cannot parse '{result.stdout}' to runlevel.")
class Udev(object):
@staticmethod
def enable():
TestRun.LOGGER.info("Enabling udev")
TestRun.executor.run_expect_success("udevadm control --start-exec-queue")
@staticmethod
def disable():
TestRun.LOGGER.info("Disabling udev")
TestRun.executor.run_expect_success("udevadm control --stop-exec-queue")
@staticmethod
def trigger():
TestRun.executor.run_expect_success("udevadm trigger")
@staticmethod
def settle():
TestRun.executor.run_expect_success("udevadm settle")
def drop_caches(level: DropCachesMode = DropCachesMode.ALL):
TestRun.executor.run_expect_success(
f"echo {level.value} > /proc/sys/vm/drop_caches")
def disable_memory_affecting_functions():
"""Disables system functions affecting memory"""
# Don't allow sshd to be killed in case of out-of-memory:
TestRun.executor.run(
"echo '-1000' > /proc/`cat /var/run/sshd.pid`/oom_score_adj"
)
TestRun.executor.run(
"echo -17 > /proc/`cat /var/run/sshd.pid`/oom_adj"
) # deprecated
TestRun.executor.run_expect_success(
f"echo {OvercommitMemoryMode.NEVER.value} > /proc/sys/vm/overcommit_memory"
)
TestRun.executor.run_expect_success("echo '100' > /proc/sys/vm/overcommit_ratio")
TestRun.executor.run_expect_success(
"echo '64 64 32' > /proc/sys/vm/lowmem_reserve_ratio"
)
TestRun.executor.run_expect_success("swapoff --all")
drop_caches(DropCachesMode.SLAB)
def defaultize_memory_affecting_functions():
"""Sets default values to system functions affecting memory"""
TestRun.executor.run_expect_success(
f"echo {OvercommitMemoryMode.DEFAULT.value} > /proc/sys/vm/overcommit_memory"
)
TestRun.executor.run_expect_success("echo 50 > /proc/sys/vm/overcommit_ratio")
TestRun.executor.run_expect_success(
"echo '256 256 32' > /proc/sys/vm/lowmem_reserve_ratio"
)
TestRun.executor.run_expect_success("swapon --all")
def get_free_memory():
"""Returns free amount of memory in bytes"""
output = TestRun.executor.run_expect_success("free -b")
output = output.stdout.splitlines()
for line in output:
if 'free' in line:
index = line.split().index('free') + 1 # 1st row has 1 element less than following rows
if 'Mem' in line:
mem_line = line.split()
return Size(int(mem_line[index]))
def get_mem_available():
"""Returns amount of available memory from /proc/meminfo"""
cmd = "cat /proc/meminfo | grep MemAvailable | awk '{ print $2 }'"
mem_available = TestRun.executor.run(cmd).stdout
return Size(int(mem_available), Unit.KibiByte)
def get_module_mem_footprint(module_name):
"""Returns allocated size of specific module's metadata from /proc/vmallocinfo"""
cmd = f"cat /proc/vmallocinfo | grep {module_name} | awk '{{ print $2 }}' "
output_lines = TestRun.executor.run(cmd).stdout.splitlines()
memory_used = 0
for line in output_lines:
memory_used += int(line)
return Size(memory_used)
def allocate_memory(size: Size):
"""Allocates given amount of memory"""
mount_ramfs()
TestRun.LOGGER.info(f"Allocating {size.get_value(Unit.MiB):0.2f} MiB of memory.")
bs = Size(1, Unit.Blocks512)
dd = (
Dd()
.block_size(bs)
.count(math.ceil(size / bs))
.input("/dev/zero")
.output(f"{MEMORY_MOUNT_POINT}/data")
)
output = dd.run()
if output.exit_code != 0:
raise CmdException("Allocating memory failed.", output)
def get_number_of_processors_from_cpuinfo():
"""Returns number of processors (count) which are listed out in /proc/cpuinfo"""
cmd = f"cat /proc/cpuinfo | grep processor | wc -l"
output = TestRun.executor.run(cmd).stdout
return int(output)
def get_number_of_processes(process_name):
cmd = f"ps aux | grep {process_name} | grep -v grep | wc -l"
output = TestRun.executor.run(cmd).stdout
return int(output)
def mount_ramfs():
"""Mounts ramfs to enable allocating memory space"""
if not check_if_directory_exists(MEMORY_MOUNT_POINT):
create_directory(MEMORY_MOUNT_POINT)
if not is_mounted(MEMORY_MOUNT_POINT):
TestRun.executor.run_expect_success(f"mount -t ramfs ramfs {MEMORY_MOUNT_POINT}")
def unmount_ramfs():
"""Unmounts ramfs and releases whole space allocated by it in memory"""
TestRun.executor.run_expect_success(f"umount {MEMORY_MOUNT_POINT}")
def download_file(url, destination_dir="/tmp"):
# TODO use wget module instead
command = ("wget --tries=3 --timeout=5 --continue --quiet "
f"--directory-prefix={destination_dir} {url}")
TestRun.executor.run_expect_success(command)
path = f"{destination_dir.rstrip('/')}/{File.get_name(url)}"
return File(path)
def get_kernel_version():
version_string = TestRun.executor.run_expect_success("uname -r").stdout
version_string = version_string.split('-')[0]
return version.Version(version_string)
class ModuleRemoveMethod(Enum):
rmmod = "rmmod"
modprobe = "modprobe -r"
def is_kernel_module_loaded(module_name):
output = TestRun.executor.run(f"lsmod | grep ^{module_name}")
return output.exit_code == 0
def get_sys_block_path():
sys_block = "/sys/class/block"
if not check_if_directory_exists(sys_block):
sys_block = "/sys/block"
return sys_block
def load_kernel_module(module_name, module_args: dict = None):
cmd = f"modprobe {module_name}"
if module_args is not None:
for key, value in module_args.items():
cmd += f" {key}={value}"
return TestRun.executor.run(cmd)
def unload_kernel_module(module_name, unload_method: ModuleRemoveMethod = ModuleRemoveMethod.rmmod):
cmd = f"{unload_method.value} {module_name}"
return TestRun.executor.run_expect_success(cmd)
def get_kernel_module_parameter(module_name, parameter):
param_file_path = f"/sys/module/{module_name}/parameters/{parameter}"
if not check_if_file_exists(param_file_path):
raise FileNotFoundError(f"File {param_file_path} does not exist!")
return File(param_file_path).read()
def is_mounted(path: str):
if path is None or path.isspace():
raise Exception("Checked path cannot be empty")
command = f"mount | grep --fixed-strings '{path.rstrip('/')} '"
return TestRun.executor.run(command).exit_code == 0
def mount_debugfs():
if not is_mounted(DEBUGFS_MOUNT_POINT):
TestRun.executor.run_expect_success(f"mount -t debugfs none {DEBUGFS_MOUNT_POINT}")
def reload_kernel_module(module_name, module_args: dict = None,
unload_method: ModuleRemoveMethod = ModuleRemoveMethod.rmmod):
if is_kernel_module_loaded(module_name):
unload_kernel_module(module_name, unload_method)
Retry.run_while_false(
lambda: load_kernel_module(module_name, module_args).exit_code == 0,
timeout=timedelta(seconds=5)
)
def get_module_path(module_name):
cmd = f"modinfo {module_name}"
# module path is in second column of first line of `modinfo` output
module_info = TestRun.executor.run_expect_success(cmd).stdout
module_path = module_info.splitlines()[0].split()[1]
return module_path
def get_executable_path(exec_name):
cmd = f"which {exec_name}"
path = TestRun.executor.run_expect_success(cmd).stdout
return path
def get_udev_service_path(unit_name):
cmd = f"systemctl cat {unit_name}"
# path is in second column of first line of output
info = TestRun.executor.run_expect_success(cmd).stdout
path = info.splitlines()[0].split()[1]
return path
def kill_all_io():
# TERM signal should be used in preference to the KILL signal, since a
# process may install a handler for the TERM signal in order to perform
# clean-up steps before terminating in an orderly fashion.
TestRun.executor.run("killall -q --signal TERM dd fio blktrace")
time.sleep(3)
TestRun.executor.run("killall -q --signal KILL dd fio blktrace")
TestRun.executor.run("kill -9 `ps aux | grep -i vdbench.* | awk '{ print $2 }'`")
if TestRun.executor.run("pgrep -x dd").exit_code == 0:
raise Exception(f"Failed to stop dd!")
if TestRun.executor.run("pgrep -x fio").exit_code == 0:
raise Exception(f"Failed to stop fio!")
if TestRun.executor.run("pgrep -x blktrace").exit_code == 0:
raise Exception(f"Failed to stop blktrace!")
if TestRun.executor.run("pgrep vdbench").exit_code == 0:
raise Exception(f"Failed to stop vdbench!")
def wait(predicate, timeout: timedelta, interval: timedelta = None):
start_time = datetime.now()
result = False
while start_time + timeout > datetime.now():
result = predicate()
if result:
break
if interval is not None:
time.sleep(interval.total_seconds())
return result
def sync():
TestRun.executor.run_expect_success("sync")
def get_dut_cpu_number():
return int(TestRun.executor.run_expect_success("nproc").stdout)
def get_dut_cpu_physical_cores():
""" Get list of CPU numbers that don't share physical cores """
output = TestRun.executor.run_expect_success("lscpu --all --parse").stdout
core_list = []
visited_phys_cores = []
for line in output.split("\n"):
if "#" in line:
continue
cpu_no, phys_core_no = line.split(",")[:2]
if phys_core_no not in visited_phys_cores:
core_list.append(cpu_no)
visited_phys_cores.append(phys_core_no)
return core_list
def set_wbt_lat(device: Device, value: int):
if value < 0:
raise ValueError("Write back latency can't be a negative number")
wbt_lat_config_path = posixpath.join(
get_sysfs_path(device.get_device_id()), "queue/wbt_lat_usec"
)
return TestRun.executor.run_expect_success(f"echo {value} > {wbt_lat_config_path}")
def get_wbt_lat(device: Device):
wbt_lat_config_path = posixpath.join(
get_sysfs_path(device.get_device_id()), "queue/wbt_lat_usec"
)
return int(TestRun.executor.run_expect_success(f"cat {wbt_lat_config_path}").stdout)
def get_cores_ids_range(numa_node: int):
output = TestRun.executor.run_expect_success(f"lscpu --all --parse").stdout
parse_output = re.findall(r'(\d+),(\d+),(?:\d+),(\d+),,', output, re.I)
return [element[0] for element in parse_output if int(element[2]) == numa_node]
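
Two usage sketches for the helpers above, assuming a configured TestRun executor:

# wait up to 30 seconds for a kernel module to show up, polling once per second
loaded = wait(lambda: is_kernel_module_loaded("brd"),
              timeout=timedelta(seconds=30), interval=timedelta(seconds=1))
# drop page cache and slab objects before a cold-cache measurement
drop_caches(DropCachesMode.ALL)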

22
test_utils/output.py Normal file

@@ -0,0 +1,22 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class Output:
def __init__(self, output_out, output_err, return_code):
self.stdout = output_out.decode('utf-8', errors="ignore").rstrip() if \
type(output_out) == bytes else output_out
self.stderr = output_err.decode('utf-8', errors="ignore").rstrip() if \
type(output_err) == bytes else output_err
self.exit_code = return_code
def __str__(self):
return f"exit_code: {self.exit_code}\nstdout: {self.stdout}\nstderr: {self.stderr}"
class CmdException(Exception):
def __init__(self, message: str, output: Output):
super().__init__(f"{message}\n{str(output)}")
self.output = output
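
A minimal sketch of how the executor results above are typically consumed:

output = TestRun.executor.run("ls /nonexistent")
if output.exit_code != 0:
    raise CmdException("Listing failed", output)  # message plus full stdout/stderr/exit code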

57
test_utils/retry.py Normal file

@@ -0,0 +1,57 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import datetime, timedelta
from functools import partial
from core.test_run import TestRun
class Retry:
"""
The Retry class implements methods designed to retry execution until a desired result is reached.
The func parameter is meant to be a callable. If it needs args/kwargs, they should be
bound to it beforehand, e.g. using functools.partial (an example of this is contained
within run_command_until_success()).
"""
@classmethod
def run_command_until_success(
cls, command: str, retries: int = None, timeout: timedelta = None
):
# encapsulate method and args/kwargs as a partial function
func = partial(TestRun.executor.run_expect_success, command)
return cls.run_while_exception(func, retries=retries, timeout=timeout)
@classmethod
def run_while_exception(cls, func, retries: int = None, timeout: timedelta = None):
result = None
def wrapped_func():
nonlocal result
try:
result = func()
return True
except Exception:
return False
cls.run_while_false(wrapped_func, retries=retries, timeout=timeout)
return result
@classmethod
def run_while_false(cls, func, retries: int = None, timeout: timedelta = None):
if retries is None and timeout is None:
raise AttributeError("At least one stop condition is required for Retry calls!")
start = datetime.now()
retry_calls = 0
result = func()
while not result:
result = func()
retry_calls += 1
if result \
or (timeout is not None and datetime.now() - start > timeout) \
or (retries is not None and retry_calls == retries):
break
return result
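
Two usage sketches; the command and the readiness check are illustrative:

# rerun the command until it succeeds or a minute passes
output = Retry.run_command_until_success("udevadm settle", timeout=timedelta(minutes=1))
# retry an arbitrary callable a bounded number of times
ready = Retry.run_while_false(
    lambda: TestRun.executor.run("test -e /tmp/ready").exit_code == 0, retries=10)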

77
test_utils/scsi_debug.py Normal file

@@ -0,0 +1,77 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from core.test_run import TestRun
syslog_path = "/var/log/messages"
class Logs:
last_read_line = 1
FLUSH = re.compile(r"scsi_debug:[\s\S]*cmd 35")
FUA = re.compile(r"scsi_debug:[\s\S]*cmd 2a 08")
@staticmethod
def check_syslog_for_signals():
Logs.check_syslog_for_flush()
Logs.check_syslog_for_fua()
@staticmethod
def check_syslog_for_flush():
"""Check syslog for FLUSH logs"""
log_lines = Logs._read_syslog(Logs.last_read_line)
flush_logs_counter = Logs._count_logs(log_lines, Logs.FLUSH)
log_type = "FLUSH"
Logs._validate_logs_amount(flush_logs_counter, log_type)
@staticmethod
def check_syslog_for_fua():
"""Check syslog for FUA logs"""
log_lines = Logs._read_syslog(Logs.last_read_line)
fua_logs_counter = Logs._count_logs(log_lines, Logs.FUA)
log_type = "FUA"
Logs._validate_logs_amount(fua_logs_counter, log_type)
@staticmethod
def _read_syslog(last_read_line: int):
"""Read recent lines in syslog, mark last line and return read lines as list."""
log_lines = TestRun.executor.run_expect_success(
f"tail -qn +{last_read_line} {syslog_path}"
).stdout.splitlines()
# mark last read line to continue next reading from here
Logs.last_read_line += len(log_lines)
return log_lines
@staticmethod
def _count_logs(log_lines: list, expected_log):
"""Count specified log in list and return its amount."""
logs_counter = 0
for line in log_lines:
is_log_in_line = expected_log.search(line)
if is_log_in_line is not None:
logs_counter += 1
return logs_counter
@staticmethod
def _validate_logs_amount(logs_counter: int, log_type: str):
"""Validate amount of logs and return"""
if logs_counter == 0:
if Logs._is_flush(log_type):
TestRun.LOGGER.error(f"{log_type} log did not occur")
else:
TestRun.LOGGER.warning(f"{log_type} log did not occur")
elif logs_counter == 1:
TestRun.LOGGER.warning(f"{log_type} log occurred only once.")
else:
TestRun.LOGGER.info(f"{log_type} log occurred {logs_counter} times.")
@staticmethod
def _is_flush(log_type: str):
return log_type == "FLUSH"

16
test_utils/singleton.py Normal file

@@ -0,0 +1,16 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class Singleton(type):
"""
Singleton class
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
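
A minimal sketch of the metaclass in use:

class Config(metaclass=Singleton):
    def __init__(self):
        self.values = {}

assert Config() is Config()  # every call returns the same instance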

211
test_utils/size.py Normal file

@@ -0,0 +1,211 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import enum
import math
from multimethod import multimethod
def parse_unit(str_unit: str):
for u in Unit:
if str_unit == u.name:
return u
if str_unit == "KiB":
return Unit.KibiByte
elif str_unit in ["4KiB blocks", "4KiB Blocks"]:
return Unit.Blocks4096
elif str_unit == "MiB":
return Unit.MebiByte
elif str_unit == "GiB":
return Unit.GibiByte
elif str_unit == "TiB":
return Unit.TebiByte
if str_unit == "B":
return Unit.Byte
elif str_unit == "KB":
return Unit.KiloByte
elif str_unit == "MB":
return Unit.MegaByte
elif str_unit == "GB":
return Unit.GigaByte
elif str_unit == "TB":
return Unit.TeraByte
raise ValueError(f"Unable to parse {str_unit}")
class Unit(enum.Enum):
Byte = 1
KiloByte = 1000
KibiByte = 1024
MegaByte = 1000 * KiloByte
MebiByte = 1024 * KibiByte
GigaByte = 1000 * MegaByte
GibiByte = 1024 * MebiByte
TeraByte = 1000 * GigaByte
TebiByte = 1024 * GibiByte
Blocks512 = 512
Blocks4096 = 4096
KiB = KibiByte
KB = KiloByte
MiB = MebiByte
MB = MegaByte
GiB = GibiByte
GB = GigaByte
TiB = TebiByte
TB = TeraByte
def get_value(self):
return self.value
def __str__(self):
return self.get_name()
def get_name(self):
return self.name
def get_short_name(self):
if self == Unit.Byte:
return "B"
elif self == Unit.KibiByte:
return "KiB"
elif self == Unit.KiloByte:
return "KB"
elif self == Unit.MebiByte:
return "MiB"
elif self == Unit.MegaByte:
return "MB"
elif self == Unit.GibiByte:
return "GiB"
elif self == Unit.GigaByte:
return "GB"
elif self == Unit.TebiByte:
return "TiB"
elif self == Unit.TeraByte:
return "TB"
raise ValueError(f"Unable to get short unit name for {self}.")
class UnitPerSecond:
def __init__(self, unit):
self.value = unit.get_value()
self.name = unit.name + "/s"
def get_value(self):
return self.value
class Size:
def __init__(self, value: float, unit: Unit = Unit.Byte):
if value < 0:
raise ValueError("Size has to be positive.")
self.value = value * unit.value
self.unit = unit
def __str__(self):
return f"{self.get_value(self.unit)} {self.unit}"
def __hash__(self):
return self.value.__hash__()
def __int__(self):
return int(self.get_value())
def __add__(self, other):
return Size(self.get_value() + other.get_value())
def __lt__(self, other):
return self.get_value() < other.get_value()
def __le__(self, other):
return self.get_value() <= other.get_value()
def __eq__(self, other):
return self.get_value() == other.get_value()
def __ne__(self, other):
return self.get_value() != other.get_value()
def __gt__(self, other):
return self.get_value() > other.get_value()
def __ge__(self, other):
return self.get_value() >= other.get_value()
def __radd__(self, other):
return Size(other + self.get_value())
def __sub__(self, other):
if self < other:
raise ValueError("Subtracted value is too big. Result size cannot be negative.")
return Size(self.get_value() - other.get_value())
@multimethod
def __mul__(self, other: int):
return Size(math.ceil(self.get_value() * other))
@multimethod
def __rmul__(self, other: int):
return Size(math.ceil(self.get_value() * other))
@multimethod
def __mul__(self, other: float):
return Size(math.ceil(self.get_value() * other))
@multimethod
def __rmul__(self, other: float):
return Size(math.ceil(self.get_value() * other))
@multimethod
def __truediv__(self, other):
if other.get_value() == 0:
raise ValueError("Divisor must not be equal to 0.")
return self.get_value() / other.get_value()
@multimethod
def __truediv__(self, other: int):
if other == 0:
raise ValueError("Divisor must not be equal to 0.")
return Size(math.ceil(self.get_value() / other))
def set_unit(self, new_unit: Unit):
new_size = Size(self.get_value(target_unit=new_unit), unit=new_unit)
if new_size != self:
raise ValueError(f"{new_unit} is not precise enough for {self}")
self.value = new_size.value
self.unit = new_size.unit
return self
def get_value(self, target_unit: Unit = Unit.Byte):
return self.value / target_unit.value
def is_zero(self):
if self.value == 0:
return True
else:
return False
def align_up(self, alignment):
if self == self.align_down(alignment):
return Size(int(self))
return Size(int(self.align_down(alignment)) + alignment)
def align_down(self, alignment):
if alignment <= 0:
raise ValueError("Alignment must be a positive value!")
if alignment & (alignment - 1):
raise ValueError("Alignment must be a power of two!")
return Size(int(self) & ~(alignment - 1))
@staticmethod
def zero():
return Size(0)
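
A few examples of the arithmetic the class above supports:

cache_line = Size(64, Unit.KibiByte)
print(cache_line.get_value(Unit.Byte))   # 65536.0
total = cache_line * 1024
print(total.get_value(Unit.MebiByte))    # 64.0
aligned = Size(3000).align_up(4096)      # round up to a 4 KiB boundary
print(int(aligned))                      # 4096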

25
test_utils/systemd.py Normal file

@@ -0,0 +1,25 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from pathlib import Path
from core.test_run import TestRun
systemd_service_directory = Path("/usr/lib/systemd/system/")
def enable_service(name):
TestRun.executor.run_expect_success(f"systemctl enable {name}")
def disable_service(name):
TestRun.executor.run_expect_success(f"systemctl disable {name}")
def reload_daemon():
TestRun.executor.run_expect_success("systemctl daemon-reload")
def restart_service(name):
TestRun.executor.run_expect_success(f"systemctl restart {name}")

14
test_utils/time.py Normal file

@@ -0,0 +1,14 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from attotime import attotimedelta
class Time(attotimedelta):
def total_microseconds(self):
return self.total_nanoseconds() / 1_000
def total_milliseconds(self):
return self.total_nanoseconds() / 1_000_000