Move test-framework to its own repository

Signed-off-by: Robert Baldyga <baldyga.r@gmail.com>
This commit is contained in:
Robert Baldyga 2023-05-01 18:55:34 +02:00
commit 40f08a369a
89 changed files with 9914 additions and 0 deletions

4
__init__.py Normal file
View File

@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

4
connection/__init__.py Normal file
View File

@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

View File

@ -0,0 +1,85 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import time
from datetime import timedelta
from core.test_run import TestRun
from test_utils.output import CmdException
class BaseExecutor:
    """Base class for command executors (local, SSH, dummy).

    Subclasses provide the two primitives _execute() and _rsync();
    every other operation is built on top of them.
    """

    def _execute(self, command, timeout):
        """Run `command` with `timeout` and return its Output. Must be overridden."""
        raise NotImplementedError()

    def _rsync(self, src, dst, delete, symlinks, checksum, exclude_list, timeout,
               dut_to_controller):
        """Copy `src` to `dst` with rsync; direction per dut_to_controller. Must be overridden."""
        raise NotImplementedError()

    def rsync_to(self, src, dst, delete=False, symlinks=False, checksum=False,
                 exclude_list=None, timeout: timedelta = timedelta(seconds=90)):
        """Copy files from the controller to the DUT.

        `exclude_list` defaults to None (meaning "no exclusions") instead of a
        mutable [] default, which would be shared between all calls.
        """
        return self._rsync(src, dst, delete, symlinks, checksum,
                           exclude_list if exclude_list is not None else [],
                           timeout, False)

    def rsync_from(self, src, dst, delete=False, symlinks=False, checksum=False,
                   exclude_list=None, timeout: timedelta = timedelta(seconds=90)):
        """Copy files from the DUT to the controller."""
        return self._rsync(src, dst, delete, symlinks, checksum,
                           exclude_list if exclude_list is not None else [],
                           timeout, True)

    def is_remote(self):
        """True when commands run on a remote machine (overridden by SshExecutor)."""
        return False

    def is_active(self):
        """True when the executor is ready to accept commands."""
        return True

    def wait_for_connection(self, timeout: timedelta = None):
        """Block until the executor is reachable; no-op for non-remote executors."""
        pass

    def run(self, command, timeout: timedelta = timedelta(minutes=30)):
        """Run `command` (prefixed with the DUT env, if set) and log command/output."""
        if TestRun.dut and TestRun.dut.env:
            command = f"{TestRun.dut.env} && {command}"
        command_id = TestRun.LOGGER.get_new_command_id()
        # tag the command log entry with the DUT address only in multi-DUT runs
        ip_info = TestRun.dut.ip if len(TestRun.duts) > 1 else ""
        TestRun.LOGGER.write_command_to_command_log(command, command_id, info=ip_info)
        output = self._execute(command, timeout)
        TestRun.LOGGER.write_output_to_command_log(output, command_id)
        return output

    def run_in_background(self,
                          command,
                          stdout_redirect_path="/dev/null",
                          stderr_redirect_path="/dev/null"):
        """Start `command` detached via the shell; return its PID (or None if run() gave None)."""
        command += f" > {stdout_redirect_path} 2> {stderr_redirect_path} &echo $!"
        output = self.run(command)
        if output is not None:
            return int(output.stdout)

    def wait_cmd_finish(self, pid: int, timeout: timedelta = timedelta(minutes=30)):
        """Block until the process with `pid` exits (tail --pid trick)."""
        self.run(f"tail --pid={pid} -f /dev/null", timeout)

    def check_if_process_exists(self, pid: int):
        """Return True when a process with `pid` is currently running."""
        output = self.run(f"ps aux | awk '{{print $2 }}' | grep ^{pid}$", timedelta(seconds=10))
        return output.exit_code == 0

    def kill_process(self, pid: int):
        # TERM signal should be used in preference to the KILL signal, since a
        # process may install a handler for the TERM signal in order to perform
        # clean-up steps before terminating in an orderly fashion.
        self.run(f"kill -s SIGTERM {pid} &> /dev/null")
        time.sleep(3)
        self.run(f"kill -s SIGKILL {pid} &> /dev/null")

    def run_expect_success(self, command, timeout: timedelta = timedelta(minutes=30)):
        """Run `command`; raise CmdException unless it exits with code 0."""
        output = self.run(command, timeout)
        if output.exit_code != 0:
            raise CmdException(f"Exception occurred while trying to execute '{command}' command.",
                               output)
        return output

    def run_expect_fail(self, command, timeout: timedelta = timedelta(minutes=30)):
        """Run `command`; raise CmdException if it unexpectedly exits with code 0."""
        output = self.run(command, timeout)
        if output.exit_code == 0:
            raise CmdException(f"Command '{command}' executed properly but error was expected.",
                               output)
        return output

View File

@ -0,0 +1,15 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from connection.base_executor import BaseExecutor
class DummyExecutor(BaseExecutor):
    """No-op executor which only echoes what would have been done.

    Useful for dry runs and for debugging test scenarios without a DUT.
    """

    def _execute(self, command, timeout=None):
        # Print the command instead of executing it.
        print(command)

    def _rsync(self, src, dst, delete, symlinks, checksum, exclude_list, timeout,
               dut_to_controller):
        # Print the requested transfer instead of performing it.
        message = f'COPY FROM "{src}" TO "{dst}"'
        print(message)

View File

@ -0,0 +1,48 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import subprocess
from datetime import timedelta
from connection.base_executor import BaseExecutor
from test_utils.output import Output
class LocalExecutor(BaseExecutor):
    """Executor running commands on the controller itself through a local shell."""

    def _execute(self, command, timeout):
        """Run `command` in a shell; return Output(stdout, stderr, returncode)."""
        completed_process = subprocess.run(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout.total_seconds())
        return Output(completed_process.stdout,
                      completed_process.stderr,
                      completed_process.returncode)

    def _rsync(self, src, dst, delete=False, symlinks=False, checksum=False, exclude_list=None,
               timeout: timedelta = timedelta(seconds=90), dut_to_controller=False):
        """Copy files locally with rsync; `dut_to_controller` has no effect locally.

        `exclude_list` defaults to None ("no exclusions") instead of a mutable
        [] default shared between calls.
        """
        options = []
        if delete:
            options.append("--delete")
        if symlinks:
            options.append("--links")
        if checksum:
            options.append("--checksum")
        for exclude in exclude_list or []:
            options.append(f"--exclude {exclude}")
        completed_process = subprocess.run(
            f'rsync -r {src} {dst} {" ".join(options)}',
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout.total_seconds())
        if completed_process.returncode:
            raise Exception(f"rsync failed:\n{completed_process}")

142
connection/ssh_executor.py Normal file
View File

@ -0,0 +1,142 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import socket
import subprocess
import paramiko
from datetime import timedelta, datetime
from connection.base_executor import BaseExecutor
from core.test_run import TestRun
from test_utils.output import Output
class SshExecutor(BaseExecutor):
    """Executor running commands on a remote DUT over SSH (paramiko).

    File transfer is performed by a local `rsync` child process (over ssh)
    rather than paramiko's SFTP.
    """

    def __init__(self, ip, username, port=22):
        self.ip = ip
        self.user = username
        self.port = port
        self.ssh = paramiko.SSHClient()
        self._check_config_for_reboot_timeout()

    def __del__(self):
        self.ssh.close()

    def connect(self, user=None, port=None,
                timeout: timedelta = timedelta(seconds=30)):
        """Open the SSH session; relies on key-based authentication."""
        user = user or self.user
        port = port or self.port
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            self.ssh.connect(self.ip, username=user,
                             port=port, timeout=timeout.total_seconds(),
                             banner_timeout=timeout.total_seconds())
        except paramiko.AuthenticationException as e:
            raise paramiko.AuthenticationException(
                f"Authentication exception occurred while trying to connect to DUT. "
                f"Please check your SSH key-based authentication.\n{e}")
        except (paramiko.SSHException, socket.timeout) as e:
            raise ConnectionError(f"An exception of type '{type(e)}' occurred while trying to "
                                  f"connect to {self.ip}.\n {e}")

    def disconnect(self):
        """Close the SSH session."""
        try:
            self.ssh.close()
        except Exception:
            raise Exception(f"An exception occurred while trying to disconnect from {self.ip}")

    def _execute(self, command, timeout):
        """Run `command` remotely, blocking until exit; return its Output."""
        try:
            (stdin, stdout, stderr) = self.ssh.exec_command(command,
                                                            timeout=timeout.total_seconds())
        except paramiko.SSHException as e:
            raise ConnectionError(f"An exception occurred while executing command '{command}' on"
                                  f" {self.ip}\n{e}")
        return Output(stdout.read(), stderr.read(), stdout.channel.recv_exit_status())

    def _rsync(self, src, dst, delete=False, symlinks=False, checksum=False, exclude_list=None,
               timeout: timedelta = timedelta(seconds=90), dut_to_controller=False):
        """Copy files between controller and DUT with rsync-over-ssh.

        `exclude_list` defaults to None ("no exclusions") instead of a mutable
        [] default shared between calls.
        """
        options = []
        if delete:
            options.append("--delete")
        if symlinks:
            options.append("--links")
        if checksum:
            options.append("--checksum")
        for exclude in exclude_list or []:
            options.append(f"--exclude {exclude}")
        src_to_dst = f"{self.user}@{self.ip}:{src} {dst} " if dut_to_controller else\
            f"{src} {self.user}@{self.ip}:{dst} "
        try:
            completed_process = subprocess.run(
                f'rsync -r -e "ssh -p {self.port} '
                f'-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" '
                + src_to_dst + f'{" ".join(options)}',
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                timeout=timeout.total_seconds())
        except Exception as e:
            TestRun.LOGGER.exception(f"Exception occurred during rsync process. "
                                     f"Please check your SSH key-based authentication.\n{e}")
            # Re-raise: previously execution fell through to the returncode
            # check below and crashed with UnboundLocalError on
            # `completed_process`, masking the original error.
            raise
        if completed_process.returncode:
            raise Exception(f"rsync failed:\n{completed_process}")

    def is_remote(self):
        return True

    def _check_config_for_reboot_timeout(self):
        """Read optional 'reboot_timeout' (seconds) from the DUT config."""
        if "reboot_timeout" in TestRun.config.keys():
            self._parse_timeout_to_int()
        else:
            self.reboot_timeout = None

    def _parse_timeout_to_int(self):
        self.reboot_timeout = int(TestRun.config["reboot_timeout"])
        if self.reboot_timeout < 0:
            raise ValueError("Reboot timeout cannot be negative.")

    def reboot(self):
        """Reboot the DUT and wait until SSH drops and comes back up."""
        self.run("reboot")
        self.wait_for_connection_loss()
        if self.reboot_timeout is not None:
            self.wait_for_connection(timedelta(seconds=self.reboot_timeout))
        else:
            self.wait_for_connection()

    def is_active(self):
        """Cheap liveness probe: try to run an empty remote command."""
        try:
            self.ssh.exec_command('', timeout=5)
            return True
        except Exception:
            return False

    def wait_for_connection(self, timeout: timedelta = timedelta(minutes=10)):
        """Retry connect() until it succeeds or `timeout` elapses."""
        start_time = datetime.now()
        with TestRun.group("Waiting for DUT ssh connection"):
            while start_time + timeout > datetime.now():
                try:
                    self.connect()
                    return
                except paramiko.AuthenticationException:
                    # authentication problems will not fix themselves - fail fast
                    raise
                except Exception:
                    continue
            raise ConnectionError("Timeout occurred while trying to establish ssh connection")

    def wait_for_connection_loss(self, timeout: timedelta = timedelta(minutes=1)):
        """Poll until connecting fails, i.e. the DUT actually went down."""
        with TestRun.group("Waiting for DUT ssh connection loss"):
            end_time = datetime.now() + timeout
            while end_time > datetime.now():
                self.disconnect()
                try:
                    self.connect(timeout=timedelta(seconds=5))
                except Exception:
                    return
            raise ConnectionError("Timeout occurred before ssh connection loss")

0
core/__init__.py Normal file
View File

107
core/pair_testing.py Normal file
View File

@ -0,0 +1,107 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
# The MIT License (MIT)
#
# Copyright (c) 2004-2020 Holger Krekel and others
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from itertools import product, combinations
import random
from core.test_run import TestRun
def testcase_id(param_set):
if len(param_set.values) == 1:
return param_set.values[0]
return "-".join([str(value) for value in param_set.values])
def generate_pair_testing_testcases(*argvals):
    """
    Generate test_cases from provided argument values lists in such way that each possible
    (argX, argY) pair will be used.

    Yields lists of argument values (one per argument). With a single argument
    every value is yielded as a 1-tuple.
    """
    # if only one argument is used, yield from it
    if len(argvals) == 1:
        for val in argvals[0]:
            yield (val,)
        # no pairs can be formed from a single argument - nothing more to do
        # (previously execution fell through into the pair-generation code)
        return
    # Tag each value with its argument index to avoid confusion when two
    # arguments contain equal values. Build new lists instead of mutating
    # the caller's argument lists in place (the original rewrote argvals).
    indexed_argvals = [[(i, val) for val in arg] for i, arg in enumerate(argvals)]
    # generate all possible test cases
    all_test_cases = list(product(*indexed_argvals))
    random.seed(TestRun.random_seed)
    random.shuffle(all_test_cases)
    used_pairs = set()
    for tc in all_test_cases:
        current_pairs = set(combinations(tc, 2))
        # if cardinality of (current_pairs & used_pairs) is lesser than cardinality
        # of current_pairs it means not all argument pairs in this tc have been
        # used - yield current tc and update used_pairs set
        if len(current_pairs & used_pairs) != len(current_pairs):
            used_pairs.update(current_pairs)
            # unpack testcase by dropping the argument index tag
            yield [val for _, val in tc]
def register_testcases(metafunc, argnames, argvals):
    """
    Add custom parametrization test cases. Based on metafunc's parametrize method.
    """
    # NOTE(review): this reaches into pytest private API (CallSpec2, setmulti2,
    # _find_parametrized_scope, scope2index) and is therefore tied to the
    # pytest version in use - re-verify on any pytest upgrade.
    from _pytest.python import CallSpec2, _find_parametrized_scope
    from _pytest.mark import ParameterSet
    from _pytest.fixtures import scope2index
    # Wrap each pre-generated value tuple in a ParameterSet; ids are derived
    # below via testcase_id().
    parameter_sets = [ParameterSet(values=val, marks=[], id=None) for val in argvals]
    metafunc._validate_if_using_arg_names(argnames, False)
    arg_value_types = metafunc._resolve_arg_value_types(argnames, False)
    ids = [testcase_id(param_set) for param_set in parameter_sets]
    scope = _find_parametrized_scope(argnames, metafunc._arg2fixturedefs, False)
    scopenum = scope2index(scope, descr=f"parametrizex() call in {metafunc.function.__name__}")
    calls = []
    # Cross every already-registered call spec with every new parameter set,
    # mirroring what Metafunc.parametrize() does internally.
    for callspec in metafunc._calls or [CallSpec2(metafunc)]:
        for param_index, (param_id, param_set) in enumerate(zip(ids, parameter_sets)):
            newcallspec = callspec.copy()
            newcallspec.setmulti2(
                arg_value_types,
                argnames,
                param_set.values,
                param_id,
                param_set.marks,
                scopenum,
                param_index,
            )
            calls.append(newcallspec)
    metafunc._calls = calls

124
core/plugins.py Normal file
View File

@ -0,0 +1,124 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
import sys
import importlib
import signal
from core.test_run import TestRun
class PluginManager:
    """Loads, configures and drives the lifecycle of test-framework plugins.

    Plugins come from the DUT config ('req_plugins' / 'opt_plugins') and from
    'require_plugin' markers on the test item. Required plugins must import
    and initialize, otherwise the test is skipped; optional plugins fail soft
    with a debug log entry.
    """

    def __init__(self, item, config):
        # allow out-of-tree plugin packages to be imported
        if 'plugins_dir' in config:
            sys.path.append(config['plugins_dir'])
        self.plugins = {}
        self.plugins_config = config.get('plugins', {})
        self.req_plugins = config.get('req_plugins', {})
        self.opt_plugins = config.get('opt_plugins', {})
        # markers take the form require_plugin(name, **kwargs)
        self.req_plugins.update(dict(map(lambda mark: (mark.args[0], mark.kwargs),
                                         item.iter_markers(name="require_plugin"))))
        req_plugin_mod = {}
        opt_plugin_mod = {}
        for name in self.req_plugins:
            try:
                req_plugin_mod[name] = self.__import_plugin(name)
            except ModuleNotFoundError:
                pytest.skip("Unable to find requested plugin!")
        for name in self.opt_plugins:
            try:
                opt_plugin_mod[name] = self.__import_plugin(name)
            except ModuleNotFoundError as e:
                TestRun.LOGGER.debug(
                    f"Failed to import '{name}' - optional plugin. " f"Reason: {e}"
                )
                continue
        for name, mod in req_plugin_mod.items():
            try:
                self.plugins[name] = mod.plugin_class(
                    self.req_plugins[name],
                    self.plugins_config.get(name, {}).get("config", {}))
            except Exception:
                pytest.skip(f"Unable to initialize plugin '{name}'")
        for name, mod in opt_plugin_mod.items():
            try:
                self.plugins[name] = mod.plugin_class(
                    self.opt_plugins[name],
                    self.plugins_config.get(name, {}).get("config", {}))
            except Exception as e:
                TestRun.LOGGER.debug(
                    f"Failed to initialize '{name}' - optional plugin. " f"Reason: {e}"
                )
                continue

    def __import_plugin(self, name):
        """Import a plugin module: explicit 'provided_by' module if configured,
        then internal_plugins, then external_plugins."""
        provided_by = self.plugins_config.get(name, {}).get("provided_by")
        if provided_by:
            return importlib.import_module(provided_by)
        try:
            return importlib.import_module(f"internal_plugins.{name}")
        except ModuleNotFoundError:
            pass
        return importlib.import_module(f"external_plugins.{name}")

    def hook_pre_setup(self):
        """Run pre_setup() on every loaded plugin."""
        for plugin in self.plugins.values():
            plugin.pre_setup()

    def hook_post_setup(self):
        """Run post_setup() on every loaded plugin."""
        for plugin in self.plugins.values():
            plugin.post_setup()

    def hook_teardown(self):
        """Run teardown() on every loaded plugin."""
        for plugin in self.plugins.values():
            plugin.teardown()

    def get_plugin(self, name):
        """Return the loaded plugin instance named `name`; raise KeyError if absent."""
        if name not in self.plugins:
            raise KeyError("Requested plugin does not exist")
        return self.plugins[name]

    def teardown_on_signal(self, sig_id, plugin_name):
        """Install a handler for `sig_id` which tears the plugin down before
        delegating to whatever handler was previously installed."""
        try:
            plugin = self.get_plugin(plugin_name)
        except Exception as e:
            TestRun.LOGGER.warning(
                f"Failed to setup teardown on signal for {plugin_name}. Reason: {e}")
            return
        old_sig_handler = None

        def signal_handler(sig, frame):
            plugin.teardown()
            if old_sig_handler is not None:
                if old_sig_handler == signal.SIG_DFL:
                    # In case of SIG_DFL the function pointer points to address 0,
                    # which is not a valid address.
                    # We have to reset the handler and raise the signal again
                    signal.signal(sig, signal.SIG_DFL)
                    signal.raise_signal(sig)
                    signal.signal(sig, signal_handler)
                elif old_sig_handler == signal.SIG_IGN:
                    # SIG_IGN has value 1 (also an invalid address).
                    # Here we can just return (do nothing)
                    return
                else:
                    # A Python-level handler: call it with the standard
                    # (signum, frame) arguments (calling it with no arguments
                    # raised TypeError), then restore it as the handler.
                    old_sig_handler(sig, frame)
                    signal.signal(sig, old_sig_handler)

        old_sig_handler = signal.signal(sig_id, signal_handler)

65
core/test_run.py Normal file
View File

@ -0,0 +1,65 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from contextlib import contextmanager
import pytest
from log.logger import Log
class Blocked(Exception):
    """Raised to mark the current test as blocked rather than failed."""
class TestRun:
    """Static, globally shared test context.

    Class attributes are populated by the pytest hooks (attached in
    core/test_run_utils.py) and switched between devices with use_dut().
    """
    dut = None              # currently active DUT
    executor = None         # command executor of the active DUT
    LOGGER: Log = None      # framework logger instance
    plugin_manager = None   # PluginManager of the active DUT
    duts = None             # all DUTs available to the test
    disks = None            # disks bound to the active test (name -> disk)

    @classmethod
    @contextmanager
    def use_dut(cls, dut):
        """Make `dut` the active device for the duration of the with-block."""
        cls.dut = dut
        cls.config = cls.dut.config
        cls.executor = cls.dut.executor
        cls.plugin_manager = cls.dut.plugin_manager
        cls.disks = cls.dut.req_disks
        yield cls.executor
        cls.disks = None
        cls.plugin_manager = None
        cls.executor = None
        # setting cls.config to None omitted (causes problems in the teardown stage of execution)
        cls.dut = None

    @classmethod
    def step(cls, message):
        """Open a named log step (delegates to the logger)."""
        return cls.LOGGER.step(message)

    @classmethod
    def group(cls, message):
        """Open a named log group (delegates to the logger)."""
        return cls.LOGGER.group(message)

    @classmethod
    def iteration(cls, iterable, group_name=None):
        """Yield items from `iterable`, logging each one as a numbered iteration."""
        TestRun.LOGGER.start_group(f"{group_name}" if group_name is not None else "Iteration list")
        items = list(iterable)
        for i, item in enumerate(items, start=1):
            cls.LOGGER.start_iteration(f"Iteration {i}/{len(items)}")
            yield item
            TestRun.LOGGER.end_iteration()
        TestRun.LOGGER.end_group()

    @classmethod
    def fail(cls, message):
        """Fail the current test immediately."""
        pytest.fail(message)

    @classmethod
    def block(cls, message):
        """Mark the current test as blocked by raising Blocked."""
        raise Blocked(message)

272
core/test_run_utils.py Normal file
View File

@ -0,0 +1,272 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
import random
import sys
import traceback
import pytest
from IPy import IP
import core.test_run
from connection.local_executor import LocalExecutor
from connection.ssh_executor import SshExecutor
from core.pair_testing import generate_pair_testing_testcases, register_testcases
from core.plugins import PluginManager
from log.base_log import BaseLogResult
from storage_devices.disk import Disk
from test_utils import disk_finder
from test_utils.dut import Dut
TestRun = core.test_run.TestRun
@classmethod
def __configure(cls, config):
    """Pytest configure hook: register custom markers and seed the RNG."""
    marker_descriptions = (
        "require_disk(name, type): require disk of specific type, otherwise skip",
        "require_plugin(name, *kwargs): require specific plugins, otherwise skip",
        "remote_only: run test only in case of remote execution, otherwise skip",
        "os_dependent: run test only if its OS dependent, otherwise skip",
        "multidut(number): test requires a number of different platforms to be executed",
        "parametrizex(argname, argvalues): sparse parametrized testing",
        "CI: marks test for continuous integration pipeline",
    )
    for description in marker_descriptions:
        config.addinivalue_line("markers", description)
    # a fresh random seed is drawn unless one was passed on the command line
    cls.random_seed = config.getoption("--random-seed") or random.randrange(sys.maxsize)
    random.seed(cls.random_seed)


TestRun.configure = __configure
@classmethod
def __prepare(cls, item, config):
    """Store the test item and DUT config; collect require_disk markers."""
    if not config:
        raise Exception("You need to specify DUT config!")
    cls.item = item
    cls.config = config
    requested = [mark.args for mark in cls.item.iter_markers(name="require_disk")]
    cls.req_disks = dict(requested)
    # a duplicated disk name would silently collapse in the dict - reject it
    if len(requested) != len(cls.req_disks):
        raise Exception("Disk name specified more than once!")


TestRun.prepare = __prepare
@classmethod
def __attach_log(cls, log_path, target_name=None):
    """Register an extra log file to collect; target name defaults to its basename."""
    if target_name is None:
        target_name = posixpath.basename(log_path)
    extra = cls.config.get('extra_logs')
    if not extra:
        # first registration - create the mapping
        extra = {}
        cls.config["extra_logs"] = extra
    extra[target_name] = log_path


TestRun.attach_log = __attach_log
@classmethod
def __setup_disk(cls, disk_name, disk_type):
    """Bind the first free DUT disk matching `disk_type` to `disk_name`; skip if none."""
    matching = None
    for candidate in cls.dut.disks:
        taken = candidate in cls.disks.values()
        if candidate.disk_type in disk_type.types() and not taken:
            matching = candidate
            break
    cls.disks[disk_name] = matching
    if not cls.disks[disk_name]:
        pytest.skip("Unable to find requested disk!")


TestRun.__setup_disk = __setup_disk
@classmethod
def __setup_disks(cls):
    """Assign DUT disks to the names requested via require_disk markers.

    Some disk types depend on others being assigned first, so assignment
    runs in rounds: each round handles the currently resolvable entries
    (sorted by disk type) and repeats until nothing is left.
    """
    cls.disks = {}
    items = list(cls.req_disks.items())
    while items:
        resolved, unresolved = [], []
        for disk_name, disk_type in items:
            # index trick: 0 (resolved) when disk_type.resolved(), else 1 (unresolved)
            (resolved, unresolved)[not disk_type.resolved()].append((disk_name, disk_type))
        resolved.sort(
            key=lambda disk: (lambda disk_name, disk_type: disk_type)(*disk)
        )
        for disk_name, disk_type in resolved:
            cls.__setup_disk(disk_name, disk_type)
        items = unresolved
    # NOTE(review): if some disk type never resolves this loops forever -
    # TODO confirm resolved() is guaranteed to eventually become true.
    cls.dut.req_disks = cls.disks


TestRun.__setup_disks = __setup_disks
@classmethod
def __presetup(cls):
    """Initialize plugins and create the executor described by the DUT config."""
    cls.plugin_manager = PluginManager(cls.item, cls.config)
    cls.plugin_manager.hook_pre_setup()
    execution_type = cls.config['type']
    if execution_type == 'ssh':
        try:
            IP(cls.config['ip'])
        except ValueError:
            TestRun.block("IP address from config is in invalid format.")
        # TestRun.block() raises, so reaching past it implies a valid config
        if 'user' not in cls.config:
            TestRun.block("There is no user given in config.")
        cls.executor = SshExecutor(
            cls.config['ip'],
            cls.config['user'],
            cls.config.get('port', 22)
        )
    elif execution_type == 'local':
        cls.executor = LocalExecutor()
    else:
        TestRun.block("Execution type (local/ssh) is missing in DUT config!")


TestRun.presetup = __presetup
@classmethod
def __setup(cls):
    """Create the Dut object and bind requested disks; runs after presetup()."""
    if list(cls.item.iter_markers(name="remote_only")):
        if not cls.executor.is_remote():
            pytest.skip()
    Disk.plug_all_disks()
    if cls.config.get('allow_disk_autoselect', False):
        cls.config["disks"] = disk_finder.find_disks()
    try:
        cls.dut = Dut(cls.config)
    except Exception as ex:
        raise Exception(f"Failed to setup DUT instance:\n"
                        f"{str(ex)}\n{traceback.format_exc()}")
    cls.__setup_disks()
    # re-seed so every test starts from the same deterministic RNG state
    TestRun.LOGGER.info(f"Re-seeding random number generator with seed: {cls.random_seed}")
    random.seed(cls.random_seed)
    cls.plugin_manager.hook_post_setup()


TestRun.setup = __setup
@classmethod
def __makereport(cls, item, call, res):
    """Pytest makereport hook: mirror the report into the framework log.

    Stores a per-phase summary on the item as `rep_<when>` and forces a
    failed outcome when the framework log itself recorded a failure.
    """
    cls.outcome = res.outcome
    step_info = {
        'result': res.outcome,
        'exception': str(call.excinfo.value) if call.excinfo else None
    }
    setattr(item, "rep_" + res.when, step_info)
    # imported here to avoid a hard dependency at module import time
    from _pytest.outcomes import Failed
    from core.test_run import Blocked
    if res.when == "call" and res.failed:
        msg = f"{call.excinfo.type.__name__}: {call.excinfo.value}"
        # map the exception type onto the matching log severity
        if call.excinfo.type is Failed:
            cls.LOGGER.error(msg)
        elif call.excinfo.type is Blocked:
            cls.LOGGER.blocked(msg)
        else:
            cls.LOGGER.exception(msg)
    elif res.when == "setup" and res.failed:
        msg = f"{call.excinfo.type.__name__}: {call.excinfo.value}"
        cls.LOGGER.exception(msg)
        res.outcome = "failed"
    if res.outcome == "skipped":
        cls.LOGGER.skip("Test skipped.")
    # a test whose log recorded FAILED must not report as passed
    if res.when == "call" and cls.LOGGER.get_result() == BaseLogResult.FAILED:
        res.outcome = "failed"
    # To print additional message in final test report, assign it to res.longrepr
    cls.LOGGER.generate_summary(item, cls.config.get('meta'))


TestRun.makereport = __makereport
@classmethod
def __generate_tests(cls, metafunc):
    """Pytest generate_tests hook implementing the `parametrizex` marker.

    Depending on --parametrization-type, arguments are expanded either into
    the full cartesian product or into the reduced all-pairs set.
    """
    marks = getattr(metafunc.function, "pytestmark", [])
    parametrizex_marks = [
        mark for mark in marks if mark.name == "parametrizex"
    ]
    if not parametrizex_marks:
        # keep RNG state deterministic for non-parametrized tests too
        random.seed(TestRun.random_seed)
        return
    argnames = []
    argvals = []
    for mark in parametrizex_marks:
        argnames.append(mark.args[0])
        argvals.append(list(mark.args[1]))
    if metafunc.config.getoption("--parametrization-type") == "full":
        for name, values in zip(argnames, argvals):
            metafunc.parametrize(name, values)
    elif metafunc.config.getoption("--parametrization-type") == "pair":
        test_cases = generate_pair_testing_testcases(*argvals)
        register_testcases(metafunc, argnames, test_cases)
    else:
        raise Exception("Not supported parametrization type")
    random.seed(TestRun.random_seed)


TestRun.generate_tests = __generate_tests
@classmethod
def __addoption(cls, parser):
    """Pytest addoption hook: register framework command line options."""
    parser.addoption("--parametrization-type", choices=["pair", "full"], default="pair")
    # None means a fresh seed is drawn in the configure hook
    parser.addoption("--random-seed", type=int, default=None)


TestRun.addoption = __addoption
@classmethod
def __teardown(cls):
    """Run plugin teardown hooks on every DUT."""
    for dut in cls.duts:
        with cls.use_dut(dut):
            if not cls.plugin_manager:
                continue
            cls.plugin_manager.hook_teardown()


TestRun.teardown = __teardown

View File

@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

View File

@ -0,0 +1,22 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class ExamplePlugin:
    """Minimal reference plugin demonstrating the interface the framework expects."""

    def __init__(self, params, config):
        # `config` is accepted for interface compatibility; only params are kept
        self.params = params
        print(f"Example plugin initialized with params {self.params}")

    def pre_setup(self):
        print("Example plugin pre setup")

    def post_setup(self):
        print("Example plugin post setup")

    def teardown(self):
        print("Example plugin teardown")


# entry point looked up by the plugin manager
plugin_class = ExamplePlugin

View File

@ -0,0 +1,48 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import timedelta
from connection.local_executor import LocalExecutor
from connection.ssh_executor import SshExecutor
from core.test_run import TestRun
class PowerControlPlugin:
    """Plugin power-cycling the DUT through libvirt's `virsh reset`."""

    def __init__(self, params, config):
        print("Power Control LibVirt Plugin initialization")
        try:
            self.ip = config['ip']
            self.user = config['user']
        except Exception:
            raise Exception("Missing fields in config! ('ip' and 'user' required)")
        # Keep the whole config: pre_setup() and power_cycle() read
        # 'connection_type', 'port' and 'domain' from self.config (previously
        # the config was never stored, so pre_setup() crashed with
        # AttributeError).
        self.config = config

    def pre_setup(self):
        """Create the executor used to reach the libvirt host."""
        print("Power Control LibVirt Plugin pre setup")
        if self.config['connection_type'] == 'ssh':
            self.executor = SshExecutor(
                self.ip,
                self.user,
                self.config.get('port', 22)
            )
        else:
            self.executor = LocalExecutor()

    def post_setup(self):
        pass

    def teardown(self):
        pass

    def power_cycle(self):
        """Reset the DUT domain and wait for its SSH connection to come back."""
        self.executor.run(f"virsh reset {self.config['domain']}")
        TestRun.executor.wait_for_connection_loss()
        timeout = TestRun.config.get('reboot_timeout')
        if timeout:
            TestRun.executor.wait_for_connection(timedelta(seconds=int(timeout)))
        else:
            TestRun.executor.wait_for_connection()


# entry point looked up by the plugin manager
plugin_class = PowerControlPlugin

View File

@ -0,0 +1,39 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from time import sleep
from core.test_run_utils import TestRun
from storage_devices.device import Device
from test_utils import os_utils
from test_utils.output import CmdException
class ScsiDebug:
    """Plugin managing the Linux scsi_debug kernel module (fake SCSI devices)."""

    def __init__(self, params, config):
        # `params` are forwarded verbatim when loading the module; `config` unused
        self.params = params
        self.module_name = "scsi_debug"

    def pre_setup(self):
        pass

    def post_setup(self):
        self.reload()

    def reload(self):
        """Unload the module if present, load it with params, rescan devices."""
        self.teardown()
        sleep(1)
        load_output = os_utils.load_kernel_module(self.module_name, self.params)
        if load_output.exit_code != 0:
            raise CmdException(f"Failed to load {self.module_name} module", load_output)
        TestRun.LOGGER.info(f"{self.module_name} loaded successfully.")
        # give the kernel time to create the fake devices before scanning
        sleep(10)
        TestRun.scsi_debug_devices = Device.get_scsi_debug_devices()

    def teardown(self):
        # best effort: only unload when currently loaded
        if os_utils.is_kernel_module_loaded(self.module_name):
            os_utils.unload_kernel_module(self.module_name)


plugin_class = ScsiDebug

View File

@ -0,0 +1,97 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import time
import posixpath
from datetime import timedelta
from core.test_run import TestRun
from test_tools import fs_utils
class Vdbench:
    """Plugin deploying and driving the Vdbench I/O workload generator on the DUT."""

    def __init__(self, params, config):
        print("VDBench plugin initialization")
        # default run time; overwritten by create_config()
        self.run_time = timedelta(seconds=60)
        try:
            self.working_dir = config["working_dir"]
            self.reinstall = config["reinstall"]
            self.source_dir = config["source_dir"]
        except Exception:
            raise Exception("Missing fields in config! ('working_dir', 'source_dir' and "
                            "'reinstall' required)")
        self.result_dir = posixpath.join(self.working_dir, 'result.tod')

    def pre_setup(self):
        pass

    def post_setup(self):
        """Install vdbench into working_dir (skipped when present and reinstall is off)."""
        print("VDBench plugin post setup")
        if not self.reinstall and fs_utils.check_if_directory_exists(self.working_dir):
            return
        if fs_utils.check_if_directory_exists(self.working_dir):
            fs_utils.remove(self.working_dir, True, True)
        fs_utils.create_directory(self.working_dir)
        TestRun.LOGGER.info("Copying vdbench to working dir.")
        fs_utils.copy(posixpath.join(self.source_dir, "*"), self.working_dir,
                      True, True)
        pass

    def teardown(self):
        pass

    def create_config(self, config, run_time: timedelta):
        """Append the elapsed run time to `config` and write it as param.ini."""
        self.run_time = run_time
        if config[-1] != ",":
            config += ","
        config += f"elapsed={int(run_time.total_seconds())}"
        TestRun.LOGGER.info(f"Vdbench config:\n{config}")
        fs_utils.write_file(posixpath.join(self.working_dir, "param.ini"), config)

    def run(self):
        """Start vdbench in a detached screen session and poll until it exits.

        Returns analyze_log()'s verdict, or False on timeout (1.5x run_time).
        """
        cmd = f"{posixpath.join(self.working_dir, 'vdbench')} " \
              f"-f {posixpath.join(self.working_dir, 'param.ini')} " \
              f"-vr -o {self.result_dir}"
        full_cmd = f"screen -dmS vdbench {cmd}"
        TestRun.executor.run(full_cmd)
        start_time = time.time()
        timeout = self.run_time * 1.5
        while True:
            # vdbench finished when its process no longer shows up in ps
            if not TestRun.executor.run(f"ps aux | grep '{cmd}' | grep -v grep").exit_code == 0:
                return self.analyze_log()
            if time.time() - start_time > timeout.total_seconds():
                TestRun.LOGGER.error("Vdbench timeout.")
                return False
            time.sleep(1)

    def analyze_log(self):
        """Inspect the newest result directory's logfile.html for the verdict."""
        # drop the trailing 'tod' from result.tod and take the newest match
        output = TestRun.executor.run(
            f"ls -1td {self.result_dir[0:len(self.result_dir) - 3]}* | head -1")
        log_path = posixpath.join(output.stdout if output.exit_code == 0 else self.result_dir,
                                  "logfile.html")
        log_file = fs_utils.read_file(log_path)
        if "Vdbench execution completed successfully" in log_file:
            TestRun.LOGGER.info("Vdbench execution completed successfully.")
            return True
        if "Data Validation error" in log_file or "data_errors=1" in log_file:
            TestRun.LOGGER.error("Data corruption occurred!")
        elif "Heartbeat monitor:" in log_file:
            TestRun.LOGGER.error("Vdbench: heartbeat.")
        else:
            TestRun.LOGGER.error("Vdbench unknown result.")
        return False


plugin_class = Vdbench

0
log/__init__.py Normal file
View File

78
log/base_log.py Normal file
View File

@ -0,0 +1,78 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from enum import Enum
from re import sub
class BaseLogResult(Enum):
    """Log result severity; a higher value is a more severe outcome.

    BaseLog only ever escalates towards higher values, so the numeric
    ordering below is meaningful.
    """
    DEBUG = 10
    PASSED = 11
    WORKAROUND = 12
    WARNING = 13
    SKIPPED = 14
    FAILED = 15
    EXCEPTION = 16
    BLOCKED = 17
    CRITICAL = 18
def escape(msg):
    """Return `msg` with characters outside the XML 1.0 valid range removed."""
    pattern = u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+'
    return sub(pattern, '', msg)
class BaseLog():
    """Log primitive usable as a context manager.

    It remembers the most severe result reported to it; the stored result
    only ever escalates (see BaseLogResult value ordering).
    """

    def __init__(self, begin_message=None):
        self.__begin_msg = begin_message
        self.__result = BaseLogResult.PASSED

    def __enter__(self):
        message = self.__begin_msg if self.__begin_msg is not None else "Start BaseLog ..."
        self.begin(message)

    def __exit__(self, *args):
        self.end()

    def __escalate(self, candidate):
        # keep the worst (highest-valued) result seen so far
        if candidate.value > self.__result.value:
            self.__result = candidate

    def begin(self, message):
        pass

    def debug(self, message):
        pass

    def info(self, message):
        pass

    def workaround(self, message):
        self.__escalate(BaseLogResult.WORKAROUND)

    def warning(self, message):
        self.__escalate(BaseLogResult.WARNING)

    def skip(self, message):
        self.__escalate(BaseLogResult.SKIPPED)

    def error(self, message):
        self.__escalate(BaseLogResult.FAILED)

    def blocked(self, message):
        self.__escalate(BaseLogResult.BLOCKED)

    def exception(self, message):
        self.__escalate(BaseLogResult.EXCEPTION)

    def critical(self, message):
        self.__escalate(BaseLogResult.CRITICAL)

    def end(self):
        return self.__result

    def get_result(self):
        return self.__result

0
log/group/__init__.py Normal file
View File

View File

@ -0,0 +1,43 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLogResult, BaseLog
from log.group.html_group_log import HtmlGroupLog
from datetime import datetime
class HtmlChapterGroupLog(HtmlGroupLog):
    """HTML log group representing a chapter; propagates child results upward."""

    # Maps a result onto the BaseLog method which records it, so a child
    # group's result can be replayed onto this group.
    SET_RESULT = {
        BaseLogResult.PASSED: BaseLog.info,
        BaseLogResult.WORKAROUND: BaseLog.workaround,
        BaseLogResult.WARNING: BaseLog.warning,
        BaseLogResult.SKIPPED: BaseLog.skip,
        BaseLogResult.FAILED: BaseLog.error,
        BaseLogResult.BLOCKED: BaseLog.blocked,
        BaseLogResult.EXCEPTION: BaseLog.exception,
        BaseLogResult.CRITICAL: BaseLog.critical}

    def __init__(self, html_base, cfg, begin_msg=None, id='ch0'):
        super().__init__(HtmlChapterGroupLog._factory, html_base, cfg, begin_msg, id)

    @staticmethod
    def _factory(html_base, cfg, begin_msg, id):
        # used by HtmlGroupLog to spawn nested chapter groups
        return HtmlChapterGroupLog(html_base, cfg, begin_msg, id)

    def end_dir_group(self, ref_group):
        """Close the current group and link its header to ref_group's container."""
        group = super().end_group()
        ref_container_id = ref_group._container.get('id')
        group._header.set('ondblclick', f"chapterClick('{ref_container_id}')")

    def set_result(self, result):
        """Record `result` here and propagate it down the successor chain."""
        if self._successor is not None:
            self._successor.set_result(result)
        HtmlChapterGroupLog.SET_RESULT[result](self, "set result")

    def end(self):
        """Close the group, rendering its wall-clock duration into the HTML."""
        result = super().end()
        exe_time = (datetime.now() - self._start_time).seconds
        self._cfg.group_chapter_end(exe_time, self._header, self._container, result)
        return result

139
log/group/html_group_log.py Normal file
View File

@ -0,0 +1,139 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import datetime
from log.base_log import BaseLog, BaseLogResult
class HtmlGroupLog(BaseLog):
    """Base class for nested HTML log groups.

    Instances form a chain: at most one open child group is kept in
    ``_successor`` and most calls are delegated to the innermost open
    group, which owns the HTML container messages are rendered into.
    """

    def __init__(self, constructor, html_base_element, cfg, begin_message, id_):
        """
        constructor -- factory creating child groups of the concrete subclass
        html_base_element -- parent HTML node this group renders into
        cfg -- HtmlLogConfig supplying per-message-type presentation policies
        begin_message -- message shown when the group is opened
        id_ -- unique id used to build HTML anchors for this group's steps
        """
        super().__init__(begin_message)
        self._successor = None
        self.__factory = constructor
        self.__log_main_store = html_base_element
        self._id = id_
        self._container = None
        self._header = None
        # Per-group counter used for step anchors and child group ids.
        self.__msg_idx = 0
        self._start_time = datetime.now()
        self._cfg = cfg
        self._header_msg_type = type(begin_message)

    def begin(self, message):
        # Render the header/container pair through the policy registered
        # for this message type, then record the message in the base class.
        policy = self._cfg.get_policy(type(message))
        self._header, self._container = policy.group_begin(self._id, message, self.__log_main_store)
        super().begin(message)

    def get_step_id(self):
        """Return a unique HTML anchor id for the next message of the
        innermost open group."""
        if self._successor is not None:
            return self._successor.get_step_id()
        else:
            return f'step.{self._id}.{self.__msg_idx}'

    def __add_test_step(self, message, result=BaseLogResult.PASSED):
        # Render one step into this group's container and advance the counter.
        policy = self._cfg.get_policy(type(message))
        policy.standard(self.get_step_id(), message, result, self._container)
        self.__msg_idx += 1

    def get_main_log_store(self):
        """Return the parent HTML node this group renders into."""
        return self.__log_main_store

    def start_group(self, message):
        """Open a nested group in the innermost open group and return it."""
        self._header_msg_type = type(message)
        if self._successor is not None:
            result = self._successor.start_group(message)
        else:
            # Child ids extend this group's id, keeping anchors unique.
            new_id = f"{self._id}.{self.__msg_idx}"
            self.__msg_idx += 1
            self._successor = self.__factory(self._container, self._cfg, message, new_id)
            self._successor.begin(message)
            result = self._successor
        return result

    def end_group(self):
        """Close the innermost open group and return it (self when there
        is no open child group)."""
        if self._successor is not None:
            if self._successor._successor is None:
                self._successor.end()
                result = self._successor
                self._successor = None
            else:
                result = self._successor.end_group()
        else:
            self.end()
            result = self
        return result

    def debug(self, message):
        # Delegate to the innermost open group, otherwise render locally.
        # Same pattern for all severity methods below.
        if self._successor is not None:
            self._successor.debug(message)
        else:
            self.__add_test_step(message, BaseLogResult.DEBUG)
        return super().debug(message)

    def info(self, message):
        if self._successor is not None:
            self._successor.info(message)
        else:
            self.__add_test_step(message)
        super().info(message)

    def workaround(self, message):
        if self._successor is not None:
            self._successor.workaround(message)
        else:
            self.__add_test_step(message, BaseLogResult.WORKAROUND)
        super().workaround(message)

    def warning(self, message):
        if self._successor is not None:
            self._successor.warning(message)
        else:
            self.__add_test_step(message, BaseLogResult.WARNING)
        super().warning(message)

    def skip(self, message):
        if self._successor is not None:
            self._successor.skip(message)
        else:
            self.__add_test_step(message, BaseLogResult.SKIPPED)
        super().skip(message)

    def error(self, message):
        if self._successor is not None:
            self._successor.error(message)
        else:
            self.__add_test_step(message, BaseLogResult.FAILED)
        super().error(message)

    def blocked(self, message):
        if self._successor is not None:
            self._successor.blocked(message)
        else:
            self.__add_test_step(message, BaseLogResult.BLOCKED)
        super().blocked(message)

    def critical(self, message):
        if self._successor is not None:
            self._successor.critical(message)
        else:
            self.__add_test_step(message, BaseLogResult.CRITICAL)
        super().critical(message)

    def exception(self, message):
        if self._successor is not None:
            self._successor.exception(message)
        else:
            self.__add_test_step(message, BaseLogResult.EXCEPTION)
        super().exception(message)

    def end(self):
        # Returns the aggregated result collected by BaseLog.
        return super().end()

    def get_current_group(self):
        """Return the innermost open group (self when no child is open)."""
        if self._successor is not None:
            result = self._successor.get_current_group()
        else:
            result = self
        return result

View File

@ -0,0 +1,20 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.group.html_group_log import HtmlGroupLog
class HtmlIterationGroupLog(HtmlGroupLog):
    """Group log rendered inside a single iteration's main HTML pane."""

    def __init__(self, html_base, cfg, begin_msg, id='itg0'):
        super().__init__(HtmlIterationGroupLog._factory, html_base, cfg, begin_msg, id)

    @staticmethod
    def _factory(html_base, cfg, begin_msg, id):
        # Factory passed to HtmlGroupLog so nested groups share this type.
        return HtmlIterationGroupLog(html_base, cfg, begin_msg, id)

    def end(self):
        """Close the group and let the config decorate its HTML nodes
        according to the aggregated result."""
        result = super().end()
        self._cfg.group_end(self._id, self._header, self._container, result)
        return result

102
log/html_file_item_log.py Normal file
View File

@ -0,0 +1,102 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_log import HtmlFileLog
from log.group.html_chapter_group_log import HtmlChapterGroupLog
from log.group.html_iteration_group_log import HtmlIterationGroupLog
from datetime import datetime
from lxml.etree import Element
class HtmlFileItemLog(HtmlFileLog):
    """HTML log file composed of a main message pane, a chapters pane and
    an error selector; base class for the setup log and iteration logs."""

    def __init__(self, html_file_path, test_title, cfg, iteration_title="Test summary"):
        super().__init__(html_file_path, test_title)
        root = self.get_root()
        # Main pane where log messages are rendered.
        self._log_items_store = root.xpath('/html/body')[0]
        self._idx = 0
        # 'Groups' section acting as a clickable table of contents.
        self._log_chapters_store = root.xpath('/html/body/section[@id="iteration-chapters"]')[0]
        # Chapter and main groups are kept in lockstep by start/end_group().
        self._chapter_group = HtmlChapterGroupLog(self._log_chapters_store, cfg, test_title)
        self._main_group = HtmlIterationGroupLog(self._log_items_store, cfg, test_title)
        self._start_time = datetime.now()
        iteration_title_node = root.xpath('/html/body/a/h1')[0]
        iteration_title_node.text = iteration_title
        self._config = cfg
        # <select> element listing errors for quick navigation.
        self._fail_container = root.xpath('/html/body/div/select[@id="error-list-selector"]')[0]

    def __add_error(self, msg_idx, msg, error_class):
        # Add an entry to the error selector pointing at the failing step.
        fail_element = Element('option', value=msg_idx)
        fail_element.set('class', error_class)
        fail_element.text = msg
        self._fail_container.append(fail_element)

    def start_iteration(self, message):
        super().begin(message)

    def get_result(self):
        """Aggregated result of the main message pane."""
        return self._main_group.get_result()

    def begin(self, message):
        # Open both panes so their group structures stay in sync.
        self._chapter_group.begin(message)
        self._main_group.begin(message)

    def debug(self, message):
        self._main_group.debug(message)

    def info(self, message):
        self._main_group.info(message)

    def workaround(self, message):
        self._main_group.workaround(message)

    def warning(self, message):
        self._main_group.warning(message)

    def skip(self, message):
        self._main_group.skip(message)

    def error(self, message):
        # Failures are also registered in the error selector, anchored to
        # the step id the message will be rendered under.
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "fail")
        self._main_group.error(message)

    def blocked(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "blocked")
        self._main_group.blocked(message)

    def exception(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "exception")
        self._main_group.exception(message)

    def critical(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "critical")
        self._main_group.critical(message)

    def start_group(self, message):
        self._chapter_group.start_group(message)
        self._main_group.start_group(message)

    def end_group(self):
        # Close the innermost group in both panes, copying the result from
        # the main pane into the chapter pane and cross-linking them.
        ref_group = self._main_group.get_current_group()
        self._chapter_group.set_result(ref_group.get_result())
        self._main_group.end_group()
        self._chapter_group.end_dir_group(ref_group)

    def end_all_groups(self):
        """Close every still-open nested group."""
        while self._main_group._successor is not None:
            self.end_group()

    def end(self):
        """Close all groups, stamp execution time and status, and serialize."""
        while self._main_group._successor is not None:
            self.end_group()
        # One more end_group() closes the top-level group pair itself.
        self.end_group()
        time_result = datetime.now() - self._start_time
        time_node = self.get_root().xpath('/html/body/div[@class="iteration-execution-time"]')[0]
        status_node = self.get_root().xpath('/html/body/div[@class="iteration-status"]')[0]
        self._config.end_iteration_func(
            time_node, status_node, time_result.total_seconds(), self.get_result())
        super().end()

29
log/html_file_log.py Normal file
View File

@ -0,0 +1,29 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLog
from lxml.html import fromstring
from lxml.html import tostring
class HtmlFileLog(BaseLog):
    """Log backed by a single HTML file parsed into an lxml tree.

    The template is read at construction time, mutated in memory and
    serialized back to the same path by end().
    """

    def __init__(self, file_path, title):
        super().__init__(title)
        self.__path = file_path
        # The templates declare <meta charset="UTF-8"/>, so read them
        # explicitly as UTF-8 instead of relying on the platform default
        # encoding (which breaks on e.g. Windows cp1252 locales).
        with open(file_path, encoding="utf-8") as file_stream:
            self.__root = fromstring(file_stream.read())
        node_list = self.__root.xpath('/html/head/title')
        node_list[0].text = title

    def get_path(self):
        """Return the path of the underlying HTML file."""
        return self.__path

    def get_root(self):
        """Return the lxml root element of the parsed document."""
        return self.__root

    def end(self):
        # tostring() returns bytes, hence the binary write mode.
        with open(self.__path, "wb") as file:
            x = tostring(self.__root)
            file.write(x)

13
log/html_iteration_log.py Normal file
View File

@ -0,0 +1,13 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_item_log import HtmlFileItemLog
class HtmlIterationLog(HtmlFileItemLog):
    """Per-iteration HTML log; each iteration gets its own file."""

    def __init__(self, test_title, iteration_title, config):
        # Flag used by the log manager to tell whether this iteration still
        # needs closing (e.g. when ending everything after a fatal error).
        self.iteration_closed: bool = False
        # Creating the iteration file advances the config's iteration id,
        # so it must happen before the base class opens the file.
        html_file = config.create_iteration_file()
        super().__init__(html_file, test_title, config, iteration_title)

204
log/html_log_config.py Normal file
View File

@ -0,0 +1,204 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
from os import path, environ, makedirs
from datetime import datetime
from shutil import copyfile
from lxml.etree import Element
from log.base_log import BaseLogResult
from log.presentation_policy import null_policy
def convert_seconds_to_str(time_in_sec):
    """Format a duration in seconds as 'HH:MM:SS [s]', prefixed with
    '<N>d ' when the duration is one day or longer.

    time_in_sec -- duration in seconds (fractions are truncated)
    """
    total = int(time_in_sec)
    # Bug fix: the original used a strict '> 86400' check, so exactly one
    # day was rendered as '00:00:00 [s]' with no day prefix.
    days, remainder = divmod(total, 86400)
    h, remainder = divmod(remainder, 3600)
    m, s = divmod(remainder, 60)
    time_msg = f"{h:02d}:{m:02d}:{s:02d} [s]"
    if days:
        time_msg = f"{days}d {time_msg}"
    return time_msg
class HtmlLogConfig:
    """Configuration and HTML-decoration backend for the HTML log.

    Owns the on-disk layout (main page, per-iteration files, dut_info
    directory), the mapping from log results to CSS classes, and the
    presentation policies used to render messages of particular types.
    """

    # CSS class applied for each aggregated log result.
    STYLE = {
        BaseLogResult.DEBUG: 'debug',
        BaseLogResult.PASSED: '',
        BaseLogResult.WORKAROUND: 'workaround',
        BaseLogResult.WARNING: 'warning',
        BaseLogResult.SKIPPED: 'skip',
        BaseLogResult.FAILED: 'fail',
        BaseLogResult.BLOCKED: 'blocked',
        BaseLogResult.CRITICAL: 'critical',
        BaseLogResult.EXCEPTION: 'exception'}
    __MAIN = 'main'
    __SETUP = 'setup'
    __T_ITERATION = 'iteration'
    __FRAMEWORK_T_FOLDER = 'template'
    MAIN = __MAIN + '.html'
    CSS = __MAIN + '.css'
    JS = __MAIN + '.js'
    ITERATION_FOLDER = 'iterations'
    SETUP = __SETUP + ".html"

    def iteration(self):
        """File name of the current iteration, e.g. 'iteration_001.html'."""
        return f'{HtmlLogConfig.__T_ITERATION}_{str(self._iteration_id).zfill(3)}.html'

    def __init__(self, base_dir=None, presentation_policy=null_policy):
        """
        base_dir -- root directory for logs; when None a platform-dependent
                    default is used
        presentation_policy -- policy used to render plain string messages
        """
        self._log_base_dir = base_dir
        if base_dir is None:
            if os.name == 'nt':
                self._log_base_dir = 'c:\\History'
            else:
                if environ["USER"] == 'root':
                    self._log_base_dir = '/root/history'
                else:
                    # NOTE(review): unlike the root branch this default has
                    # no 'history' subdirectory - confirm the asymmetry is
                    # intentional before changing it.
                    self._log_base_dir = f'/home/{environ["USER"]}'
        self._log_dir = None
        self._presentation_policy = {}
        self.register_presentation_policy(str, presentation_policy)
        self._iteration_id = 0

    def get_iteration_id(self):
        """Id of the most recently created iteration (0 before the first)."""
        return self._iteration_id

    def get_policy(self, type):
        # NOTE: parameter deliberately keeps its historical name 'type'
        # (shadows the builtin) to preserve keyword-compatibility.
        return self._presentation_policy[type]

    def get_policy_collection(self):
        """Yield every registered presentation policy."""
        for type, policy in self._presentation_policy.items():
            yield policy

    def register_presentation_policy(self, type, presentation_policy):
        """Associate a presentation policy with a message type."""
        self._presentation_policy[type] = presentation_policy

    def __find_template_file(self, name, relative_path=None):
        # Templates live next to this module, under the 'template' folder.
        base_dir = path.dirname(path.abspath(__file__))
        file_path = path.join(base_dir, HtmlLogConfig.__FRAMEWORK_T_FOLDER)
        if relative_path is not None:
            file_path = path.join(file_path, relative_path)
        file_path = path.join(file_path, name)
        if path.isfile(file_path):
            return file_path
        else:
            raise Exception(
                f"Unable to find file: {name} in location: {os.path.dirname(file_path)}")

    def __get_main_template_file_path(self):
        return self.__find_template_file(HtmlLogConfig.MAIN)

    def _get_setup_template_file_path(self):
        return self.__find_template_file(HtmlLogConfig.SETUP, HtmlLogConfig.ITERATION_FOLDER)

    def __get_iteration_template_path(self):
        return self.__find_template_file(HtmlLogConfig.__T_ITERATION + '.html',
                                         HtmlLogConfig.ITERATION_FOLDER)

    def create_html_test_log(self, test_title):
        """Create the timestamped log directory tree and copy the template
        assets into it; returns the new log directory path."""
        now = datetime.now()
        time_stamp = f"{now.year}_{str(now.month).zfill(2)}_{str(now.day).zfill(2)}_" \
                     f"{str(now.hour).zfill(2)}_{str(now.minute).zfill(2)}_{str(now.second).zfill(2)}"
        self._log_dir = path.join(self._log_base_dir, test_title, time_stamp)
        makedirs(self._log_dir)
        additional_location = path.join(self._log_dir, HtmlLogConfig.ITERATION_FOLDER)
        makedirs(additional_location)
        dut_info_folder = path.join(self._log_dir, 'dut_info')
        makedirs(dut_info_folder)
        main_html = self.__get_main_template_file_path()
        # Bug fix: deriving the asset paths with str.replace('html', ...)
        # corrupted the path whenever a parent directory happened to contain
        # the substring 'html'; swap only the extension instead.
        template_base = path.splitext(main_html)[0]
        main_css = template_base + '.css'
        main_js = template_base + '.js'
        copyfile(main_html, path.join(self._log_dir, HtmlLogConfig.MAIN))
        copyfile(main_css, path.join(self._log_dir, HtmlLogConfig.CSS))
        copyfile(main_js, path.join(self._log_dir, HtmlLogConfig.JS))
        copyfile(self._get_setup_template_file_path(), path.join(additional_location,
                                                                 HtmlLogConfig.SETUP))
        return self._log_dir

    def get_main_file_path(self):
        return path.join(self._log_dir, HtmlLogConfig.MAIN)

    def get_setup_file_path(self):
        return path.join(self._log_dir, HtmlLogConfig.ITERATION_FOLDER, HtmlLogConfig.SETUP)

    def create_iteration_file(self):
        """Create the next iteration file from the template and return its path."""
        self._iteration_id += 1
        template_file = self.__get_iteration_template_path()
        new_file_name = self.iteration()
        result = path.join(self._log_dir, HtmlLogConfig.ITERATION_FOLDER, new_file_name)
        copyfile(template_file, result)
        return result

    def end_iteration(self,
                      iteration_selector_div,
                      iteration_selector_select,
                      iteration_id,
                      iteration_result):
        """Add the finished iteration to both sidebar selectors, styled by result."""
        style = "iteration-selector"
        if iteration_result != BaseLogResult.PASSED:
            style = f'{style} {HtmlLogConfig.STYLE[iteration_result]}'
        # Wrap the selector row every 8 iterations.
        if iteration_id and iteration_id % 8 == 0:
            new_element = Element("br")
            iteration_selector_div[0].append(new_element)
        new_element = Element("a")
        new_element.set('class', style)
        new_element.set('onclick', f"selectIteration('{iteration_id}')")
        new_element.text = str(iteration_id)
        iteration_selector_div[0].append(new_element)
        new_element = Element('option', value=f"{iteration_id}")
        new_element.text = 'iteration_' + str(iteration_id).zfill(3)
        if iteration_result != BaseLogResult.PASSED:
            new_element.set('class', HtmlLogConfig.STYLE[iteration_result])
        iteration_selector_select.append(new_element)

    def end_setup_iteration(self, iteration_selector_div, iteration_selector_select, log_result):
        """Style the setup entry in both selectors when setup did not pass."""
        if log_result != BaseLogResult.PASSED:
            a_element = iteration_selector_div[0]
            select_element = iteration_selector_select[0]
            a_element.set('class', f'iteration-selector {HtmlLogConfig.STYLE[log_result]}')
            select_element.set('class', HtmlLogConfig.STYLE[log_result])

    def end_iteration_func(self, time_node, status_node, time_in_sec, log_result):
        """Fill in the iteration's execution time and status nodes."""
        time_node.text = f"Execution time: {convert_seconds_to_str(time_in_sec)}"
        status_node.text = f"Iteration status: {log_result.name}"
        if log_result != BaseLogResult.PASSED:
            status_node.set('class', f'iteration-status {HtmlLogConfig.STYLE[log_result]}')

    def end_main_log(self, test_status_div, log_result):
        """Write the overall test status into the main page sidebar."""
        if log_result != BaseLogResult.PASSED:
            test_status_div[0].set('class',
                                   f"sidebar-test-status {HtmlLogConfig.STYLE[log_result]}")
        test_status_div[0].text = f"Test status: {log_result.name}"

    def group_end(self, msg_id, html_header, html_container, log_result):
        """Decorate a closed main-pane group: collapse passed groups and
        apply the result's CSS class otherwise."""
        html_header.set('onclick', f"showHide('ul_{msg_id}')")
        sub_element = Element('a', href="#top")
        sub_element.text = "[TOP]"
        sub_element.set('class', "top-time-marker")
        html_header.append(sub_element)
        div_style = 'test-group-step'
        ul_style = 'iteration-content'
        if log_result == BaseLogResult.PASSED:
            # Passed groups start collapsed to keep the report readable.
            html_container.set('style', "display: none;")
        else:
            div_style = f"{div_style} {HtmlLogConfig.STYLE[log_result]}"
            ul_style = f"{ul_style} {HtmlLogConfig.STYLE[log_result]}"
        html_header.set('class', div_style)
        html_container.set('class', ul_style)

    def group_chapter_end(self, time_in_sec, html_header, html_container, log_result):
        """Decorate a closed chapter-pane group with its execution time."""
        sub_element = Element('a')
        sub_element.text = convert_seconds_to_str(time_in_sec)
        sub_element.set('class', 'top-marker')
        html_header.append(sub_element)
        div_style = 'test-group-step'
        ul_style = 'iteration-content'
        if log_result != BaseLogResult.PASSED:
            div_style = f"{div_style} {HtmlLogConfig.STYLE[log_result]}"
            ul_style = f"{ul_style} {HtmlLogConfig.STYLE[log_result]}"
        html_header.set('class', div_style)
        html_container.set('class', ul_style)

126
log/html_log_manager.py Normal file
View File

@ -0,0 +1,126 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLog, escape
from log.html_iteration_log import HtmlIterationLog
from log.html_log_config import HtmlLogConfig
from log.html_main_log import HtmlMainLog
from log.html_setup_log import HtmlSetupLog
class HtmlLogManager(BaseLog):
    """Top-level HTML log: owns the main page, the setup log and one log
    per iteration, routing messages to the currently active sink."""

    def __init__(self, begin_message=None, log_config=None):
        super().__init__(begin_message)
        self._config = HtmlLogConfig() if log_config is None else log_config
        self._main = None
        self._log_setup = None
        self._log_iterations = []
        # Sink currently receiving messages (setup log or an iteration log).
        self._current_log = None
        self._files_path = None

    def __add(self, msg):
        # Placeholder hook mirroring every operation; intentionally a no-op.
        pass

    def begin(self, message):
        """Create the on-disk log structure and open the main and setup logs."""
        self._files_path = self._config.create_html_test_log(message)
        self._main = HtmlMainLog(message, self._config)
        self._log_setup = HtmlSetupLog(message, config=self._config)
        self._current_log = self._log_setup
        self._main.begin(message)
        self._current_log.begin(message)
        self.__add("begin: " + message)

    @property
    def base_dir(self):
        """Root directory of the generated HTML log."""
        return self._files_path

    def get_result(self):
        """Most severe result across setup and all iterations."""
        log_result = self._log_setup.get_result()
        for iteration in self._log_iterations:
            if log_result.value < iteration.get_result().value:
                log_result = iteration.get_result()
        return log_result

    def end(self):
        """Close the setup log and finalize the main page with the overall result."""
        self._log_setup.end()
        self._main.end_setup_iteration(self._log_setup.get_result())
        log_result = self.get_result()
        self._main.end(log_result)
        self.__add("end")

    def add_build_info(self, message):
        self._main.add_build_info(escape(message))

    def start_iteration(self, message):
        """Open a new iteration log and make it the current sink."""
        message = escape(message)
        self._log_iterations.append(HtmlIterationLog(message, message, self._config))
        self._main.start_iteration(self._config.get_iteration_id())
        self._current_log = self._log_iterations[-1]
        self._current_log.begin(message)
        self._log_setup.start_iteration(message)
        self.__add("start_iteration: " + message)

    def end_iteration(self):
        """Close the current iteration and fall back to the setup log."""
        self._current_log.end()
        self._main.end_iteration(self._current_log.get_result())
        self._log_setup.end_iteration(self._current_log.get_result())
        self._current_log.iteration_closed = True
        self._current_log = self._log_setup
        self.__add("end_iteration: ")
        return self._current_log

    def debug(self, message):
        self._current_log.debug(escape(message))
        self.__add("debug: " + message)

    def info(self, message):
        self._current_log.info(escape(message))
        self.__add("info: " + message)

    def workaround(self, message):
        self._current_log.workaround(escape(message))
        # Label fixed: previously logged with a bare ": " prefix.
        self.__add("workaround: " + message)

    def warning(self, message):
        self._current_log.warning(escape(message))
        # Label fixed: previously logged with a bare ": " prefix.
        self.__add("warning: " + message)

    def skip(self, message):
        self._current_log.skip(escape(message))
        # Label fixed: previously mislabeled as "warning: ".
        self.__add("skip: " + message)

    def error(self, message):
        self._current_log.error(escape(message))
        self.__add("error: " + message)

    def blocked(self, message):
        # Blocked/exception/critical messages abort the test flow, so all
        # open groups (and iterations) are closed immediately.
        self._current_log.blocked(escape(message))
        self.__add(f'blocked: {message}')
        self.end_all_groups()

    def exception(self, message):
        self._current_log.exception(escape(message))
        self.__add("exception: " + message)
        self.end_all_groups()

    def critical(self, message):
        self._current_log.critical(escape(message))
        self.__add("critical: " + message)
        self.end_all_groups()

    def start_group(self, message):
        self._current_log.start_group(escape(message))
        self.__add("start_group: " + message)

    def end_group(self):
        self._current_log.end_group()
        self.__add("end_group")

    def end_all_groups(self):
        """Close any still-open iterations, then all open groups."""
        for iteration in reversed(self._log_iterations):
            if not iteration.iteration_closed:
                self.end_iteration()
        self._current_log.end_all_groups()

53
log/html_main_log.py Normal file
View File

@ -0,0 +1,53 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_log import HtmlFileLog
from lxml.etree import Element
class HtmlMainLog(HtmlFileLog):
    """Main HTML page: sidebar with test title, build info, the iteration
    selectors and the overall test status."""

    def __init__(self, title, config):
        super().__init__(config.get_main_file_path(), title)
        self._config = config
        self.__current_iteration_id = None
        root = self.get_root()
        test_title_div = root.xpath('/html/body/div/div/div/div[@class="sidebar-test-title"]')[0]
        test_title_div.text = title
        self.__build_information_set = root.xpath(
            '/html/body/div/div/div/div[@id="sidebar-tested-build"]')[0]

    def add_build_info(self, message):
        """Append one build-information line to the sidebar."""
        build_info = Element("div")
        build_info.text = message
        self.__build_information_set.append(build_info)

    def start_iteration(self, iteration_id):
        # Remember the id; the selector entries are created in end_iteration().
        self.__current_iteration_id = iteration_id

    # Bug fix: a dead parameterless `def end_iteration(self): pass` used to
    # precede (and be shadowed by) the definition below - removed.
    def end_iteration(self, iteration_result):
        """Add the finished iteration to both sidebar selectors."""
        root = self.get_root()
        iteration_selector_div = root.xpath('/html/body/div/div/div[@id="iteration-selector"]')
        iteration_selector_select = root.xpath(
            '/html/body/div/div/select[@id="sidebar-iteration-list"]')[0]
        self._config.end_iteration(iteration_selector_div,
                                   iteration_selector_select,
                                   self.__current_iteration_id,
                                   iteration_result)

    def end_setup_iteration(self, result):
        """Style the setup entry in the sidebar selectors by its result."""
        root = self.get_root()
        iteration_selector_div = root.xpath('/html/body/div/div/div[@id="iteration-selector"]')[0]
        iteration_selector_select = root.xpath(
            '/html/body/div/div/select[@id="sidebar-iteration-list"]')[0]
        self._config.end_setup_iteration(iteration_selector_div, iteration_selector_select, result)

    def end(self, result):
        """Write the overall test status and serialize the page to disk."""
        root = self.get_root()
        test_status_div = root.xpath('/html/body/div/div/div/div[@class="sidebar-test-status"]')
        self._config.end_main_log(test_status_div, result)
        super().end()

View File

@ -0,0 +1,45 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLogResult
from lxml.etree import Element
from datetime import datetime
from log.presentation_policy import PresentationPolicy
from log.html_log_config import HtmlLogConfig
def std_log_entry(msg_id, msg, log_result, html_node):
    """Render one log step as an <li> holding a timestamp and the message."""
    css = 'test-step'
    if log_result != BaseLogResult.PASSED:
        css = f"{css} {HtmlLogConfig.STYLE[log_result]}"
    step = Element('li')
    step.set('class', css)
    # Timestamp cell; the anchor carries the step id so the error
    # selector can jump straight to this entry.
    stamp = Element('div')
    stamp.set('class', 'ts-time')
    anchor = Element('a', name=msg_id)
    anchor.text = datetime.now().strftime("%H:%M:%S")
    stamp.append(anchor)
    step.append(stamp)
    # Message cell.
    body = Element('div')
    body.set('class', 'ts-msg')
    body.text = msg
    step.append(body)
    html_node.append(step)
def group_log_begin(msg_id, msg, html_node):
    """Create the header <div> and content <ul> for a new log group and
    attach both to *html_node*; returns (header, content)."""
    header = Element("div")
    anchor = Element('a', name=msg_id)
    anchor.text = msg
    header.append(anchor)
    # The <ul> id is referenced by the showHide() JavaScript toggle.
    content = Element('ul', id=f'ul_{msg_id}')
    html_node.append(header)
    html_node.append(content)
    return header, content
# Default HTML presentation policy for plain string messages.
html_policy = PresentationPolicy(std_log_entry, group_log_begin)

34
log/html_setup_log.py Normal file
View File

@ -0,0 +1,34 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_item_log import HtmlFileItemLog
from log.base_log import BaseLogResult
class HtmlSetupLog(HtmlFileItemLog):
    """Setup/summary HTML log; records one summary line per iteration."""

    # Maps an iteration's result onto the logging method used to record
    # its summary line with matching severity.
    LOG_RESULT = {
        BaseLogResult.PASSED: HtmlFileItemLog.info,
        BaseLogResult.WORKAROUND: HtmlFileItemLog.workaround,
        BaseLogResult.WARNING: HtmlFileItemLog.warning,
        BaseLogResult.SKIPPED: HtmlFileItemLog.skip,
        BaseLogResult.FAILED: HtmlFileItemLog.error,
        BaseLogResult.BLOCKED: HtmlFileItemLog.blocked,
        BaseLogResult.EXCEPTION: HtmlFileItemLog.exception,
        BaseLogResult.CRITICAL: HtmlFileItemLog.critical}

    def __init__(self, test_title, config, iteration_title="Test summary"):
        html_file_path = config.get_setup_file_path()
        super().__init__(html_file_path, test_title, config, iteration_title)
        self._last_iteration_title = ''

    def start_iteration(self, message):
        # Remember the title; the summary line is written when it ends.
        self._last_iteration_title = message

    def end_iteration(self, iteration_result):
        # Record the finished iteration's title at its result's severity.
        HtmlSetupLog.LOG_RESULT[iteration_result](self, self._last_iteration_title)

    def end(self):
        super().end()

220
log/logger.py Normal file
View File

@ -0,0 +1,220 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
import os
import sys
from contextlib import contextmanager
from datetime import datetime
from threading import Lock
import portalocker
from log.html_log_config import HtmlLogConfig
from log.html_log_manager import HtmlLogManager
from log.html_presentation_policy import html_policy
from test_utils.output import Output
from test_utils.singleton import Singleton
def create_log(log_base_path, test_module, additional_args=None):
    """Initialize the root logger and open an HTML log for a test module.

    Returns the singleton Log instance with its HTML report already begun.
    """
    Log.setup()
    config = HtmlLogConfig(base_dir=log_base_path, presentation_policy=html_policy)
    log = Log(log_config=config)
    test_name = 'TestNameError'
    error_msg = None
    try:
        # Build the log title; the fallback name survives any failure here.
        test_name = test_module
        if additional_args:
            test_name += f"__{'_'.join(additional_args)}"
    except Exception as ex:
        error_msg = f'Detected some problems during calculating test name: {ex}'
    finally:
        # The report is opened regardless of naming problems.
        log.begin(test_name)
    print(f"\n<LogFile>{os.path.join(log.base_dir, 'main.html')}</LogFile>")
    if error_msg:
        log.exception(error_msg)
    return log
class Log(HtmlLogManager, metaclass=Singleton):
    """Singleton test logger: mirrors every message to the HTML report,
    the stdout logging handler and the per-test commands.log file."""

    logger = None
    LOG_FORMAT = '%(asctime)s %(levelname)s:\t%(message)s'
    DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
    # Shared counter handing out unique ids for logged commands.
    command_id = 0
    lock = Lock()

    @classmethod
    def destroy(cls):
        # Drop the singleton instance so the next Log(...) builds a new one.
        del cls._instances[cls]

    @classmethod
    def setup(cls):
        """Configure the root logger to stream DEBUG output to stdout."""
        # Get handle to root logger.
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        # Set paramiko log level to warning
        logging.getLogger('paramiko').setLevel(logging.WARNING)
        # Create Handlers.
        stdout_handler = logging.StreamHandler(sys.stdout)
        # Set logging level on handlers.
        stdout_handler.setLevel(logging.DEBUG)
        # Set log formatting on each handler.
        formatter = logging.Formatter(Log.LOG_FORMAT, Log.DATE_FORMAT)
        stdout_handler.setFormatter(formatter)
        # Attach handlers to root logger.
        logger.handlers = []
        logger.addHandler(stdout_handler)
        cls.logger = logger
        logger.info("Logger successfully initialized.")

    @contextmanager
    def step(self, message):
        """Mark a test step: log the banner and wrap the body in a group.

        NOTE(review): the group is not closed when the body raises;
        blocked()/exception()/critical() close all open groups instead -
        confirm before adding a try/finally here.
        """
        self.step_info(message)
        super(Log, self).start_group(message)
        if Log.logger:
            Log.logger.info(message)
        yield
        super(Log, self).end_group()

    @contextmanager
    def group(self, message):
        """Wrap the body in a log group, without the step banner."""
        self.start_group(message)
        yield
        self.end_group()

    def add_build_info(self, msg):
        super(Log, self).add_build_info(msg)
        if Log.logger:
            Log.logger.info(msg)

    def info(self, msg):
        super(Log, self).info(msg)
        if Log.logger:
            Log.logger.info(msg)

    def debug(self, msg):
        super(Log, self).debug(msg)
        if Log.logger:
            Log.logger.debug(msg)

    def error(self, msg):
        super(Log, self).error(msg)
        if Log.logger:
            Log.logger.error(msg)

    def blocked(self, msg):
        super(Log, self).blocked(msg)
        if Log.logger:
            Log.logger.fatal(msg)

    def exception(self, msg):
        super(Log, self).exception(msg)
        if Log.logger:
            Log.logger.exception(msg)

    def critical(self, msg):
        super(Log, self).critical(msg)
        if Log.logger:
            Log.logger.fatal(msg)

    def workaround(self, msg):
        super(Log, self).workaround(msg)
        if Log.logger:
            Log.logger.warning(msg)

    def warning(self, msg):
        super(Log, self).warning(msg)
        if Log.logger:
            Log.logger.warning(msg)

    def get_new_command_id(self):
        """Return a unique, monotonically increasing command id."""
        # Context-manager form guarantees the lock is released even if the
        # critical section raises (the bare acquire()/release() pair did not).
        with self.lock:
            command_id = self.command_id
            self.command_id += 1
        return command_id

    def write_to_command_log(self, message):
        """Append *message* with a timestamp to the shared commands.log."""
        super(Log, self).debug(message)
        command_log_path = os.path.join(self.base_dir, "dut_info", 'commands.log')
        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S:%f')
        # portalocker serializes concurrent writers to the shared file.
        with portalocker.Lock(command_log_path, "ab+") as command_log:
            line_to_write = f"[{timestamp}] {message}\n"
            command_log.write(line_to_write.encode())

    def write_command_to_command_log(self, command, command_id, info=None):
        """Record an executed command (with optional context info) in commands.log."""
        added_info = "" if info is None else f"[{info}] "
        self.write_to_command_log(f"{added_info}Command id: {command_id}\n{command}")

    def write_output_to_command_log(self, output: Output, command_id):
        """Record a command's exit code and streams in commands.log."""
        if output is not None:
            line_to_write = f"Command id: {command_id}\n\texit code: {output.exit_code}\n" \
                            f"\tstdout: {output.stdout}\n" \
                            f"\tstderr: {output.stderr}\n\n\n"
            self.write_to_command_log(line_to_write)
        else:
            self.write_to_command_log(f"Command id: {command_id}\n\tNone output.")

    def step_info(self, step_name):
        """Send a visually distinct step banner to the serial monitor (if
        present) and to the command log."""
        from core.test_run import TestRun
        decorator = "// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"
        message = f"\n\n\n{decorator}{step_name}\n\n{decorator}\n"
        try:
            serial_monitor = TestRun.plugin_manager.get_plugin("serial_monitor")
            serial_monitor.send_to_serial(message)
        except (KeyError, AttributeError):
            # The serial monitor plugin is optional - skip silently.
            pass
        self.write_to_command_log(message)

    def get_additional_logs(self):
        """Collect system logs (messages/syslog, dmesg, configured extras)
        from the DUT into this test's log directory."""
        from core.test_run import TestRun
        from test_tools.fs_utils import check_if_file_exists
        messages_log = "/var/log/messages"
        if not check_if_file_exists(messages_log):
            messages_log = "/var/log/syslog"
        log_files = {"messages.log": messages_log,
                     "dmesg.log": "/tmp/dmesg"}
        extra_logs = TestRun.config.get("extra_logs", {})
        log_files.update(extra_logs)
        TestRun.executor.run(f"dmesg > {log_files['dmesg.log']}")
        for log_name, log_source_path in log_files.items():
            try:
                # Fixed: "dut_info" was a pointless f-string literal.
                log_destination_path = os.path.join(
                    self.base_dir, "dut_info", TestRun.dut.ip, log_name
                )
                TestRun.executor.rsync_from(log_source_path, log_destination_path)
            except Exception as e:
                TestRun.LOGGER.warning(
                    f"There was a problem during gathering {log_name} log.\n{str(e)}"
                )

    def generate_summary(self, item, meta):
        """Write info.json describing the finished test for the test runner."""
        import json
        summary_path = os.path.join(self.base_dir, 'info.json')
        with open(summary_path, "w+") as summary:
            data = {
                'module': os.path.relpath(item.fspath, os.getcwd()),
                'function': item.name,
                'meta': meta,
                'status': self.get_result().name,
                'path': os.path.normpath(self.base_dir),
                'stage_status': {
                    'setup': getattr(item, "rep_setup", {}),
                    'call': getattr(item, "rep_call", {}),
                    'teardown': getattr(item, "rep_teardown", {})
                }
            }
            json.dump(data, summary)

View File

@ -0,0 +1,21 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class PresentationPolicy:
    """Bundle of rendering callbacks used by the HTML log.

    standard -- renders a single log entry
    group_begin -- renders the opening of a log group
    """

    def __init__(self, standard_log, group_begin_func):
        self.group_begin = group_begin_func
        self.standard = standard_log
def std_log_entry(msg_id, msg, log_result, html_node):
    """Null renderer for a standard log entry: emits nothing."""
def group_log_begin(msg_id, msg, html_node):
    """Null group renderer: reuse the parent node as both header and content."""
    return html_node, html_node
# Policy that renders nothing; used when no HTML output is desired.
null_policy = PresentationPolicy(std_log_entry, group_log_begin)

View File

@ -0,0 +1,35 @@
<!--
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
-->
<html>
<head>
<title>[title]</title>
<link rel="stylesheet" type="text/css" href="../main.css"/>
<script src="../main.js"></script>
<meta charset="UTF-8"/>
</head>
<body onload="onLoadDocument();">
<div class="floating">
<b>View: </b>
<select id="mode-selector" onchange="selectMode();">
<option style="background-color: white; color: black;" value="info">Info</option>
<option style="background-color: white; color: black" value="debug">Debug</option>
</select>
<b>Errors: </b>
<button onclick="previousError()"><</button>
<select id="error-list-selector" onchange="errorSelected('error-list-selector')">
<option value="top" class="empty">-empty-</option>
</select>
<button onclick="nextError()">></button>
</div>
<br/>
<a name="top"><h1 class="iteration-title" style="border-bottom: 4px solid rgba(255, 0, 0, 1)">[title]</h1></a>
<div class="iteration-status">Iteration status: [status]</div>
<div class="iteration-execution-time">Execution time: [time] [s]</div>
<section class="iteration-chapters" id="iteration-chapters">
<h2>Groups:</h2>
</section>
</body>
</html>

View File

@ -0,0 +1,37 @@
<!--
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
-->
<html>
<head>
<title>Setup</title>
<link rel="stylesheet" type="text/css" href="../main.css"/>
<meta charset="UTF-8"/>
<script src="../main.js"></script>
</head>
<body onload="onLoadDocument();">
<div class="floating">
<b>View: </b>
<select id="mode-selector" onchange="selectMode();">
<option style="background-color: white; color: black;" value="info">Info</option>
<option style="background-color: white; color: black" value="debug">Debug</option>
</select>
<b>Errors: </b>
<button onclick="previousError()">&lt;</button>
<select id="error-list-selector" onchange="errorSelected('error-list-selector')">
<option value="top">-empty-</option>
</select>
<button onclick="nextError()">&gt;</button>
</div>
<br/>
<a name="top">
<h1 class="iteration-title" style="border-bottom: 4px solid rgba(255, 0, 0, 1)">Test summary</h1>
</a>
<div class="iteration-status">Iteration status: [STATUS]</div>
<div class="iteration-execution-time">Execution time: [time] [s]</div>
<section class="iteration-chapters" id="iteration-chapters">
<h2>Groups:</h2>
</section>
</body>
</html>

383
log/template/main.css Normal file
View File

@ -0,0 +1,383 @@
/*
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
*/
html, body {
margin: 0;
padding: 0;
background-color: #F0F0F0;
font-family: Calibri;
color: black;
}
div { display: block; }
h2 { margin: 0; padding: 0; }
h4 { margin: 0; padding: 0; }
div.meta-container {
margin-left: 502px;
min-width: 500px;
height: 100vh;
}
div.main-layaut {
float: right;
width: 100%;
background-color: #FDFDFD;
height: 100vh;
overflow-y: scroll;
overflow-x: hidden;
}
div.sidebar {
float: left;
width: 500px;
height: 100vh;
margin-left: -502px;
border: 4px;
background-color: #F0F0F0;
overflow-x: hidden;
overflow-y: auto;
text-align: center;
color: white;
overflow-x: hidden;
overflow-y: hidden;
}
div.sidebar-hide {
padding: 3px;
height: 20px;
margin: 5px auto;
font-family: Consolas;
font-weight: normal;
font-size: 15px;
color: white;
text-shadow: 1px 1px 3px black;
background-color: rgb(40,80,180);
cursor: default;
border: 2px solid silver;
border-radius: 25px;
}
div.sidebar-show { color: black; height: 50%; }
div.sidebar-test { overflow-x: hidden; overflow-y: hidden;}
div.sidebar-test-title {
padding: 10px;
height: 40px;
margin: 5px auto;
background-color: rgb(40,80,180);
font-size: 100%;
border: 2px solid silver;
border-radius: 25px;
}
div.sidebar-test-status {
padding: 3px;
height: 20px;
background-color: green;
border: 2px solid silver;
border-radius: 25px;
}
div.sidebar-tested-build {
color: black;
border-radius: 25px;
width: 80%;
margin: 5px auto;
padding: 25px;
background-color: #F7F7F7;
border: 1px solid silver;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
text-align: left;
}
div.sidebar-test-iteration {
padding: 3px;
height: 20px;
margin: 5px auto;
font-family: Consolas;
font-weight: normal;
font-size: 15px;
color: white;
text-shadow: 1px 1px 3px black;
background-color: rgb(40,80,180);
cursor: default;
border: 2px solid silver;
border-radius: 25px;
}
.debug { display: none; }
select.sidebar-iteration-list {
margin: 5px auto;
background-color: white;
color: black;
width: 90%;
}
select.warning { background-color: #ff0; color: black; }
select.workaround { background-color: #fff8dc; color: black; }
select.skip { background-color: silver; color: black; }
select.fail { background-color: red; color: white; }
select.blocked { background-color: #7030a0; color: white; }
select.exception { background-color: #e29517; color: white; }
select.critical { background-color: #002060; color: white; }
option {
background-color: green;
color: white;
margin: 2px;
}
option.warning { background-color: #ff0; color: black; }
option.workaround { background-color: #fff8dc; color: black; }
option.skip { background-color: silver; color: black; }
option.error { background-color: red; color: white; }
option.blocked { background-color: #7030a0; color: white; }
option.exception { background-color: #e29517; color: white; }
option.critical { background-color: #002060; color: white; }
a.iteration-selector {
border: 2px solid silver;
border-radius: 40px;
width: 36px;
height: 36px;
margin: 0;
padding: 0;
vertical-align: middle;
display: table-cell;
color: white;
background-color: green;
text-shadow: 0 0 3px black;
font-size: 20px;
font-weight: bold;
line-height: 1em;
text-align: center;
cursor: pointer;
}
a.warning { background-color: #ff0; }
a.workaround { background-color: #fff8dc; }
a.skip { background-color: silver; }
a.fail { background-color: red; }
a.exception { background-color: #e29517; }
a.blocked { background-color: #7030a0; }
a.critical { background-color: #002060; }
a.selected { border: 2px solid black; }
select.error-list-selector { background-color: silver; }
div.test-chapter-step {
margin: 4px auto;
border-style: solid;
border-color: #8CB9AE;
border-radius: 10px;
padding-left: 10px;
padding-right: 10px;
cursor: pointer;
}
div.sidebar-copyright {
position: absolute;
background-color: #DDD;
text-align: center;
padding: 4px;
color: #888;
bottom: 0;
font-size: 12px;
font-family: Consolas;
}
div.floating {
right: 0;
border: 3px solid silver;
width: 40%;
text-align: center;
vertical-align: top;
position: fixed;
background-color : #F0F0F0;
border-bottom: 1px solid #999;
z-index: 999;
color: #333;
box-shadow: 0 0px 6px gray;
}
h1 {
display: block;
font-size: 2em;
font-weight: bold;
}
div.iteration-selector {
margin: 5px auto;
}
div.iteration-status {
padding: 3px;
height: 20px;
background-color: green;
border: 2px solid silver;
border-radius: 25px;
color: white;
text-align: center;
}
h1.iteration-title { text-align: center; }
div.iteration-execution-time { text-align: center; }
section.iteration-chapters {
border-radius: 25px;
width: 80%;
margin: 10px auto;
padding: 25px;
background-color: #F7F7F7;
border: 1px solid silver;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
}
ul.iteration-content {
list-style-type: none;
border-left-color: green;
border-left-style: solid;
margin: 0px;
}
ul.warning { border-left-color: #ff0; }
ul.workaround { border-left-color: #fff8dc; }
ul.skip { border-left-color: silver; }
ul.fail { border-left-color: red; }
ul.blocked { border-left-color: #7030a0; }
ul.critical { border-left-color: #002060; }
ul.exception { border-left-color: #e29517; }
li.iteration-content {
border-color: rgba(192, 192, 192, 1);
background-color: rgba(238, 238, 238, 1);
display: block;
margin: 2px auto;
border: 1px solid #C0C0C0;
padding: 3px 6px;
font-family: Calibri;
font-size: 16px;
line-height: 1.15em;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
border-left-color: green;
border-left-style: solid;
word-break: break-all;
}
div.test-group-step {
color: black;
background-color: #8CB9AE;
border: 1px solid #5C8880;
font-size: 18px;
letter-spacing: 2px;
cursor: pointer;
margin: 4px;
border-radius: 10px;
padding-left: 10px;
padding-right: 10px;
overflow-wrap: break-word;
word-wrap: break-word;
word-break: break-all;
}
div.warning { background-color: #ff0; color: black; }
div.workaround { background-color: #fff8dc; color: black; }
div.skip { background-color: silver; color: black; }
div.fail { background-color: red; color: white; }
div.blocked { background-color: #7030a0; color: white; }
div.critical { background-color: #002060; color: white; }
div.exception { background-color: #e29517; color: white; }
a.top-marker { cursor: pointer; float: right; }
a.top-time-marker {
word-wrap: break-word;
float: right;
}
li.test-step {
color: black;
border-color: rgba(192, 192, 192, 1);
background-color: rgba(238, 238, 238, 1);
display: block;
margin: 4px auto;
border: 1px solid #C0C0C0;
padding: 3px 6px;
font-family: Calibri;
font-size: 16px;
line-height: 1.15em;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
border-left-color: green;
border-left-style: solid;
border-radius: 10px;
padding-left: 10px;
padding-right: 10px
}
li.warning { background-color: #ff0; border-left-color: #ff0; }
li.workaround { background-color: #fff8dc; border-left-color: #fff8dc; }
li.skip { background-color: silver; border-left-color: silver; }
li.fail {
background-color: red;
border-left-color: red;
color: white;
}
li.blocked {
background-color: #7030a0;
border-left-color: #7030a0;
color: white;
}
li.exception {
background-color: #e29517;
border-left-color: #e29517;
color: white;
}
li.critical {
background-color: #002060;
border-left-color: #002060;
color: white;
}
div.ts-iteration {
float: left;
margin: 2px auto;
border: 1px solid silver;
padding: 3px 3px;
text-align: center;
}
div.ts-total-time {
margin: 2px auto;
border: 1px solid silver;
padding: 3px 3px;
text-align: right;
}
div.ts-time {
float: left;
font-size: 12px;
margin: 2px auto;
border: 1px solid #A7A7A7;
padding: 3px 3px;
}
div.ts-msg {
font-size: 16px;
font-family: Courier;
margin: 2px auto;
border: 1px solid #A7A7A7;
padding: 3px 3px;
white-space: pre-wrap;
word-break: break-all;
}

44
log/template/main.html Normal file
View File

@ -0,0 +1,44 @@
<!--
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
-->
<html>
<head>
<title>[test title]</title>
<link rel="stylesheet" type="text/css" href="main.css">
<meta charset="UTF-8"/>
</head>
<body>
<div class="meta-container">
<div class="sidebar">
<div class="sidebar-test" id="sidebar-test">
<div class="sidebar-show" style="display:none" onclick="sidebarCtrl('sidebar-hide', 'sidebar-show')">&gt;&gt;</div>
<div class="sidebar-test-title">Test title: </div>
<div class="sidebar-test-status">Test status: </div>
<div class="sidebar-tested-build" id="sidebar-tested-build">
<h2>Build:</h2>
</div>
<div class="sidebar-hide" id="sidebar-hide" onclick="sidebarCtrl('sidebar-hide', 'sidebar-show')">&lt;&lt;</div>
<div class="sidebar-show" style="display:none" onclick="sidebarCtrl('sidebar-hide', 'sidebar-show')">&gt;&gt;</div>
</div>
<div class="sidebar-test-iteration">Executed iterations:</div>
<select id="sidebar-iteration-list" class="sidebar-iteration-list" onchange="selectIterationFromSelect()" onclick="clickSelectIteration()">
<option value="M">Setup</option>
</select>
<div id="iteration-selector">
<a class="iteration-selector" onclick="selectIteration('M')">M</a>
</div>
<div class="sidebar-copyright" id="sidebar-copyright">
SPDX-License-Identifier: BSD-3-Clause
<br>
Copyright &#xa9; 2015-2021 Intel Corporation
</div>
</div>
<div class="main-layaut">
<iframe id="main-view" src="iterations/setup.html" width="100%" height="99%"></iframe>
</div>
</div>
<script src="main.js"></script>
</body>
</html>

223
log/template/main.js Normal file
View File

@ -0,0 +1,223 @@
/*
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
*/
function onLoadDocument() {
hideDebug();
}
/* Apply the view chosen in the mode selector: 'info' hides debug
   entries, any other value shows them. */
function selectMode() {
    var mode = document.getElementById('mode-selector').value;
    if (mode.includes('info')) {
        hideDebug();
    } else {
        showDebug();
    }
}
/* Hide every <li> entry tagged with the 'debug' class (info view). */
function hideDebug() {
    var steps = document.getElementsByTagName('li');
    // 'var i' keeps the index function-local; the original omitted the
    // declaration and leaked 'i' as an implicit global.
    for (var i = 0; i < steps.length; i++) {
        if (steps[i].className.includes('debug')) {
            steps[i].style.display = 'none';
        }
    }
}
/* Reveal every <li> entry tagged with the 'debug' class (debug view). */
function showDebug() {
    var steps = document.getElementsByTagName('li');
    // 'var i' keeps the index function-local; the original omitted the
    // declaration and leaked 'i' as an implicit global.
    for (var i = 0; i < steps.length; i++) {
        if (steps[i].className.includes('debug')) {
            steps[i].style.display = '';
        }
    }
}
function sidebarCtrl(ctrlHideId, ctrlShowClass) {
var metaContainer = document.getElementsByClassName("meta-container")[0];
var sidebar = document.getElementsByClassName('sidebar')[0];
var sidebarTest = document.getElementById('sidebar-test');
var ctrlHide = document.getElementById(ctrlHideId);
var ctrlShowSet = document.getElementsByClassName(ctrlShowClass);
if(sidebar.style.width.includes('15px')) {
showSidebar(metaContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest);
} else {
hideSidebar(metaContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest);
}
}
/* Expand the sidebar back to full width and reveal its contents.
   Clearing an inline style falls back to the stylesheet value.
   NOTE(review): the ctrlHide parameter is unused here - kept for
   call-signature symmetry with hideSidebar; confirm it can stay. */
function showSidebar(mContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest) {
    sidebar.style.cursor = 'default';
    mContainer.style.marginLeft = '';
    sidebarTest.style.width = '';
    sidebarTest.style.height = '';
    sidebar.style.height = '';
    sidebar.style.marginLeft = '';
    sidebar.style.width = '';
    var i;
    // Restore visibility of everything inside the sidebar test box.
    for (i = 0; i < sidebarTest.children.length; i++) {
        sidebarTest.children[i].style.display = '';
    }
    document.getElementById('iteration-selector').style.display = '';
    document.getElementById('sidebar-iteration-list').style.display = '';
    document.getElementById('sidebar-copyright').style.display = '';
    // Hide the '>>' expand controls again.
    for(i = 0; i < ctrlShowSet.length; i ++) {
        ctrlShowSet[i].style.display = 'none';
    }
}
function hideSidebar(mContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest) {
document.getElementById('iteration-selector').style.display = 'none';
document.getElementById('sidebar-iteration-list').style.display = 'none';
document.getElementById('sidebar-copyright').style.display = 'none';
var i;
for (i = 0; i < sidebarTest.children.length; i++) {
sidebarTest.children[i].style.display = 'none';
}
sidebarTest.style.display = '';
for(i = 0; i < ctrlShowSet.length; i ++) {
ctrlShowSet[i].style.display = '';
ctrlShowSet[i].style.color = 'black';
}
sidebar.style.width = '15px';
sidebar.style.marginLeft = '-15px';
sidebar.style.height = '100%';
sidebarTest.style.height = '100%';
sidebarTest.style.width = '100%';
mContainer.style.marginLeft = '16px';
sidebar.style.cursor = 'pointer';
}
/* Select the previous error in the drop-down, wrapping to the last
   entry. Index 0 is the '-empty-' placeholder and is skipped. */
function previousError() {
    var selector = document.getElementById("error-list-selector");
    if (selector.length <= 1) {
        return; // only the placeholder is present - no errors recorded
    }
    var current = selector.selectedIndex;
    selector.selectedIndex = (current - 1 > 0) ? current - 1 : selector.length - 1;
    errorSelected('error-list-selector');
}
/* Select the next error in the drop-down, wrapping back to the first
   real entry (index 1; index 0 is the placeholder). */
function nextError() {
    var selector = document.getElementById("error-list-selector");
    if (selector.length <= 1) {
        return; // no errors recorded
    }
    var current = selector.selectedIndex;
    selector.selectedIndex = (current + 1 < selector.length) ? current + 1 : 1;
    errorSelected('error-list-selector');
}
/* Handler for the iteration drop-down: load the chosen iteration page
   and recolor the <select> to the iteration's status. */
function selectIterationFromSelect() {
    var element = document.getElementById("sidebar-iteration-list");
    loadDocument(element.value);
    updateIterationSelector(element);
}
/* Recolor every iteration drop-down entry according to its status
   class; entries without a recognized class get white/black. */
function clickSelectIteration() {
    var element = document.getElementById("sidebar-iteration-list");
    // status class -> [background, foreground]
    // NOTE(review): 'exception' uses blueviolet here while main.css
    // styles option.exception as #e29517 - confirm which is intended.
    var colors = {
        "warning": ["yellow", "black"],
        "skip": ["silver", "black"],
        "fail": ["red", "white"],
        "exception": ["blueviolet", "white"]
    };
    // 'var' declarations fix the implicit globals ('i', 'option')
    // leaked by the original loop.
    for (var i = 0; i < element.length; i++) {
        var option = element[i];
        var style = colors[option.getAttribute('class')] || ["white", "black"];
        option.style.backgroundColor = style[0];
        option.style.color = style[1];
    }
}
/* Select an iteration from the round selector buttons: load its page,
   then sync the drop-down selection and coloring with it. */
function selectIteration(iteration) {
    var selectElement = document.getElementById("sidebar-iteration-list");
    var docId = loadDocument(iteration);
    selectElement.selectedIndex = docId;
    updateIterationSelector(selectElement);
}
/* Load the page for fileId into the main iframe: 'M' loads the setup
   page, a numeric id loads iterations/iteration_<zero-padded id>.html.
   Returns the numeric iteration index (0 for the setup page). */
function loadDocument(fileId) {
    var frame = document.getElementById("main-view");
    if (fileId == 'M') {
        frame.src = "iterations/setup.html";
        return 0;
    }
    frame.src = "iterations/iteration_" + pad(fileId, 3) + ".html";
    return parseInt(fileId);
}
/* Copy the selected option's status class onto the <select> element so
   the whole control takes that status color. */
function updateIterationSelector(element) {
    var statusClass = element[element.selectedIndex].getAttribute('class');
    var classes = "sidebar-iteration-list";
    if (statusClass != null) {
        classes += " " + statusClass;
    }
    element.setAttribute('class', classes);
}
/* Scroll the page to the anchor chosen in the given error drop-down. */
function errorSelected(selectorId) {
    var newLocation = document.getElementById(selectorId).value;
    window.location.hash = newLocation;
}
/* Left-pad the string strNumber with '0' up to 'padding' characters. */
function pad(strNumber, padding) {
    var result = strNumber;
    while (result.length < padding) {
        result = "0" + result;
    }
    return result;
}
/* Toggle the visibility of the element with the given id. */
function showHide(id) {
    var element = document.getElementById(id);
    element.style.display = (element.style.display == 'none') ? '' : 'none';
}
/* Expand a collapsed chapter (and every ancestor on its path) and jump
   to it; collapse it when it is already visible. Chapter ids are
   dot-separated paths, e.g. '1.2.3' is nested under '1.2' under '1'. */
function chapterClick(id) {
    var id_array = id.split('.');
    var node_id = "";
    var i = 0;
    var destinationElement = document.getElementById(id);
    if (destinationElement.style.display == 'none') {
        // Walk from the root id ('1') down to the full id ('1.2.3'),
        // making every ancestor visible on the way.
        do {
            node_id += id_array[i];
            var ele = document.getElementById(node_id);
            ele.style.display = '';
            node_id += '.';
            i += 1;
        } while (i < id_array.length);
        window.location = '#' + id;
    } else {
        destinationElement.style.display = 'none';
    }
}

117
storage_devices/device.py Normal file
View File

@ -0,0 +1,117 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from core.test_run import TestRun
from test_tools import disk_utils, fs_utils
from test_tools.disk_utils import get_device_filesystem_type, get_sysfs_path
from test_utils.io_stats import IoStats
from test_utils.size import Size, Unit
class Device:
    """Base wrapper for a Linux block device addressed by its /dev path.

    All operations run shell commands on the DUT through
    ``TestRun.executor``, so they work over local and remote connections.
    """

    def __init__(self, path):
        # Raises if the path does not look like a valid /dev path.
        disk_utils.validate_dev_path(path)
        self.path = path
        self.size = Size(disk_utils.get_size(self.get_device_id()), Unit.Byte)
        self.filesystem = get_device_filesystem_type(self.get_device_id())
        # Updated by mount()/unmount()/is_mounted().
        self.mount_point = None

    def create_filesystem(self, fs_type: disk_utils.Filesystem, force=True, blocksize=None):
        """Create a filesystem on the device and remember its type."""
        disk_utils.create_filesystem(self, fs_type, force, blocksize)
        self.filesystem = fs_type

    def wipe_filesystem(self, force=True):
        """Wipe the filesystem signature and forget the cached type."""
        disk_utils.wipe_filesystem(self, force)
        self.filesystem = None

    def is_mounted(self):
        """Check via findmnt whether this device is mounted.

        Side effect: on success, caches the parsed mount point in
        ``self.mount_point``.
        """
        output = TestRun.executor.run(f"findmnt {self.path}")
        if output.exit_code != 0:
            return False
        else:
            # Second line of findmnt output holds the mount entry; the
            # mount point is everything before the resolved device path.
            mount_point_line = output.stdout.split('\n')[1]
            device_path = fs_utils.readlink(self.path)
            self.mount_point = mount_point_line[0:mount_point_line.find(device_path)].strip()
            return True

    def mount(self, mount_point, options: [str] = None):
        """Mount the device at mount_point; raise if already mounted."""
        if not self.is_mounted():
            if disk_utils.mount(self, mount_point, options):
                self.mount_point = mount_point
        else:
            raise Exception(f"Device is already mounted! Actual mount point: {self.mount_point}")

    def unmount(self):
        """Unmount the device if it is mounted (logs otherwise)."""
        if not self.is_mounted():
            TestRun.LOGGER.info("Device is not mounted.")
        elif disk_utils.unmount(self):
            self.mount_point = None

    def get_device_link(self, directory: str):
        """Return the first link under 'directory' pointing at this device."""
        items = self.get_all_device_links(directory)
        return next(i for i in items if i.full_path.startswith(directory))

    def get_device_id(self):
        """Return the kernel device name (last path component after readlink)."""
        return fs_utils.readlink(self.path).split('/')[-1]

    def get_all_device_links(self, directory: str):
        """List all links under 'directory' resolving to this device."""
        from test_tools import fs_utils
        output = fs_utils.ls(f"$(find -L {directory} -samefile {self.path})")
        return fs_utils.parse_ls_output(output, self.path)

    def get_io_stats(self):
        """Return the current I/O statistics for this device."""
        return IoStats.get_io_stats(self.get_device_id())

    def get_sysfs_property(self, property_name):
        """Read a property from this device's sysfs 'queue' directory."""
        path = posixpath.join(disk_utils.get_sysfs_path(self.get_device_id()),
                              "queue", property_name)
        return TestRun.executor.run_expect_success(f"cat {path}").stdout

    def set_sysfs_property(self, property_name, value):
        """Write a property in this device's sysfs 'queue' directory."""
        TestRun.LOGGER.info(
            f"Setting {property_name} for device {self.get_device_id()} to {value}.")
        path = posixpath.join(disk_utils.get_sysfs_path(self.get_device_id()), "queue",
                              property_name)
        fs_utils.write_file(path, str(value))

    def set_max_io_size(self, new_max_io_size: Size):
        # max_sectors_kb is expressed in KiB.
        self.set_sysfs_property("max_sectors_kb",
                                int(new_max_io_size.get_value(Unit.KibiByte)))

    def get_max_io_size(self):
        return Size(int(self.get_sysfs_property("max_sectors_kb")), Unit.KibiByte)

    def get_max_hw_io_size(self):
        return Size(int(self.get_sysfs_property("max_hw_sectors_kb")), Unit.KibiByte)

    def get_discard_granularity(self):
        return self.get_sysfs_property("discard_granularity")

    def get_discard_max_bytes(self):
        return self.get_sysfs_property("discard_max_bytes")

    def get_discard_zeroes_data(self):
        return self.get_sysfs_property("discard_zeroes_data")

    def get_numa_node(self):
        """Return the NUMA node this device is attached to."""
        return int(TestRun.executor.run_expect_success(
            f"cat {get_sysfs_path(self.get_device_id())}/device/numa_node").stdout)

    def __str__(self):
        return (
            f'system path: {self.path}, short link: /dev/{self.get_device_id()},'
            f' filesystem: {self.filesystem}, mount point: {self.mount_point}, size: {self.size}'
        )

    def __repr__(self):
        return str(self)

    @staticmethod
    def get_scsi_debug_devices():
        """Return Device objects for all scsi_debug (emulated) disks."""
        scsi_debug_devices = TestRun.executor.run_expect_success(
            "lsscsi --scsi_id | grep scsi_debug").stdout
        return [Device(f'/dev/disk/by-id/scsi-{device.split()[-1]}')
                for device in scsi_debug_devices.splitlines()]

237
storage_devices/disk.py Normal file
View File

@ -0,0 +1,237 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import itertools
import json
import re
from datetime import timedelta
from enum import IntEnum
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools import disk_utils, fs_utils, nvme_cli
from test_utils import disk_finder
from test_utils.os_utils import wait
from test_utils.size import Unit
from test_tools.disk_utils import get_pci_address
class DiskType(IntEnum):
    """Disk type classes; the integer ordering is meaningful and is used
    by DiskTypeLowerThan / DiskTypeSetBase comparisons."""
    hdd = 0
    hdd4k = 1
    sata = 2
    nand = 3
    optane = 4
class DiskTypeSetBase:
    """Base class for disk-type requirement expressions.

    Subclasses implement resolved() and types(); ordering operators
    compare requirement sets by their minimal disk type (lowest enum
    value in the set).
    """

    def resolved(self):
        raise NotImplementedError()

    def types(self):
        raise NotImplementedError()

    def json(self):
        # Serialized form of the requirement ("set" of type names).
        return json.dumps({
            "type": "set",
            "values": [t.name for t in self.types()]
        })

    def __lt__(self, other):
        return min(self.types()) < min(other.types())

    def __le__(self, other):
        return min(self.types()) <= min(other.types())

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ makes instances
        # unhashable - confirm these are never used in sets/dict keys.
        return min(self.types()) == min(other.types())

    def __ne__(self, other):
        return min(self.types()) != min(other.types())

    def __gt__(self, other):
        return min(self.types()) > min(other.types())

    def __ge__(self, other):
        return min(self.types()) >= min(other.types())
class DiskTypeSet(DiskTypeSetBase):
    """Explicit, already-resolved set of acceptable disk types."""

    def __init__(self, *args):
        # Accepts a single iterable of DiskType values (set(*args)).
        self.__types = set(*args)

    def resolved(self):
        return True

    def types(self):
        return self.__types
class DiskTypeLowerThan(DiskTypeSetBase):
    """Requirement matching any disk type strictly lower than the type of
    the disk registered in TestRun under the given name."""

    def __init__(self, disk_name):
        self.__disk_name = disk_name

    def resolved(self):
        # Resolvable only once the referenced disk exists in TestRun.
        return self.__disk_name in TestRun.disks

    def types(self):
        if not self.resolved():
            raise LookupError("Disk type not resolved!")
        disk_type = TestRun.disks[self.__disk_name].disk_type
        # All enum members strictly below the referenced disk's type.
        return set(filter(lambda d: d < disk_type, [*DiskType]))

    def json(self):
        # Serialized as an 'lt' operator over the referenced disk name.
        return json.dumps({
            "type": "operator",
            "name": "lt",
            "args": [self.__disk_name]
        })
class Disk(Device):
    """A whole physical disk (as opposed to a partition), with partition
    management and hotplug (plug/unplug) support."""

    def __init__(
            self,
            path,
            disk_type: DiskType,
            serial_number,
            block_size,
    ):
        Device.__init__(self, path)
        self.serial_number = serial_number
        self.block_size = Unit(block_size)
        self.disk_type = disk_type
        # Partition objects created on this disk.
        self.partitions = []

    def create_partitions(
            self,
            sizes: [],
            partition_table_type=disk_utils.PartitionTable.gpt):
        """Create partitions of the given sizes (GPT table by default)."""
        disk_utils.create_partitions(self, sizes, partition_table_type)

    def remove_partition(self, part):
        """Remove a single partition and drop it from self.partitions."""
        # Partition number is the suffix after 'part' in the by-id path.
        part_number = int(part.path.split("part")[1])
        disk_utils.remove_parition(self, part_number)
        self.partitions.remove(part)

    def umount_all_partitions(self):
        """Lazy-unmount every partition of this disk."""
        TestRun.LOGGER.info(
            f"Umounting all partitions from: {self.path}")
        cmd = f'umount -l {fs_utils.readlink(self.path)}*?'
        TestRun.executor.run(cmd)

    def remove_partitions(self):
        """Unmount and remove all partitions from this disk."""
        for part in self.partitions:
            if part.is_mounted():
                part.unmount()
        if disk_utils.remove_partitions(self):
            self.partitions.clear()

    def is_detected(self):
        """Check whether the system currently sees this disk - by serial
        number when available, by path otherwise."""
        if self.serial_number:
            serial_numbers = disk_finder.get_all_serial_numbers()
            return self.serial_number in serial_numbers
        elif self.path:
            output = fs_utils.ls_item(f"{self.path}")
            return fs_utils.parse_ls_output(output)[0] is not None
        raise Exception("Couldn't check if device is detected by the system")

    def wait_for_plug_status(self, should_be_visible):
        """Poll up to a minute until visibility matches the expectation."""
        if not wait(lambda: should_be_visible == self.is_detected(),
                    timedelta(minutes=1),
                    timedelta(seconds=1)):
            raise Exception(f"Timeout occurred while trying to "
                            f"{'plug' if should_be_visible else 'unplug'} disk.")

    def plug(self):
        """Re-attach the disk to the system (no-op if already visible)."""
        if self.is_detected():
            return
        TestRun.executor.run_expect_success(self.plug_command)
        self.wait_for_plug_status(True)

    def unplug(self):
        """Detach the disk from the system (no-op if already gone)."""
        if not self.is_detected():
            return
        TestRun.executor.run_expect_success(self.unplug_command)
        self.wait_for_plug_status(False)

    @staticmethod
    def plug_all_disks():
        """Rescan both the PCI bus (NVMe) and all SCSI hosts (SATA)."""
        TestRun.executor.run_expect_success(NvmeDisk.plug_all_command)
        TestRun.executor.run_expect_success(SataDisk.plug_all_command)

    def __str__(self):
        disk_str = f'system path: {self.path}, type: {self.disk_type.name}, ' \
                   f'serial: {self.serial_number}, size: {self.size}, ' \
                   f'block size: {self.block_size}, partitions:\n'
        for part in self.partitions:
            disk_str += f'\t{part}'
        return disk_str

    @staticmethod
    def create_disk(path,
                    disk_type: DiskType,
                    serial_number,
                    block_size):
        """Factory: NVMe wrapper for nand/optane types, SATA otherwise."""
        if disk_type is DiskType.nand or disk_type is DiskType.optane:
            return NvmeDisk(path, disk_type, serial_number, block_size)
        else:
            return SataDisk(path, disk_type, serial_number, block_size)
class NvmeDisk(Disk):
    """NVMe disk; hotplug is performed through the PCI-bus sysfs API."""

    plug_all_command = "echo 1 > /sys/bus/pci/rescan"

    def __init__(self, path, disk_type, serial_number, block_size):
        Disk.__init__(self, path, disk_type, serial_number, block_size)
        self.plug_command = NvmeDisk.plug_all_command
        # Two alternative remove paths are tried ('||') - which one
        # exists depends on how the device is exposed in sysfs.
        self.unplug_command = f"echo 1 > /sys/block/{self.get_device_id()}/device/remove || " \
                              f"echo 1 > /sys/block/{self.get_device_id()}/device/device/remove"
        self.pci_address = get_pci_address(self.get_device_id())

    def __str__(self):
        # Prepend the PCI address to the generic disk description.
        disk_str = super().__str__()
        disk_str = f"pci address: {self.pci_address}, " + disk_str
        return disk_str

    def format_disk(self, metadata_size=None, block_size=None,
                    force=True, format_params=None, reset=True):
        """Low-level format via nvme-cli."""
        nvme_cli.format_disk(self, metadata_size, block_size, force, format_params, reset)

    def get_lba_formats(self):
        return nvme_cli.get_lba_formats(self)

    def get_lba_format_in_use(self):
        return nvme_cli.get_lba_format_in_use(self)
class SataDisk(Disk):
    """SATA disk; hotplug goes through the SCSI host scan/delete sysfs API."""

    plug_all_command = "for i in $(find -H /sys/devices/ -path '*/scsi_host/*/scan' -type f); " \
                       "do echo '- - -' > $i; done;"

    def __init__(self, path, disk_type, serial_number, block_size):
        Disk.__init__(self, path, disk_type, serial_number, block_size)
        self.plug_command = SataDisk.plug_all_command
        self.unplug_command = \
            f"echo 1 > {self.get_sysfs_properties(self.get_device_id()).full_path}/device/delete"

    def get_sysfs_properties(self, device_id):
        """Locate this disk's sysfs directory and derive its SCSI address.

        Side effect: narrows self.plug_command from 'rescan everything'
        down to a targeted scan of this disk's port/target/lun on its
        controller.
        """
        ls_command = f"$(find -H /sys/devices/ -name {device_id} -type d)"
        output = fs_utils.ls_item(f"{ls_command}")
        sysfs_addr = fs_utils.parse_ls_output(output)[0]
        if not sysfs_addr:
            raise Exception(f"Failed to find sysfs address: ls -l {ls_command}")
        dirs = sysfs_addr.full_path.split('/')
        # Third-from-last path component is 'controller:port:target:lun'.
        scsi_address = dirs[-3]
        matches = re.search(
            r"^(?P<controller>\d+)[-:](?P<port>\d+)[-:](?P<target>\d+)[-:](?P<lun>\d+)$",
            scsi_address)
        controller_id = matches["controller"]
        port_id = matches["port"]
        target_id = matches["target"]
        lun = matches["lun"]
        # Path prefix up to (excluding) the 'hostN' component.
        host_path = "/".join(itertools.takewhile(lambda x: not x.startswith("host"), dirs))
        self.plug_command = f"echo '{port_id} {target_id} {lun}' > " \
                            f"{host_path}/host{controller_id}/scsi_host/host{controller_id}/scan"
        return sysfs_addr

66
storage_devices/drbd.py Normal file
View File

@ -0,0 +1,66 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import posixpath
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.drbdadm import Drbdadm
from test_utils.filesystem.symlink import Symlink
from test_utils.output import CmdException
class Drbd(Device):
    """DRBD replicated block device managed through drbdadm.

    NOTE(review): Device.__init__ is intentionally not called - path,
    symlink and the underlying Device exist only after up(); confirm
    this is deliberate.
    """

    def __init__(self, config):
        # Validate that the resource is known to drbdadm before use.
        if Drbdadm.dump_config(config.name).exit_code != 0:
            raise ValueError(f"Resource {config.name} not found")
        self.config = config

    def create_metadata(self, force):
        return Drbdadm.create_metadata(self.config.name, force)

    def up(self):
        """Bring the resource up and return the underlying Device."""
        output = Drbdadm.up(self.config.name)
        if output.exit_code != 0:
            raise CmdException(f"Failed to create {self.config.name} drbd instance")
        self.path = posixpath.join("/dev/disk/by-id/", posixpath.basename(self.config.device))
        self.symlink = Symlink.get_symlink(self.path, self.config.device, True)
        self.device = Device(self.path)
        return self.device

    def wait_for_sync(self):
        return Drbdadm.wait_for_sync(self.config.name)

    def is_in_sync(self):
        return Drbdadm.in_sync(self.config.name)

    def get_status(self):
        return Drbdadm.get_status(self.config.name)

    def set_primary(self, force=False):
        return Drbdadm.set_node_primary(self.config.name, force)

    def down(self):
        """Stop the resource and clean up the by-id symlink."""
        output = Drbdadm.down(self.config.name)
        if output.exit_code != 0:
            raise CmdException(f"Failed to stop {self.config.name} drbd instance")
        self.device = None
        self.symlink.remove(True, True)

    @staticmethod
    def down_all():
        # Best effort: 'no resources defined' is not an error here.
        try:
            Drbdadm.down_all()
        except CmdException as e:
            if "no resources defined" not in str(e):
                raise e

    @staticmethod
    def is_installed():
        """True when the drbdadm tool and the drbd kernel module exist."""
        return TestRun.executor.run("which drbdadm && modinfo drbd").exit_code == 0

531
storage_devices/lvm.py Normal file
View File

@ -0,0 +1,531 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import threading
from typing import Union
from api.cas.core import Core
from core.test_run import TestRun
from storage_devices.device import Device
from storage_devices.disk import Disk, NvmeDisk
from storage_devices.partition import Partition
from test_tools.fs_utils import readlink
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.filesystem.symlink import Symlink
from test_utils.size import Size
lvm_config_path = "/etc/lvm/lvm.conf"
filter_prototype_regex = r"^\sfilter\s=\s\["
types_prototype_regex = r"^\stypes\s=\s\["
global_filter_prototype_regex = r"^\sglobal_filter\s=\s\["
tab = "\\\\t"
class LvmConfiguration:
def __init__(
self,
lvm_filters: [] = None,
pv_num: int = None,
vg_num: int = None,
lv_num: int = None,
cache_num: int = None,
cas_dev_num: int = None
):
self.lvm_filters = lvm_filters
self.pv_num = pv_num
self.vg_num = vg_num
self.lv_num = lv_num
self.cache_num = cache_num
self.cas_dev_num = cas_dev_num
@staticmethod
def __read_definition_from_lvm_config(
prototype_regex: str
):
cmd = f"grep '{prototype_regex}' {lvm_config_path}"
output = TestRun.executor.run(cmd).stdout
return output
@classmethod
def __add_block_dev_to_lvm_config(
cls,
block_device_type: str,
number_of_partitions: int = 16
):
types_definition = cls.read_types_definition_from_lvm_config()
if types_definition:
if block_device_type in types_definition:
TestRun.LOGGER.info(f"Device type '{block_device_type}' already present in config")
return
TestRun.LOGGER.info(f"Add block device type to existing list")
new_type_prefix = f"types = [\"{block_device_type}\", {number_of_partitions}, "
config_update_cmd = f"sed -i 's/{types_prototype_regex}/\t{new_type_prefix}/g'" \
f" {lvm_config_path}"
else:
TestRun.LOGGER.info(f"Create new types variable")
new_types = f"types = [\"{block_device_type}\", {number_of_partitions}]"
characteristic_line = f"# Configuration option devices\\/sysfs_scan."
config_update_cmd = f"sed -i /'{characteristic_line}'/i\\ '{tab}{new_types}' " \
f"{lvm_config_path}"
TestRun.LOGGER.info(f"Adding {block_device_type} ({number_of_partitions} partitions) "
f"to supported types in {lvm_config_path}")
TestRun.executor.run(config_update_cmd)
@classmethod
def __add_filter_to_lvm_config(
cls,
filter: str
):
if filter is None:
TestRun.LOGGER.error(f"Lvm filter for lvm config not provided.")
filters_definition = cls.read_filter_definition_from_lvm_config()
if filters_definition:
if filter in filters_definition:
TestRun.LOGGER.info(f"Filter definition '{filter}' already present in config")
return
new_filter_formatted = filter.replace("/", "\\/")
new_filter_prefix = f"filter = [ \"{new_filter_formatted}\", "
TestRun.LOGGER.info(f"Adding filter to existing list")
config_update_cmd = f"sed -i 's/{filter_prototype_regex}/\t{new_filter_prefix}/g'" \
f" {lvm_config_path}"
else:
TestRun.LOGGER.info(f"Create new filter variable")
new_filter = f"filter = [\"{filter}\"]"
characteristic_line = f"# Configuration option devices\\/global_filter."
config_update_cmd = f"sed -i /'{characteristic_line}'/i\\ '{tab}{new_filter}' " \
f"{lvm_config_path}"
TestRun.LOGGER.info(f"Adding filter '{filter}' to {lvm_config_path}")
TestRun.executor.run(config_update_cmd)
@classmethod
def read_types_definition_from_lvm_config(cls):
return cls.__read_definition_from_lvm_config(types_prototype_regex)
@classmethod
def read_filter_definition_from_lvm_config(cls):
return cls.__read_definition_from_lvm_config(filter_prototype_regex)
@classmethod
def read_global_filter_definition_from_lvm_config(cls):
return cls.__read_definition_from_lvm_config(global_filter_prototype_regex)
    @classmethod
    def add_block_devices_to_lvm_config(
            cls,
            device_type: str
    ):
        """Register a block-device type (e.g. 'cas', 'nvme') in the LVM config."""
        if device_type is None:
            # NOTE(review): only logs - execution still falls through to the call
            # below with None; confirm whether LOGGER.error aborts the test run.
            TestRun.LOGGER.error(f"No device provided.")
        cls.__add_block_dev_to_lvm_config(device_type)
@classmethod
def add_filters_to_lvm_config(
cls,
filters: []
):
if filters is None:
TestRun.LOGGER.error(f"Lvm filters for lvm config not provided.")
for f in filters:
cls.__add_filter_to_lvm_config(f)
@classmethod
def configure_dev_types_in_config(
cls,
devices: ([Device], Device)
):
if isinstance(devices, list):
devs = []
for device in devices:
dev = device.parent_device if isinstance(device, Partition) else device
devs.append(dev)
if any(isinstance(dev, Core) for dev in devs):
cls.add_block_devices_to_lvm_config("cas")
if any(isinstance(dev, NvmeDisk) for dev in devs):
cls.add_block_devices_to_lvm_config("nvme")
else:
dev = devices.parent_device if isinstance(devices, Partition) else devices
if isinstance(dev, Core):
cls.add_block_devices_to_lvm_config("cas")
if isinstance(dev, NvmeDisk):
cls.add_block_devices_to_lvm_config("nvme")
@classmethod
def configure_filters(
cls,
lvm_filters: [],
devices: ([Device], Device)
):
if lvm_filters:
TestRun.LOGGER.info(f"Preparing configuration for LVMs - filters.")
LvmConfiguration.add_filters_to_lvm_config(lvm_filters)
cls.configure_dev_types_in_config(devices)
@staticmethod
def remove_global_filter_from_config():
cmd = f"sed -i '/{global_filter_prototype_regex}/d' {lvm_config_path}"
TestRun.executor.run(cmd)
@staticmethod
def remove_filters_from_config():
cmd = f"sed -i '/{filter_prototype_regex}/d' {lvm_config_path}"
TestRun.executor.run(cmd)
class VolumeGroup:
    """Represents an LVM volume group; wraps vg* / pvscan shell commands."""
    # class-wide counter + lock used to generate unique VG names
    __unique_vg_id = 0
    __lock = threading.Lock()
    def __init__(self, name: str = None):
        self.name = name
    def __eq__(self, other):
        # VGs are equal when their names match; non-VG objects compare unequal
        try:
            return self.name == other.name
        except AttributeError:
            return False
    @classmethod
    def __get_vg_name(cls, prefix: str = "vg"):
        """Return a process-unique VG name of the form '<prefix><n>'."""
        with cls.__lock:
            cls.__unique_vg_id += 1
        return f"{prefix}{cls.__unique_vg_id}"
    @staticmethod
    def get_all_volume_groups():
        """Parse `pvscan` output into a dict: VG name -> list of PV paths.

        PVs not assigned to any VG are grouped under the empty-string key.
        """
        output_lines = TestRun.executor.run(f"pvscan").stdout.splitlines()
        volume_groups = {}
        for line in output_lines:
            if "PV" not in line:
                continue
            line_elements = line.split()
            # the token following "PV" is the physical volume path
            pv = line_elements[line_elements.index("PV") + 1]
            vg = ""
            if "VG" in line:
                vg = line_elements[line_elements.index("VG") + 1]
            if vg not in volume_groups:
                volume_groups[vg] = []
            volume_groups[vg].append(pv)
        return volume_groups
    @staticmethod
    def create_vg(vg_name: str, device_paths: str):
        """Run `vgcreate` for the given name and space-separated device paths.

        :raises ValueError: when name or device paths are missing.
        """
        if not vg_name:
            raise ValueError("Name needed for VG creation.")
        if not device_paths:
            raise ValueError("Device paths needed for VG creation.")
        cmd = f"vgcreate --yes {vg_name} {device_paths} "
        TestRun.executor.run_expect_success(cmd)
    @classmethod
    def is_vg_already_present(cls, dev_number: int, device_paths: str = None):
        """Return an existing VG built from exactly these devices, else False.

        Also warns when any of the devices is used by some other VG.
        """
        if not device_paths:
            TestRun.LOGGER.exception("No devices provided.")
        volume_groups = cls.get_all_volume_groups()
        for vg in volume_groups:
            for pv in volume_groups[vg]:
                # substring match: pv path must appear in the joined paths string
                if len(volume_groups[vg]) == dev_number and pv in device_paths:
                    return cls(vg)
        for vg in volume_groups:
            for pv in volume_groups[vg]:
                if pv in device_paths:
                    TestRun.LOGGER.warning(f"Some devices are used in other LVM volume group")
        return False
    @classmethod
    def create(cls, device_paths: str = None):
        """Create a VG with a generated unique name on *device_paths* and return it."""
        vg_name = cls.__get_vg_name()
        VolumeGroup.create_vg(vg_name, device_paths)
        volume_groups = VolumeGroup.get_all_volume_groups()
        if vg_name in volume_groups:
            return cls(vg_name)
        else:
            TestRun.LOGGER.error("Had not found newly created VG.")
    @staticmethod
    def remove(vg_name: str):
        """Run `vgremove` for *vg_name*; returns the command output.

        :raises ValueError: when the name is missing.
        """
        if not vg_name:
            raise ValueError("Name needed for VG remove operation.")
        cmd = f"vgremove {vg_name}"
        return TestRun.executor.run(cmd)
    @staticmethod
    def get_logical_volumes_path(vg_name: str):
        """Return the LV device paths belonging to *vg_name*, via lvdisplay."""
        cmd = f"lvdisplay | grep /dev/{vg_name}/ | awk '{{print $3}}'"
        paths = TestRun.executor.run(cmd).stdout.splitlines()
        return paths
class Lvm(Disk):
    """Represents an LVM logical volume; wraps lv*/vg*/pv* shell commands."""
    # class-wide counter + lock used to generate unique LV names
    __unique_lv_id = 0
    __lock = threading.Lock()
    def __init__(
            self,
            path_dm: str,  # device mapper path
            volume_group: VolumeGroup,
            volume_name: str = None
    ):
        Device.__init__(self, resolve_to_by_id_link(path_dm))
        self.device_name = path_dm.split('/')[-1]
        self.volume_group = volume_group
        self.volume_name = volume_name
    def __eq__(self, other):
        # equal when device name, VG and LV name all match
        try:
            return self.device_name == other.device_name and \
                self.volume_group == other.volume_group and \
                self.volume_name == other.volume_name
        except AttributeError:
            return False
    @classmethod
    def __get_unique_lv_name(cls, prefix: str = "lv"):
        """Return a process-unique LV name of the form '<prefix><n>'."""
        with cls.__lock:
            cls.__unique_lv_id += 1
        return f"{prefix}{cls.__unique_lv_id}"
    @classmethod
    def __create(
            cls,
            name: str,
            volume_size_cmd: str,
            volume_group: VolumeGroup
    ):
        """Run `lvcreate` and return the freshly discovered Lvm object (or None)."""
        TestRun.LOGGER.info(f"Creating LV '{name}'.")
        cmd = f"lvcreate {volume_size_cmd} --name {name} {volume_group.name} --yes"
        TestRun.executor.run_expect_success(cmd)
        volumes = cls.discover_logical_volumes()
        for volume in volumes:
            if name == volume.volume_name:
                return volume
    @classmethod
    def configure_global_filter(
            cls,
            dev_first: Device,
            lv_amount: int,
            pv_devs: ([Device], Device)
    ):
        """Add reject ('r|...|') entries for the PV devices to lvm.conf's
        global_filter when multiple LVs sit on a CAS core device."""
        device_first = dev_first.parent_device if isinstance(dev_first, Partition) else dev_first
        if lv_amount > 1 and isinstance(device_first, Core):
            global_filter_def = LvmConfiguration.read_global_filter_definition_from_lvm_config()
            if not isinstance(pv_devs, list):
                pv_devs = [pv_devs]
            if global_filter_def:
                TestRun.LOGGER.info(f"Configure 'global filter' variable")
                links = []
                for pv_dev in pv_devs:
                    link = pv_dev.get_device_link("/dev/disk/by-id")
                    links.append(str(link))
                for link in links:
                    if link in global_filter_def:
                        TestRun.LOGGER.info(f"Global filter definition already contains '{link}'")
                        continue
                    # escape '/' for use inside the sed s/// expression
                    new_link_formatted = link.replace("/", "\\/")
                    new_global_filter_prefix = f"global_filter = [ \"r|{new_link_formatted}|\", "
                    TestRun.LOGGER.info(f"Adding global filter '{link}' to existing list")
                    config_update_cmd = f"sed -i 's/{global_filter_prototype_regex}/\t" \
                                        f"{new_global_filter_prefix}/g' {lvm_config_path}"
                    TestRun.executor.run(config_update_cmd)
            else:
                # NOTE(review): 'global_filter' is reassigned each iteration, so
                # only the LAST PV's link survives the loop - confirm whether the
                # intent was to accumulate ('+=') entries for all PVs.
                for pv_dev in pv_devs:
                    link = pv_dev.get_device_link("/dev/disk/by-id")
                    global_filter = f"\"r|{link}|\""
                    global_filter += ", "
                global_filter = global_filter[:-2]
                TestRun.LOGGER.info(f"Create new 'global filter' variable")
                new_global = f"global_filter = [{global_filter}]"
                # anchor comment in lvm.conf before which the definition is inserted
                characteristic_line = f"# Configuration option devices\\/types."
                config_update_cmd = f"sed -i /'{characteristic_line}'/i\\ " \
                                    f"'{tab}{new_global}' {lvm_config_path}"
                TestRun.LOGGER.info(f"Adding global filter '{global_filter}' to {lvm_config_path}")
                TestRun.executor.run(config_update_cmd)
            TestRun.LOGGER.info(f"Remove 'filter' in order to 'global_filter' to be used")
            if LvmConfiguration.read_filter_definition_from_lvm_config():
                LvmConfiguration.remove_filters_from_config()
    @classmethod
    def create_specific_lvm_configuration(
            cls,
            devices: ([Device], Device),
            lvm_configuration: LvmConfiguration,
            lvm_as_core: bool = False
    ):
        """Create VGs/LVs laid out according to *lvm_configuration* and return the LVs."""
        # distribute PVs and LVs evenly across the requested number of VGs
        pv_per_vg = int(lvm_configuration.pv_num / lvm_configuration.vg_num)
        lv_per_vg = int(lvm_configuration.lv_num / lvm_configuration.vg_num)
        lv_size_percentage = int(100 / lv_per_vg)
        LvmConfiguration.configure_filters(lvm_configuration.lvm_filters, devices)
        logical_volumes = []
        for vg_iter in range(lvm_configuration.vg_num):
            if isinstance(devices, list):
                pv_devs = []
                # slice of devices dedicated to this VG
                start_range = vg_iter * pv_per_vg
                end_range = start_range + pv_per_vg
                for i in range(start_range, end_range):
                    pv_devs.append(devices[i])
                device_first = devices[0]
            else:
                pv_devs = devices
                device_first = devices
            for j in range(lv_per_vg):
                lv = cls.create(lv_size_percentage, pv_devs)
                logical_volumes.append(lv)
            if lvm_as_core:
                cls.configure_global_filter(device_first, lv_per_vg, pv_devs)
        return logical_volumes
    @classmethod
    def create(
            cls,
            volume_size_or_percent: Union[Size, int],
            devices: ([Device], Device),
            name: str = None
    ):
        """Create an LV of the given Size (bytes) or int (percent of VG) on *devices*.

        Reuses an existing VG built from the same devices when present.
        """
        if isinstance(volume_size_or_percent, Size):
            size_cmd = f"--size {volume_size_or_percent.get_value()}B"
        elif isinstance(volume_size_or_percent, int):
            size_cmd = f"--extents {volume_size_or_percent}%VG"
        else:
            TestRun.LOGGER.error(f"Incorrect type of the first argument (volume_size_or_percent).")
        if not name:
            name = cls.__get_unique_lv_name()
        devices_paths = cls.get_devices_path(devices)
        dev_number = len(devices) if isinstance(devices, list) else 1
        vg = VolumeGroup.is_vg_already_present(dev_number, devices_paths)
        if not vg:
            vg = VolumeGroup.create(devices_paths)
        return cls.__create(name, size_cmd, vg)
    @staticmethod
    def get_devices_path(devices: ([Device], Device)):
        """Return resolved (symlink target) path(s) for a device or list of devices."""
        if isinstance(devices, list):
            return " ".join([Symlink(dev.path).get_target() for dev in devices])
        else:
            return Symlink(devices.path).get_target()
    @classmethod
    def discover_logical_volumes(cls):
        """Return Lvm objects for every LV in every VG, activating inactive LVs."""
        vol_groups = VolumeGroup.get_all_volume_groups()
        volumes = []
        for vg in vol_groups:
            lv_discovered = VolumeGroup.get_logical_volumes_path(vg)
            if lv_discovered:
                for lv_path in lv_discovered:
                    cls.make_sure_lv_is_active(lv_path)
                    lv_name = lv_path.split('/')[-1]
                    volumes.append(
                        cls(
                            readlink(lv_path),
                            VolumeGroup(vg),
                            lv_name
                        )
                    )
            else:
                TestRun.LOGGER.info(f"No LVMs present in the system.")
        return volumes
    @classmethod
    def discover(cls):
        """Log and delegate to discover_logical_volumes()."""
        TestRun.LOGGER.info("Discover LVMs in system...")
        return cls.discover_logical_volumes()
    @staticmethod
    def remove(lv_name: str, vg_name: str):
        """Force-remove a single LV; returns the command output.

        :raises ValueError: when either name is missing.
        """
        if not lv_name:
            raise ValueError("LV name needed for LV remove operation.")
        if not vg_name:
            raise ValueError("VG name needed for LV remove operation.")
        cmd = f"lvremove -f {vg_name}/{lv_name}"
        return TestRun.executor.run(cmd)
    @staticmethod
    def remove_pv(pv_name: str):
        """Remove a single physical volume; returns the command output."""
        if not pv_name:
            raise ValueError("Name needed for PV remove operation.")
        cmd = f"pvremove {pv_name}"
        return TestRun.executor.run(cmd)
    @classmethod
    def remove_all(cls):
        """Tear down every LV, then every VG (after deactivation), then every PV."""
        cmd = f"lvdisplay | grep 'LV Path' | awk '{{print $3}}'"
        lvm_paths = TestRun.executor.run(cmd).stdout.splitlines()
        for lvm_path in lvm_paths:
            lv_name = lvm_path.split('/')[-1]
            vg_name = lvm_path.split('/')[-2]
            cls.remove(lv_name, vg_name)
        cmd = f"vgdisplay | grep 'VG Name' | awk '{{print $3}}'"
        vg_names = TestRun.executor.run(cmd).stdout.splitlines()
        for vg_name in vg_names:
            # deactivate before removal
            TestRun.executor.run(f"vgchange -an {vg_name}")
            VolumeGroup.remove(vg_name)
        cmd = f"pvdisplay | grep 'PV Name' | awk '{{print $3}}'"
        pv_names = TestRun.executor.run(cmd).stdout.splitlines()
        for pv_name in pv_names:
            cls.remove_pv(pv_name)
        TestRun.LOGGER.info(f"Successfully removed all LVMs.")
    @staticmethod
    def make_sure_lv_is_active(lv_path: str):
        """Activate *lv_path* via `lvchange -ay` if lvscan reports it inactive."""
        cmd = f"lvscan"
        output_lines = TestRun.executor.run_expect_success(cmd).stdout.splitlines()
        for line in output_lines:
            if "inactive " in line and lv_path in line:
                cmd = f"lvchange -ay {lv_path}"
                TestRun.executor.run_expect_success(cmd)

View File

@ -0,0 +1,22 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from storage_devices.device import Device
from test_tools import disk_utils
from test_utils.size import Size
class Partition(Device):
    """Represents a single partition on a parent block device."""
    def __init__(self, parent_dev, type, number, begin: Size, end: Size):
        # partition path is derived from the parent device path + number
        Device.__init__(self, disk_utils.get_partition_path(parent_dev.path, number))
        self.number = number          # partition number on the parent device
        self.parent_device = parent_dev
        self.type = type              # partition type (e.g. primary/extended)
        self.begin = begin            # start offset as Size
        self.end = end                # end offset as Size
    def __str__(self):
        return f"\tsystem path: {self.path}, size: {self.size}, type: {self.type}, " \
               f"parent device: {self.parent_device.path}\n"

182
storage_devices/raid.py Normal file
View File

@ -0,0 +1,182 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import threading
from enum import IntEnum, Enum
from core.test_run import TestRun
from storage_devices.device import Device
from storage_devices.disk import Disk
from test_tools.fs_utils import readlink
from test_tools.mdadm import Mdadm
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.size import Size, Unit
def get_devices_paths_string(devices: [Device]):
    """Return the paths of *devices* joined by single spaces."""
    paths = (device.path for device in devices)
    return " ".join(paths)
class Level(IntEnum):
    """RAID levels accepted by mdadm; member value equals the numeric md level."""
    Raid0 = 0
    Raid1 = 1
    Raid4 = 4
    Raid5 = 5
    Raid6 = 6
    Raid10 = 10
class StripSize(IntEnum):
    """RAID strip (chunk) sizes; member value is the size in KiB."""
    Strip4K = 4
    Strip8K = 8
    Strip16K = 16
    Strip32K = 32
    Strip64K = 64
    Strip128K = 128
    Strip256K = 256
    Strip1M = 1024
class MetadataVariant(Enum):
    """RAID metadata format: native mdadm ('legacy') or Intel Matrix ('imsm')."""
    Legacy = "legacy"
    Imsm = "imsm"
class RaidConfiguration:
    """Plain data holder describing a RAID array to be created via mdadm."""
    def __init__(
        self,
        level: Level = None,
        metadata: MetadataVariant = MetadataVariant.Imsm,
        number_of_devices: int = 0,
        size: Size = None,
        strip_size: StripSize = None,
        name: str = None,
    ):
        self.level = level                          # RAID level; None = unset
        self.metadata = metadata                    # metadata format variant
        self.number_of_devices = number_of_devices  # 0 = use all given devices
        self.size = size                            # optional explicit array size
        self.strip_size = strip_size                # optional chunk size
        self.name = name                            # optional md array name
class Raid(Disk):
    """Represents an mdadm RAID array (optionally inside an IMSM container)."""
    # class-wide counter + lock used to generate unique array names
    __unique_id = 0
    __lock = threading.Lock()
    def __init__(
        self,
        path: str,
        level: Level,
        uuid: str,
        container_uuid: str = None,
        container_path: str = None,
        metadata: MetadataVariant = MetadataVariant.Imsm,
        # NOTE(review): mutable default arguments ([]) are shared across calls;
        # they are not mutated here, but replacing with None defaults would be safer.
        array_devices: [Device] = [],
        volume_devices: [Device] = [],
    ):
        Device.__init__(self, resolve_to_by_id_link(path.replace("/dev/", "")))
        self.device_name = path.split('/')[-1]
        self.level = level
        self.uuid = uuid
        self.container_uuid = container_uuid
        self.container_path = container_path
        self.metadata = metadata
        # when no container device list is given, the volume devices form the array
        self.array_devices = array_devices if array_devices else volume_devices.copy()
        self.volume_devices = volume_devices
        self.partitions = []
        self.__block_size = None  # lazily read from sysfs (see block_size)
    def __eq__(self, other):
        # arrays are identified by their mdadm UUID
        try:
            return self.uuid == other.uuid
        except AttributeError:
            return False
    @property
    def block_size(self):
        """Logical block size of the array, read once from sysfs and cached."""
        if not self.__block_size:
            self.__block_size = Unit(int(self.get_sysfs_property("logical_block_size")))
        return self.__block_size
    def stop(self):
        """Stop the array and, if present, its enclosing container."""
        Mdadm.stop(self.path)
        if self.container_path:
            Mdadm.stop(self.container_path)
    @classmethod
    def discover(cls):
        """Return Raid objects for every array reported by `mdadm --examine`."""
        TestRun.LOGGER.info("Discover RAIDs in system...")
        raids = []
        for raid in Mdadm.examine_result():
            raids.append(
                cls(
                    raid["path"],
                    Level[raid["level"]],
                    raid["uuid"],
                    raid["container"]["uuid"] if "container" in raid else None,
                    raid["container"]["path"] if "container" in raid else None,
                    MetadataVariant(raid["metadata"]),
                    [Device(d) for d in raid["array_devices"]],
                    [Device(d) for d in raid["devices"]]
                )
            )
        return raids
    @classmethod
    def create(
        cls,
        raid_configuration: RaidConfiguration,
        devices: [Device]
    ):
        """Create a RAID array (and IMSM container when needed) and return it.

        :raises ValueError: when fewer devices are supplied than the
                            configuration requires.
        """
        import copy
        # deep-copy so the caller's configuration object is not mutated
        raid_conf = copy.deepcopy(raid_configuration)
        if not raid_conf.number_of_devices:
            raid_conf.number_of_devices = len(devices)
        elif len(devices) < raid_conf.number_of_devices:
            raise ValueError("RAID configuration requires at least "
                             f"{raid_conf.number_of_devices} devices")
        md_dir_path = "/dev/md/"
        array_devices = devices
        volume_devices = devices[:raid_conf.number_of_devices]
        if raid_conf.metadata != MetadataVariant.Legacy:
            # non-legacy metadata needs a container array created first
            container_conf = RaidConfiguration(
                name=cls.__get_unique_name(raid_conf.metadata.value),
                metadata=raid_conf.metadata,
                number_of_devices=len(array_devices)
            )
            Mdadm.create(container_conf, get_devices_paths_string(array_devices))
        if not raid_conf.name:
            raid_conf.name = cls.__get_unique_name()
        Mdadm.create(raid_conf, get_devices_paths_string(volume_devices))
        raid_link = md_dir_path + raid_conf.name
        # match the created array by resolving both symlinks to the same target
        raid = [r for r in Mdadm.examine_result() if readlink(r["path"]) == readlink(raid_link)][0]
        return cls(
            raid["path"],
            raid_conf.level,
            raid["uuid"],
            raid["container"]["uuid"] if "container" in raid else None,
            raid["container"]["path"] if "container" in raid else None,
            raid_conf.metadata,
            array_devices,
            volume_devices
        )
    @staticmethod
    def remove_all():
        """Stop every md array in the system."""
        Mdadm.stop()
    @classmethod
    def __get_unique_name(cls, prefix: str = "Raid"):
        """Return a process-unique array name of the form '<prefix><n>'."""
        with cls.__lock:
            cls.__unique_id += 1
        return f"{prefix}{cls.__unique_id}"

View File

@ -0,0 +1,80 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools import disk_utils
from test_tools.fs_utils import ls, parse_ls_output
from test_utils.filesystem.symlink import Symlink
from test_utils.os_utils import reload_kernel_module, unload_kernel_module, is_kernel_module_loaded
from test_utils.size import Size, Unit
class RamDisk(Device):
    """Manages /dev/ram* devices backed by the 'brd' kernel module."""
    _module = "brd"  # kernel module providing RAM block devices
    @classmethod
    def create(cls, disk_size: Size, disk_count: int = 1):
        """(Re)load brd to provide *disk_count* RAM disks of *disk_size* each.

        :raises ValueError: when disk_count < 1.
        :raises EnvironmentError: when the module load did not yield the
                                  requested configuration.
        """
        if disk_count < 1:
            raise ValueError("Wrong number of RAM disks requested")
        TestRun.LOGGER.info("Configure RAM disks...")
        params = {
            "rd_size": int(disk_size.get_value(Unit.KiB)),  # brd expects KiB
            "rd_nr": disk_count
        }
        reload_kernel_module(cls._module, params)
        if not cls._is_configured(disk_size, disk_count):
            raise EnvironmentError(f"Wrong RAM disk configuration after loading '{cls._module}' "
                                   "module")
        return cls.list()
    @classmethod
    def remove_all(cls):
        """Unmount all RAM disks, drop their by-id symlinks and unload brd."""
        if not is_kernel_module_loaded(cls._module):
            return
        for ram_disk in cls._list_devices():
            # best-effort unmount; failure is ignored by run()
            TestRun.executor.run(f"umount {ram_disk.full_path}")
            link_path = posixpath.join("/dev/disk/by-id", ram_disk.name)
            try:
                link = Symlink.get_symlink(link_path=link_path, target=ram_disk.full_path)
                link.remove(force=True)
            except FileNotFoundError:
                # symlink was never created for this disk - nothing to remove
                pass
        TestRun.LOGGER.info("Removing RAM disks...")
        unload_kernel_module(cls._module)
    @classmethod
    def list(cls):
        """Return RamDisk objects (via by-id symlinks, created on demand)."""
        ram_disks = []
        for ram_disk in cls._list_devices():
            link_path = posixpath.join("/dev/disk/by-id", ram_disk.name)
            link = Symlink.get_symlink(
                link_path=link_path, target=ram_disk.full_path, create=True
            )
            ram_disks.append(cls(link.full_path))
        return ram_disks
    @classmethod
    def _is_configured(cls, disk_size: Size, disk_count: int):
        """Check that enough RAM disks exist and match the size (MiB-aligned)."""
        ram_disks = cls._list_devices()
        return (
            len(ram_disks) >= disk_count
            and Size(disk_utils.get_size(ram_disks[0].name), Unit.Byte).align_down(Unit.MiB.value)
            == disk_size.align_down(Unit.MiB.value)
        )
    @staticmethod
    def _list_devices():
        """Return parsed `ls /dev/ram*` entries, or [] when none exist."""
        ls_ram_disks = ls("/dev/ram*")
        if "No such file or directory" in ls_ram_disks:
            return []
        return parse_ls_output(ls_ram_disks)

0
test_tools/__init__.py Normal file
View File

225
test_tools/blktrace.py Normal file
View File

@ -0,0 +1,225 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import math
from aenum import IntFlag, Enum
from datetime import timedelta
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.filesystem.directory import Directory
from test_utils.os_utils import is_mounted, drop_caches, DropCachesMode
from test_utils.size import Size, Unit
DEBUGFS_MOUNT_POINT = "/sys/kernel/debug"
PREFIX = "trace_"
HEADER_FORMAT = "%a|%C|%d|%e|%n|%N|%S|%5T.%9t\\n"
class BlkTraceMask(IntFlag):
    """Event-filter masks passed to blktrace via '-a <name>' options.

    Values mirror the blktrace filter bit layout (one bit per event class).
    """
    read = 0x0001
    write = 0x0002
    flush = 0x0004
    sync = 0x0008
    queue = 0x0010
    requeue = 0x0020
    issue = 0x0040
    complete = 0x0080
    fs = 0x0100
    pc = 0x0200
    notify = 0x0400
    ahead = 0x0800
    meta = 0x1000
    discard = 0x2000
    drv_data = 0x4000
    fua = 0x8000
class ActionKind(Enum):
    """blkparse action identifiers (the '%a' field of a parsed trace line)."""
    IoDeviceRemap = "A"
    IoBounce = "B"
    IoCompletion = "C"
    IoToDriver = "D"
    IoFrontMerge = "F"
    GetRequest = "G"
    IoInsert = "I"
    IoMerge = "M"
    PlugRequest = "P"
    IoHandled = "Q"
    RequeueRequest = "R"
    SleepRequest = "S"
    TimeoutUnplug = "T"  # old version of TimerUnplug
    UnplugRequest = "U"
    TimerUnplug = "UT"
    Split = "X"
class RwbsKind(IntFlag):
    """Decoded RWBS field of a blktrace header (one bit per RWBS letter)."""
    Undefined = 0
    R = 1  # Read
    W = 1 << 1  # Write
    D = 1 << 2  # Discard
    F = 1 << 3  # Flush
    S = 1 << 4  # Synchronous
    M = 1 << 5  # Metadata
    A = 1 << 6  # Read Ahead
    N = 1 << 7  # None of the above
    def __str__(self):
        # human-readable labels, emitted in fixed R..N order, joined by '|'
        labels = (
            (RwbsKind.R, "read"),
            (RwbsKind.W, "write"),
            (RwbsKind.D, "discard"),
            (RwbsKind.F, "flush"),
            (RwbsKind.S, "sync"),
            (RwbsKind.M, "metadata"),
            (RwbsKind.A, "readahead"),
            (RwbsKind.N, "none"),
        )
        return "|".join(name for flag, name in labels if self & flag)
class BlkTrace:
    """Runs blktrace on a device in the background and parses results via blkparse."""
    def __init__(self, device: Device, *masks: BlkTraceMask):
        """:param device: device to trace (required)
        :param masks: optional event filters, passed as '-a <name>' options
        :raises Exception: when device is None
        """
        self._mount_debugfs()
        if device is None:
            raise Exception("Device not provided")
        self.device = device
        self.masks = "" if not masks else f' -a {" -a ".join([m.name for m in masks])}'
        self.blktrace_pid = -1  # -1 means "not running"
        self.__outputDirectoryPath = None
    @staticmethod
    def _mount_debugfs():
        # blktrace requires debugfs; mount it if it is not already mounted
        if not is_mounted(DEBUGFS_MOUNT_POINT):
            TestRun.executor.run_expect_success(f"mount -t debugfs none {DEBUGFS_MOUNT_POINT}")
    def start_monitoring(self, buffer_size: Size = None, number_of_subbuffers: int = None):
        """Start blktrace in the background, recording its PID.

        :raises Exception: when monitoring is already running.
        """
        if self.blktrace_pid != -1:
            raise Exception(f"blktrace already running with PID: {self.blktrace_pid}")
        self.__outputDirectoryPath = Directory.create_temp_directory().full_path
        # flush page cache so traced I/O reflects the workload, not cached data
        drop_caches(DropCachesMode.ALL)
        number_of_subbuffers = ("" if number_of_subbuffers is None
                                else f" --num-sub-buffers={number_of_subbuffers}")
        buffer_size = ("" if buffer_size is None
                       else f" --buffer-size={buffer_size.get_value(Unit.KibiByte)}")
        command = (f"blktrace{number_of_subbuffers}{buffer_size} --dev={self.device.path}"
                   f"{self.masks} --output={PREFIX} --output-dir={self.__outputDirectoryPath}")
        # run detached; 'echo $!' returns the background process PID
        echo_output = TestRun.executor.run_expect_success(
            f"nohup {command} </dev/null &>{self.__outputDirectoryPath}/out & echo $!"
        )
        self.blktrace_pid = int(echo_output.stdout)
        TestRun.LOGGER.info(f"blktrace monitoring for device {self.device.path} started"
                            f" (PID: {self.blktrace_pid}, output dir: {self.__outputDirectoryPath}")
    def stop_monitoring(self):
        """Stop blktrace (SIGINT) and return the parsed, timestamp-sorted headers.

        :raises Exception: when monitoring was never started.
        """
        if self.blktrace_pid == -1:
            raise Exception("PID for blktrace is not set - has monitoring been started?")
        drop_caches(DropCachesMode.ALL)
        # SIGINT makes blktrace flush and close its trace files cleanly
        TestRun.executor.run_expect_success(f"kill -s SIGINT {self.blktrace_pid}")
        self.blktrace_pid = -1
        # dummy command for swallowing output of killed command
        TestRun.executor.run("sleep 2 && echo dummy")
        TestRun.LOGGER.info(f"blktrace monitoring for device {self.device.path} stopped")
        return self.__parse_blktrace_output()
    def __parse_blktrace_output(self):
        """Run blkparse with HEADER_FORMAT and parse each line into a Header."""
        TestRun.LOGGER.info(f"Parsing blktrace headers from {self.__outputDirectoryPath}... "
                            "Be patient")
        command = (f'blkparse --input-dir={self.__outputDirectoryPath} --input={PREFIX} '
                   f'--format="{HEADER_FORMAT}"')
        blkparse_output = TestRun.executor.run_expect_success(
            command, timeout=timedelta(minutes=60)
        )
        parsed_headers = []
        for line in blkparse_output.stdout.splitlines():
            # At the end per-cpu summary is posted - there is no need for it now
            if line.startswith('CPU'):
                break
            header = Header.parse(line)
            if header is None:
                continue
            parsed_headers.append(header)
        TestRun.LOGGER.info(
            f"Parsed {len(parsed_headers)} blktrace headers from {self.__outputDirectoryPath}"
        )
        parsed_headers.sort(key=lambda x: x.timestamp)
        return parsed_headers
class Header:
    """One parsed blkparse output line (fields per HEADER_FORMAT)."""
    def __init__(self):
        self.action = None                # ActionKind or None
        self.block_count = None           # number of blocks (%n)
        self.byte_count = None            # number of bytes (%N)
        self.command = None               # issuing command name (%C)
        self.error_value = None           # completion error value (%e)
        self.rwbs = RwbsKind.Undefined    # decoded RWBS flags (%d)
        self.sector_number = None         # starting sector (%S)
        self.timestamp = None             # event time in nanoseconds
    @staticmethod
    def parse(header_line: str):
        """Parse one blkparse line into a Header; return None for non-data lines.

        Expects exactly 8 '|'-separated fields matching HEADER_FORMAT.
        """
        # messages/notifies are not formatted according to --format
        # so should be ignored (or parsed using standard format):
        if "m N" in header_line:
            return None
        header_fields = header_line.split('|')
        if len(header_fields) != 8:
            return None
        # last field is "<seconds>.<nanoseconds>"
        timestamp_fields = header_fields[7].split('.')
        timestamp_nano = int(timestamp_fields[-1]) if len(timestamp_fields) == 2 else 0
        header = Header()
        header.action = ActionKind(header_fields[0])
        header.command = header_fields[1]
        if len(header_fields[2]):
            # e.g. "WS" -> RwbsKind['W|S'] (composite flag lookup by name)
            header.rwbs = RwbsKind['|'.join(list(header_fields[2]))]
        header.error_value = int(header_fields[3])
        header.block_count = int(header_fields[4])
        header.byte_count = int(header_fields[5])
        header.sector_number = int(header_fields[6])
        # combine seconds and nanoseconds into a single nanosecond value
        header.timestamp = int(timestamp_fields[0]) * math.pow(10, 9) + timestamp_nano
        return header
    def __str__(self):
        # only truthy fields are included in the rendered summary
        ret = []
        if self.action:
            ret.append(f"action: {self.action.name}")
        if self.block_count:
            ret.append(f"block_count: {self.block_count}")
        if self.byte_count:
            ret.append(f"byte_count: {self.byte_count}")
        if self.command:
            ret.append(f"command: {self.command}")
        if self.error_value:
            ret.append(f"error_value: {self.error_value}")
        if self.rwbs:
            ret.append(f"rwbs: {self.rwbs}")
        if self.sector_number:
            ret.append(f"sector_number: {self.sector_number}")
        if self.timestamp:
            ret.append(f"timestamp: {self.timestamp}")
        return " ".join(ret)

882
test_tools/checksec.sh Normal file
View File

@ -0,0 +1,882 @@
#!/bin/bash
#
# The BSD License (http://www.opensource.org/licenses/bsd-license.php)
# specifies the terms and conditions of use for checksec.sh:
#
# Copyright (c) 2009-2011, Tobias Klein.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Tobias Klein nor the name of trapkit.de may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# Name : checksec.sh
# Version : 1.5
# Author : Tobias Klein
# Date : November 2011
# Download: http://www.trapkit.de/tools/checksec.html
# Changes : http://www.trapkit.de/tools/checksec_changes.txt
#
# Description:
#
# Modern Linux distributions offer some mitigation techniques to make it
# harder to exploit software vulnerabilities reliably. Mitigations such
# as RELRO, NoExecute (NX), Stack Canaries, Address Space Layout
# Randomization (ASLR) and Position Independent Executables (PIE) have
# made reliably exploiting any vulnerabilities that do exist far more
# challenging. The checksec.sh script is designed to test what *standard*
# Linux OS and PaX (http://pax.grsecurity.net/) security features are being
# used.
#
# As of version 1.3 the script also lists the status of various Linux kernel
# protection mechanisms.
#
# Credits:
#
# Thanks to Brad Spengler (grsecurity.net) for the PaX support.
# Thanks to Jon Oberheide (jon.oberheide.org) for the kernel support.
# Thanks to Ollie Whitehouse (Research In Motion) for rpath/runpath support.
#
# Others that contributed to checksec.sh (in no particular order):
#
# Simon Ruderich, Denis Scherbakov, Stefan Kuttler, Radoslaw Madej,
# Anthony G. Basile, Martin Vaeth and Brian Davis.
#
# global vars
have_readelf=1   # assumed available until a check flips it - TODO confirm usage
verbose=false
# FORTIFY_SOURCE vars
FS_end=_chk      # suffix marking fortified libc functions (e.g. __strcpy_chk)
FS_cnt_total=0
FS_cnt_checked=0
FS_cnt_unchecked=0
FS_chk_func_libc=0
FS_functions=0
FS_libc=0
# version information
# version information
version() {
  # one line of version info followed by a blank line
  printf '%s\n\n' "checksec v1.5, Tobias Klein, www.trapkit.de, November 2011"
}
# help
# help
# Print usage information for all supported command-line options.
help() {
  echo "Usage: checksec [OPTION]"
  echo
  echo "Options:"
  echo
  echo " --file <executable-file>"
  echo " --dir <directory> [-v]"
  echo " --proc <process name>"
  echo " --proc-all"
  echo " --proc-libs <process ID>"
  echo " --kernel"
  echo " --fortify-file <executable-file>"
  echo " --fortify-proc <process ID>"
  echo " --version"
  echo " --help"
  echo
  echo "For more information, see:"
  echo " http://www.trapkit.de/tools/checksec.html"
  echo
}
# check if command exists
# check if command exists
# Returns 0 when $1 resolves to a command, alias, function or builtin.
command_exists () {
  # Fix: quote "$1" so names containing spaces or glob characters
  # are passed to 'type' as a single word.
  type "$1" > /dev/null 2>&1;
}
# check if directory exists
# Return 0 if $1 exists and is a directory, 1 otherwise.
dir_exists () {
# Fix: quote "$1" — unquoted, an empty argument made '[ -d ]' a one-arg
# test on the literal string "-d", which is always true.
if [ -d "$1" ] ; then
return 0
else
return 1
fi
}
# check user privileges
# Return 0 when running as root (effective UID 0), 1 otherwise.
root_privs () {
# Fix: quote the command substitution so an unexpected empty result does
# not collapse the test into a syntax error.
if [ "$(/usr/bin/id -u)" -eq 0 ] ; then
return 0
else
return 1
fi
}
# check if input is numeric
# Succeed (exit 0) when the arguments contain no non-digit characters.
isNumeric () {
printf '%s\n' "$*" | grep -qv '[^0-9]'
}
# check if input is a string
# Succeed (exit 0) when the arguments contain only ASCII letters.
isString () {
printf '%s\n' "$*" | grep -qv '[^A-Za-z]'
}
# check file(s)
# filecheck: print one colourised, column-aligned status line for the ELF
# file given as $1: RELRO, stack canary, NX, PIE, RPATH, RUNPATH.
# Relies on readelf; the trailing padding inside each string keeps the
# columns of the table header aligned.
filecheck() {
# check for RELRO support
if readelf -l $1 2>/dev/null | grep -q 'GNU_RELRO'; then
if readelf -d $1 2>/dev/null | grep -q 'BIND_NOW'; then
echo -n -e '\033[32mFull RELRO \033[m '
else
echo -n -e '\033[33mPartial RELRO\033[m '
fi
else
echo -n -e '\033[31mNo RELRO \033[m '
fi
# check for stack canary support
if readelf -s $1 2>/dev/null | grep -q '__stack_chk_fail'; then
echo -n -e '\033[32mCanary found \033[m '
else
echo -n -e '\033[31mNo canary found\033[m '
fi
# check for NX support (an RWE GNU_STACK segment means NX is off)
if readelf -W -l $1 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
echo -n -e '\033[31mNX disabled\033[m '
else
echo -n -e '\033[32mNX enabled \033[m '
fi
# check for PIE support: EXEC = no PIE; DYN with a (DEBUG) dynamic tag is
# a PIE executable, DYN without it is a plain shared object (DSO)
if readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
echo -n -e '\033[31mNo PIE \033[m '
elif readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
if readelf -d $1 2>/dev/null | grep -q '(DEBUG)'; then
echo -n -e '\033[32mPIE enabled \033[m '
else
echo -n -e '\033[33mDSO \033[m '
fi
else
echo -n -e '\033[33mNot an ELF file\033[m '
fi
# check for rpath / run path (either is flagged red as a risk)
if readelf -d $1 2>/dev/null | grep -q 'rpath'; then
echo -n -e '\033[31mRPATH \033[m '
else
echo -n -e '\033[32mNo RPATH \033[m '
fi
if readelf -d $1 2>/dev/null | grep -q 'runpath'; then
echo -n -e '\033[31mRUNPATH \033[m '
else
echo -n -e '\033[32mNo RUNPATH \033[m '
fi
}
# check process(es)
# proccheck: print the RELRO / canary / NX-or-PaX / PIE columns for a
# running process. $1 is the process directory relative to /proc (the PID);
# the binary is inspected via $1/exe. Exits 1 when readelf cannot read the
# program headers (usually a permissions problem).
proccheck() {
# check for RELRO support
if readelf -l $1/exe 2>/dev/null | grep -q 'Program Headers'; then
if readelf -l $1/exe 2>/dev/null | grep -q 'GNU_RELRO'; then
if readelf -d $1/exe 2>/dev/null | grep -q 'BIND_NOW'; then
echo -n -e '\033[32mFull RELRO \033[m '
else
echo -n -e '\033[33mPartial RELRO \033[m '
fi
else
echo -n -e '\033[31mNo RELRO \033[m '
fi
else
echo -n -e '\033[31mPermission denied (please run as root)\033[m\n'
exit 1
fi
# check for stack canary support
if readelf -s $1/exe 2>/dev/null | grep -q 'Symbol table'; then
if readelf -s $1/exe 2>/dev/null | grep -q '__stack_chk_fail'; then
echo -n -e '\033[32mCanary found \033[m '
else
echo -n -e '\033[31mNo canary found \033[m '
fi
else
# PID 1 is readable but may be stripped; anything else is permissions
if [ "$1" != "1" ] ; then
echo -n -e '\033[33mPermission denied \033[m '
else
echo -n -e '\033[33mNo symbol table found\033[m '
fi
fi
# first check for PaX support
# NOTE(review): the PaX flags are parsed positionally (cut -b6/-b8/-b9/-b10)
# from the 'PaX:' line of /proc/<pid>/status — verify against the PaX format
# before changing.
if cat $1/status 2> /dev/null | grep -q 'PaX:'; then
pageexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b6) )
segmexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b10) )
mprotect=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b8) )
randmmap=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b9) )
if [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[32mPaX enabled\033[m '
elif [[ "$pageexec" = "p" && "$segmexec" = "s" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[33mPaX ASLR only\033[m '
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[33mPaX mprot off \033[m'
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "r" ]] ; then
echo -n -e '\033[33mPaX ASLR off\033[m '
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "r" ]] ; then
echo -n -e '\033[33mPaX NX only\033[m '
else
echo -n -e '\033[31mPaX disabled\033[m '
fi
# fallback check for NX support
elif readelf -W -l $1/exe 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
echo -n -e '\033[31mNX disabled\033[m '
else
echo -n -e '\033[32mNX enabled \033[m '
fi
# check for PIE support (same EXEC/DYN/(DEBUG) logic as filecheck)
if readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
echo -n -e '\033[31mNo PIE \033[m '
elif readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
if readelf -d $1/exe 2>/dev/null | grep -q '(DEBUG)'; then
echo -n -e '\033[32mPIE enabled \033[m '
else
echo -n -e '\033[33mDynamic Shared Object\033[m '
fi
else
echo -n -e '\033[33mNot an ELF file \033[m '
fi
}
# check mapped libraries
# libcheck: run filecheck on every distinct ELF file mapped by process $1
# (PID). The library paths are taken from column 6 of /proc/<pid>/maps.
libcheck() {
libs=( $(awk '{ print $6 }' /proc/$1/maps | grep '/' | sort -u | xargs file | grep ELF | awk '{ print $1 }' | sed 's/:/ /') )
printf "\n* Loaded libraries (file information, # of mapped files: ${#libs[@]}):\n\n"
for element in $(seq 0 $((${#libs[@]} - 1)))
do
echo " ${libs[$element]}:"
echo -n " "
filecheck ${libs[$element]}
printf "\n\n"
done
}
# check for system-wide ASLR support
# aslrcheck: report system-wide ASLR status. Prefers the PaX flags of PID 1
# when available, otherwise falls back to the standard
# kernel.randomize_va_space sysctl.
aslrcheck() {
# PaX ASLR support
if !(cat /proc/1/status 2> /dev/null | grep -q 'Name:') ; then
echo -n -e ':\033[33m insufficient privileges for PaX ASLR checks\033[m\n'
echo -n -e ' Fallback to standard Linux ASLR check'
fi
if cat /proc/1/status 2> /dev/null | grep -q 'PaX:'; then
printf ": "
if cat /proc/1/status 2> /dev/null | grep 'PaX:' | grep -q 'R'; then
echo -n -e '\033[32mPaX ASLR enabled\033[m\n\n'
else
echo -n -e '\033[31mPaX ASLR disabled\033[m\n\n'
fi
else
# standard Linux 'kernel.randomize_va_space' ASLR support
# (see the kernel file 'Documentation/sysctl/kernel.txt' for a detailed description)
printf " (kernel.randomize_va_space): "
if /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 1'; then
echo -n -e '\033[33mOn (Setting: 1)\033[m\n\n'
printf " Description - Make the addresses of mmap base, stack and VDSO page randomized.\n"
printf " This, among other things, implies that shared libraries will be loaded to \n"
printf " random addresses. Also for PIE-linked binaries, the location of code start\n"
printf " is randomized. Heap addresses are *not* randomized.\n\n"
elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 2'; then
echo -n -e '\033[32mOn (Setting: 2)\033[m\n\n'
printf " Description - Make the addresses of mmap base, heap, stack and VDSO page randomized.\n"
printf " This, among other things, implies that shared libraries will be loaded to random \n"
printf " addresses. Also for PIE-linked binaries, the location of code start is randomized.\n\n"
elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 0'; then
echo -n -e '\033[31mOff (Setting: 0)\033[m\n'
else
echo -n -e '\033[31mNot supported\033[m\n'
fi
printf " See the kernel file 'Documentation/sysctl/kernel.txt' for more details.\n\n"
fi
}
# check cpu nx flag
# Report whether the CPU advertises the NX flag in /proc/cpuinfo.
nxcheck() {
if ! grep -q nx /proc/cpuinfo; then
echo -n -e '\033[31mNo\033[m\n\n'
else
echo -n -e '\033[32mYes\033[m\n\n'
fi
}
# check for kernel protection mechanisms
# kernelcheck: report the status of kernel self-protection options by
# grepping a kernel config (preferring /proc/config.gz, then
# /boot/config-$(uname -r), then $KBUILD_OUTPUT/.config). Exits 0 when no
# config can be located. Also reports grsecurity/PaX and KERNHEAP status.
kernelcheck() {
printf " Description - List the status of kernel protection mechanisms. Rather than\n"
printf " inspect kernel mechanisms that may aid in the prevention of exploitation of\n"
printf " userspace processes, this option lists the status of kernel configuration\n"
printf " options that harden the kernel itself against attack.\n\n"
printf " Kernel config: "
# locate a readable kernel config; $kconfig becomes the command that dumps it
if [ -f /proc/config.gz ] ; then
kconfig="zcat /proc/config.gz"
printf "\033[32m/proc/config.gz\033[m\n\n"
elif [ -f /boot/config-`uname -r` ] ; then
kconfig="cat /boot/config-`uname -r`"
printf "\033[33m/boot/config-`uname -r`\033[m\n\n"
printf " Warning: The config on disk may not represent running kernel config!\n\n";
elif [ -f "${KBUILD_OUTPUT:-/usr/src/linux}"/.config ] ; then
kconfig="cat ${KBUILD_OUTPUT:-/usr/src/linux}/.config"
printf "\033[33m%s\033[m\n\n" "${KBUILD_OUTPUT:-/usr/src/linux}/.config"
printf " Warning: The config on disk may not represent running kernel config!\n\n";
else
printf "\033[31mNOT FOUND\033[m\n\n"
exit 0
fi
printf " GCC stack protector support: "
if $kconfig | grep -qi 'CONFIG_CC_STACKPROTECTOR=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Strict user copy checks: "
if $kconfig | grep -qi 'CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Enforce read-only kernel data: "
if $kconfig | grep -qi 'CONFIG_DEBUG_RODATA=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Restrict /dev/mem access: "
if $kconfig | grep -qi 'CONFIG_STRICT_DEVMEM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
# note: CONFIG_DEVKMEM=y means /dev/kmem exists, so the meaning is inverted
printf " Restrict /dev/kmem access: "
if $kconfig | grep -qi 'CONFIG_DEVKMEM=y'; then
printf "\033[31mDisabled\033[m\n"
else
printf "\033[32mEnabled\033[m\n"
fi
printf "\n"
printf "* grsecurity / PaX: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC=y'; then
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIGH=y'; then
printf "\033[32mHigh GRKERNSEC\033[m\n\n"
elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_MEDIUM=y'; then
printf "\033[33mMedium GRKERNSEC\033[m\n\n"
elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_LOW=y'; then
printf "\033[31mLow GRKERNSEC\033[m\n\n"
else
printf "\033[33mCustom GRKERNSEC\033[m\n\n"
fi
printf " Non-executable kernel pages: "
if $kconfig | grep -qi 'CONFIG_PAX_KERNEXEC=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Prevent userspace pointer deref: "
if $kconfig | grep -qi 'CONFIG_PAX_MEMORY_UDEREF=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Prevent kobject refcount overflow: "
if $kconfig | grep -qi 'CONFIG_PAX_REFCOUNT=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Bounds check heap object copies: "
if $kconfig | grep -qi 'CONFIG_PAX_USERCOPY=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Disable writing to kmem/mem/port: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_KMEM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Disable privileged I/O: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_IO=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Harden module auto-loading: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_MODHARDEN=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Hide kernel symbols: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIDESYM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
else
printf "\033[31mNo GRKERNSEC\033[m\n\n"
printf " The grsecurity / PaX patchset is available here:\n"
printf " http://grsecurity.net/\n"
fi
printf "\n"
printf "* Kernel Heap Hardening: "
if $kconfig | grep -qi 'CONFIG_KERNHEAP=y'; then
if $kconfig | grep -qi 'CONFIG_KERNHEAP_FULLPOISON=y'; then
printf "\033[32mFull KERNHEAP\033[m\n\n"
else
printf "\033[33mPartial KERNHEAP\033[m\n\n"
fi
else
printf "\033[31mNo KERNHEAP\033[m\n\n"
printf " The KERNHEAP hardening patchset is available here:\n"
printf " https://www.subreption.com/kernheap/\n\n"
fi
}
# --- FORTIFY_SOURCE subfunctions (start) ---
# is FORTIFY_SOURCE supported by libc?
# FS_libc_check: verify that the detected libc exports any __*_chk
# functions. FS_chk_func_libc must already be filled by the caller;
# exits 1 when libc offers no FORTIFY_SOURCE support.
FS_libc_check() {
printf "* FORTIFY_SOURCE support available (libc) : "
if [ "${#FS_chk_func_libc[@]}" != "0" ] ; then
printf "\033[32mYes\033[m\n"
else
printf "\033[31mNo\033[m\n"
exit 1
fi
}
# was the binary compiled with FORTIFY_SOURCE?
# FS_binary_check: succeed if any symbol imported by the target binary
# (FS_functions, filled by the caller) carries the _chk suffix, i.e. the
# binary was built with FORTIFY_SOURCE; exits 1 otherwise.
FS_binary_check() {
printf "* Binary compiled with FORTIFY_SOURCE support: "
for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
do
if [[ ${FS_functions[$FS_elem_functions]} =~ _chk ]] ; then
printf "\033[32mYes\033[m\n"
return
fi
done
printf "\033[31mNo\033[m\n"
exit 1
}
# FS_comparison: cross-reference the binary's imported functions against
# libc's fortifiable function list, printing each match (red = unfortified,
# green = _chk variant used) and updating the FS_cnt_* counters read later
# by FS_summary.
FS_comparison() {
echo
printf " ------ EXECUTABLE-FILE ------- . -------- LIBC --------\n"
printf " FORTIFY-able library functions | Checked function names\n"
printf " -------------------------------------------------------\n"
for FS_elem_libc in $(seq 0 $((${#FS_chk_func_libc[@]} - 1)))
do
for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
do
FS_tmp_func=${FS_functions[$FS_elem_functions]}
FS_tmp_libc=${FS_chk_func_libc[$FS_elem_libc]}
if [[ $FS_tmp_func =~ ^$FS_tmp_libc$ ]] ; then
printf " \033[31m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
let FS_cnt_total++
let FS_cnt_unchecked++
elif [[ $FS_tmp_func =~ ^$FS_tmp_libc(_chk) ]] ; then
printf " \033[32m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
let FS_cnt_total++
let FS_cnt_checked++
fi
done
done
}
# FS_summary: print the totals accumulated by FS_comparison.
FS_summary() {
echo
printf "SUMMARY:\n\n"
printf "* Number of checked functions in libc : ${#FS_chk_func_libc[@]}\n"
printf "* Total number of library functions in the executable: ${#FS_functions[@]}\n"
printf "* Number of FORTIFY-able functions in the executable : %s\n" $FS_cnt_total
printf "* Number of checked functions in the executable : \033[32m%s\033[m\n" $FS_cnt_checked
printf "* Number of unchecked functions in the executable : \033[31m%s\033[m\n" $FS_cnt_unchecked
echo
}
# --- FORTIFY_SOURCE subfunctions (end) ---
# warn once when readelf is missing; most modes exit early on have_readelf=0
if !(command_exists readelf) ; then
printf "\033[31mWarning: 'readelf' not found! It's required for most checks.\033[m\n\n"
have_readelf=0
fi
# parse command-line arguments
case "$1" in
--version)
# print the version banner and exit
version
exit 0
;;
--help)
# print usage and exit
help
exit 0
;;
--dir)
# Check every file in a directory; '-v' as $3 also reports non-ELF files.
if [ "$3" = "-v" ] ; then
verbose=true
fi
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid directory.\033[m\n\n"
exit 1
fi
# remove trailing slashes
tempdir=`echo $2 | sed -e "s/\/*$//"`
if [ ! -d $tempdir ] ; then
printf "\033[31mError: The directory '$tempdir' does not exist.\033[m\n\n"
exit 1
fi
cd $tempdir
printf "RELRO STACK CANARY NX PIE RPATH RUNPATH FILE\n"
for N in [A-Za-z]*; do
# an unexpanded glob means the directory had no matching entries
if [ "$N" != "[A-Za-z]*" ]; then
# read permissions?
if [ ! -r $N ]; then
printf "\033[31mError: No read permissions for '$tempdir/$N' (run as root).\033[m\n"
else
# ELF executable?
out=`file $N`
if [[ ! $out =~ ELF ]] ; then
if [ "$verbose" = "true" ] ; then
printf "\033[34m*** Not an ELF file: $tempdir/"
file $N
printf "\033[m"
fi
else
filecheck $N
# setuid/setgid files are highlighted with a red background
# NOTE(review): this branch prints "$2$N" while the plain branch prints
# "$tempdir/$N" — output differs when $2 carried trailing slashes; verify.
if [ `find $tempdir/$N \( -perm -004000 -o -perm -002000 \) -type f -print` ]; then
printf "\033[37;41m%s%s\033[m" $2 $N
else
printf "%s%s" $tempdir/ $N
fi
echo
fi
fi
fi
done
exit 0
;;
--file)
# Check one executable file and print its hardening status line.
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid file.\033[m\n\n"
exit 1
fi
# does the file exist?
if [ ! -e "$2" ] ; then
printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
exit 1
fi
# read permissions?
if [ ! -r "$2" ] ; then
printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
exit 1
fi
# ELF executable?
out=`file "$2"`
if [[ ! $out =~ ELF ]] ; then
printf "\033[31mError: Not an ELF file: "
file "$2"
printf "\033[m\n"
exit 1
fi
printf "RELRO STACK CANARY NX PIE RPATH RUNPATH FILE\n"
filecheck "$2"
# Highlight setuid/setgid binaries with a red background.
# Fix: the original printed "%s%s" with $2 and $N, but $N is never set in
# this branch (stray copy from the --dir loop); print only the file name.
if [ `find "$2" \( -perm -004000 -o -perm -002000 \) -type f -print` ] ; then
printf "\033[37;41m%s\033[m" "$2"
else
printf "%s" "$2"
fi
echo
exit 0
;;
--proc-all)
# Check every running process that has a readable /proc/<pid>/exe,
# skipping this script's own PID ($$).
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
for N in [1-9]*; do
if [ $N != $$ ] && readlink -q $N/exe > /dev/null; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
proccheck $N
echo
fi
done
if [ ! -e /usr/bin/id ] ; then
printf "\n\033[33mNote: If you are running 'checksec.sh' as an unprivileged user, you\n"
printf " will not see all processes. Please run the script as root.\033[m\n\n"
else
if !(root_privs) ; then
printf "\n\033[33mNote: You are running 'checksec.sh' as an unprivileged user.\n"
# Fix: corrected the "Too see" typo in the user-facing note.
printf " To see all processes, please run the script as root.\033[m\n\n"
fi
fi
exit 0
;;
--proc)
# Check all processes whose command name matches $2 (via ps | grep).
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
exit 1
fi
if !(isString "$2") ; then
printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
for N in `ps -Ao pid,comm | grep $2 | cut -b1-6`; do
if [ -d $N ] ; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
# read permissions?
# NOTE(review): when exe is unreadable this always exits 1, even for
# root after both diagnostics pass — confirm whether that is intended.
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
proccheck $N
echo
fi
done
exit 0
;;
--proc-libs)
# Check one process by PID ($2) and every ELF library it has mapped.
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
if !(isNumeric "$2") ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf "* Process information:\n\n"
printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
N=$2
if [ -d $N ] ; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
proccheck $N
echo
libcheck $N
fi
exit 0
;;
--kernel)
# Report kernel hardening configuration (see kernelcheck above).
cd /proc
printf "* Kernel protection information:\n\n"
kernelcheck
exit 0
;;
--fortify-file)
# Compare the fortifiable functions used by file $2 against the __*_chk
# symbols exported by the system libc.
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid file.\033[m\n\n"
exit 1
fi
# does the file exist?
if [ ! -e $2 ] ; then
printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
exit 1
fi
# read permissions?
if [ ! -r $2 ] ; then
printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
exit 1
fi
# ELF executable?
out=`file $2`
if [[ ! $out =~ ELF ]] ; then
printf "\033[31mError: Not an ELF file: "
file $2
printf "\033[m\n"
exit 1
fi
# locate libc in the usual multiarch locations
if [ -e /lib/libc.so.6 ] ; then
FS_libc=/lib/libc.so.6
elif [ -e /lib64/libc.so.6 ] ; then
FS_libc=/lib64/libc.so.6
elif [ -e /lib/i386-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/i386-linux-gnu/libc.so.6
elif [ -e /lib/x86_64-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/x86_64-linux-gnu/libc.so.6
else
printf "\033[31mError: libc not found.\033[m\n\n"
exit 1
fi
FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
FS_functions=( $(readelf -s $2 | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
FS_libc_check
FS_binary_check
FS_comparison
FS_summary
exit 0
;;
--fortify-proc)
# Same as --fortify-file, but for the binary behind running PID $2.
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
if !(isNumeric "$2") ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
cd /proc
N=$2
if [ -d $N ] ; then
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
# locate libc in the usual multiarch locations
if [ -e /lib/libc.so.6 ] ; then
FS_libc=/lib/libc.so.6
elif [ -e /lib64/libc.so.6 ] ; then
FS_libc=/lib64/libc.so.6
elif [ -e /lib/i386-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/i386-linux-gnu/libc.so.6
elif [ -e /lib/x86_64-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/x86_64-linux-gnu/libc.so.6
else
printf "\033[31mError: libc not found.\033[m\n\n"
exit 1
fi
printf "* Process name (PID) : %s (%d)\n" `head -1 $N/status | cut -b 7-` $N
FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
# note: $2 and $N are the same PID here
FS_functions=( $(readelf -s $2/exe | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
FS_libc_check
FS_binary_check
FS_comparison
FS_summary
fi
exit 0
;;
*)
# unknown or missing option: complain (if any args were given) and show usage
if [ "$#" != "0" ] ; then
printf "\033[31mError: Unknown option '$1'.\033[m\n\n"
fi
help
exit 1
;;
esac

40
test_tools/dd.py Normal file
View File

@ -0,0 +1,40 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import test_utils.linux_command as linux_comm
import test_utils.size as size
from core.test_run import TestRun
class Dd(linux_comm.LinuxCommand):
    """Fluent builder for the 'dd' command line."""

    def __init__(self):
        super().__init__(TestRun.executor, 'dd')

    def block_size(self, value: size.Size):
        """Set 'bs' (block size) in bytes."""
        return self.set_param('bs', int(value.get_value()))

    def count(self, value):
        """Set 'count' (number of blocks to copy)."""
        return self.set_param('count', value)

    def input(self, value):
        """Set the input file path ('if')."""
        return self.set_param('if', value)

    def iflag(self, *values):
        """Set one or more input flags ('iflag')."""
        return self.set_param('iflag', *values)

    def oflag(self, *values):
        """Set one or more output flags ('oflag')."""
        return self.set_param('oflag', *values)

    def conv(self, *values):
        """Set one or more conversions ('conv')."""
        return self.set_param('conv', *values)

    def output(self, value):
        """Set the output file path ('of')."""
        return self.set_param('of', value)

    def seek(self, value):
        """Set 'seek' (blocks to skip in the output)."""
        return self.set_param('seek', value)

    def skip(self, value):
        """Set 'skip' (blocks to skip in the input)."""
        return self.set_param('skip', value)

47
test_tools/ddrescue.py Normal file
View File

@ -0,0 +1,47 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import test_utils.linux_command as linux_comm
import test_utils.size as size
from core.test_run import TestRun
class Ddrescue(linux_comm.LinuxCommand):
    """Fluent builder for the GNU 'ddrescue' command line."""

    def __init__(self):
        super().__init__(TestRun.executor, 'ddrescue')
        self.source_path = None
        self.destination_path = None
        # ddrescue uses GNU-style long options
        self.param_name_prefix = "--"

    def source(self, value):
        """Set the source device/file (first positional argument)."""
        self.source_path = value
        return self

    def destination(self, value):
        """Set the destination device/file (second positional argument)."""
        self.destination_path = value
        return self

    def reverse(self):
        return self.set_flags("reverse")

    def synchronous(self):
        return self.set_flags("synchronous")

    def direct(self):
        return self.set_flags("direct")

    def force(self):
        return self.set_flags("force")

    def block_size(self, value: size.Size):
        """Set '--sector-size' in bytes."""
        return self.set_param('sector-size', int(value.get_value()))

    def size(self, value: size.Size):
        """Set '--size' in bytes."""
        return self.set_param('size', int(value.get_value()))

    def __str__(self):
        base = super().__str__()
        return f"{base} {self.source_path} {self.destination_path}"

329
test_tools/device_mapper.py Normal file
View File

@ -0,0 +1,329 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from enum import Enum
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size, Unit
class DmTarget(Enum):
    """Device-mapper target types.

    Each member's value is the tuple of constructor types expected for that
    target's table parameters (used by DmTable.TableEntry.validate()).

    NOTE(review): Enum members with equal values are aliases — all the
    empty-tuple members below (ZERO, CRYPT, MIRROR, ...) alias ERROR, so
    e.g. str(DmTarget.ZERO) yields "error". Only LINEAR and ERROR are used
    in this module; confirm before relying on the other members.
    """
    # Fill argument types for other targets if you need them
    LINEAR = (str, int)
    STRIPED = (int, int, list)
    ERROR = ()
    ZERO = ()
    CRYPT = ()
    DELAY = (str, int, int, str, int, int)
    FLAKEY = (str, int, int, int)
    MIRROR = ()
    MULTIPATH = ()
    RAID = ()
    SNAPSHOT = ()

    def __str__(self):
        # dmsetup expects lower-case target names in table lines
        return self.name.lower()
# Forward declaration: lets the `DmTable.TableEntry` annotation inside the
# real DmTable class body (defined next) resolve at definition time.
class DmTable:
    class TableEntry:
        pass
class DmTable:
    """Builder for a device-mapper table (a list of TableEntry rows).

    Offsets and lengths are expressed in 512-byte sectors, matching the
    device-mapper table format consumed by dmsetup.
    """

    class TableEntry:
        """One table row: '<offset> <length> <target> [params...]'."""

        def __init__(self, offset: int, length: int, target: DmTarget, *params):
            self.offset = int(offset)
            self.length = int(length)
            self.target = DmTarget(target)
            self.params = list(params)
            self.validate()

        def validate(self):
            """Coerce params using the types declared in the target's enum value.

            Raises ValueError when more params are given than the target
            declares types for.
            """
            if self.target.value:
                for i in range(len(self.params)):
                    try:
                        self.params[i] = self.target.value[i](self.params[i])
                    except IndexError:
                        raise ValueError("invalid dm target parameter")

        def __str__(self):
            ret = f"{self.offset} {self.length} {self.target}"
            for param in self.params:
                ret += f" {param}"
            return ret

    def __init__(self):
        # rows of the table; sorted lazily wherever ordering matters
        self.table = []

    @classmethod
    def uniform_error_table(
        cls, start_lba: int, stop_lba: int, num_error_zones: int, error_zone_size: Size
    ):
        """Build a table with error zones spread evenly over [start_lba, stop_lba)."""
        table = cls()
        increment = (stop_lba - start_lba) // num_error_zones
        for zone_start in range(start_lba, stop_lba, increment):
            table.add_entry(
                DmTable.TableEntry(
                    zone_start,
                    error_zone_size.get_value(Unit.Blocks512),
                    DmTarget.ERROR,
                )
            )
        return table

    @classmethod
    def passthrough_table(cls, device: Device):
        """Build a single linear mapping covering the whole device."""
        table = cls()
        table.add_entry(
            DmTable.TableEntry(
                0,
                device.size.get_value(Unit.Blocks512),
                DmTarget.LINEAR,
                device.path,
                0,
            )
        )
        return table

    @classmethod
    def error_table(cls, offset: int, size: Size):
        """Build a table with a single error target of `size` at `offset`."""
        table = cls()
        table.add_entry(
            DmTable.TableEntry(offset, size.get_value(Unit.Blocks512), DmTarget.ERROR)
        )
        return table

    def fill_gaps(self, device: Device, fill_end=True):
        """Fill every unmapped range with a linear mapping onto `device`.

        When fill_end is True the open-ended tail (up to the device size)
        is also filled. Returns self for chaining.
        """
        gaps = self.get_gaps()
        for gap in gaps[:-1]:
            self.add_entry(
                DmTable.TableEntry(
                    gap[0], gap[1], DmTarget.LINEAR, device.path, int(gap[0])
                )
            )
        table_end = gaps[-1][0]
        if fill_end and (Size(table_end, Unit.Blocks512) < device.size):
            self.add_entry(
                DmTable.TableEntry(
                    table_end,
                    device.size.get_value(Unit.Blocks512) - table_end,
                    DmTarget.LINEAR,
                    device.path,
                    table_end,
                )
            )
        return self

    def add_entry(self, entry: DmTable.TableEntry):
        """Append a row to the table; returns self for chaining."""
        self.table.append(entry)
        return self

    def get_gaps(self):
        """Return unmapped ranges as (offset, length) tuples.

        The final tuple always has length -1 and marks the open-ended tail
        starting where the last entry stops.
        """
        if not self.table:
            return [(0, -1)]
        gaps = []
        self.table.sort(key=lambda entry: entry.offset)
        if self.table[0].offset != 0:
            gaps.append((0, self.table[0].offset))
        for e1, e2 in zip(self.table, self.table[1:]):
            if e1.offset + e1.length != e2.offset:
                gaps.append(
                    (e1.offset + e1.length, e2.offset - (e1.offset + e1.length))
                )
        # e2 is the last entry after the loop when len > 1
        if len(self.table) > 1:
            gaps.append((e2.offset + e2.length, -1))
        else:
            gaps.append((self.table[0].offset + self.table[0].length, -1))
        return gaps

    def validate(self):
        """Raise ValueError unless the table starts at 0 and is contiguous."""
        self.table.sort(key=lambda entry: entry.offset)
        if self.table[0].offset != 0:
            raise ValueError(f"dm table should start at LBA 0: {self.table[0]}")
        for e1, e2 in zip(self.table, self.table[1:]):
            if e1.offset + e1.length != e2.offset:
                raise ValueError(
                    f"dm table should not have any holes or overlaps: {e1} -> {e2}"
                )

    def get_size(self):
        """Return the total mapped size (end of the last entry)."""
        self.table.sort(key=lambda entry: entry.offset)
        return Size(self.table[-1].offset + self.table[-1].length, Unit.Blocks512)

    def __str__(self):
        output = ""
        for entry in self.table:
            output += f"{entry}\n"
        return output
class DeviceMapper(LinuxCommand):
    """Thin wrapper around 'dmsetup' for one named device-mapper device."""

    @classmethod
    def remove_all(cls, force=True):
        """Remove every device-mapper device on the DUT."""
        TestRun.LOGGER.info("Removing all device mapper devices")
        cmd = "dmsetup remove_all" + (" --force" if force else "")
        return TestRun.executor.run_expect_success(cmd)

    def __init__(self, name: str):
        super().__init__(TestRun.executor, "dmsetup")
        self.name = name

    @staticmethod
    def wrap_table(table: DmTable):
        """Wrap a table in a shell here-document so dmsetup reads it from stdin."""
        return f"<< ENDHERE\n{str(table)}ENDHERE\n"

    def get_path(self):
        """Return the /dev/mapper path of this device."""
        return f"/dev/mapper/{self.name}"

    def clear(self):
        return TestRun.executor.run_expect_success(f"{self.command_name} clear {self.name}")

    def create(self, table: DmTable):
        """Create the device from `table`; logs and re-raises on invalid tables."""
        try:
            table.validate()
        except ValueError:
            for entry in table.table:
                TestRun.LOGGER.error(f"{entry}")
            raise
        TestRun.LOGGER.info(f"Creating device mapper device '{self.name}'")
        for entry in table.table:
            TestRun.LOGGER.debug(f"{entry}")
        return TestRun.executor.run_expect_success(
            f"{self.command_name} create {self.name} {self.wrap_table(table)}"
        )

    def remove(self):
        TestRun.LOGGER.info(f"Removing device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} remove {self.name}")

    def suspend(self):
        TestRun.LOGGER.info(f"Suspending device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} suspend {self.name}")

    def resume(self):
        TestRun.LOGGER.info(f"Resuming device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} resume {self.name}")

    def reload(self, table: DmTable):
        """Replace the device's table with `table` (device must be suspended)."""
        table.validate()
        TestRun.LOGGER.info(f"Reloading table for device mapper device '{self.name}'")
        for entry in table.table:
            TestRun.LOGGER.debug(f"{entry}")
        return TestRun.executor.run_expect_success(
            f"{self.command_name} reload {self.name} {self.wrap_table(table)}"
        )
class ErrorDevice(Device):
    """Device-mapper wrapper that can inject I/O errors into a base device.

    The device starts active with either the given table or a passthrough
    (linear) mapping over the whole base device.
    """

    def __init__(self, name: str, base_device: Device, table: DmTable = None):
        self.device = base_device
        self.mapper = DeviceMapper(name)
        self.name = name
        # default to a passthrough table when none is supplied
        self.table = DmTable.passthrough_table(base_device) if not table else table
        self.active = False
        self.start()
        self.path = resolve_to_by_id_link(self.mapper.get_path().replace('/dev/', ''))

    @property
    def system_path(self):
        """Resolved /dev path of the mapper device, or None when inactive."""
        if self.active:
            output = TestRun.executor.run_expect_success(f"realpath {self.mapper.get_path()}")
            return output.stdout
        return None

    @property
    def size(self):
        """Mapped size taken from the current table, or None when inactive."""
        if self.active:
            return self.table.get_size()
        return None

    def start(self):
        """Create the mapper device from the current table."""
        self.mapper.create(self.table)
        self.active = True

    def stop(self):
        """Remove the mapper device."""
        self.mapper.remove()
        self.active = False

    def change_table(self, table: DmTable, permanent=True):
        """Swap in `table` via suspend/reload/resume; store it when permanent."""
        if self.active:
            self.mapper.suspend()
            self.mapper.reload(table)
            self.mapper.resume()
        if permanent:
            self.table = table

    def suspend_errors(self):
        """Temporarily replace the error table with a passthrough mapping."""
        empty_table = DmTable.passthrough_table(self.device)
        TestRun.LOGGER.info(f"Suspending issuing errors for error device '{self.name}'")
        self.change_table(empty_table, False)

    def resume_errors(self):
        """Restore the stored (error-injecting) table."""
        TestRun.LOGGER.info(f"Resuming issuing errors for error device '{self.name}'")
        self.change_table(self.table, False)

    def suspend(self):
        """Suspend the mapper device; no-op with a warning when not active.

        Bug fix: the original logged a warning with an inverted message
        ("already running" when the device was NOT active) and then
        suspended the device anyway; now it returns early.
        """
        if not self.active:
            TestRun.LOGGER.warning(
                f"cannot suspend error device '{self.name}'! It's not active"
            )
            return
        self.mapper.suspend()
        self.active = False

    def resume(self):
        """Resume the mapper device; no-op with a warning when already active.

        Bug fix: the original warned and then resumed anyway; now it
        returns early when the device is already running.
        """
        if self.active:
            TestRun.LOGGER.warning(
                f"cannot resume error device '{self.name}'! It's already running"
            )
            return
        self.mapper.resume()
        self.active = True

397
test_tools/disk_utils.py Normal file
View File

@ -0,0 +1,397 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
import re
import time
from enum import Enum
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.fs_utils import readlink, parse_ls_output, ls
from test_utils.output import CmdException
from test_utils.size import Size, Unit
# size of a disk sector in bytes; fdisk/parted arithmetic uses this unit
SECTOR_SIZE = 512


class Filesystem(Enum):
    """Filesystem types supported by create_filesystem()."""
    xfs = 0
    ext3 = 1
    ext4 = 2
class PartitionTable(Enum):
    """Disk label types accepted by 'parted mklabel'."""
    msdos = 0
    gpt = 1
class PartitionType(Enum):
    """Partition roles as used/reported by parted."""
    efi = 0
    primary = 1
    extended = 2
    logical = 3
    lvm = 4
    msr = 5
    swap = 6
    standard = 7
    unknown = 8
def create_filesystem(device, filesystem: Filesystem, force=True, blocksize=None):
    """Create `filesystem` on `device` with the matching mkfs tool.

    `force` adds the tool's force flag; `blocksize` (bytes) adds the
    tool-specific block-size option when given.
    """
    TestRun.LOGGER.info(
        f"Creating filesystem ({filesystem.name}) on device: {device.path}")
    # xfs uses '-f' / '-b size=N'; the ext* family uses '-F' / '-b N'
    is_xfs = filesystem == Filesystem.xfs
    force_param = (' -f ' if is_xfs else ' -F ') if force else ''
    block_size_param = ''
    if blocksize:
        block_size_param = f' -b size={blocksize}' if is_xfs else f' -b {blocksize}'
    cmd = re.sub(' +', ' ',
                 f'mkfs.{filesystem.name} {force_param} {device.path} {block_size_param}')
    TestRun.executor.run_expect_success(cmd)
    TestRun.LOGGER.info(
        f"Successfully created filesystem on device: {device.path}")
def create_partition_table(device, partition_table_type: PartitionTable = PartitionTable.gpt):
    """Write a new partition table label of the given type onto `device`."""
    TestRun.LOGGER.info(
        f"Creating partition table ({partition_table_type.name}) for device: {device.path}")
    TestRun.executor.run_expect_success(
        f'parted --script {device.path} mklabel {partition_table_type.name}')
    # remember the label type on the device object for later partitioning
    device.partition_table = partition_table_type
    TestRun.LOGGER.info(
        f"Successfully created {partition_table_type.name} "
        f"partition table on device: {device.path}")
def get_partition_path(parent_dev, number):
    """Return the device path of partition `number` on `parent_dev`.

    CAS devices name partitions with a bare 'p' (e.g. "cas1-1p1"); regular
    by-id device links use the '-part<N>' separator.
    """
    # TODO: change this to be less specific hw dependent (kernel)
    id_separator = 'p' if "dev/cas" in parent_dev else '-part'
    return f'{parent_dev}{id_separator}{number}'
def remove_parition(device, part_number):
    """Remove partition `part_number` from `device` via parted.

    NOTE(review): the name keeps the historical 'parition' misspelling to
    preserve the public interface for existing callers.
    """
    TestRun.LOGGER.info(f"Removing part {part_number} from {device.path}")
    result = TestRun.executor.run(f'parted --script {device.path} rm {part_number}')
    if result.exit_code != 0:
        # parted can fail until the kernel re-reads the partition table
        TestRun.executor.run_expect_success("partprobe")
def create_partition(
        device,
        part_size,
        part_number,
        part_type: PartitionType = PartitionType.primary,
        unit=Unit.MebiByte,
        aligned: bool = True):
    """Create a single partition on a device with parted.

    :param device: device object; its existing .partitions determine the start offset
    :param part_size: requested size; Size.zero() means "use all remaining space"
    :param part_number: expected partition number (used for verification)
    :param part_type: PartitionType; extended partitions get no Partition object
    :param unit: unit used to express begin/end to parted
    :param aligned: start partitions on 1 MiB boundaries when True
    :raises Exception: when the partition cannot be found after creation
    """
    TestRun.LOGGER.info(
        f"Creating {part_type.name} partition on device: {device.path}")
    begin = get_first_partition_offset(device, aligned)
    for part in device.partitions:
        begin += part.size
        # logical partitions need extra room for the EBR metadata
        if part.type == PartitionType.logical:
            begin += Size(1, Unit.MebiByte if not aligned else device.block_size)

    if part_type == PartitionType.logical:
        begin += Size(1, Unit.MebiByte if not aligned else device.block_size)

    # BUGFIX: 'end' was previously unbound when part_size == Size.zero(),
    # causing a NameError at the Partition() call below.
    end = None
    if part_size != Size.zero():
        end = begin + part_size
        end_cmd = f'{end.get_value(unit)}{unit_to_string(unit)}'
    else:
        end_cmd = '100%'

    cmd = f'parted --script {device.path} mkpart ' \
          f'{part_type.name} ' \
          f'{begin.get_value(unit)}{unit_to_string(unit)} ' \
          f'{end_cmd}'
    output = TestRun.executor.run(cmd)

    if output.exit_code != 0:
        # parted may fail transiently; force the kernel to re-read the table
        TestRun.executor.run_expect_success("partprobe")

    TestRun.executor.run_expect_success("udevadm settle")
    if not check_partition_after_create(
            part_size,
            part_number,
            device.path,
            part_type,
            aligned):
        raise Exception("Could not create partition!")

    if part_type != PartitionType.extended:
        from storage_devices.partition import Partition
        new_part = Partition(device,
                             part_type,
                             part_number,
                             begin,
                             end if isinstance(end, Size) else device.size)
        # Wipe the first 4 KiB so stale filesystem signatures do not survive.
        dd = Dd().input("/dev/zero") \
            .output(new_part.path) \
            .count(1) \
            .block_size(Size(1, Unit.Blocks4096)) \
            .oflag("direct")
        dd.run()
        device.partitions.append(new_part)

    TestRun.LOGGER.info(f"Successfully created {part_type.name} partition on {device.path}")
def available_disk_size(device):
    """Return the unused capacity behind the last occupied sector of the device."""
    dev = f"/dev/{device.get_device_id()}"
    # total sector count reported by fdisk
    total_sectors = int(TestRun.executor.run(f"fdisk -l {dev} | grep {dev} | grep sectors "
                                             f"| awk '{{print $7 }}' ").stdout)
    # last sector already taken by a partition
    last_used_sector = int(TestRun.executor.run(f"fdisk -l {dev} | grep {dev} "
                                                f"| awk '{{print $3 }}' | tail -1").stdout)
    free_sectors = total_sectors - last_used_sector
    return Size(free_sectors, Unit(get_block_size(device)))
def create_partitions(device, sizes: [], partition_table_type=PartitionTable.gpt):
    """Create a fresh partition table and one partition per requested size.

    For an msdos table with more than 4 requested partitions, the 4th slot is
    turned into an extended partition and the remaining partitions are created
    as logical partitions inside it (msdos allows at most 4 primary entries).
    """
    create_partition_table(device, partition_table_type)
    partition_type = PartitionType.primary
    partition_number_offset = 0
    # msdos extended partitions are capped at 2 TB
    msdos_part_max_size = Size(2, Unit.TeraByte)

    for s in sizes:
        # shrink each partition by one block to leave room for metadata/alignment
        size = Size(
            s.get_value(device.block_size) - device.block_size.value, device.block_size)
        if partition_table_type == PartitionTable.msdos and \
                len(sizes) > 4 and len(device.partitions) == 3:
            # three primaries exist already - create the extended container now
            if available_disk_size(device) > msdos_part_max_size:
                part_size = msdos_part_max_size
            else:
                part_size = Size.zero()  # zero means "use all remaining space"
            create_partition(device,
                             part_size,
                             4,
                             PartitionType.extended)
            # all following partitions are logical and numbered after slot 4
            partition_type = PartitionType.logical
            partition_number_offset = 1

        partition_number = len(device.partitions) + 1 + partition_number_offset
        create_partition(device,
                         size,
                         partition_number,
                         partition_type,
                         Unit.MebiByte,
                         True)
def get_block_size(device):
    """Return the device's hardware sector size in bytes (512 when unreadable)."""
    try:
        raw = TestRun.executor.run(
            f"cat {get_sysfs_path(device)}/queue/hw_sector_size").stdout
        return float(raw)
    except ValueError:
        # sysfs attribute missing/garbled - fall back to the common 512 B sector
        return Unit.Blocks512.value
def get_size(device):
    """Return the device capacity in bytes, from its sysfs 512 B sector count."""
    sectors = int(TestRun.executor.run_expect_success(
        f"cat {get_sysfs_path(device)}/size").stdout)
    return sectors * SECTOR_SIZE
def get_sysfs_path(device):
    """Return the sysfs directory of a block device, preferring /sys/class/block."""
    path = f"/sys/class/block/{device}"
    if TestRun.executor.run(f"test -d {path}").exit_code == 0:
        return path
    # older kernels expose devices under /sys/block only
    return f"/sys/block/{device}"
def get_pci_address(device):
    """Return the PCI address of the given block device as read from sysfs."""
    return TestRun.executor.run(f"cat /sys/block/{device}/device/address").stdout
def check_partition_after_create(size, part_number, parent_dev_path, part_type, aligned):
    """Verify that a freshly created partition is visible in the system.

    Returns False when the partition node cannot be found (even after forcing
    a table re-read and polling) or when the lookup is ambiguous. A size
    mismatch or 4k misalignment is only logged as a warning - still True.
    """
    partition_path = get_partition_path(parent_dev_path, part_number)
    if "dev/cas" not in partition_path:
        # regular partitions are exposed as by-id symlinks
        cmd = f"find {partition_path} -type l"
    else:
        cmd = f"find {partition_path}"
    output = TestRun.executor.run_expect_success(cmd).stdout
    if partition_path not in output:
        TestRun.LOGGER.info(
            "Partition created, but could not find it in system, trying 'hdparm -z'")
        TestRun.executor.run_expect_success(f"hdparm -z {parent_dev_path}")
        output_after_hdparm = TestRun.executor.run_expect_success(
            f"parted --script {parent_dev_path} print").stdout
        TestRun.LOGGER.info(output_after_hdparm)
        # poll for up to ~20 s for udev to create the node
        counter = 0
        while partition_path not in output and counter < 10:
            time.sleep(2)
            output = TestRun.executor.run(cmd).stdout
            counter += 1

    # more than one match means the lookup is ambiguous - treat as failure
    if len(output.split('\n')) > 1 or partition_path not in output:
        return False

    if aligned and part_type != PartitionType.extended \
            and size.get_value(Unit.Byte) % Unit.Blocks4096.value != 0:
        TestRun.LOGGER.warning(
            f"Partition {partition_path} is not 4k aligned: {size.get_value(Unit.KibiByte)}KiB")

    partition_size = get_size(readlink(partition_path).split('/')[-1])
    if part_type == PartitionType.extended or \
            partition_size == size.get_value(Unit.Byte):
        return True

    # size mismatch is warning-only; the partition still exists
    TestRun.LOGGER.warning(
        f"Partition size {partition_size} does not match expected {size.get_value(Unit.Byte)} size."
    )
    return True
def get_first_partition_offset(device, aligned: bool):
    """Return the offset at which the first partition should start.

    Aligned partitions always start at 1 MiB. Unaligned GPT layouts start at
    sector 34 because the GPT header and entry array occupy the first sectors.
    """
    if aligned:
        return Size(1, Unit.MebiByte)
    # 33 sectors are reserved for the backup GPT
    # BUGFIX: 'device.blocksize' -> 'device.block_size' to match the attribute
    # name used everywhere else in this module (e.g. create_partition).
    return Size(34, Unit(device.block_size)) \
        if device.partition_table == PartitionTable.gpt else Size(1, device.block_size)
def remove_partitions(device):
    """Unmount everything on the device and wipe its partitions.

    :return: True on success, False when partition nodes are still present.
    """
    from test_utils.os_utils import Udev
    if device.is_mounted():
        device.unmount()

    for partition in device.partitions:
        unmount(partition)

    TestRun.LOGGER.info(f"Removing partitions from device: {device.path} "
                        f"({device.get_device_id()}).")
    device.wipe_filesystem()
    # let udev process the wipe before checking the result
    Udev.trigger()
    Udev.settle()

    listing = TestRun.executor.run(f"ls {device.path}* -1")
    # more than one line means partition nodes still exist
    if len(listing.stdout.split('\n')) > 1:
        TestRun.LOGGER.error(f"Could not remove partitions from device {device.path}")
        return False
    return True
def mount(device, mount_point, options: [str] = None):
    """Mount a device at mount_point, creating the directory if needed.

    :param options: optional list of mount options joined into '-o a,b,c'
    :raises Exception: when the mount command fails
    """
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestRun.LOGGER.info(f"Mounting device {device.path} ({device.get_device_id()}) "
                        f"to {mount_point}.")
    mount_cmd = f"mount {device.path} {mount_point}"
    if options:
        mount_cmd = f"{mount_cmd} -o {','.join(options)}"
    if TestRun.executor.run(mount_cmd).exit_code != 0:
        raise Exception(f"Failed to mount {device.path} to {mount_point}")
    device.mount_point = mount_point
def unmount(device):
    """Unmount the device if it is mounted.

    :return: False only when umount itself fails; True otherwise.
    """
    TestRun.LOGGER.info(f"Unmounting device {device.path} ({device.get_device_id()}).")
    if device.mount_point is None:
        TestRun.LOGGER.info("Device is not mounted.")
        return True
    if TestRun.executor.run(f"umount {device.mount_point}").exit_code != 0:
        TestRun.LOGGER.error("Could not unmount device.")
        return False
    return True
def unit_to_string(unit):
    """Translate a Unit enum member into the size suffix understood by parted."""
    suffix_by_unit = {
        Unit.Byte: 'B',
        Unit.Blocks512: 's',
        Unit.Blocks4096: 's',
        Unit.KibiByte: 'KiB',
        Unit.MebiByte: 'MiB',
        Unit.GibiByte: 'GiB',
        Unit.TebiByte: 'TiB',
        Unit.KiloByte: 'kB',
        Unit.MegaByte: 'MB',
        Unit.GigaByte: 'GB',
        Unit.TeraByte: 'TB',
    }
    # unknown units yield a sentinel string rather than raising
    return suffix_by_unit.get(unit, "Invalid unit.")
def wipe_filesystem(device, force=True):
    """Erase all filesystem signatures from the device using wipefs."""
    TestRun.LOGGER.info(f"Erasing the device: {device.path}")
    force_flag = ' -f' if force else ''
    TestRun.executor.run_expect_success(f'wipefs -a{force_flag} {device.path}')
    TestRun.LOGGER.info(
        f"Successfully wiped device: {device.path}")
def check_if_device_supports_trim(device):
    """Return True when the device supports TRIM (NVMe assumed to always do)."""
    if device.get_device_id().startswith("nvme"):
        return True
    # for SATA devices, query the identify data via hdparm
    result = TestRun.executor.run(
        f'hdparm -I {device.path} | grep "TRIM supported"')
    return result.exit_code == 0
def get_device_filesystem_type(device_id):
    """Return the Filesystem enum member detected on the device.

    Returns None when the device has no (or an unrecognized) filesystem, or
    when it is a valid block device that lsblk does not list.
    """
    cmd = f'lsblk -l -o NAME,FSTYPE | sort | uniq | grep "{device_id} "'
    try:
        stdout = TestRun.executor.run_expect_success(cmd).stdout
    except CmdException:
        # unusual devices might not be listed in output (i.e. RAID containers)
        if TestRun.executor.run(f"test -b /dev/{device_id}").exit_code != 0:
            raise
        else:
            return None
    split_stdout = stdout.strip().split()
    if len(split_stdout) <= 1:
        # device listed, but the FSTYPE column is empty
        return None
    else:
        try:
            return Filesystem[split_stdout[1]]
        except KeyError:
            TestRun.LOGGER.warning(f"Unrecognized filesystem: {split_stdout[1]}")
            return None
def _is_by_id_path(path: str):
    """Check whether path is an existing link under /dev/disk/by-id."""
    dev_by_id_dir = "/dev/disk/by-id"
    listed = parse_ls_output(ls(dev_by_id_dir), dev_by_id_dir)
    known = [posixpath.join(dev_by_id_dir, entry.full_path) for entry in listed]
    return path in known
def _is_dev_path_whitelisted(path: str):
"""check if given path is whitelisted"""
whitelisted_paths = [r"cas\d+-\d+", r"/dev/dm-\d+"]
for whitelisted_path in whitelisted_paths:
if re.search(whitelisted_path, path) is not None:
return True
return False
def validate_dev_path(path: str):
    """Return path when it is an absolute, whitelisted or valid by-id device path.

    :raises ValueError: when the path is relative or not a known device link.
    """
    if not posixpath.isabs(path):
        raise ValueError(f'Given path "{path}" is not absolute.')
    if _is_dev_path_whitelisted(path) or _is_by_id_path(path):
        return path
    raise ValueError(f'By-id device link {path} is broken.')

67
test_tools/drbdadm.py Normal file
View File

@ -0,0 +1,67 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run import TestRun
class Drbdadm:
    """Thin wrapper around the drbdadm command-line tool."""

    @staticmethod
    def create_metadata(resource_name: str, force: bool):
        """Create metadata for the resource."""
        force_flag = " --force" if force else ""
        return TestRun.executor.run_expect_success(
            f"drbdadm create-md{force_flag} {resource_name}")

    @staticmethod
    def up(resource_name: str):
        """Enable the resource."""
        return TestRun.executor.run_expect_success(f"drbdadm up {resource_name}")

    @staticmethod
    def down_all():
        """Disable all resources."""
        return TestRun.executor.run_expect_success("drbdadm down all")

    @staticmethod
    def down(resource_name):
        """Disable the given resource."""
        return TestRun.executor.run_expect_success(f"drbdadm down {resource_name}")

    @staticmethod
    def set_node_primary(resource_name: str, force=False):
        """Promote the resource on this node to primary."""
        force_flag = " --force" if force else ""
        return TestRun.executor.run_expect_success(
            f"drbdadm primary {resource_name}{force_flag}")

    @staticmethod
    def set_node_secondary(resource_name: str):
        """Demote the resource on this node to secondary."""
        return TestRun.executor.run_expect_success(f"drbdadm secondary {resource_name}")

    @staticmethod
    def get_status(resource_name: str = ""):
        """Report status of one resource, or of all when no name is given."""
        return TestRun.executor.run_expect_success(f"drbdadm status {resource_name}")

    @staticmethod
    def in_sync(resource_name: str):
        """Return True when no device of the resource reports Inconsistent data."""
        grep_cmd = f"drbdadm status {resource_name} | grep Inconsistent"
        return TestRun.executor.run(grep_cmd).exit_code == 1

    @staticmethod
    def wait_for_sync(resource_name: str = ""):
        """Block until resync completes.

        NOTE: the ssh connection might time out in case of a long sync.
        """
        return TestRun.executor.run_expect_success(f"drbdadm wait-sync {resource_name}")

    @staticmethod
    def dump_config(resource_name: str):
        """Dump the configuration of the resource."""
        return TestRun.executor.run(f"drbdadm dump {resource_name}")

View File

105
test_tools/fio/fio.py Normal file
View File

@ -0,0 +1,105 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import uuid
import test_tools.fio.fio_param
import test_tools.fs_utils
from core.test_run import TestRun
from test_tools import fs_utils
from test_utils import os_utils
class Fio:
    """Builder/runner for the fio benchmark tool.

    Installs the pinned fio version when missing, assembles the command
    (or a job file piped via stdin) from FioParam objects, and runs it
    through the configured executor.
    """
    def __init__(self, executor_obj=None):
        self.fio_version = "fio-3.30"  # pinned version; is_installed() requires an exact match
        self.default_run_time = datetime.timedelta(hours=1)
        self.jobs = []
        self.executor = executor_obj if executor_obj is not None else TestRun.executor
        self.base_cmd_parameters: test_tools.fio.fio_param.FioParam = None
        self.global_cmd_parameters: test_tools.fio.fio_param.FioParam = None

    def create_command(self, output_type=test_tools.fio.fio_param.FioOutput.json):
        """Initialize base and global parameter sets; returns the global set for chaining."""
        self.base_cmd_parameters = test_tools.fio.fio_param.FioParamCmd(self, self.executor)
        self.global_cmd_parameters = test_tools.fio.fio_param.FioParamConfig(self, self.executor)
        # unique output file per run so concurrent runs do not clash
        self.fio_file = f'fio_run_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_{uuid.uuid4().hex}'
        self.base_cmd_parameters\
            .set_param('eta', 'always')\
            .set_param('output-format', output_type.value)\
            .set_param('output', self.fio_file)
        self.global_cmd_parameters.set_flags('group_reporting')
        return self.global_cmd_parameters

    def is_installed(self):
        """Return True when exactly the pinned fio version is installed."""
        return self.executor.run("fio --version").stdout.strip() == self.fio_version

    def install(self):
        """Download, build and install the pinned fio version from source."""
        fio_url = f"http://brick.kernel.dk/snaps/{self.fio_version}.tar.bz2"
        fio_package = os_utils.download_file(fio_url)
        fs_utils.uncompress_archive(fio_package)
        TestRun.executor.run_expect_success(f"cd {fio_package.parent_dir}/{self.fio_version}"
                                            f" && ./configure && make -j && make install")

    def calculate_timeout(self):
        """Derive an execution timeout from runtime + ramp_time.

        Non-time-based workloads fall back to default_run_time.
        :raises ValueError: when runtime/ramp_time are not single-valued.
        """
        if "time_based" not in self.global_cmd_parameters.command_flags:
            return self.default_run_time

        total_time = self.global_cmd_parameters.get_parameter_value("runtime")
        if len(total_time) != 1:
            raise ValueError("Wrong fio 'runtime' parameter configuration")
        total_time = int(total_time[0])
        ramp_time = self.global_cmd_parameters.get_parameter_value("ramp_time")
        if ramp_time is not None:
            if len(ramp_time) != 1:
                raise ValueError("Wrong fio 'ramp_time' parameter configuration")
            ramp_time = int(ramp_time[0])
            total_time += ramp_time
        return datetime.timedelta(seconds=total_time)

    def run(self, timeout: datetime.timedelta = None):
        """Run fio synchronously; timeout defaults to calculate_timeout()."""
        if timeout is None:
            timeout = self.calculate_timeout()
        self.prepare_run()
        return self.executor.run(str(self), timeout)

    def run_in_background(self):
        """Start fio without waiting for completion."""
        self.prepare_run()
        return self.executor.run_in_background(str(self))

    def prepare_run(self):
        """Install fio when needed and log the command/config about to run."""
        if not self.is_installed():
            self.install()

        if len(self.jobs) > 0:
            self.executor.run(f"{str(self)}-showcmd -")
            TestRun.LOGGER.info(self.executor.run(f"cat {self.fio_file}").stdout)
        TestRun.LOGGER.info(str(self))

    def execution_cmd_parameters(self):
        """Render the job-file content: global section followed by each job section."""
        if len(self.jobs) > 0:
            separator = "\n\n"
            return f"{str(self.global_cmd_parameters)}\n" \
                   f"{separator.join(str(job) for job in self.jobs)}"
        else:
            return str(self.global_cmd_parameters)

    def __str__(self):
        # With jobs: echo the generated job file into fio via stdin.
        # Without jobs: flatten every parameter onto a single command line.
        if len(self.jobs) > 0:
            command = f"echo '{self.execution_cmd_parameters()}' |" \
                      f" {str(self.base_cmd_parameters)} -"
        else:
            fio_parameters = test_tools.fio.fio_param.FioParamCmd(self, self.executor)
            fio_parameters.command_env_var.update(self.base_cmd_parameters.command_env_var)
            fio_parameters.command_param.update(self.base_cmd_parameters.command_param)
            fio_parameters.command_param.update(self.global_cmd_parameters.command_param)
            fio_parameters.command_flags.extend(self.global_cmd_parameters.command_flags)
            fio_parameters.set_param('name', 'fio')
            command = str(fio_parameters)
        return command

388
test_tools/fio/fio_param.py Normal file
View File

@ -0,0 +1,388 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import json
import secrets
from enum import Enum
from types import SimpleNamespace as Namespace
from connection.base_executor import BaseExecutor
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.fio.fio_result import FioResult
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size
class CpusAllowedPolicy(Enum):
    """fio 'cpus_allowed_policy' values (only the member name is used)."""
    shared = 0,
    split = 1
class ErrorFilter(Enum):
    """fio 'continue_on_error' categories (only the member name is used)."""
    none = 0,
    read = 1,
    write = 2,
    io = 3,
    verify = 4,
    all = 5
class FioOutput(Enum):
    """fio '--output-format' values (the member value is passed verbatim)."""
    normal = 'normal'
    terse = 'terse'
    json = 'json'
    jsonplus = 'json+'
class IoEngine(Enum):
    """fio 'ioengine' options (only the member name is used)."""
    # Basic read or write I/O. fseek is used to position the I/O location.
    sync = 0,
    # Linux native asynchronous I/O.
    libaio = 1,
    # Basic pread or pwrite I/O.
    psync = 2,
    # Basic readv or writev I/O.
    # Will emulate queuing by coalescing adjacent IOs into a single submission.
    vsync = 3,
    # Basic preadv or pwritev I/O.
    pvsync = 4,
    # POSIX asynchronous I/O using aio_read and aio_write.
    posixaio = 5,
    # File is memory mapped with mmap and data copied using memcpy.
    mmap = 6,
    # RADOS Block Device
    rbd = 7,
    # SPDK Block Device
    spdk_bdev = 8
class ReadWrite(Enum):
    """fio 'readwrite' I/O patterns (only the member name is used)."""
    randread = 0,
    randrw = 1,
    randwrite = 2,
    read = 3,
    readwrite = 4,
    write = 5,
    trim = 6,
    randtrim = 7,
    trimwrite = 8
class VerifyMethod(Enum):
    """fio 'verify' checksum methods (only the member name is used)."""
    # Use an md5 sum of the data area and store it in the header of each block.
    md5 = 0,
    # Use an experimental crc64 sum of the data area and store it in the header of each block.
    crc64 = 1,
    # Use optimized sha1 as the checksum function.
    sha1 = 2,
    # Verify a strict pattern.
    # Normally fio includes a header with some basic information and a checksum, but if this
    # option is set, only the specific pattern set with verify_pattern is verified.
    pattern = 3,
    # Write extra information about each I/O (timestamp, block number, etc.).
    # The block number is verified.
    meta = 4
class RandomGenerator(Enum):
    """fio 'random_generator' options (only the member name is used)."""
    tausworthe = 0,
    lfsr = 1,
    tausworthe64 = 2
class FioParam(LinuxCommand):
    """Fluent builder for fio parameters; each setter returns self for chaining.

    Subclasses decide how the collected parameters render: FioParamCmd as
    '--name=value' command-line options, FioParamConfig as job-file lines.
    """
    def __init__(self, fio, command_executor: BaseExecutor, command_name):
        LinuxCommand.__init__(self, command_executor, command_name)
        self.verification_pattern = ''
        self.fio = fio

    def get_verification_pattern(self):
        # lazily generate a random 32-byte hex pattern, reused for the whole run
        if not self.verification_pattern:
            self.verification_pattern = f'0x{secrets.token_hex(32)}'
        return self.verification_pattern

    def allow_mounted_write(self, value: bool = True):
        return self.set_param('allow_mounted_write', int(value))

    # example: "bs=8k,32k" => 8k for reads, 32k for writes and trims
    def block_size(self, *sizes: Size):
        return self.set_param('blocksize', *[int(size) for size in sizes])

    def blocksize_range(self, ranges):
        # ranges: iterable of (low, high) Size pairs rendered as "low-high"
        value = []
        for bs_range in ranges:
            str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
            value.append(str_range)
        return self.set_param('blocksize_range', ",".join(value))

    def bs_split(self, value):
        return self.set_param('bssplit', value)

    def buffer_pattern(self, pattern):
        return self.set_param('buffer_pattern', pattern)

    def continue_on_error(self, value: ErrorFilter):
        return self.set_param('continue_on_error', value.name)

    def cpus_allowed(self, value):
        return self.set_param('cpus_allowed', ",".join(value))

    def cpus_allowed_policy(self, value: CpusAllowedPolicy):
        return self.set_param('cpus_allowed_policy', value.name)

    def direct(self, value: bool = True):
        # 'direct' and 'buffered' are mutually exclusive in fio
        if 'buffered' in self.command_param:
            self.remove_param('buffered')
        return self.set_param('direct', int(value))

    def directory(self, directory):
        return self.set_param('directory', directory)

    def do_verify(self, value: bool = True):
        return self.set_param('do_verify', int(value))

    def exit_all_on_error(self, value: bool = True):
        return self.set_flags('exitall_on_error') if value \
            else self.remove_flag('exitall_on_error')

    def group_reporting(self, value: bool = True):
        return self.set_flags('group_reporting') if value else self.remove_flag('group_reporting')

    def file_name(self, path):
        return self.set_param('filename', path)

    def file_size(self, size: Size):
        return self.set_param('filesize', int(size))

    def file_size_range(self, ranges):
        value = []
        for bs_range in ranges:
            str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
            value.append(str_range)
        return self.set_param('filesize', ",".join(value))

    def fsync(self, value: int):
        return self.set_param('fsync', value)

    def ignore_errors(self, read_errors, write_errors, verify_errors):
        # fio expects three ':'-joined errno lists separated by commas
        separator = ':'
        return self.set_param(
            'ignore_error',
            separator.join(str(err) for err in read_errors),
            separator.join(str(err) for err in write_errors),
            separator.join(str(err) for err in verify_errors))

    def io_depth(self, value: int):
        # iodepth > 1 is meaningless with the synchronous engine - warn, still set
        if value != 1:
            if 'ioengine' in self.command_param and \
                    self.command_param['ioengine'] == 'sync':
                TestRun.LOGGER.warning("Setting iodepth will have no effect with "
                                       "'ioengine=sync' setting")
        return self.set_param('iodepth', value)

    def io_engine(self, value: IoEngine):
        if value == IoEngine.sync:
            if 'iodepth' in self.command_param and self.command_param['iodepth'] != 1:
                TestRun.LOGGER.warning("Setting 'ioengine=sync' will cause iodepth setting "
                                       "to be ignored")
        return self.set_param('ioengine', value.name)

    def io_size(self, value: Size):
        return self.set_param('io_size', int(value.get_value()))

    def loops(self, value: int):
        return self.set_param('loops', value)

    def no_random_map(self, value: bool = True):
        if 'verify' in self.command_param:
            raise ValueError("'NoRandomMap' parameter is mutually exclusive with verify")
        if value:
            return self.set_flags('norandommap')
        else:
            return self.remove_flag('norandommap')

    def nr_files(self, value: int):
        return self.set_param('nrfiles', value)

    def num_ios(self, value: int):
        return self.set_param('number_ios', value)

    def num_jobs(self, value: int):
        return self.set_param('numjobs', value)

    def offset(self, value: Size):
        return self.set_param('offset', int(value.get_value()))

    def offset_increment(self, value: Size):
        return self.set_param('offset_increment', f"{value.value}{value.unit.get_short_name()}")

    def percentage_random(self, value: int):
        if value <= 100:
            return self.set_param('percentage_random', value)
        raise ValueError("Argument out of range. Should be 0-100.")

    def pool(self, value):
        return self.set_param('pool', value)

    def ramp_time(self, value: datetime.timedelta):
        return self.set_param('ramp_time', int(value.total_seconds()))

    def random_distribution(self, value):
        return self.set_param('random_distribution', value)

    def rand_repeat(self, value: int):
        return self.set_param('randrepeat', value)

    def rand_seed(self, value: int):
        return self.set_param('randseed', value)

    def read_write(self, rw: ReadWrite):
        return self.set_param('readwrite', rw.name)

    def run_time(self, value: datetime.timedelta):
        if value.total_seconds() == 0:
            raise ValueError("Runtime parameter must not be set to 0.")
        return self.set_param('runtime', int(value.total_seconds()))

    def serialize_overlap(self, value: bool = True):
        return self.set_param('serialize_overlap', int(value))

    def size(self, value: Size):
        return self.set_param('size', int(value.get_value()))

    def stonewall(self, value: bool = True):
        # NOTE(review): disable path calls remove_param while other flag setters
        # use remove_flag - confirm against LinuxCommand semantics
        return self.set_flags('stonewall') if value else self.remove_param('stonewall')

    def sync(self, value: bool = True):
        return self.set_param('sync', int(value))

    def time_based(self, value: bool = True):
        return self.set_flags('time_based') if value else self.remove_flag('time_based')

    def thread(self, value: bool = True):
        return self.set_flags('thread') if value else self.remove_param('thread')

    def lat_percentiles(self, value: bool):
        return self.set_param('lat_percentiles', int(value))

    def scramble_buffers(self, value: bool):
        return self.set_param('scramble_buffers', int(value))

    def slat_percentiles(self, value: bool):
        return self.set_param('slat_percentiles', int(value))

    def spdk_core_mask(self, value: str):
        return self.set_param('spdk_core_mask', value)

    def spdk_json_conf(self, path):
        return self.set_param('spdk_json_conf', path)

    def clat_percentiles(self, value: bool):
        return self.set_param('clat_percentiles', int(value))

    def percentile_list(self, value: []):
        # empty list means "just the 100th percentile"
        val = ':'.join(value) if len(value) > 0 else '100'
        return self.set_param('percentile_list', val)

    def verification_with_pattern(self, pattern=None):
        # convenience combo: verify=pattern + verify_pattern + do_verify
        if pattern is not None and pattern != '':
            self.verification_pattern = pattern
        return self.verify(VerifyMethod.pattern) \
            .set_param('verify_pattern', self.get_verification_pattern()) \
            .do_verify()

    def verify(self, value: VerifyMethod):
        return self.set_param('verify', value.name)

    def create_only(self, value: bool = False):
        return self.set_param('create_only', int(value))

    def verify_pattern(self, pattern=None):
        return self.set_param('verify_pattern', pattern or self.get_verification_pattern())

    def verify_backlog(self, value: int):
        return self.set_param('verify_backlog', value)

    def verify_dump(self, value: bool = True):
        return self.set_param('verify_dump', int(value))

    def verify_fatal(self, value: bool = True):
        return self.set_param('verify_fatal', int(value))

    def verify_only(self, value: bool = True):
        return self.set_flags('verify_only') if value else self.remove_param('verify_only')

    def write_hint(self, value: str):
        return self.set_param('write_hint', value)

    def write_percentage(self, value: int):
        if value <= 100:
            return self.set_param('rwmixwrite', value)
        raise ValueError("Argument out of range. Should be 0-100.")

    def random_generator(self, value: RandomGenerator):
        return self.set_param('random_generator', value.name)

    def target(self, target):
        # accepts either a Device object or a raw path/file name
        if isinstance(target, Device):
            return self.file_name(target.path)
        return self.file_name(target)

    def add_job(self, job_name=None):
        # creates a new job section appended to the parent Fio object
        if not job_name:
            job_name = f'job{len(self.fio.jobs)}'
        new_job = FioParamConfig(self.fio, self.command_executor, f'[{job_name}]')
        self.fio.jobs.append(new_job)
        return new_job

    def clear_jobs(self):
        self.fio.jobs = []
        return self

    def edit_global(self):
        return self.fio.global_cmd_parameters

    def run(self, fio_timeout: datetime.timedelta = None):
        """Execute the configured fio run and parse its JSON output file."""
        if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
            self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
        fio_output = self.fio.run(fio_timeout)
        if fio_output.exit_code != 0:
            raise Exception(f"Exception occurred while trying to execute fio, exit_code:"
                            f"{fio_output.exit_code}.\n"
                            f"stdout: {fio_output.stdout}\nstderr: {fio_output.stderr}")
        TestRun.executor.run(f"sed -i '/^[[:alnum:]]/d' {self.fio.fio_file}")  # Remove warnings
        out = self.command_executor.run_expect_success(f"cat {self.fio.fio_file}").stdout
        return self.get_results(out)

    def run_in_background(self):
        """Start the configured fio run without waiting for completion."""
        if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
            self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
        return self.fio.run_in_background()

    @staticmethod
    def get_results(result):
        """Parse fio JSON output into a list of FioResult objects (one per job)."""
        data = json.loads(result, object_hook=lambda d: Namespace(**d))
        jobs_list = []
        if hasattr(data, 'jobs'):
            jobs = data.jobs
            for job in jobs:
                job_result = FioResult(data, job)
                jobs_list.append(job_result)
        return jobs_list
class FioParamCmd(FioParam):
    """FioParam rendered as command-line options (each parameter prefixed '--')."""
    def __init__(self, fio, command_executor: BaseExecutor, command_name='fio'):
        FioParam.__init__(self, fio, command_executor, command_name)
        self.param_name_prefix = "--"
class FioParamConfig(FioParam):
    """FioParam rendered as job-file lines (each parameter on its own line)."""
    def __init__(self, fio, command_executor: BaseExecutor, command_name='[global]'):
        FioParam.__init__(self, fio, command_executor, command_name)
        self.param_name_prefix = "\n"

View File

@ -0,0 +1,19 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import secrets
from aenum import Enum
class Pattern(Enum):
    """Data patterns for I/O verification; values are hex strings."""
    cyclic = "0x00336699ccffcc996633"
    sequential = "0x" + "".join([f"{i:02x}" for i in range(0, 256)])
    high = "0xaa"
    low = "0x84210"
    zeroes = "0x00"
    ones = "0xff"
    # alias of 'high'
    bin_1 = high
    bin_2 = "0x55"
    # regenerated on every import - not stable between test-framework processes
    random = "0x" + secrets.token_hex()

View File

@ -0,0 +1,164 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from test_utils.size import Size, Unit, UnitPerSecond
from test_utils.time import Time
class FioResult:
    """Accessor wrapper for one job entry of a parsed fio JSON result.

    :param result: whole parsed fio output (namespace), used for disk stats
    :param job: the single job namespace this instance reports on
    """
    def __init__(self, result, job):
        self.result = result
        self.job = job

    def __str__(self):
        result_dict = {
            "Total read I/O": self.read_io(),
            "Total read bandwidth ": self.read_bandwidth(),
            "Read bandwidth average ": self.read_bandwidth_average(),
            "Read bandwidth deviation ": self.read_bandwidth_deviation(),
            "Read IOPS": self.read_iops(),
            "Read runtime": self.read_runtime(),
            "Read average completion latency": self.read_completion_latency_average(),
            "Total write I/O": self.write_io(),
            "Total write bandwidth ": self.write_bandwidth(),
            "Write bandwidth average ": self.write_bandwidth_average(),
            "Write bandwidth deviation ": self.write_bandwidth_deviation(),
            "Write IOPS": self.write_iops(),
            "Write runtime": self.write_runtime(),
            "Write average completion latency": self.write_completion_latency_average(),
        }

        disks_name = self.disks_name()
        if disks_name:
            result_dict.update({"Disk name": ",".join(disks_name)})

        result_dict.update({"Total number of errors": self.total_errors()})

        s = ""
        for key in result_dict.keys():
            s += f"{key}: {result_dict[key]}\n"
        return s

    def total_errors(self):
        # 'total_err' is absent when no errors occurred
        return getattr(self.job, "total_err", 0)

    def disks_name(self):
        """Return names of disks covered by this result (empty when no disk stats)."""
        disks_name = []
        if hasattr(self.result, "disk_util"):
            for disk in self.result.disk_util:
                disks_name.append(disk.name)
        return disks_name

    # --- read statistics ---
    def read_io(self):
        return Size(self.job.read.io_kbytes, Unit.KibiByte)

    def read_bandwidth(self):
        return Size(self.job.read.bw, UnitPerSecond(Unit.KibiByte))

    def read_bandwidth_average(self):
        return Size(self.job.read.bw_mean, UnitPerSecond(Unit.KibiByte))

    def read_bandwidth_deviation(self):
        return Size(self.job.read.bw_dev, UnitPerSecond(Unit.KibiByte))

    def read_iops(self):
        return self.job.read.iops

    def read_runtime(self):
        return Time(microseconds=self.job.read.runtime)

    def read_completion_latency_min(self):
        return Time(nanoseconds=self.job.read.lat_ns.min)

    def read_completion_latency_max(self):
        return Time(nanoseconds=self.job.read.lat_ns.max)

    def read_completion_latency_average(self):
        return Time(nanoseconds=self.job.read.lat_ns.mean)

    def read_completion_latency_percentile(self):
        return self.job.read.lat_ns.percentile.__dict__

    def read_requests_number(self):
        return self.result.disk_util[0].read_ios

    # --- write statistics ---
    def write_io(self):
        return Size(self.job.write.io_kbytes, Unit.KibiByte)

    def write_bandwidth(self):
        return Size(self.job.write.bw, UnitPerSecond(Unit.KibiByte))

    def write_bandwidth_average(self):
        return Size(self.job.write.bw_mean, UnitPerSecond(Unit.KibiByte))

    def write_bandwidth_deviation(self):
        return Size(self.job.write.bw_dev, UnitPerSecond(Unit.KibiByte))

    def write_iops(self):
        return self.job.write.iops

    def write_runtime(self):
        return Time(microseconds=self.job.write.runtime)

    # BUGFIX: write_completion_latency_average was defined twice; the
    # duplicate (identical body) has been removed.
    def write_completion_latency_average(self):
        return Time(nanoseconds=self.job.write.lat_ns.mean)

    def write_completion_latency_min(self):
        return Time(nanoseconds=self.job.write.lat_ns.min)

    def write_completion_latency_max(self):
        return Time(nanoseconds=self.job.write.lat_ns.max)

    def write_completion_latency_percentile(self):
        return self.job.write.lat_ns.percentile.__dict__

    def write_requests_number(self):
        return self.result.disk_util[0].write_ios

    # --- trim statistics ---
    def trim_io(self):
        return Size(self.job.trim.io_kbytes, Unit.KibiByte)

    def trim_bandwidth(self):
        return Size(self.job.trim.bw, UnitPerSecond(Unit.KibiByte))

    def trim_bandwidth_average(self):
        return Size(self.job.trim.bw_mean, UnitPerSecond(Unit.KibiByte))

    def trim_bandwidth_deviation(self):
        return Size(self.job.trim.bw_dev, UnitPerSecond(Unit.KibiByte))

    def trim_iops(self):
        return self.job.trim.iops

    def trim_runtime(self):
        return Time(microseconds=self.job.trim.runtime)

    # BUGFIX: trim_completion_latency_average was defined twice; the
    # duplicate (identical body) has been removed.
    def trim_completion_latency_average(self):
        return Time(nanoseconds=self.job.trim.lat_ns.mean)

    def trim_completion_latency_min(self):
        return Time(nanoseconds=self.job.trim.lat_ns.min)

    def trim_completion_latency_max(self):
        return Time(nanoseconds=self.job.trim.lat_ns.max)

    def trim_completion_latency_percentile(self):
        return self.job.trim.lat_ns.percentile.__dict__

    @staticmethod
    def result_list_to_dict(results):
        """Map a list of FioResult objects to {jobname: job namespace}."""
        result_dict = {}
        for result in results:
            result_dict[result.job.jobname] = result.job
        return result_dict

378
test_tools/fs_utils.py Normal file
View File

@ -0,0 +1,378 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import base64
import math
import textwrap
from aenum import IntFlag, Enum
from collections import namedtuple
from datetime import datetime
from core.test_run import TestRun
from test_tools.dd import Dd
from test_utils.size import Size, Unit
class Permissions(IntFlag):
    """chmod-style permission bits: read (4), write (2), execute (1)."""
    r = 4
    w = 2
    x = 1

    def __str__(self):
        # render as the concatenation of set bit names, e.g. "rw"
        return "".join(p.name for p in Permissions if p in self)
class PermissionsUsers(IntFlag):
    """chmod-style user classes: user (4), group (2), other (1)."""
    u = 4
    g = 2
    o = 1

    def __str__(self):
        # render as the concatenation of set class names, e.g. "ug"
        return "".join(p.name for p in PermissionsUsers if p in self)
class PermissionSign(Enum):
    """chmod operators: add (+), remove (-), or set (=) permissions."""
    add = '+'
    remove = '-'
    set = '='
class FilesPermissions:
    """Validate permissions of a list of files against expected defaults."""

    def __init__(self, files_list: list):
        self.files_list = files_list
        # BUGFIX: was a mutable class attribute mutated by add_exceptions(),
        # which leaked exceptions between instances - now per-instance.
        # Maps file path -> expected permission overriding the defaults.
        self.perms_exceptions = {}

    def add_exceptions(self, perms: dict):
        """Register per-file expected permissions that override the defaults."""
        self.perms_exceptions.update(perms)

    def check(self, file_perm: int = 644, dir_perm: int = 755):
        """Return a list of (file, current_perm, expected_perm) mismatches.

        :raises Exception: when an entry is neither a regular file nor a directory.
        """
        failed_perms = []
        FailedPerm = namedtuple("FailedPerm", ["file", "current_perm", "expected_perm"])

        for file in self.files_list:
            perm = get_permissions(file)

            if file in self.perms_exceptions:
                if perm != self.perms_exceptions[file]:
                    failed_perms.append(FailedPerm(file, perm, self.perms_exceptions[file]))
                continue

            if check_if_regular_file_exists(file):
                if perm != file_perm:
                    failed_perms.append(FailedPerm(file, perm, file_perm))
            elif check_if_directory_exists(file):
                if perm != dir_perm:
                    failed_perms.append(FailedPerm(file, perm, dir_perm))
            else:
                raise Exception(f"{file}: File type not recognized.")
        return failed_perms
def create_directory(path, parents: bool = False):
    """Create a directory; with parents=True also create missing ancestors (mkdir --parents)."""
    parents_flag = "--parents " if parents else ""
    return TestRun.executor.run_expect_success(f"mkdir {parents_flag}\"{path}\"")
def check_if_directory_exists(path):
    """Return True when path exists and is a directory on the DUT."""
    result = TestRun.executor.run(f"test -d \"{path}\"")
    return result.exit_code == 0
def check_if_file_exists(path):
    """Return True when path exists on the DUT (any file type)."""
    result = TestRun.executor.run(f"test -e \"{path}\"")
    return result.exit_code == 0
def check_if_regular_file_exists(path):
    """Return True when path exists and is a regular file on the DUT."""
    result = TestRun.executor.run(f"test -f \"{path}\"")
    return result.exit_code == 0
def check_if_symlink_exists(path):
    """Return True when path exists and is a symbolic link on the DUT."""
    result = TestRun.executor.run(f"test -L \"{path}\"")
    return result.exit_code == 0
def copy(source: str,
         destination: str,
         force: bool = False,
         recursive: bool = False,
         dereference: bool = False):
    """Copy source to destination with cp, optionally forcing, recursing and following links."""
    flags = ""
    if force:
        flags += " --force"
    if recursive:
        flags += " --recursive"
    if dereference:
        flags += " --dereference"
    return TestRun.executor.run_expect_success(f"cp{flags} \"{source}\" \"{destination}\"")
def move(source, destination, force: bool = False):
    """Move/rename source to destination (mv), optionally overwriting the target."""
    force_flag = " --force" if force else ""
    return TestRun.executor.run_expect_success(f"mv{force_flag} \"{source}\" \"{destination}\"")
def remove(path, force: bool = False, recursive: bool = False, ignore_errors: bool = False):
    """Delete path with rm; raise on failure unless ignore_errors is set."""
    flags = ""
    if force:
        flags += " --force"
    if recursive:
        flags += " --recursive"
    output = TestRun.executor.run(f"rm{flags} \"{path}\"")
    if not ignore_errors and output.exit_code != 0:
        raise Exception(f"Could not remove file {path}."
                        f"\nstdout: {output.stdout}\nstderr: {output.stderr}")
    return output
def get_permissions(path, dereference: bool = True):
    """Return the octal permission digits of path as an int.

    NOTE: the digits are parsed as a decimal int (644, not 0o644); callers in
    this module compare against literals like 644/755, which is consistent.
    """
    deref_flag = '--dereference' if dereference else ''
    cmd = f"stat --format='%a' {deref_flag} \"{path}\""
    return int(TestRun.executor.run_expect_success(cmd).stdout)
def chmod(path, permissions: Permissions, users: PermissionsUsers,
          sign: PermissionSign = PermissionSign.set, recursive: bool = False):
    """Apply a symbolic chmod (e.g. chmod ug+rw path), optionally recursive."""
    recursive_flag = ' --recursive' if recursive else ''
    mode = f"{str(users)}{sign.value}{str(permissions)}"
    return TestRun.executor.run(f"chmod{recursive_flag} {mode} \"{path}\"")
def chmod_numerical(path, permissions: int, recursive: bool = False):
    """Apply a numeric chmod (e.g. 644), optionally recursive."""
    recursive_flag = ' --recursive' if recursive else ''
    return TestRun.executor.run_expect_success(f"chmod{recursive_flag} {permissions} \"{path}\"")
def chown(path, owner, group, recursive: bool = False):
    """Change owner and group of path (chown owner:group), optionally recursive.

    `recursive` now defaults to False for consistency with the other helpers in
    this module (copy/move/chmod/remove); existing positional callers keep working.
    """
    cmd = f"chown {'--recursive ' if recursive else ''}{owner}:{group} \"{path}\""
    return TestRun.executor.run_expect_success(cmd)
def create_file(path):
    """Create an empty file at path (touch); the path must not be blank."""
    if not path.strip():
        raise ValueError("Path cannot be empty or whitespaces.")
    return TestRun.executor.run_expect_success(f"touch \"{path}\"")
def compare(file, other_file):
    """Return True when both files have identical contents (cmp --silent)."""
    output = TestRun.executor.run(f"cmp --silent \"{file}\" \"{other_file}\"")
    if output.exit_code == 0:
        return True
    if output.exit_code > 1:
        # Exit codes above 1 signal a cmp failure, not a content difference.
        raise Exception(f"Compare command execution failed. {output.stdout}\n{output.stderr}")
    return False
def diff(file, other_file):
    """Return the textual diff of the two files, or None when they are identical.

    :raises Exception: when diff itself fails (exit code > 1)

    Bug fix: GNU diff prints the differences on stdout (exit code 1) and uses
    stderr only for errors; the previous implementation returned output.stderr,
    which is empty in the files-differ case, so callers always got an empty diff.
    """
    output = TestRun.executor.run(
        f"diff \"{file}\" \"{other_file}\"")
    if output.exit_code == 0:
        return None
    elif output.exit_code > 1:
        raise Exception(f"Diff command execution failed. {output.stdout}\n{output.stderr}")
    else:
        return output.stdout
# For some reason separators other than '/' don't work when using sed on system paths
# This requires escaping '/' in pattern and target string
def escape_sed_string(string: str, sed_replace: bool = False):
    """Escape a string for safe use inside a sed /pattern/ or replacement text."""
    escaped = string.replace("'", r"\x27").replace("/", r"\/")
    if sed_replace:
        # '&' has special meaning in sed replace and needs to be escaped
        escaped = escaped.replace("&", r"\&")
    return escaped
def insert_line_before_pattern(file, pattern, new_line):
    """Insert new_line above each line of file matching pattern (sed //i)."""
    escaped_pattern = escape_sed_string(pattern)
    escaped_line = escape_sed_string(new_line)
    return TestRun.executor.run_expect_success(
        f"sed -i '/{escaped_pattern}/i {escaped_line}' \"{file}\"")
def replace_first_pattern_occurrence(file, pattern, new_string):
    """Replace only the first occurrence of pattern in file (sed 0,/pat/s//repl/)."""
    escaped_pattern = escape_sed_string(pattern)
    escaped_new = escape_sed_string(new_string, sed_replace=True)
    return TestRun.executor.run_expect_success(
        f"sed -i '0,/{escaped_pattern}/s//{escaped_new}/' \"{file}\"")
def replace_in_lines(file, pattern, new_string, regexp=False):
    """Replace every occurrence of pattern in file (sed s///g); regexp enables -r."""
    escaped_pattern = escape_sed_string(pattern)
    escaped_new = escape_sed_string(new_string, sed_replace=True)
    regexp_flag = ' -r' if regexp else ''
    return TestRun.executor.run_expect_success(
        f"sed -i{regexp_flag} 's/{escaped_pattern}/{escaped_new}/g' \"{file}\"")
def append_line(file, string):
    """Append string as a new line at the end of file."""
    return TestRun.executor.run_expect_success(f"echo '{string}' >> \"{file}\"")
def remove_lines(file, pattern, regexp=False):
    """Delete every line of file matching pattern (sed //d); regexp enables -r."""
    escaped_pattern = escape_sed_string(pattern)
    regexp_flag = ' -r' if regexp else ''
    return TestRun.executor.run_expect_success(
        f"sed -i{regexp_flag} '/{escaped_pattern}/d' \"{file}\"")
def read_file(file):
    """Return the whole contents of file as a string (cat)."""
    if not file.strip():
        raise ValueError("File path cannot be empty or whitespace.")
    return TestRun.executor.run_expect_success(f"cat \"{file}\"").stdout
def write_file(file, content, overwrite: bool = True, unix_line_end: bool = True):
    """Write content to file on the DUT, transferring it base64-encoded in chunks.

    :param file: target path; must be non-blank
    :param content: text to write; must be non-empty
    :param overwrite: True truncates the file first, False appends
    :param unix_line_end: strip CR characters and ensure a trailing newline
    :raises ValueError: on blank path or empty content
    """
    if not file.strip():
        raise ValueError("File path cannot be empty or whitespace.")
    if not content:
        raise ValueError("Content cannot be empty.")
    if unix_line_end:
        # Bug fix: str.replace returns a new string; the result was previously discarded.
        content = content.replace('\r', '')
        content += '\n'
    # Chunk by plain slicing. The previous textwrap.TextWrapper-based split broke
    # the text at whitespace and dropped that whitespace, corrupting large payloads.
    max_length = 60000
    chunks = [content[i:i + max_length] for i in range(0, len(content), max_length)]
    if not chunks[-1].endswith('\n'):
        # Preserve the previous guarantee that the written file ends with a newline.
        chunks[-1] += '\n'
    for chunk in chunks:
        redirection_char = '>' if overwrite else '>>'
        overwrite = False  # only the first chunk may truncate; the rest append
        encoded_content = base64.b64encode(chunk.encode("utf-8"))
        cmd = f"printf '{encoded_content.decode('utf-8')}' " \
              f"| base64 --decode {redirection_char} \"{file}\""
        TestRun.executor.run_expect_success(cmd)
def uncompress_archive(file, destination=None):
    """Unpack a .zip (unzip) or tar archive into destination (default: the archive's directory)."""
    from test_utils.filesystem.file import File

    if not isinstance(file, File):
        file = File(file)
    if not destination:
        destination = file.parent_dir
    if str(file).endswith(".zip"):
        command = f"unzip -u {file.full_path} -d {destination}"
    else:
        command = f"tar --extract --file={file.full_path} --directory={destination}"
    TestRun.executor.run_expect_success(command)
def ls(path, options=''):
    """List path with a fixed, parse-friendly long format; returns raw ls stdout."""
    default_options = "-lA --time-style=+'%Y-%m-%d %H:%M:%S'"
    result = TestRun.executor.run(f"ls {default_options} {options} \"{path}\"")
    return result.stdout
def ls_item(path):
    """Return the single ls line describing path itself (via -d), or None when empty."""
    listing = ls(path, '-d')
    if not listing:
        return None
    return listing.splitlines()[0]
def parse_ls_output(ls_output, dir_path=''):
    """Parse `ls -l`-style output into FsItem objects (File/Directory/Symlink).

    :param ls_output: raw stdout from ls() (long listing, fixed time style)
    :param dir_path: directory prefix prepended to non-symlink entry names
    :return: list of FsItem-derived objects with permissions, owner, group,
        size and modification time filled in
    """
    split_output = ls_output.split('\n')
    fs_items = []
    for line in split_output:
        if not line.strip():
            continue
        line_fields = line.split()
        if len(line_fields) < 8:
            # Skip the "total" line and anything that is not a full listing entry.
            continue
        # First character of the mode string encodes the entry type.
        file_type = line[0]
        if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
            continue
        # Drop the type char and any trailing SELinux '.' marker from the mode.
        permissions = line_fields[0][1:].replace('.', '')
        owner = line_fields[2]
        group = line_fields[3]
        # Device nodes show "major, minor" instead of a size in field 4.
        has_size = ',' not in line_fields[4]
        if has_size:
            size = Size(float(line_fields[4]), Unit.Byte)
        else:
            size = None
            # Realign the remaining fields after the two-part device numbers.
            line_fields.pop(4)
        split_date = line_fields[5].split('-')
        split_time = line_fields[6].split(':')
        modification_time = datetime(int(split_date[0]), int(split_date[1]), int(split_date[2]),
                                     int(split_time[0]), int(split_time[1]), int(split_time[2]))
        if dir_path and file_type != 'l':
            full_path = '/'.join([dir_path, line_fields[7]])
        else:
            full_path = line_fields[7]

        # Imported lazily to avoid a circular import with the filesystem classes.
        from test_utils.filesystem.file import File, FsItem
        from test_utils.filesystem.directory import Directory
        from test_utils.filesystem.symlink import Symlink

        if file_type == '-':
            fs_item = File(full_path)
        elif file_type == 'd':
            fs_item = Directory(full_path)
        elif file_type == 'l':
            fs_item = Symlink(full_path)
        else:
            fs_item = FsItem(full_path)

        # Map each "rwx" triplet to Permissions flags; '---' maps to Permissions(0).
        fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))] \
            if permissions[:3] != '---' else Permissions(0)
        fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))] \
            if permissions[3:6] != '---' else Permissions(0)
        fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))] \
            if permissions[6:] != '---' else Permissions(0)

        fs_item.owner = owner
        fs_item.group = group
        fs_item.size = size
        fs_item.modification_time = modification_time
        fs_items.append(fs_item)
    return fs_items
def find_all_files(path: str, recursive: bool = True):
    """Return paths of all regular files and symlinks under path (find)."""
    if not path.strip():
        raise ValueError("No path given.")
    depth_option = '-maxdepth 1' if not recursive else ''
    output = TestRun.executor.run_expect_success(
        f"find \"{path}\" {depth_option} \( -type f -o -type l \) -print")
    return output.stdout.splitlines()
def find_all_dirs(path: str, recursive: bool = True):
    """Return paths of all directories under path (find -type d)."""
    if not path.strip():
        raise ValueError("No path given.")
    depth_option = '-maxdepth 1' if not recursive else ''
    output = TestRun.executor.run_expect_success(
        f"find \"{path}\" {depth_option} -type d -print")
    return output.stdout.splitlines()
def find_all_items(path: str, recursive: bool = True):
    """Return all files, symlinks and directories under path (files first)."""
    items = find_all_files(path, recursive)
    items.extend(find_all_dirs(path, recursive))
    return items
def readlink(link: str, options="--canonicalize-existing"):
    """Resolve a symlink to its target path; canonicalizes the full path by default."""
    output = TestRun.executor.run_expect_success(f"readlink {options} \"{link}\"")
    return output.stdout
def create_random_test_file(target_file_path: str,
                            file_size: Size = Size(1, Unit.MebiByte),
                            random: bool = True):
    """Create a file filled from /dev/urandom (or /dev/zero) using dd with direct I/O.

    :param target_file_path: path of the file to create
    :param file_size: requested size; rounded up to whole 512 KiB dd blocks
    :param random: True fills with random bytes, False with zeros
    :return: File object describing the created file

    NOTE(review): the block count uses file_size.value / bs.value — this assumes
    Size.value is normalized to a common unit for both objects; verify against
    test_utils.size.Size, otherwise mixed units would miscount blocks.
    """
    from test_utils.filesystem.file import File
    bs = Size(512, Unit.KibiByte)
    # Round up so at least file_size bytes are written.
    cnt = math.ceil(file_size.value / bs.value)
    file = File.create_file(target_file_path)
    dd = Dd().output(target_file_path) \
        .input("/dev/urandom" if random else "/dev/zero") \
        .block_size(bs) \
        .count(cnt) \
        .oflag("direct")
    dd.run()
    file.refresh_item()
    return file

179
test_tools/iostat.py Normal file
View File

@ -0,0 +1,179 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.size import Size, Unit, UnitPerSecond
from test_utils.time import Time
import csv
class IOstatExtended:
    """Parsed row of `iostat -x` (extended statistics) for a single device."""

    # Flag appended to the iostat command line by _get_iostat_list.
    iostat_option = "x"

    def __init__(self, device_statistics: dict):
        """Build the object from one csv.DictReader row of parsed iostat -x output."""
        # Notes about params:
        # await param is displayed only on flag -s
        # avgrq-sz doesn't appear in newer versions of iostat -x
        self.device_name = device_statistics["Device"]
        # rrqm/s
        self.read_requests_merged_per_sec = float(device_statistics["rrqm/s"])
        # wrqm/s
        self.write_requests_merged_per_sec = float(device_statistics["wrqm/s"])
        # r/s
        self.read_requests_per_sec = float(device_statistics["r/s"])
        # w/s
        self.write_requests_per_sec = float(device_statistics["w/s"])
        # rkB/s
        self.reads_per_sec = Size(float(device_statistics["rkB/s"]), UnitPerSecond(Unit.KiloByte))
        # wkB/s
        self.writes_per_sec = Size(float(device_statistics["wkB/s"]), UnitPerSecond(Unit.KiloByte))
        # avgqu-sz - in newer versions is named aqu-sz
        self.average_queue_length = float(
            device_statistics["aqu-sz"]
            if "aqu-sz" in device_statistics
            else device_statistics.get("avgqu-sz", 0)
        )
        # r_await
        self.read_average_service_time = Time(milliseconds=float(device_statistics["r_await"]))
        # w_await
        self.write_average_service_time = Time(milliseconds=float(device_statistics["w_await"]))
        # iostat's documentation says to not trust 11th field
        # util
        self.utilization = float(device_statistics["%util"])

    def __str__(self):
        return (
            f"\n=========={self.device_name} IO stats: ==========\n"
            f"Read requests merged per second: {self.read_requests_merged_per_sec}\n"
            f"Write requests merged per second: {self.write_requests_merged_per_sec}\n"
            f"Read requests: {self.read_requests_per_sec}\n"
            f"Write requests: {self.write_requests_per_sec}\n"
            f"Reads per second: {self.reads_per_sec}\n"
            f"Writes per second {self.writes_per_sec}\n"
            f"Average queue length {self.average_queue_length}\n"
            f"Read average service time {self.read_average_service_time}\n"
            f"Write average service time: {self.write_average_service_time}\n"
            f"Utilization: {self.utilization}\n"
            f"=================================================\n"
        )

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # NOTE(review): unlike IOstatBasic.__eq__ there is no isinstance check here,
        # so comparing with a truthy non-IOstatExtended object raises AttributeError.
        if not other:
            return False
        return (
            self.read_requests_merged_per_sec == other.read_requests_merged_per_sec
            and self.write_requests_merged_per_sec == other.write_requests_merged_per_sec
            and self.read_requests_per_sec == other.read_requests_per_sec
            and self.write_requests_per_sec == other.write_requests_per_sec
            and self.reads_per_sec == other.reads_per_sec
            and self.writes_per_sec == other.writes_per_sec
            and self.average_queue_length == other.average_queue_length
            and self.read_average_service_time == other.read_average_service_time
            and self.write_average_service_time == other.write_average_service_time
            and self.utilization == other.utilization
        )

    @classmethod
    def get_iostat_list(
        cls,
        devices_list: [Device],
        since_boot: bool = True,
        interval: int = 1,
    ):
        """
        Returns list of IOstat objects containing extended statistics displayed
        in kibibytes/kibibytes per second.
        """
        return _get_iostat_list(cls, devices_list, since_boot, interval)
class IOstatBasic:
    """Parsed row of plain `iostat -d` (basic device statistics) for a single device."""

    # Flag appended to the iostat command line by _get_iostat_list.
    iostat_option = "d"

    def __init__(self, device_statistics):
        """Build the object from one csv.DictReader row of parsed iostat -d output."""
        self.device_name = device_statistics["Device"]
        # tps
        self.transfers_per_second = float(device_statistics["tps"])
        # kB_read/s
        self.reads_per_second = Size(
            float(device_statistics["kB_read/s"]), UnitPerSecond(Unit.KiloByte)
        )
        # kB_wrtn/s
        self.writes_per_second = Size(
            float(device_statistics["kB_wrtn/s"]), UnitPerSecond(Unit.KiloByte)
        )
        # kB_read
        self.total_reads = Size(float(device_statistics["kB_read"]), Unit.KibiByte)
        # kB_wrtn
        self.total_writes = Size(float(device_statistics["kB_wrtn"]), Unit.KibiByte)

    def __str__(self):
        return (
            f"\n=========={self.device_name} IO stats: ==========\n"
            f"Transfers per second: {self.transfers_per_second}\n"
            f"Kilobytes read per second: {self.reads_per_second}\n"
            f"Kilobytes written per second: {self.writes_per_second}\n"
            f"Kilobytes read: {self.total_reads}\n"
            f"Kilobytes written: {self.total_writes}\n"
            f"=================================================\n"
        )

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if not isinstance(other, IOstatBasic):
            return False
        # All parsed attributes must match.
        return vars(self) == vars(other)

    @classmethod
    def get_iostat_list(
        cls,
        devices_list: [Device],
        since_boot: bool = True,
        interval: int = 1,
    ):
        """
        Returns list of IOstat objects containing basic statistics displayed
        in kibibytes/kibibytes per second.
        """
        return _get_iostat_list(cls, devices_list, since_boot, interval)
def _get_iostat_list(
    class_type: type,
    devices_list: [Device],
    since_boot: bool,
    interval: int,
):
    """Run iostat for the given devices and parse its output into class_type objects.

    :param class_type: IOstatBasic or IOstatExtended (supplies iostat_option)
    :param devices_list: devices to report on
    :param since_boot: when False, take one interval-second sample (-y) instead of
        the cumulative since-boot statistics
    :param interval: sampling interval in seconds (must be >= 1)
    :raises ValueError: when interval < 1
    """
    if interval < 1:
        raise ValueError("iostat interval must be positive!")

    iostat_cmd = f"iostat -k -{class_type.iostat_option} "
    if not since_boot:
        iostat_cmd += f"-y {interval} 1 "
    iostat_cmd += " ".join([name.get_device_id() for name in devices_list])

    # Raw string: '\s' and '\+' are sed regex tokens, not Python escapes
    # (the plain literal emitted "invalid escape sequence" DeprecationWarnings).
    sed_cmd = r"sed -n '/^$/d;s/\s\+/,/g;/^Device/,$p'"

    cmd = f"{iostat_cmd} | {sed_cmd}"
    lines = TestRun.executor.run(cmd).stdout.splitlines()
    table_contents = csv.DictReader(lines, delimiter=",")
    return [class_type(device) for device in table_contents]

129
test_tools/kedr.py Normal file
View File

@ -0,0 +1,129 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import wget
import os
from enum import Enum
from core.test_run import TestRun
from test_tools import fs_utils
from test_utils.os_utils import DEBUGFS_MOUNT_POINT
KEDR_0_6_URL = "https://github.com/euspectre/kedr/archive/v0.6.tar.gz"
BUILD_DIR = "build"
LEAKS_LOGS_PATH = f"{DEBUGFS_MOUNT_POINT}/kedr_leak_check"
KMALLOC_FAULT_SIMULATION_PATH = "/sys/kernel/debug/kedr_fault_simulation"
class KedrProfile(Enum):
    """KEDR configuration profiles: memory-leak checking or fault simulation."""
    MEM_LEAK_CHECK = "leak_check.conf"
    FAULT_SIM = "fsim.conf"
class Kedr:
    """Wrapper around the KEDR kernel-module analysis tool (leak check / fault simulation)."""

    @staticmethod
    def is_installed():
        """Return True when the kedr binary is available on the DUT."""
        return "KEDR version" in TestRun.executor.run("kedr --version").stdout.strip()

    @classmethod
    def install(cls):
        """Download, build (cmake + make) and install KEDR 0.6 on the DUT."""
        if cls.is_installed():
            TestRun.LOGGER.info("Kedr is already installed!")
            return
        # TODO check if cmake is installed before
        # TODO consider using os_utils.download_file()
        kedr_archive = wget.download(KEDR_0_6_URL)
        TestRun.executor.rsync_to(
            f"\"{kedr_archive}\"",
            f"{TestRun.config['working_dir']}")
        # Top-level directory name inside the tarball.
        kedr_dir = TestRun.executor.run_expect_success(
            f"cd {TestRun.config['working_dir']} && "
            f"tar -ztf \"{kedr_archive}\" | sed -e 's@/.*@@' | uniq"
        ).stdout
        TestRun.executor.run_expect_success(
            f"cd {TestRun.config['working_dir']} && "
            f"tar -xf \"{kedr_archive}\" && "
            f"cd {kedr_dir} && "
            f"mkdir -p {BUILD_DIR} && "
            f"cd {BUILD_DIR} && "
            f"cmake ../sources/ && "
            f"make && "
            f"make install"
        )
        os.remove(kedr_archive)
        # Typo fix in log message: "succesfully" -> "successfully".
        TestRun.LOGGER.info("Kedr installed successfully")

    @classmethod
    def is_loaded(cls):
        """Return True when `kedr status` reports KEDR as loaded."""
        if not cls.is_installed():
            raise Exception("Kedr is not installed!")
        return "KEDR status: loaded" in TestRun.executor.run_expect_success("kedr status").stdout

    @classmethod
    def start(cls, module, profile: KedrProfile = KedrProfile.MEM_LEAK_CHECK):
        """Start KEDR for the given kernel module with the chosen profile."""
        if not cls.is_installed():
            raise Exception("Kedr is not installed!")
        TestRun.LOGGER.info(f"Starting kedr with {profile} profile")
        start_cmd = f"kedr start {module} -f {profile.value}"
        TestRun.executor.run_expect_success(start_cmd)

    # TODO extend to scenarios other than kmalloc
    @staticmethod
    def setup_fault_injections(condition: str = "1"):
        """Configure fault simulation to fail kmalloc when `condition` evaluates true.

        Fix: this method takes no self/cls but was missing the @staticmethod
        decorator; adding it keeps the existing Kedr.setup_fault_injections(...)
        call style working and documents the intent.
        """
        TestRun.executor.run_expect_success(
            f'echo "kmalloc" > {KMALLOC_FAULT_SIMULATION_PATH}/points/kmalloc/current_indicator')
        TestRun.executor.run_expect_success(
            f'echo "{condition}" > {KMALLOC_FAULT_SIMULATION_PATH}/points/kmalloc/expression')

    @classmethod
    def fsim_show_last_fault(cls):
        """Return the description of the last simulated fault."""
        if not cls.is_installed():
            raise Exception("Kedr is not installed!")
        if not cls.is_loaded():
            raise Exception("Kedr is not loaded!")
        return fs_utils.read_file(f"{KMALLOC_FAULT_SIMULATION_PATH}/last_fault")

    @classmethod
    def stop(cls):
        """Stop KEDR."""
        if not cls.is_installed():
            raise Exception("Kedr is not installed!")
        TestRun.executor.run_expect_success("kedr stop")

    @classmethod
    def check_for_mem_leaks(cls, module):
        """Raise when KEDR's leak checker recorded leaks or unallocated frees for module."""
        if not cls.is_installed():
            raise Exception("Kedr is not installed!")
        if not cls.is_loaded():
            raise Exception("Kedr is not loaded!")
        if fs_utils.check_if_directory_exists(f"{LEAKS_LOGS_PATH}/{module}"):
            logs_path = f"{LEAKS_LOGS_PATH}/{module}"
        elif fs_utils.check_if_directory_exists(f"{DEBUGFS_MOUNT_POINT}"):
            logs_path = f"{LEAKS_LOGS_PATH}"
        else:
            raise Exception("Couldn't find kedr logs dir!")
        leaks = fs_utils.read_file(f"{logs_path}/possible_leaks")
        frees = fs_utils.read_file(f"{logs_path}/unallocated_frees")
        summary = fs_utils.read_file(f"{logs_path}/info")
        if leaks or frees:
            raise Exception("Memory leaks found!\n"
                            f"Kedr summary: {summary}\n"
                            f"Possible memory leaks: {leaks}\n"
                            f"Unallocated frees: {frees}\n")

140
test_tools/mdadm.py Normal file
View File

@ -0,0 +1,140 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from core.test_run import TestRun
from test_utils.size import Unit
class Mdadm:
    """Thin wrapper around the mdadm CLI for creating and inspecting software RAIDs."""

    @staticmethod
    def assemble(device_paths: str = None):
        """Assemble the given arrays, or scan for all (`mdadm --assemble [--scan]`)."""
        cmd = f"mdadm --assemble " + (device_paths if device_paths else "--scan")
        return TestRun.executor.run(cmd)

    @staticmethod
    def create(conf, device_paths: str):
        """Create a RAID described by conf (name, metadata, level, size, ...) on device_paths."""
        if not conf.name:
            raise ValueError("Name needed for RAID creation.")
        if not device_paths:
            raise ValueError("Device paths needed for RAID creation.")
        cmd = f"mdadm --create --run /dev/md/{conf.name} "
        if conf.metadata.value != "legacy":
            cmd += f"--metadata={conf.metadata.value} "
        if conf.level is not None:
            cmd += f"--level={conf.level.value} "
        if conf.number_of_devices:
            cmd += f"--raid-devices={conf.number_of_devices} "
        if conf.strip_size:
            cmd += f"--chunk={conf.strip_size} "
        if conf.size:
            cmd += f"--size={int(conf.size.get_value(Unit.KibiByte))} "
        cmd += device_paths
        return TestRun.executor.run_expect_success(cmd)

    @staticmethod
    def detail(raid_device_paths: str):
        """Run `mdadm --detail` for the given RAID devices, preferring by-id paths."""
        if not raid_device_paths:
            raise ValueError("Provide paths of RAID devices to show details for.")
        cmd = f"mdadm --detail {raid_device_paths} --prefer=by-id"
        return TestRun.executor.run_expect_success(cmd)

    @classmethod
    def detail_result(cls, raid_device_paths: str):
        """Parse `mdadm --detail` output into {path: {path, devices, level, uuid[, metadata]}}."""
        output = cls.detail(raid_device_paths)
        details = {}
        for device_details in re.split("^/dev/", output.stdout, flags=re.MULTILINE):
            if not device_details:
                continue
            lines = device_details.splitlines()
            # First line is the device path with a trailing colon; re-add the split-off prefix.
            key = "/dev/" + lines[0].rstrip(':')
            details[key] = {}
            details[key]["path"] = key
            details[key]["devices"] = cls.__parse_devices(device_details)
            details[key]["level"] = cls.__parse_level(device_details)
            details[key]["uuid"] = cls.__parse_uuid(device_details)
            metadata = cls.__parse_metadata(device_details)
            if metadata:
                details[key]["metadata"] = metadata
        return details

    @staticmethod
    def examine(brief: bool = True, device_paths: str = None):
        """Run `mdadm --examine` on the given devices (or scan), optionally brief."""
        cmd = f"mdadm --examine "
        if brief:
            cmd += "--brief "
        cmd += (device_paths if device_paths else "--scan")
        return TestRun.executor.run_expect_success(cmd)

    @classmethod
    def examine_result(cls, device_paths: str = None):
        """Return detail dicts for every examined RAID, resolving container metadata."""
        output = cls.examine(device_paths=device_paths)
        raids = []
        uuid_path_prefix = "/dev/disk/by-id/md-uuid-"
        # sometimes links for RAIDs are not properly created, force udev to create them
        TestRun.executor.run("udevadm trigger && udevadm settle")
        for line in output.stdout.splitlines():
            split_line = line.split()
            try:
                uuid = [i for i in split_line if i.startswith("UUID=")][0].split("=")[-1]
            except IndexError:
                # Line without a UUID field - not an array description.
                continue
            raid_link = uuid_path_prefix + uuid
            raid = Mdadm.detail_result(raid_link)[raid_link]
            if raid["level"] == "Container":
                continue
            raid["metadata"], raid["array_devices"] = "legacy", []
            container = (
                [i for i in split_line if i.startswith("container=")][0]
                if "container=" in line else None
            )
            if container:
                # Member of a container array: inherit metadata and devices from it.
                container_link = uuid_path_prefix + container.split("=")[-1]
                raid["container"] = cls.detail_result(container_link)[container_link]
                raid["metadata"] = raid["container"]["metadata"]
                raid["array_devices"] = raid["container"]["devices"]
            raids.append(raid)
        return raids

    @staticmethod
    def stop(device_paths: str = None):
        """Stop the given arrays, or all of them via --scan."""
        cmd = f"mdadm --stop " + (device_paths if device_paths else "--scan")
        return TestRun.executor.run_expect_success(cmd)

    @staticmethod
    def zero_superblock(device_paths: str):
        """Erase the md superblock from the given member devices."""
        cmd = f"mdadm --zero-superblock {device_paths}"
        return TestRun.executor.run_expect_success(cmd)

    @staticmethod
    def __parse_devices(details: str):
        # Member devices appear as the last field of lines containing " /dev/".
        devices = []
        for detail in [d.strip() for d in details.splitlines() if " /dev/" in d]:
            devices.append(detail.split()[-1])
        return devices

    @staticmethod
    def __parse_level(details: str):
        # "Raid Level : raid1" -> "Raid1"
        level = [line for line in details.splitlines() if "Raid Level" in line][0].split(" : ")[-1]
        return level.capitalize()

    @staticmethod
    def __parse_uuid(details: str):
        uuid = [line for line in details.splitlines() if "UUID" in line][0].split(" : ")[-1]
        return uuid

    @staticmethod
    def __parse_metadata(details: str):
        # The "Version : ..." line is not present for every metadata format.
        try:
            return [
                line.strip() for line in details.splitlines()
                if line.strip().startswith("Version :")
            ][0].split(" : ")[-1]
        except IndexError:
            return None

60
test_tools/nvme_cli.py Normal file
View File

@ -0,0 +1,60 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import json
from core.test_run import TestRun
def format_disk(device, metadata_size=None, block_size=None,
                force=True, format_params=None, reset=True):
    """Format an NVMe namespace to the LBA format matching the given parameters.

    :param device: device object exposing .path
    :param metadata_size: desired metadata size; requires a unique matching LBA format
    :param block_size: desired logical block size
    :param force: pass -f to `nvme format`
    :param format_params: extra CLI parameters appended to the command
    :param reset: pass -r to `nvme format`
    :raises Exception: when no (or no unique) LBA format matches the parameters,
        or when neither metadata_size nor block_size was given
    """
    force_param = '-f' if force else ''
    reset_param = '-r' if reset else ''
    format_params = ' '.join(format_params) if format_params else ''
    lbafs = get_lba_formats(device)
    if metadata_size:
        # metadata_size given: the match (optionally narrowed by block_size)
        # must be unique; the elif below only handles metadata_size=None.
        lbafs = [lbaf for lbaf in lbafs if lbaf['metadata_size'] == metadata_size]
        if block_size:
            lbafs = [lbaf for lbaf in lbafs if lbaf['block_size'] == block_size]
        if len(lbafs) == 1:
            TestRun.LOGGER.info(
                f"Formatting device {device.path} with {metadata_size} metadata size "
                f"and {lbafs[0]['block_size']} block size")
            TestRun.executor.run_expect_success(
                f"nvme format {device.path} -l {lbafs[0]['lba_format']} "
                f"{force_param} {reset_param} {format_params}")
            TestRun.LOGGER.info(f"Successfully format device: {device.path}")
        else:
            raise Exception(f"Wrong parameters to format device: {device.path}")
    elif block_size:
        # Only block_size given: any format with that block size is acceptable.
        lbafs = [lbaf for lbaf in lbafs if lbaf['block_size'] == block_size]
        if len(lbafs) > 0:
            TestRun.LOGGER.info(
                f"Formatting device {device.path} with {block_size} block size")
            TestRun.executor.run_expect_success(
                f"nvme format {device.path} -b {block_size} "
                f"{force_param} {reset_param} {format_params}")
            TestRun.LOGGER.info(f"Successfully format device: {device.path}")
        else:
            raise Exception(f"Wrong parameters to format device: {device.path}")
    else:
        raise Exception("Cannot format device without specified parameters")
def get_lba_formats(device):
    """Return the list of LBA formats supported by an NVMe namespace.

    Each entry is a dict with lba_format (index), metadata_size, block_size
    (2**ds bytes) and in_use flag, parsed from `nvme id-ns -o json`.
    """
    output = json.loads(TestRun.executor.run_expect_success(
        f"nvme id-ns {device.path} -o json").stdout)
    entries = output['lbafs']
    lbafs = []
    # enumerate() instead of entries.index(entry): index() returns the FIRST
    # matching element, which mis-numbers formats when two LBAF descriptors are
    # identical (and is needlessly O(n^2)).
    for index, entry in enumerate(entries):
        lbafs.append({"lba_format": index,
                      "metadata_size": entry['ms'],
                      "block_size": 2 ** entry['ds'],
                      "in_use": index == output['flbas']})
    return lbafs
def get_lba_format_in_use(device):
    """Return the LBA format currently in use on the given NVMe namespace."""
    return next(filter(lambda lbaf: lbaf['in_use'], get_lba_formats(device)))

121
test_tools/packaging.py Normal file
View File

@ -0,0 +1,121 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import re
from core.test_run import TestRun
from test_utils.output import CmdException
class RpmSet:
    """Manages the installation state of a fixed set of .rpm packages."""

    def __init__(self, packages_paths: list):
        self.packages = packages_paths

    def _get_package_names(self):
        # Strip directory and the trailing ".rpm" extension from each path.
        names = [os.path.splitext(os.path.basename(pckg))[0] for pckg in self.packages]
        return " ".join(names)

    def check_if_installed(self):
        """Return True when every package of the set is present in rpm's database."""
        if not self.packages:
            raise ValueError("No packages given.")
        query = TestRun.executor.run(f"rpm --query {self._get_package_names()}")
        return query.exit_code == 0

    def install(self):
        """Install (or upgrade/reinstall) all packages; raise CmdException on errors."""
        TestRun.LOGGER.info(f"Installing RPM packages")
        if not self.packages:
            raise ValueError("No packages given.")
        output = TestRun.executor.run(
            f"rpm --upgrade --verbose --replacepkgs {' '.join(self.packages)}"
        )
        failed = output.exit_code != 0 \
            or re.search("error", output.stdout, re.IGNORECASE) \
            or re.search("error", output.stderr, re.IGNORECASE)
        if failed:
            raise CmdException("Installation failed or errors found during the process.", output)

    def uninstall(self):
        """Erase all packages; raise when not installed or removal reports errors."""
        TestRun.LOGGER.info(f"Uninstalling RPM packages")
        if not self.check_if_installed():
            raise FileNotFoundError("Could not uninstall - packages not installed yet.")
        output = TestRun.executor.run(f"rpm --erase --verbose {self._get_package_names()}")
        failed = output.exit_code != 0 \
            or re.search("error", output.stdout, re.IGNORECASE) \
            or re.search("error", output.stderr, re.IGNORECASE)
        if failed:
            raise CmdException("Uninstallation failed or errors found during the process.", output)

    @staticmethod
    def uninstall_all_matching(*packages_names: str):
        """Purge every installed RPM whose name contains any of the given substrings."""
        for name in packages_names:
            TestRun.LOGGER.info(f"Uninstalling all RPM packages matching '{name}'")
            TestRun.executor.run_expect_success(
                f"rpm --query --all | grep {name} | "
                f"xargs --no-run-if-empty rpm --erase --verbose"
            )
class DebSet:
    """Manages the installation state of a fixed set of .deb packages."""

    def __init__(self, packages_paths: list):
        self.packages = packages_paths

    def _get_package_names(self):
        # Debian file names are "<name>_<version>_<arch>.deb"; keep only <name>.
        names = [os.path.basename(pckg).split("_")[0] for pckg in self.packages]
        return " ".join(names)

    def check_if_installed(self):
        """Return True when every package of the set is known to dpkg."""
        if not self.packages:
            raise ValueError("No packages given.")
        query = TestRun.executor.run(f"dpkg --no-pager --list {self._get_package_names()}")
        return query.exit_code == 0

    def install(self):
        """Install all packages; raise CmdException on errors."""
        TestRun.LOGGER.info(f"Installing DEB packages")
        if not self.packages:
            raise ValueError("No packages given.")
        output = TestRun.executor.run(
            f"dpkg --force-confdef --force-confold --install {' '.join(self.packages)}"
        )
        failed = output.exit_code != 0 \
            or re.search("error", output.stdout, re.IGNORECASE) \
            or re.search("error", output.stderr, re.IGNORECASE)
        if failed:
            raise CmdException("Installation failed or errors found during the process.", output)

    def uninstall(self):
        """Purge all packages; raise when not installed or removal reports errors."""
        TestRun.LOGGER.info(f"Uninstalling DEB packages")
        if not self.check_if_installed():
            raise FileNotFoundError("Could not uninstall - packages not installed yet.")
        output = TestRun.executor.run(f"dpkg --purge {self._get_package_names()}")
        failed = output.exit_code != 0 \
            or re.search("error", output.stdout, re.IGNORECASE) \
            or re.search("error", output.stderr, re.IGNORECASE)
        if failed:
            raise CmdException("Uninstallation failed or errors found during the process.", output)

    @staticmethod
    def uninstall_all_matching(*packages_names: str):
        """Purge every installed DEB whose name contains any of the given substrings."""
        for name in packages_names:
            TestRun.LOGGER.info(f"Uninstalling all DEB packages matching '{name}'")
            TestRun.executor.run_expect_success(
                f"dpkg-query --no-pager --showformat='${{Package}}\n' --show | grep {name} | "
                f"xargs --no-run-if-empty dpkg --purge"
            )

View File

View File

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="utf-8"?>
<Peach xmlns="http://peachfuzzer.com/2012/Peach" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://peachfuzzer.com/2012/Peach /peach/peach.xsd">
    <!-- The structure of a data block -->
    <DataModel name="Value">
        <!-- Place for an auto generated config -->
    </DataModel>

    <!-- Record separator: one fuzzed value per line in the output file -->
    <DataModel name="NewLine">
        <String name="NewLine" value="\n" mutable="false"/>
    </DataModel>

    <!-- Basic state machine logic needed to test a protocol -->
    <!-- Encoding fuzzed parameter to base64 and adding new line at the end -->
    <StateModel name="TheState" initialState="Initial">
        <State name="Initial">
            <Action type="output">
                <DataModel ref="Value">
                    <Transformer class="Base64Encode"/>
                </DataModel>
            </Action>
            <Action type="output">
                <DataModel ref="NewLine"/>
            </Action>
            <Action type="close"/>
        </State>
    </StateModel>

    <!-- Write output to fuzzedParams.txt file -->
    <Test name="Default">
        <StateModel ref="TheState"/>
        <Publisher class="File">
            <Param name="FileName" value="fuzzedParams.txt"/>
            <Param name="Append" value="true"/>
            <Param name="Overwrite" value="false"/>
        </Publisher>
    </Test>
</Peach>

View File

@ -0,0 +1,208 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import wget
import base64
import posixpath
import random
import tempfile
import lxml.etree as etree
from collections import namedtuple
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.fs_utils import create_directory, check_if_file_exists, write_file
class PeachFuzzer:
    """
    API to work with Peach Fuzzer tool in Test-Framework.
    Peach Fuzzer is used only for generating fuzzed values that later are used in Test-Framework
    in order to execute fuzzed CLI commands or to prepare fuzzed config files.
    """

    # Download location of the Peach Fuzzer 3.0 release used by the framework.
    peach_fuzzer_3_0_url = "https://sourceforge.net/projects/peachfuzz/files/Peach/3.0/" \
                           "peach-3.0.202-linux-x86_64-release.zip"
    # Installation root for Peach on the DUT.
    base_dir = "/root/Fuzzy"
    # Directory name inside the unpacked release archive.
    peach_dir = "peach-3.0.202-linux-x86_64-release"
    # XML template shipped next to this module; its DataModel is filled in at runtime.
    xml_config_template = posixpath.join(posixpath.dirname(__file__), "config_template.xml")
    # Generated config and fuzzer-output locations on the DUT.
    xml_config_file = posixpath.join(base_dir, "fuzzerConfig.xml")
    xml_namespace = "http://peachfuzzer.com/2012/Peach"
    fuzzy_output_file = posixpath.join(base_dir, "fuzzedParams.txt")
    # Placeholder in command templates that gets replaced with each fuzzed value.
    tested_param_placeholder = b"{param}"
    # escape backslash first, so it doesn't interfere with escaping other characters
    escape_chars = '\\\n"\'&|;()`<>$! '
@classmethod
def get_fuzzed_command(cls, command_template: bytes, count: int):
    """
    Generate command with fuzzed parameter provided on command_template.
    Command is ready to be executed with test executor
    :param command_template: byte string with command to be executed.
    parameter to be replaced with fuzzed string has to be tested_param_placeholder
    :param count: amount of fuzzed commands to generate
    :returns: named tuple with fuzzed param and CLI ready to be executed with Test-Framework
    executors. Param is returned in order to implement correct values checkers in the tests
    """
    TestRun.LOGGER.info(f"Try to get commands with fuzzed parameters")
    FuzzedCommand = namedtuple('FuzzedCommand', ['param', 'command'])
    if cls.tested_param_placeholder not in command_template:
        TestRun.block("No param placeholder is found in command template!")
    # The whole command is base64-wrapped and decoded on the DUT so the raw
    # fuzzed bytes survive the shell unmodified.
    cmd_prefix = b"echo "
    cmd_suffix = b" | base64 --decode | sh"
    for fuzzed_parameter in cls.generate_peach_fuzzer_parameters(count):
        yield FuzzedCommand(fuzzed_parameter,
                            cmd_prefix + base64.b64encode(command_template.replace(
                                cls.tested_param_placeholder, fuzzed_parameter)) + cmd_suffix)
@classmethod
def generate_peach_fuzzer_parameters(cls, count: int):
"""
Generate fuzzed parameter according to Peach Fuzzer XML config
Fuzzed parameter later can be used for either generating cli command or config.
:param count: amount of fuzzed strings to generate
:returns: fuzzed value in byte string
"""
if not cls._is_installed():
TestRun.LOGGER.info("Try to install Peach Fuzzer")
cls._install()
if not cls._is_xml_config_prepared():
TestRun.block("No Peach Fuzzer XML config needed to generate fuzzed values was found!")
fs_utils.remove(cls.fuzzy_output_file, force=True, ignore_errors=True)
TestRun.LOGGER.info(f"Generate {count} unique fuzzed values")
cmd = f"cd {cls.base_dir}; {cls.peach_dir}/peach --range 0,{count - 1} " \
f"--seed {random.randrange(2 ** 32)} {cls.xml_config_file} > " \
f"{cls.base_dir}/peachOutput.log"
TestRun.executor.run_expect_success(cmd)
if not check_if_file_exists(cls.fuzzy_output_file):
TestRun.block("No expected fuzzy output file was found!")
# process fuzzy output file locally on the controller as it can be very big
local_fuzzy_file = tempfile.NamedTemporaryFile(delete=False)
local_fuzzy_file.close()
TestRun.executor.rsync_from(cls.fuzzy_output_file, local_fuzzy_file.name)
with open(local_fuzzy_file.name, "r") as fd:
for fuzzed_param_line in fd:
fuzzed_param_bytes = base64.b64decode(fuzzed_param_line)
fuzzed_param_bytes = cls._escape_special_chars(fuzzed_param_bytes)
yield fuzzed_param_bytes
@classmethod
def generate_config(cls, data_model_config: list):
"""
Generate Peach Fuzzer XML config based on template provided in xml_config_template
and data template passed as an argument.
:param data_model_config: dictionary with config that has to be used for generating
DataModel section in PeachFuzzer XML config. Config can be stored in test in more compact
form, e.g. in yaml, and can be converted to dict just before passing to this function.
Example of such config in yaml:
- name: String
attributes:
name: CacheId
value: '1'
size: '14'
mutable: 'true'
children:
- name: Hint
attributes:
name: NumericalString
value: 'true'
"""
if not posixpath.exists(cls.xml_config_template):
TestRun.block("Peach fuzzer xml config template not found!")
root = etree.parse(cls.xml_config_template)
data_model = root.find(f'{{{cls.xml_namespace}}}DataModel[@name="Value"]')
cls.__create_xml_nodes(data_model, data_model_config)
create_directory(cls.base_dir, True)
write_file(cls.xml_config_file, etree.tostring(root, encoding="unicode"))
@classmethod
def copy_config(cls, config_file: str):
"""
Instead of generating config with "generate_config" method, config can be prepared manually
and just passed as is to PeachFuzzer.
:param config_file: Peach Fuzzer XML config to be copied to the DUT
"""
if not posixpath.exists(config_file):
TestRun.block("Peach fuzzer xml config to be copied doesn't exist!")
create_directory(cls.base_dir, True)
TestRun.executor.rsync_to(config_file, cls.xml_config_file)
@classmethod
def __create_xml_nodes(cls, xml_node, config):
"""
Create XML code for Peach Fuzzer based on python dict config
"""
for element in config:
new_node = etree.Element(element["name"])
for attr_name, attr_value in element["attributes"].items():
new_node.set(attr_name, attr_value)
if element.get("children"):
cls.__create_xml_nodes(new_node, element.get("children"))
xml_node.append(new_node)
@classmethod
def _install(cls):
"""
Install Peach Fuzzer on the DUT
"""
peach_archive = wget.download(cls.peach_fuzzer_3_0_url)
create_directory(cls.base_dir, True)
TestRun.executor.rsync_to(f"\"{peach_archive}\"", f"{cls.base_dir}")
TestRun.executor.run_expect_success(
f'cd {cls.base_dir} && unzip -u "{peach_archive}"')
if cls._is_installed():
TestRun.LOGGER.info("Peach fuzzer installed successfully")
os.remove(peach_archive)
else:
TestRun.block("Peach fuzzer installation failed!")
@classmethod
def _is_installed(cls):
"""
Check if Peach Fuzzer is installed on the DUT
"""
if not cls._is_mono_installed():
TestRun.block("Mono is not installed, can't continue with Peach Fuzzer!")
if fs_utils.check_if_directory_exists(posixpath.join(cls.base_dir, cls.peach_dir)):
return "Peach" in TestRun.executor.run(
f"cd {cls.base_dir} && {cls.peach_dir}/peach --version").stdout.strip()
else:
return False
@classmethod
def _escape_special_chars(cls, fuzzed_str: bytes):
"""
Escape special chars provided in escape_chars list in the fuzzed string generated by
Peach Fuzzer
Escaping is done for example in order to make fuzzed string executable in Linux CLI
If fuzzed string will be used in other places, escape_chars list may be overwritten.
"""
for i in cls.escape_chars:
i = bytes(i, "utf-8")
if i in fuzzed_str[:]:
fuzzed_str = fuzzed_str.replace(i, b'\\' + i)
return fuzzed_str
@classmethod
def _is_xml_config_prepared(cls):
"""
Check if Peach Fuzzer XML config is present on the DUT
"""
if fs_utils.check_if_file_exists(cls.xml_config_file):
return True
else:
return False
@staticmethod
def _is_mono_installed():
"""
Check if Mono (.NET compatible framework) is installed on the DUT
If it's not, it has to be installed manually.
For RHEL-based OSes it's usually mono-complete package
"""
return TestRun.executor.run("which mono").exit_code == 0

4
test_utils/__init__.py Normal file
View File

@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

View File

@ -0,0 +1,18 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import concurrent
def start_async_func(func, *args):
    """
    Starts asynchronous task and returns a Future object, which in turn returns an
    actual result after triggering result() method on it.
    - result() method is waiting for the task to be completed.
    - done() method returns True when task ended (have a result or ended with an exception)
      otherwise returns False

    :param func: callable to run in a background thread
    :param args: positional arguments forwarded to func
    :returns: concurrent.futures.Future wrapping the running call
    """
    # Import the submodule explicitly: a bare "import concurrent" does NOT
    # guarantee that the "concurrent.futures" attribute exists - it only worked
    # before when some other module had already imported concurrent.futures.
    import concurrent.futures

    # NOTE: the executor is intentionally not shut down here; its worker thread
    # is joined at interpreter exit, after the future completes.
    executor = concurrent.futures.ThreadPoolExecutor()
    return executor.submit(func, *args)

190
test_utils/disk_finder.py Normal file
View File

@ -0,0 +1,190 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from core.test_run import TestRun
from test_tools import disk_utils
from test_tools.fs_utils import check_if_file_exists, readlink
from test_utils import os_utils
from test_utils.output import CmdException
def find_disks():
    """Discover the platform's test disks (SSDs first, then HDDs) and return their metadata."""
    TestRun.LOGGER.info("Finding platform's disks.")

    # TODO: intelmas should be implemented as a separate tool in the future.
    # There will be intelmas installer in case, when it is not installed
    intelmas_output = TestRun.executor.run('intelmas')
    if intelmas_output.exit_code != 0:
        raise Exception(f"Error while executing command: 'intelmas'.\n"
                        f"stdout: {intelmas_output.stdout}\nstderr: {intelmas_output.stderr}")

    devices_result = []
    remaining_devices = get_block_devices_list()
    try:
        # SSD discovery consumes matching entries; the rest are treated as HDDs.
        discover_ssd_devices(remaining_devices, devices_result)
        discover_hdd_devices(remaining_devices, devices_result)
    except Exception as e:
        raise Exception(f"Exception occurred while looking for disks: {str(e)}")

    return devices_result
def get_block_devices_list():
    """Return names of non-OS block devices (sd*/nvme*) listed under /sys/block."""
    all_devices = TestRun.executor.run_expect_success("ls /sys/block -1").stdout.splitlines()
    os_disks = get_system_disks()
    return [dev for dev in all_devices
            if ('sd' in dev or 'nvme' in dev) and dev not in os_disks]
def discover_hdd_devices(block_devices, devices_res):
    """Append metadata for every rotational drive in block_devices to devices_res."""
    for dev in block_devices:
        removable = TestRun.executor.run_expect_success(
            f"cat /sys/block/{dev}/removable").stdout
        if removable == "1":
            continue  # skip removable drives
        block_size = disk_utils.get_block_size(dev)
        # 4096-byte native sector drives are reported as a distinct type.
        disk_type = 'hdd4k' if int(block_size) == 4096 else 'hdd'
        serial = TestRun.executor.run_expect_success(
            f"sg_inq /dev/{dev} | grep -i 'serial number'"
        ).stdout.split(': ')[1].strip()
        devices_res.append({
            "type": disk_type,
            "path": f"{resolve_to_by_id_link(dev)}",
            "serial": serial,
            "blocksize": block_size,
            "size": disk_utils.get_size(dev)})
    # Every remaining device has been classified - consume the input list.
    block_devices.clear()
# This method discovers only Intel SSD devices
def discover_ssd_devices(block_devices, devices_res):
    """Append metadata for Intel SSDs found via intelmas; matched names are
    removed from block_devices so HDD discovery won't see them again."""
    ssd_count = int(TestRun.executor.run_expect_success(
        'intelmas show -intelssd | grep DevicePath | wc -l').stdout)
    for i in range(0, ssd_count):
        # Workaround for intelmas bug that lists all of the devices (non intel included)
        # with -intelssd flag
        if TestRun.executor.run(
                f"intelmas show -display index -intelssd {i} | grep -w Intel").exit_code == 0:
            device_path = TestRun.executor.run_expect_success(
                f"intelmas show -intelssd {i} | grep DevicePath").stdout.split()[2]
            dev = device_path.replace("/dev/", "")
            # SCSI-generic ("sg") names must be mapped back to their sd* device.
            if "sg" in dev:
                sata_dev = TestRun.executor.run_expect_success(
                    f"sg_map | grep {dev}").stdout.split()[1]
                dev = sata_dev.replace("/dev/", "")
            # Skip devices filtered out earlier (e.g. OS disks).
            if dev not in block_devices:
                continue
            serial_number = TestRun.executor.run_expect_success(
                f"intelmas show -intelssd {i} | grep SerialNumber").stdout.split()[2].strip()
            # Classify: non-NVMe -> sata; NVMe Optane -> optane; other NVMe -> nand.
            if 'nvme' not in device_path:
                disk_type = 'sata'
                device_path = dev
            elif TestRun.executor.run(
                    f"intelmas show -intelssd {i} | grep Optane").exit_code == 0:
                disk_type = 'optane'
            else:
                disk_type = 'nand'
            devices_res.append({
                "type": disk_type,
                "path": resolve_to_by_id_link(device_path),
                "serial": serial_number,
                "blocksize": disk_utils.get_block_size(dev),
                "size": disk_utils.get_size(dev)})
            block_devices.remove(dev)
def get_disk_serial_number(dev_path):
    """Return the serial number of dev_path, or None if no probe produced one."""
    # Probes are tried in order; the first command with non-empty output wins.
    commands = [
        f"(udevadm info --query=all --name={dev_path} | grep 'SCSI.*_SERIAL' || "
        f"udevadm info --query=all --name={dev_path} | grep 'ID_SERIAL_SHORT') | "
        "awk --field-separator '=' '{print $NF}'",
        f"sg_inq {dev_path} 2> /dev/null | grep '[Ss]erial number:' | "
        "awk '{print $NF}'",
        f"udevadm info --query=all --name={dev_path} | grep 'ID_SERIAL' | "
        "awk --field-separator '=' '{print $NF}'"
    ]
    for probe in commands:
        probe_output = TestRun.executor.run(probe).stdout
        if probe_output:
            # Only the first line matters; udevadm may print several matches.
            return probe_output.split('\n')[0]
    return None
def get_all_serial_numbers():
    """Map serial number -> stable /dev/disk/by-id path for each non-OS block device."""
    serial_numbers = {}
    for dev in get_block_devices_list():
        serial = get_disk_serial_number(dev)
        try:
            path = resolve_to_by_id_link(dev)
        except Exception:
            # Device without a by-id link cannot be addressed reliably - skip it.
            continue
        if serial:
            serial_numbers[serial] = path
        else:
            TestRun.LOGGER.warning(f"Device {path} ({dev}) does not have a serial number.")
            # Fall back to keying the entry by its path.
            serial_numbers[path] = path
    return serial_numbers
def get_system_disks():
    """Return names of the physical disks backing the root ('/') filesystem."""
    root_device = TestRun.executor.run_expect_success('mount | grep " / "').stdout.split()[0]
    device_name = readlink(root_device).split('/')[-1]
    sys_block_path = os_utils.get_sys_block_path()

    # Resolve stacked devices (dm/md) down to their component devices.
    used_device_names = __get_slaves(device_name) or [device_name]

    disk_names = []
    for name in used_device_names:
        if check_if_file_exists(f'{sys_block_path}/{name}/partition'):
            # A partition - climb one level up to the whole-disk device.
            parent_device = readlink(f'{sys_block_path}/{name}/..').split('/')[-1]
            disk_names.append(parent_device)
        else:
            disk_names.append(name)
    return disk_names
def __get_slaves(device_name: str):
    """Recursively resolve a device's sysfs 'slaves' (e.g. dm -> underlying disks).

    Returns None when the device has no slaves directory at all.
    """
    try:
        slave_names = TestRun.executor.run_expect_success(
            f'ls {os_utils.get_sys_block_path()}/{device_name}/slaves').stdout.splitlines()
    except CmdException as e:
        # Missing slaves directory means a leaf device; any other error is real.
        if "No such file or directory" not in e.output.stderr:
            raise
        return None

    resolved = []
    for slave_name in slave_names:
        deeper = __get_slaves(slave_name)
        if deeper:
            resolved.extend(deeper)
        else:
            resolved.append(slave_name)
    return resolved
def resolve_to_by_id_link(path):
    """Translate a kernel device name (e.g. 'sda') into its /dev/disk/by-id link."""
    by_id_names = TestRun.executor.run_expect_success("ls /dev/disk/by-id -1").stdout.splitlines()
    for by_id_name in by_id_names:
        candidate = posixpath.join("/dev/disk/by-id", by_id_name)
        # handle exception for broken links
        try:
            if readlink(candidate) == readlink(posixpath.join("/dev", path)):
                return candidate
        except CmdException:
            continue
    raise ValueError(f'By-id device link not found for device {path}')

61
test_utils/drbd.py Normal file
View File

@ -0,0 +1,61 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
from test_utils.filesystem.file import File
class Resource:
    """A DRBD resource definition; str() renders it in drbd.conf syntax."""

    def __init__(self, name, device, nodes, options=None):
        self.name = name
        self.device = device
        self.nodes = nodes
        self.options = options

    def __str__(self):
        pieces = [
            f"resource {self.name} {{ \n",
            f"  device {self.device}; \n",
            "".join(str(node) for node in self.nodes),
        ]
        if self.options:
            pieces.append("  options {\n")
            for key, value in self.options.items():
                pieces.append(f"    {key} {value};\n")
            pieces.append("  }\n")
        pieces.append("}")
        return "".join(pieces)

    def __repr__(self):
        return self.__str__()

    def save(self, path="/etc/drbd.d/", filename=None):
        """Write the rendered resource config to <path><filename> (default '<name>.res')."""
        target_name = filename if filename else f"{self.name}.res"
        File(path + target_name).write(str(self))
class Node:
    """A single 'on <host>' section of a DRBD resource definition."""

    def __init__(self, name, disk, meta_disk, ip, port):
        self.name = name
        self.disk = disk
        self.meta_disk = meta_disk
        self.ip = ip
        self.port = port

    def __str__(self):
        section = [
            f"  on {self.name} {{ \n",
            f"    disk {self.disk};\n",
            f"    meta-disk {self.meta_disk};\n",
            f"    address {self.ip}:{self.port};\n",
            "  } \n",
        ]
        return "".join(section)

    def __repr__(self):
        return self.__str__()

43
test_utils/dut.py Normal file
View File

@ -0,0 +1,43 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from storage_devices.disk import Disk, DiskType
class Dut:
    """Representation of a single Device Under Test built from a config dictionary."""

    def __init__(self, dut_info):
        # Raw config dictionary (parsed from the DUT config file).
        self.config = dut_info
        self.disks = []
        for disk_info in dut_info.get('disks', []):
            self.disks.append(Disk.create_disk(disk_info['path'],
                                               DiskType[disk_info['type']],
                                               disk_info['serial'],
                                               disk_info['blocksize']))
        # Sort by disk type, descending enum order.
        self.disks.sort(key=lambda disk: disk.disk_type, reverse=True)

        # Optional management/control entries; None when not configured.
        self.ipmi = dut_info['ipmi'] if 'ipmi' in dut_info else None
        self.spider = dut_info['spider'] if 'spider' in dut_info else None
        self.wps = dut_info['wps'] if 'wps' in dut_info else None
        self.env = dut_info['env'] if 'env' in dut_info else None
        self.ip = dut_info['ip'] if 'ip' in dut_info else "127.0.0.1"

    def __str__(self):
        dut_str = f'ip: {self.ip}\n'
        dut_str += f'ipmi: {self.ipmi["ip"]}\n' if self.ipmi is not None else ''
        dut_str += f'spider: {self.spider["ip"]}\n' if self.spider is not None else ''
        dut_str += f'wps: {self.wps["ip"]} port: {self.wps["port"]}\n' \
            if self.wps is not None else ''
        dut_str += f'disks:\n'
        for disk in self.disks:
            dut_str += f"\t{disk}"
            dut_str += "\n"
        return dut_str

    def get_disks_of_type(self, disk_type: DiskType):
        """Return all disks whose disk_type equals the requested one."""
        ret_list = []
        for d in self.disks:
            if d.disk_type == disk_type:
                ret_list.append(d)
        return ret_list

View File

@ -0,0 +1,113 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from textwrap import dedent
from string import Template
from pathlib import Path
from .systemd import enable_service, reload_daemon, systemd_service_directory, disable_service
from test_tools.fs_utils import (
create_file,
write_file,
remove,
)
class EmergencyEscape:
    """
    Installs a pair of systemd units that automatically escape emergency.target.

    When the DUT drops into emergency mode, the escape unit logs a marker to the
    kernel ring buffer, runs user-provided recovery commands and resumes the
    default boot target; the companion cleanup unit then removes the escape
    unit so the workaround does not persist across later boots.
    """

    # Marker echoed to /dev/kmsg; its presence in a log proves the escape ran.
    escape_marker = "EMERGENCY_ESCAPE"
    escape_service = Path("emergency-escape.service")
    escape_service_template = Template(
        dedent(
            f"""
            [Unit]
            After=emergency.target
            IgnoreOnIsolate=true
            DefaultDependencies=no

            [Service]
            Type=oneshot
            ExecStart=/bin/sh -c '/usr/bin/echo "{escape_marker}" > /dev/kmsg'
            $user_method
            ExecStart=/usr/bin/systemctl daemon-reload
            ExecStart=/usr/bin/systemctl default --no-block

            [Install]
            WantedBy=emergency.target
            """
        ).strip()
    )
    cleanup_service = Path("emergency-escape-cleanup.service")
    cleanup_service_template = Template(
        dedent(
            """
            [Unit]
            After=emergency-escape.service
            IgnoreOnIsolate=true
            DefaultDependencies=no

            [Service]
            Type=oneshot
            $user_method
            ExecStart=/usr/bin/systemctl disable emergency-escape.service
            ExecStart=/usr/bin/rm -f /usr/lib/systemd/system/emergency-escape.service
            ExecStart=/usr/bin/systemctl daemon-reload

            [Install]
            WantedBy=emergency-escape.service
            """
        ).strip()
    )

    def __init__(self):
        # User-supplied shell commands injected into the respective unit files.
        self.escape_method = []
        self.cleanup_method = []

    def arm(self):
        """Write and enable both units so the next emergency boot auto-escapes."""
        escape_path = str(systemd_service_directory / EmergencyEscape.escape_service)
        cleanup_path = str(systemd_service_directory / EmergencyEscape.cleanup_service)

        create_file(escape_path)
        create_file(cleanup_path)

        # Render each template with its user-provided ExecStart lines.
        user_escape = "\n".join([f"ExecStart={method}" for method in self.escape_method])
        user_cleanup = "\n".join([f"ExecStart={method}" for method in self.cleanup_method])

        escape_contents = EmergencyEscape.escape_service_template.substitute(
            user_method=user_escape
        )
        cleanup_contents = EmergencyEscape.cleanup_service_template.substitute(
            user_method=user_cleanup
        )

        write_file(escape_path, escape_contents)
        write_file(cleanup_path, cleanup_contents)

        enable_service(EmergencyEscape.escape_service)
        enable_service(EmergencyEscape.cleanup_service)

    def cleanup(self):
        """Remove both unit files (best-effort) and reload the systemd daemon."""
        remove(str(systemd_service_directory / EmergencyEscape.cleanup_service), ignore_errors=True)
        remove(str(systemd_service_directory / EmergencyEscape.escape_service), ignore_errors=True)
        reload_daemon()

    @classmethod
    def verify_trigger_in_log(cls, log_list):
        """Return True if the escape marker appears in any of the given log lines."""
        for l in log_list:
            if cls.escape_marker in l:
                return True
        return False

    def add_escape_method_command(self, method):
        self.escape_method.append(method)

    def add_cleanup_method_command(self, method):
        self.cleanup_method.append(method)

    def __enter__(self):
        self.arm()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.cleanup()

View File

View File

@ -0,0 +1,31 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.fs_utils import check_if_directory_exists
from test_utils.filesystem.fs_item import FsItem
class Directory(FsItem):
    """A directory on the DUT filesystem."""

    def __init__(self, full_path):
        FsItem.__init__(self, full_path)

    def ls(self):
        """List the directory contents as parsed FsItem objects."""
        listing = fs_utils.ls(f"{self.full_path}")
        return fs_utils.parse_ls_output(listing, self.full_path)

    @staticmethod
    def create_directory(path: str, parents: bool = False):
        """Create the directory (optionally with parents) and return it as parsed item."""
        fs_utils.create_directory(path, parents)
        return fs_utils.parse_ls_output(fs_utils.ls_item(path))[0]

    @staticmethod
    def create_temp_directory(parent_dir_path: str = "/tmp"):
        """Create a unique temporary directory under parent_dir_path."""
        output = TestRun.executor.run_expect_success(
            f"mktemp --directory --tmpdir={parent_dir_path}")
        if not check_if_directory_exists(output.stdout):
            TestRun.LOGGER.exception("'mktemp' succeeded, but created directory does not exist")
        return Directory(output.stdout)

View File

@ -0,0 +1,83 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.dd import Dd
from test_utils.filesystem.fs_item import FsItem
from test_utils.size import Size
class File(FsItem):
    """A regular file on the DUT, addressed by its full path."""

    def __init__(self, full_path):
        FsItem.__init__(self, full_path)

    def compare(self, other_file):
        """Byte-compare this file with other_file."""
        return fs_utils.compare(str(self), str(other_file))

    def diff(self, other_file):
        """Return textual diff between this file and other_file."""
        return fs_utils.diff(str(self), str(other_file))

    def md5sum(self, binary=True):
        """Return the md5 checksum of the file contents."""
        cmd_result = TestRun.executor.run(
            f"md5sum {'-b' if binary else ''} {self.full_path}")
        if cmd_result.exit_code != 0:
            raise Exception(
                f"Md5sum command execution failed! {cmd_result.stdout}\n{cmd_result.stderr}")
        return cmd_result.stdout.split()[0]

    def read(self):
        return fs_utils.read_file(str(self))

    def write(self, content, overwrite: bool = True):
        fs_utils.write_file(str(self), content, overwrite)
        self.refresh_item()

    def get_properties(self):
        """Snapshot the file's current metadata."""
        return FileProperties(self)

    @staticmethod
    def create_file(path: str):
        fs_utils.create_file(path)
        return fs_utils.parse_ls_output(fs_utils.ls_item(path))[0]

    def padding(self, size: Size):
        """Overwrite the file with a single block of zeroes of the given size."""
        Dd().input("/dev/zero").output(self).count(1).block_size(size).run()
        self.refresh_item()

    def remove(self, force: bool = False, ignore_errors: bool = False):
        fs_utils.remove(str(self), force=force, ignore_errors=ignore_errors)

    def copy(self,
             destination,
             force: bool = False,
             recursive: bool = False,
             dereference: bool = False):
        """Copy the file to destination and return the copy as a parsed item."""
        fs_utils.copy(str(self), destination, force, recursive, dereference)
        # Copying into an existing directory puts the file inside it.
        if fs_utils.check_if_directory_exists(destination):
            path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
        else:
            path = destination
        return fs_utils.parse_ls_output(fs_utils.ls_item(path))[0]
class FileProperties:
    """Immutable-by-convention snapshot of a file's metadata at construction time."""

    def __init__(self, file):
        parsed = fs_utils.parse_ls_output(fs_utils.ls_item(file.full_path))[0]
        self.full_path = parsed.full_path
        self.parent_dir = FsItem.get_parent_dir(self.full_path)
        self.name = FsItem.get_name(self.full_path)
        self.modification_time = parsed.modification_time
        self.owner = parsed.owner
        self.group = parsed.group
        self.permissions = parsed.permissions
        self.size = parsed.size

    def __eq__(self, other):
        # Intentionally ignores full_path/parent_dir/modification_time.
        return (self.permissions == other.permissions
                and self.size == other.size
                and self.owner == other.owner
                and self.group == other.group
                and self.name == other.name)

View File

@ -0,0 +1,102 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from test_tools import fs_utils
class FsItem:
    """Base class for filesystem objects on the DUT (files, directories, symlinks)."""

    def __init__(self, full_path):
        self.full_path = full_path
        # all below values must be refreshed in refresh_item()
        self.parent_dir = self.get_parent_dir(self.full_path)
        self.name = self.get_name(self.full_path)
        self.modification_time = None
        self.owner = None
        self.group = None
        self.permissions = FsPermissions()
        self.size = None

    @staticmethod
    def get_name(path):
        """Return the last path component (tolerates a trailing slash)."""
        head, tail = posixpath.split(path)
        return tail or posixpath.basename(head)

    @staticmethod
    def get_parent_dir(path):
        """Return the parent directory of path (tolerates a trailing slash)."""
        head, tail = posixpath.split(path)
        if tail:
            return head
        else:
            head, tail = posixpath.split(head)
            return head

    def __str__(self):
        return self.full_path

    def chmod_numerical(self, permissions: int, recursive: bool = False):
        """Change mode using a numeric value (e.g. 755)."""
        fs_utils.chmod_numerical(self.full_path, permissions, recursive)
        self.refresh_item()

    def chmod(self,
              permissions: fs_utils.Permissions,
              users: fs_utils.PermissionsUsers,
              sign: fs_utils.PermissionSign = fs_utils.PermissionSign.set,
              recursive: bool = False):
        """Change mode using symbolic permissions for the given user classes."""
        fs_utils.chmod(self.full_path, permissions, users, sign=sign, recursive=recursive)
        self.refresh_item()

    def chown(self, owner, group, recursive: bool = False):
        """Change owner and group of this item."""
        fs_utils.chown(self.full_path, owner, group, recursive)
        self.refresh_item()

    def copy(self,
             destination,
             force: bool = False,
             recursive: bool = False,
             dereference: bool = False):
        """Copy this item to destination; return the target as a parsed item."""
        target_dir_exists = fs_utils.check_if_directory_exists(destination)
        fs_utils.copy(str(self), destination, force, recursive, dereference)
        # Copying into an existing directory places the item inside it;
        # otherwise destination is the new full path.
        if target_dir_exists:
            path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
        else:
            path = destination
        output = fs_utils.ls_item(f"{path}")
        return fs_utils.parse_ls_output(output)[0]

    def move(self,
             destination,
             force: bool = False):
        """Move this item to destination, updating self.full_path accordingly."""
        target_dir_exists = fs_utils.check_if_directory_exists(destination)
        fs_utils.move(str(self), destination, force)
        if target_dir_exists:
            self.full_path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
        else:
            self.full_path = destination
        self.refresh_item()
        return self

    def refresh_item(self):
        """Re-read this item's metadata from the filesystem after a mutation."""
        updated_file = fs_utils.parse_ls_output(fs_utils.ls_item(self.full_path))[0]
        # keep order the same as in __init__()
        self.parent_dir = updated_file.parent_dir
        self.name = updated_file.name
        self.modification_time = updated_file.modification_time
        self.owner = updated_file.owner
        self.group = updated_file.group
        self.permissions = updated_file.permissions
        self.size = updated_file.size
        return self
class FsPermissions:
    """Permission triple (user/group/other) compared by value."""

    def __init__(self, user=None, group=None, other=None):
        self.user = user
        self.group = group
        self.other = other

    def __eq__(self, other):
        return (self.user == other.user
                and self.group == other.group
                and self.other == other.other)

View File

@ -0,0 +1,91 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from test_tools.fs_utils import (
readlink,
create_directory,
check_if_symlink_exists,
check_if_directory_exists,
)
from test_utils.filesystem.file import File
class Symlink(File):
    """Symbolic link on the DUT; content operations (e.g. md5sum) follow the target."""

    def __init__(self, full_path):
        File.__init__(self, full_path)

    def md5sum(self, binary=True):
        # Checksum is computed over the link target, not the link file itself.
        output = TestRun.executor.run_expect_success(
            f"md5sum {'-b' if binary else ''} {self.get_target()}"
        )
        return output.stdout.split()[0]

    def get_target(self):
        """Return the path this link points to."""
        return readlink(self.full_path)

    def get_symlink_path(self):
        return self.full_path

    def remove_symlink(self):
        """Remove the link itself; the target is left untouched."""
        path = self.get_symlink_path()
        TestRun.executor.run_expect_success(f"rm -f {path}")

    @classmethod
    def create_symlink(cls, link_path: str, target: str, force: bool = False):
        """
        Creates a Symlink - new or overwrites existing one if force parameter is True
        :param link_path: path to the place where we want to create a symlink
        :param target: the path of an object that the requested Symlink points to
        :param force: determines if the existing symlink with the same name should be overridden
        return: Symlink object located under link_path
        """
        cmd = f"ln --symbolic {target} {link_path}"
        is_dir = check_if_directory_exists(link_path)
        parent_dir = cls.get_parent_dir(link_path)
        if is_dir:
            raise IsADirectoryError(f"'{link_path}' is an existing directory.")
        if force:
            # Ensure the parent exists, then drop any pre-existing link.
            if not check_if_directory_exists(parent_dir):
                create_directory(parent_dir, True)
            TestRun.executor.run_expect_success(f"rm -f {link_path}")
        TestRun.executor.run_expect_success(cmd)
        return cls(link_path)

    @classmethod
    def get_symlink(cls, link_path: str, target: str = None, create: bool = False):
        """
        Request a Symlink (create new or identify existing)
        :param link_path: full path of the requested Symlink
        :param target: path of an object that the requested Symlink points to
        (required if create is True)
        :param create: determines if the requested Symlink should be created if it does not exist
        :return: Symlink object located under link_path
        """
        if create and not target:
            raise AttributeError("Target is required for symlink creation.")

        is_symlink = check_if_symlink_exists(link_path)
        if is_symlink:
            # NOTE(review): both sides are passed through readlink before the
            # comparison - presumably to normalize them; confirm behavior when
            # `target` is not itself a link.
            if not target or readlink(link_path) == readlink(target):
                return cls(link_path)
            else:
                raise FileExistsError("Existing symlink points to a different target.")
        elif not create:
            raise FileNotFoundError("Requested symlink does not exist.")

        is_dir = check_if_directory_exists(link_path)
        if is_dir:
            raise IsADirectoryError(
                f"'{link_path}' is an existing directory." "\nUse a full path for symlink creation."
            )

        parent_dir = cls.get_parent_dir(link_path)
        if not check_if_directory_exists(parent_dir):
            create_directory(parent_dir, True)

        cmd = f"ln --symbolic {target} {link_path}"
        TestRun.executor.run_expect_success(cmd)
        return cls(link_path)

20
test_utils/fstab.py Normal file
View File

@ -0,0 +1,20 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from test_tools import fs_utils
from test_utils import systemd
def add_mountpoint(device, mount_point, fs_type, mount_now=True):
    """Persist a mount entry in /etc/fstab and optionally mount it immediately."""
    entry = f"{device.path} {mount_point} {fs_type.name} defaults 0 0"
    fs_utils.append_line("/etc/fstab", entry)
    systemd.reload_daemon()
    if mount_now:
        # Restarting local-fs.target makes systemd mount the new fstab entry.
        systemd.restart_service("local-fs.target")
def remove_mountpoint(device):
    """Remove the fstab lines referring to device and reload the systemd daemon."""
    fs_utils.remove_lines("/etc/fstab", device.path)
    systemd.reload_daemon()

11
test_utils/generator.py Normal file
View File

@ -0,0 +1,11 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import random
import string
def random_string(length: int, chars=string.ascii_letters + string.digits):
    """Return a random string of *length* characters drawn from *chars*."""
    return "".join(random.choice(chars) for _ in range(length))

112
test_utils/io_stats.py Normal file
View File

@ -0,0 +1,112 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from core.test_run import TestRun
from test_utils.output import CmdException
SYSFS_LINE_FORMAT = r"^(\d+\s+){10,}\d+$"
# /proc/diskstats lines are prefixed with "major minor name".
PROCFS_LINE_FORMAT = r"^\d+\s+\d+\s+\w+\s+" + SYSFS_LINE_FORMAT[1:]


# This class represents block device I/O statistics.
# For more information see:
# https://www.kernel.org/doc/Documentation/admin-guide/iostats.rst
class IoStats:
    def __init__(self):
        self.reads = None               # field 0
        self.reads_merged = None        # field 1
        self.sectors_read = None        # field 2
        self.read_time_ms = None        # field 3
        self.writes = None              # field 4
        self.writes_merged = None       # field 5
        self.sectors_written = None     # field 6
        self.write_time_ms = None       # field 7
        self.ios_in_progress = None     # field 8
        self.io_time_ms = None          # field 9
        self.io_time_weighed_ms = None  # field 10
        # only in kernels 4.18+
        self.discards = None            # field 11
        self.discards_merged = None     # field 12
        self.sectors_discarded = None   # field 13
        self.discard_time_ms = None     # field 14
        # only in kernels 5.5+
        self.flushes = None             # field 15
        self.flush_time_ms = None       # field 16

    def __sub__(self, other):
        """Return the per-field counter difference self - other.

        Optional fields (discards/flushes) are subtracted only when BOTH
        samples provide them; otherwise they stay None in the result.

        :raises Exception: when self's read/write counters are lower than
                           other's (i.e. the samples are in the wrong order).
        """
        if self.reads < other.reads:
            raise Exception("Cannot subtract Reads")
        if self.writes < other.writes:
            raise Exception("Cannot subtract Writes")
        stats = IoStats()
        stats.reads = self.reads - other.reads
        stats.reads_merged = self.reads_merged - other.reads_merged
        stats.sectors_read = self.sectors_read - other.sectors_read
        stats.read_time_ms = self.read_time_ms - other.read_time_ms
        stats.writes = self.writes - other.writes
        stats.writes_merged = self.writes_merged - other.writes_merged
        stats.sectors_written = self.sectors_written - other.sectors_written
        stats.write_time_ms = self.write_time_ms - other.write_time_ms
        # In-flight I/Os are an instantaneous gauge, not a counter - a delta
        # is meaningless, so it is defined as 0.
        stats.ios_in_progress = 0
        stats.io_time_ms = self.io_time_ms - other.io_time_ms
        stats.io_time_weighed_ms = self.io_time_weighed_ms - other.io_time_weighed_ms
        # BUGFIX: the previous implementation tested these fields on the fresh
        # result object ("stats"), where they are always None, so the optional
        # deltas were never computed. Checking "is not None" on the operands
        # also keeps legitimate zero values from being skipped.
        if self.discards is not None and other.discards is not None:
            stats.discards = self.discards - other.discards
        if self.discards_merged is not None and other.discards_merged is not None:
            stats.discards_merged = self.discards_merged - other.discards_merged
        if self.sectors_discarded is not None and other.sectors_discarded is not None:
            stats.sectors_discarded = self.sectors_discarded - other.sectors_discarded
        if self.discard_time_ms is not None and other.discard_time_ms is not None:
            stats.discard_time_ms = self.discard_time_ms - other.discard_time_ms
        if self.flushes is not None and other.flushes is not None:
            stats.flushes = self.flushes - other.flushes
        if self.flush_time_ms is not None and other.flush_time_ms is not None:
            stats.flush_time_ms = self.flush_time_ms - other.flush_time_ms
        return stats

    @staticmethod
    def parse(stats_line: str):
        """Parse one sysfs ('.../stat') or procfs ('/proc/diskstats') line.

        :raises Exception: when the line matches neither format.
        """
        stats_line = stats_line.strip()
        if re.match(SYSFS_LINE_FORMAT, stats_line):
            fields = stats_line.split()
        elif re.match(PROCFS_LINE_FORMAT, stats_line):
            # Skip "major minor name" prefix present in /proc/diskstats.
            fields = stats_line.split()[3:]
        else:
            raise Exception("Wrong input format for diskstat parser")

        values = [int(f) for f in fields]

        stats = IoStats()
        stats.reads = values[0]
        stats.reads_merged = values[1]
        stats.sectors_read = values[2]
        stats.read_time_ms = values[3]
        stats.writes = values[4]
        stats.writes_merged = values[5]
        stats.sectors_written = values[6]
        stats.write_time_ms = values[7]
        stats.ios_in_progress = values[8]
        stats.io_time_ms = values[9]
        stats.io_time_weighed_ms = values[10]
        if len(values) > 11:
            stats.discards = values[11]
            stats.discards_merged = values[12]
            stats.sectors_discarded = values[13]
            stats.discard_time_ms = values[14]
        # Require all 17 fields before reading the flush counters
        # (the previous "> 15" check would index out of range on a
        # hypothetical 16-field line).
        if len(values) > 16:
            stats.flushes = values[15]
            stats.flush_time_ms = values[16]
        return stats

    @staticmethod
    def get_io_stats(device_id):
        """Fetch and parse the /proc/diskstats line for device_id on the DUT."""
        stats_output = TestRun.executor.run_expect_success(
            f"cat /proc/diskstats | grep '{device_id} '")
        if not stats_output.stdout.strip():
            raise CmdException("Failed to get statistics for device " + device_id, stats_output)
        return IoStats.parse(stats_line=stats_output.stdout.splitlines()[0])

View File

@ -0,0 +1,79 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from collections import defaultdict
class LinuxCommand:
    """
    Builder for shell command lines: collects environment variables, parameters
    and flags, then renders the full command string via str().
    """

    def __init__(self, command_executor, command_name):
        self.command_executor = command_executor
        self.command_param = defaultdict(list)
        self.command_flags = []
        self.command_name = command_name
        # Rendering knobs, overridable by subclasses for tool-specific syntax.
        self.param_name_prefix = ''
        self.param_separator = ' '
        self.param_value_prefix = '='
        self.param_value_list_separator = ','
        self.command_env_var = defaultdict(list)
        self.env_var_separator = ' '
        self.env_var_value_prefix = '='

    def run(self):
        """Execute the rendered command synchronously via the executor."""
        return self.command_executor.run(str(self))

    def run_in_background(self):
        """Execute the rendered command without waiting for it to finish."""
        return self.command_executor.run_in_background(str(self))

    def set_flags(self, *flag):
        self.command_flags.extend(flag)
        return self

    def remove_flag(self, flag):
        if flag in self.command_flags:
            self.command_flags.remove(flag)
        return self

    def set_param(self, key, *values):
        """Set (replace) a parameter with one or more values."""
        self.remove_param(key)
        self.command_param[key] = [str(single_value) for single_value in values]
        return self

    def remove_param(self, key):
        self.command_param.pop(key, None)
        return self

    def set_env_var(self, key, *values):
        """Set (replace) an environment variable with one or more values."""
        self.remove_env_var(key)
        self.command_env_var[key] = [str(single_value) for single_value in values]
        return self

    def remove_env_var(self, key):
        self.command_env_var.pop(key, None)
        return self

    def get_parameter_value(self, param_name):
        """Return the list of values set for param_name, or None when unset."""
        if param_name not in self.command_param:
            return None
        return self.command_param[param_name]

    def __str__(self):
        rendered = ''.join(
            f'{key}{self.env_var_value_prefix}{",".join(vals)}{self.env_var_separator}'
            for key, vals in self.command_env_var.items()
        )
        rendered += self.command_name
        rendered += ''.join(
            f'{self.param_separator}{self.param_name_prefix}'
            f'{key}{self.param_value_prefix}{",".join(vals)}'
            for key, vals in self.command_param.items()
        )
        rendered += ''.join(
            f'{self.param_separator}{self.param_name_prefix}{flag}'
            for flag in self.command_flags
        )
        return rendered

462
test_utils/os_utils.py Normal file
View File

@ -0,0 +1,462 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import math
import posixpath
import re
import time
from datetime import timedelta, datetime
from aenum import IntFlag, Enum, IntEnum
from packaging import version
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.dd import Dd
from test_tools.disk_utils import get_sysfs_path
from test_tools.fs_utils import check_if_directory_exists, create_directory, check_if_file_exists
from test_utils.filesystem.file import File
from test_utils.output import CmdException
from test_utils.retry import Retry
from test_utils.size import Size, Unit
# standard kernel debugfs mount point (used by mount_debugfs())
DEBUGFS_MOUNT_POINT = "/sys/kernel/debug"
# ramfs mount point used by mount_ramfs()/allocate_memory()
MEMORY_MOUNT_POINT = "/mnt/memspace"
class DropCachesMode(IntFlag):
    """Bit values accepted by /proc/sys/vm/drop_caches."""
    PAGECACHE = 1
    SLAB = 2
    ALL = PAGECACHE | SLAB
class OvercommitMemoryMode(Enum):
    """Values written to /proc/sys/vm/overcommit_memory."""
    DEFAULT = 0  # heuristic overcommit
    ALWAYS = 1   # always overcommit
    NEVER = 2    # strict accounting, allocations beyond limit fail
class Runlevel(IntEnum):
    # Maps SysV runlevels to systemd target equivalents. The bare strings
    # below are informational section notes (the first doubles as the class
    # docstring); names assigned to existing members are enum aliases.
    """
    Halt the system.
    SysV Runlevel: 0
    systemd Target: runlevel0.target, poweroff.target
    """
    runlevel0 = 0
    poweroff = runlevel0

    """
    Single user mode.
    SysV Runlevel: 1, s, single
    systemd Target: runlevel1.target, rescue.target
    """
    runlevel1 = 1
    rescue = runlevel1

    """
    User-defined/Site-specific runlevels. By default, identical to 3.
    SysV Runlevel: 2, 4
    systemd Target: runlevel2.target, runlevel4.target, multi-user.target
    """
    runlevel2 = 2

    """
    Multi-user, non-graphical. Users can usually login via multiple consoles or via the network.
    SysV Runlevel: 3
    systemd Target: runlevel3.target, multi-user.target
    """
    runlevel3 = 3
    multi_user = runlevel3

    """
    Multi-user, graphical. Usually has all the services of runlevel 3 plus a graphical login.
    SysV Runlevel: 5
    systemd Target: runlevel5.target, graphical.target
    """
    runlevel5 = 5
    graphical = runlevel5

    """
    Reboot
    SysV Runlevel: 6
    systemd Target: runlevel6.target, reboot.target
    """
    runlevel6 = 6
    reboot = runlevel6

    """
    Emergency shell
    SysV Runlevel: emergency
    systemd Target: emergency.target
    """
    runlevel7 = 7
    emergency = runlevel7
class SystemManagerType(Enum):
    """Init system running as PID 1."""
    sysv = 0
    systemd = 1
def get_system_manager():
    """Detect whether PID 1 is SysV init or systemd.

    Raises:
        Exception: when PID 1's command is neither 'init' nor 'systemd'.
    """
    output = TestRun.executor.run_expect_success("ps -p 1").stdout
    # second line of `ps` output, fourth column, holds the command name;
    # local renamed so it no longer shadows the builtin `type`
    manager_name = output.split('\n')[1].split()[3]
    if manager_name == "init":
        return SystemManagerType.sysv
    elif manager_name == "systemd":
        return SystemManagerType.systemd
    raise Exception(f"Unknown system manager type ({manager_name}).")
def change_runlevel(runlevel: Runlevel):
    """Switch the system to the given runlevel (no-op if already there)."""
    if runlevel == get_runlevel():
        return
    # persist the default only for regular runlevels (not poweroff/reboot)
    if Runlevel.runlevel0 < runlevel < Runlevel.runlevel6:
        if get_system_manager() == SystemManagerType.systemd:
            TestRun.executor.run_expect_success(f"systemctl set-default {runlevel.name}.target")
        else:
            TestRun.executor.run_expect_success(
                f"sed -i 's/^.*id:.*$/id:{runlevel.value}:initdefault: /' /etc/inittab")
    TestRun.executor.run_expect_success(f"init {runlevel.value}")
def get_runlevel():
    """Return the current Runlevel, querying systemd or SysV as appropriate.

    Raises:
        Exception: when the tool output cannot be mapped to a Runlevel.
    """
    system_manager = get_system_manager()
    if system_manager == SystemManagerType.systemd:
        result = TestRun.executor.run_expect_success("systemctl get-default")
        try:
            # e.g. "multi-user.target" -> enum name "multi_user"
            name = result.stdout.split(".")[0].replace("-", "_")
            return Runlevel[name]
        except Exception:
            # bugfix: Output has no `.output` attribute - accessing it here
            # raised AttributeError instead of the intended message
            raise Exception(f"Cannot parse '{result.stdout}' to runlevel.")
    else:
        result = TestRun.executor.run_expect_success("runlevel")
        try:
            split_output = result.stdout.split()
            return Runlevel(int(split_output[1]))
        except Exception:
            raise Exception(f"Cannot parse '{result.stdout}' to runlevel.")
class Udev(object):
    """Thin static wrappers around `udevadm` event-queue control."""

    @staticmethod
    def enable():
        """Resume processing of queued udev events."""
        TestRun.LOGGER.info("Enabling udev")
        TestRun.executor.run_expect_success("udevadm control --start-exec-queue")

    @staticmethod
    def disable():
        """Pause processing of udev events (they queue up until enabled)."""
        TestRun.LOGGER.info("Disabling udev")
        TestRun.executor.run_expect_success("udevadm control --stop-exec-queue")

    @staticmethod
    def trigger():
        """Replay kernel uevents for all devices."""
        TestRun.executor.run_expect_success("udevadm trigger")

    @staticmethod
    def settle():
        """Block until the udev event queue is empty."""
        TestRun.executor.run_expect_success("udevadm settle")
def drop_caches(level: DropCachesMode = DropCachesMode.ALL):
    """Drop kernel caches by writing to /proc/sys/vm/drop_caches."""
    cmd = f"echo {level.value} > /proc/sys/vm/drop_caches"
    TestRun.executor.run_expect_success(cmd)
def disable_memory_affecting_functions():
    """Disables system functions affecting memory"""
    # Don't allow sshd to be killed in case of out-of-memory:
    TestRun.executor.run(
        "echo '-1000' > /proc/`cat /var/run/sshd.pid`/oom_score_adj"
    )
    # older kernels use oom_adj instead of oom_score_adj; best effort
    TestRun.executor.run(
        "echo -17 > /proc/`cat /var/run/sshd.pid`/oom_adj"
    )  # deprecated
    # strict accounting: allocations beyond the limit fail immediately
    TestRun.executor.run_expect_success(
        f"echo {OvercommitMemoryMode.NEVER.value} > /proc/sys/vm/overcommit_memory"
    )
    TestRun.executor.run_expect_success("echo '100' > /proc/sys/vm/overcommit_ratio")
    TestRun.executor.run_expect_success(
        "echo '64 64 32' > /proc/sys/vm/lowmem_reserve_ratio"
    )
    # no swap, so memory pressure is real and measurable
    TestRun.executor.run_expect_success("swapoff --all")
    drop_caches(DropCachesMode.SLAB)
def defaultize_memory_affecting_functions():
    """Sets default values to system functions affecting memory"""
    # restore kernel defaults changed by disable_memory_affecting_functions()
    TestRun.executor.run_expect_success(
        f"echo {OvercommitMemoryMode.DEFAULT.value} > /proc/sys/vm/overcommit_memory"
    )
    TestRun.executor.run_expect_success("echo 50 > /proc/sys/vm/overcommit_ratio")
    TestRun.executor.run_expect_success(
        "echo '256 256 32' > /proc/sys/vm/lowmem_reserve_ratio"
    )
    # re-enable all swap devices from /etc/fstab
    TestRun.executor.run_expect_success("swapon --all")
def get_free_memory():
    """Returns free amount of memory in bytes"""
    output = TestRun.executor.run_expect_success("free -b")
    output = output.stdout.splitlines()
    for line in output:
        if 'free' in line:
            # header row: locate the 'free' column position
            index = line.split().index('free') + 1  # 1st row has 1 element less than following rows
        if 'Mem' in line:
            mem_line = line.split()
            # NOTE(review): relies on the header line preceding the 'Mem:'
            # line in `free` output; otherwise `index` is unbound - confirm
            return Size(int(mem_line[index]))
def get_mem_available():
    """Returns amount of available memory from /proc/meminfo"""
    cmd = "cat /proc/meminfo | grep MemAvailable | awk '{ print $2 }'"
    kib_value = int(TestRun.executor.run(cmd).stdout)
    return Size(kib_value, Unit.KibiByte)
def get_module_mem_footprint(module_name):
    """Returns allocated size of specific module's metadata from /proc/vmallocinfo"""
    cmd = f"cat /proc/vmallocinfo | grep {module_name} | awk '{{ print $2 }}' "
    allocation_lines = TestRun.executor.run(cmd).stdout.splitlines()
    return Size(sum(int(line) for line in allocation_lines))
def allocate_memory(size: Size):
    """Allocates given amount of memory"""
    mount_ramfs()
    TestRun.LOGGER.info(f"Allocating {size.get_value(Unit.MiB):0.2f} MiB of memory.")
    block = Size(1, Unit.Blocks512)
    # fill a ramfs-backed file so the space is pinned in RAM
    dd_cmd = (
        Dd()
        .block_size(block)
        .count(math.ceil(size / block))
        .input("/dev/zero")
        .output(f"{MEMORY_MOUNT_POINT}/data")
    )
    result = dd_cmd.run()
    if result.exit_code != 0:
        raise CmdException("Allocating memory failed.", result)
def get_number_of_processors_from_cpuinfo():
    """Returns number of processors (count) which are listed out in /proc/cpuinfo"""
    # plain string literal (had a pointless f-prefix with no placeholders)
    cmd = "cat /proc/cpuinfo | grep processor | wc -l"
    output = TestRun.executor.run(cmd).stdout
    return int(output)
def get_number_of_processes(process_name):
    """Count running processes matching process_name (grep on `ps aux`)."""
    cmd = f"ps aux | grep {process_name} | grep -v grep | wc -l"
    return int(TestRun.executor.run(cmd).stdout)
def mount_ramfs():
    """Mounts ramfs to enable allocating memory space"""
    if not check_if_directory_exists(MEMORY_MOUNT_POINT):
        create_directory(MEMORY_MOUNT_POINT)
    if is_mounted(MEMORY_MOUNT_POINT):
        return
    TestRun.executor.run_expect_success(f"mount -t ramfs ramfs {MEMORY_MOUNT_POINT}")
def unmount_ramfs():
    """Unmounts ramfs and releases whole space allocated by it in memory"""
    # ramfs content lives only in RAM, so unmounting frees everything
    TestRun.executor.run_expect_success(f"umount {MEMORY_MOUNT_POINT}")
def download_file(url, destination_dir="/tmp"):
    """Download url into destination_dir and return the resulting File."""
    # TODO use wget module instead
    command = (f"wget --tries=3 --timeout=5 --continue --quiet "
               f"--directory-prefix={destination_dir} {url}")
    TestRun.executor.run_expect_success(command)
    return File(f"{destination_dir.rstrip('/')}/{File.get_name(url)}")
def get_kernel_version():
    """Return the running kernel version (`uname -r` without release suffix)."""
    raw = TestRun.executor.run_expect_success("uname -r").stdout
    return version.Version(raw.split('-')[0])
class ModuleRemoveMethod(Enum):
    """Shell commands used to unload a kernel module."""
    rmmod = "rmmod"
    modprobe = "modprobe -r"
def is_kernel_module_loaded(module_name):
    """True when `lsmod` lists module_name."""
    check = TestRun.executor.run(f"lsmod | grep ^{module_name}")
    return check.exit_code == 0
def get_sys_block_path():
    """Return the block-device sysfs root, preferring /sys/class/block."""
    preferred = "/sys/class/block"
    return preferred if check_if_directory_exists(preferred) else "/sys/block"
def load_kernel_module(module_name, module_args: dict = None):
    """modprobe module_name, passing optional key=value module arguments.

    The annotation was `{str, str}` (a set literal), not a dict type.
    """
    cmd = f"modprobe {module_name}"
    if module_args is not None:
        for key, value in module_args.items():
            cmd += f" {key}={value}"
    return TestRun.executor.run(cmd)
def unload_kernel_module(module_name, unload_method: ModuleRemoveMethod = ModuleRemoveMethod.rmmod):
    """Remove a kernel module via rmmod or `modprobe -r`; fails the test on error."""
    return TestRun.executor.run_expect_success(f"{unload_method.value} {module_name}")
def get_kernel_module_parameter(module_name, parameter):
    """Read a module parameter value from sysfs.

    Raises:
        FileNotFoundError: if the parameter file does not exist.
    """
    path = f"/sys/module/{module_name}/parameters/{parameter}"
    if not check_if_file_exists(path):
        raise FileNotFoundError(f"File {path} does not exist!")
    return File(path).read()
def is_mounted(path: str):
    """True when `mount` lists path (trailing slash ignored) as mounted.

    Raises:
        Exception: for a None, empty or whitespace-only path.
    """
    # bugfix: the original checked `path is None or path.isspace()`, so an
    # empty string slipped past the guard ("".isspace() is False)
    if not path or path.isspace():
        raise Exception("Checked path cannot be empty")
    command = f"mount | grep --fixed-strings '{path.rstrip('/')} '"
    return TestRun.executor.run(command).exit_code == 0
def mount_debugfs():
    """Mount debugfs at its standard location when not already mounted."""
    if is_mounted(DEBUGFS_MOUNT_POINT):
        return
    TestRun.executor.run_expect_success(f"mount -t debugfs none {DEBUGFS_MOUNT_POINT}")
def reload_kernel_module(module_name, module_args: dict = None,
                         unload_method: ModuleRemoveMethod = ModuleRemoveMethod.rmmod):
    """Unload (if loaded) and re-load a kernel module, retrying for up to 5s.

    The module_args annotation was `{str, str}` (a set literal), not a dict type.
    """
    if is_kernel_module_loaded(module_name):
        unload_kernel_module(module_name, unload_method)
    # modprobe can transiently fail right after unload; retry briefly
    Retry.run_while_false(
        lambda: load_kernel_module(module_name, module_args).exit_code == 0,
        timeout=timedelta(seconds=5)
    )
def get_module_path(module_name):
    """Return the on-disk path of a kernel module (via `modinfo`)."""
    # module path is in second column of first line of `modinfo` output
    module_info = TestRun.executor.run_expect_success(f"modinfo {module_name}").stdout
    return module_info.splitlines()[0].split()[1]
def get_executable_path(exec_name):
    """Resolve an executable name to its full path using `which`."""
    return TestRun.executor.run_expect_success(f"which {exec_name}").stdout
def get_udev_service_path(unit_name):
    """Return the unit file path of a systemd unit (via `systemctl cat`)."""
    # path is in second column of first line of output
    info = TestRun.executor.run_expect_success(f"systemctl cat {unit_name}").stdout
    return info.splitlines()[0].split()[1]
def kill_all_io():
    """Stop all known I/O generators (dd, fio, blktrace, vdbench).

    TERM signal should be used in preference to the KILL signal, since a
    process may install a handler for the TERM signal in order to perform
    clean-up steps before terminating in an orderly fashion.

    Raises:
        Exception: if any of the tools is still running afterwards.
    """
    TestRun.executor.run("killall -q --signal TERM dd fio blktrace")
    time.sleep(3)
    TestRun.executor.run("killall -q --signal KILL dd fio blktrace")
    TestRun.executor.run("kill -9 `ps aux | grep -i vdbench.* | awk '{ print $2 }'`")

    # messages below had pointless f-prefixes (no placeholders)
    if TestRun.executor.run("pgrep -x dd").exit_code == 0:
        raise Exception("Failed to stop dd!")
    if TestRun.executor.run("pgrep -x fio").exit_code == 0:
        raise Exception("Failed to stop fio!")
    if TestRun.executor.run("pgrep -x blktrace").exit_code == 0:
        raise Exception("Failed to stop blktrace!")
    if TestRun.executor.run("pgrep vdbench").exit_code == 0:
        raise Exception("Failed to stop vdbench!")
def wait(predicate, timeout: timedelta, interval: timedelta = None):
    """Poll predicate until it returns a truthy value or timeout elapses.

    Returns the last predicate() result (falsy when the wait timed out
    without the predicate ever being called or succeeding).
    """
    deadline = datetime.now() + timeout
    result = False
    while datetime.now() < deadline:
        result = predicate()
        if result:
            break
        if interval is not None:
            time.sleep(interval.total_seconds())
    return result
def sync():
    """Flush filesystem buffers to stable storage."""
    TestRun.executor.run_expect_success("sync")
def get_dut_cpu_number():
    """Number of online CPUs on the DUT (`nproc`)."""
    output = TestRun.executor.run_expect_success("nproc").stdout
    return int(output)
def get_dut_cpu_physical_cores():
    """ Get list of CPU numbers that don't share physical cores """
    output = TestRun.executor.run_expect_success("lscpu --all --parse").stdout
    core_list = []
    seen_physical = set()
    for line in output.split("\n"):
        # skip the commented header lines of `lscpu --parse`
        if "#" in line:
            continue
        cpu_no, phys_core_no = line.split(",")[:2]
        if phys_core_no not in seen_physical:
            core_list.append(cpu_no)
            seen_physical.add(phys_core_no)
    return core_list
def set_wbt_lat(device: Device, value: int):
    """Write `value` (usec) to the device's queue/wbt_lat_usec attribute."""
    if value < 0:
        raise ValueError("Write back latency can't be negative number")
    config_path = posixpath.join(
        get_sysfs_path(device.get_device_id()), "queue/wbt_lat_usec"
    )
    return TestRun.executor.run_expect_success(f"echo {value} > {config_path}")
def get_wbt_lat(device: Device):
    """Read the device's writeback throttling latency target (usec)."""
    config_path = posixpath.join(
        get_sysfs_path(device.get_device_id()), "queue/wbt_lat_usec"
    )
    return int(TestRun.executor.run_expect_success(f"cat {config_path}").stdout)
def get_cores_ids_range(numa_node: int):
    """Return CPU ids (as strings) belonging to the given NUMA node."""
    # plain string literal (had a pointless f-prefix with no placeholders)
    output = TestRun.executor.run_expect_success("lscpu --all --parse").stdout
    # columns of `lscpu --parse`: CPU,Core,Socket,Node,...
    parse_output = re.findall(r'(\d+),(\d+),(?:\d+),(\d+),,', output, re.I)
    return [element[0] for element in parse_output if int(element[2]) == numa_node]

22
test_utils/output.py Normal file
View File

@ -0,0 +1,22 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class Output:
    """Result of an executed command: decoded stdout/stderr and exit code."""

    def __init__(self, output_out, output_err, return_code):
        # accept both raw bytes (decoded leniently, right-stripped) and
        # already-decoded values, which are passed through unchanged
        self.stdout = self._decode(output_out)
        self.stderr = self._decode(output_err)
        self.exit_code = return_code

    @staticmethod
    def _decode(stream):
        # isinstance instead of `type(x) == bytes`; also removes the
        # duplicated decode expression from __init__
        if isinstance(stream, bytes):
            return stream.decode('utf-8', errors="ignore").rstrip()
        return stream

    def __str__(self):
        return f"exit_code: {self.exit_code}\nstdout: {self.stdout}\nstderr: {self.stderr}"
class CmdException(Exception):
    """Command failure carrying the full Output for later inspection."""

    def __init__(self, message: str, output: Output):
        super().__init__(message + "\n" + str(output))
        self.output = output

57
test_utils/retry.py Normal file
View File

@ -0,0 +1,57 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import datetime, timedelta
from functools import partial
from core.test_run import TestRun
class Retry:
    """
    The Retry class implements methods designed to retry execution until desired result.
    The func parameter is meant to be a method. If this method needs args/kwargs, they should be
    encapsulated with the method, i.e. using a partial function (an example of this is contained
    within run_command_until_success())
    """

    @classmethod
    def run_command_until_success(
        cls, command: str, retries: int = None, timeout: timedelta = None
    ):
        """Re-run `command` until it succeeds, bounded by retries and/or timeout."""
        # encapsulate method and args/kwargs as a partial function
        func = partial(TestRun.executor.run_expect_success, command)
        return cls.run_while_exception(func, retries=retries, timeout=timeout)

    @classmethod
    def run_while_exception(cls, func, retries: int = None, timeout: timedelta = None):
        """Retry func() while it raises; return its first non-raising result (or None)."""
        result = None

        def wrapped_func():
            nonlocal result
            try:
                result = func()
                return True
            except Exception:
                # was a bare `except:`, which also swallowed
                # KeyboardInterrupt and SystemExit
                return False

        cls.run_while_false(wrapped_func, retries=retries, timeout=timeout)
        return result

    @classmethod
    def run_while_false(cls, func, retries: int = None, timeout: timedelta = None):
        """Retry func() while it returns a falsy value; return the last result.

        Raises:
            AttributeError: if neither retries nor timeout is given.
        """
        if retries is None and timeout is None:
            raise AttributeError("At least one stop condition is required for Retry calls!")
        start = datetime.now()
        retry_calls = 0
        result = func()
        while not result:
            result = func()
            retry_calls += 1
            if result \
                    or (timeout is not None and datetime.now() - start > timeout) \
                    or (retries is not None and retry_calls == retries):
                break
        return result

77
test_utils/scsi_debug.py Normal file
View File

@ -0,0 +1,77 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from core.test_run import TestRun
syslog_path = "/var/log/messages"
class Logs:
    """Scans syslog for scsi_debug FLUSH/FUA request traces."""
    # position (1-based) of the first unread syslog line
    last_read_line = 1
    # scsi_debug opcode traces: 0x35 = SYNCHRONIZE CACHE, 0x2a with FUA bit
    FLUSH = re.compile(r"scsi_debug:[\s\S]*cmd 35")
    FUA = re.compile(r"scsi_debug:[\s\S]*cmd 2a 08")

    @staticmethod
    def check_syslog_for_signals():
        """Check syslog for both FLUSH and FUA logs."""
        Logs.check_syslog_for_flush()
        Logs.check_syslog_for_fua()

    @staticmethod
    def check_syslog_for_flush():
        """Check syslog for FLUSH logs"""
        log_lines = Logs._read_syslog(Logs.last_read_line)
        flush_logs_counter = Logs._count_logs(log_lines, Logs.FLUSH)
        Logs._validate_logs_amount(flush_logs_counter, "FLUSH")

    @staticmethod
    def check_syslog_for_fua():
        """Check syslog for FUA logs"""
        log_lines = Logs._read_syslog(Logs.last_read_line)
        fua_logs_counter = Logs._count_logs(log_lines, Logs.FUA)
        Logs._validate_logs_amount(fua_logs_counter, "FUA")

    @staticmethod
    def _read_syslog(last_read_line: int):
        """Read recent lines in syslog, mark last line and return read lines as list."""
        log_lines = TestRun.executor.run_expect_success(
            f"tail -qn +{last_read_line} {syslog_path}"
        ).stdout.splitlines()
        # mark last read line to continue next reading from here
        Logs.last_read_line += len(log_lines)
        return log_lines

    @staticmethod
    def _count_logs(log_lines: list, expected_log):
        """Return how many lines match the expected_log pattern."""
        return sum(1 for line in log_lines if expected_log.search(line) is not None)

    @staticmethod
    def _validate_logs_amount(logs_counter: int, log_type: str):
        """Log a verdict on how often the given log type occurred."""
        # "occured" -> "occurred" typo fixed in the emitted messages
        if logs_counter == 0:
            if Logs._is_flush(log_type):
                TestRun.LOGGER.error(f"{log_type} log not occurred")
            else:
                TestRun.LOGGER.warning(f"{log_type} log not occurred")
        elif logs_counter == 1:
            TestRun.LOGGER.warning(f"{log_type} log occurred only once.")
        else:
            TestRun.LOGGER.info(f"{log_type} log occurred {logs_counter} times.")

    @staticmethod
    def _is_flush(log_type: str):
        return log_type == "FLUSH"

16
test_utils/singleton.py Normal file
View File

@ -0,0 +1,16 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class Singleton(type):
    """
    Metaclass caching the first instance of each class and returning it
    for every subsequent instantiation.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance

211
test_utils/size.py Normal file
View File

@ -0,0 +1,211 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import enum
import math
from multimethod import multimethod
def parse_unit(str_unit: str):
    """Map a unit name (canonical member name or abbreviation) to a Unit.

    Raises:
        ValueError: when the string matches no known unit.
    """
    # canonical member names first (aliases are not yielded by iteration)
    for unit in Unit:
        if str_unit == unit.name:
            return unit
    abbreviations = {
        "KiB": Unit.KibiByte,
        "4KiB blocks": Unit.Blocks4096,
        "4KiB Blocks": Unit.Blocks4096,
        "MiB": Unit.MebiByte,
        "GiB": Unit.GibiByte,
        "TiB": Unit.TebiByte,
        "B": Unit.Byte,
        "KB": Unit.KiloByte,
        "MB": Unit.MegaByte,
        "GB": Unit.GigaByte,
        "TB": Unit.TeraByte,
    }
    if str_unit in abbreviations:
        return abbreviations[str_unit]
    raise ValueError(f"Unable to parse {str_unit}")
class Unit(enum.Enum):
    """Byte-based size units; abbreviation names alias the canonical members."""
    Byte = 1
    KiloByte = 1000
    KibiByte = 1024
    MegaByte = 1000 * KiloByte
    MebiByte = 1024 * KibiByte
    GigaByte = 1000 * MegaByte
    GibiByte = 1024 * MebiByte
    TeraByte = 1000 * GigaByte
    TebiByte = 1024 * GibiByte
    Blocks512 = 512
    Blocks4096 = 4096

    # abbreviation aliases
    KiB = KibiByte
    KB = KiloByte
    MiB = MebiByte
    MB = MegaByte
    GiB = GibiByte
    GB = GigaByte
    TiB = TebiByte
    TB = TeraByte

    def get_value(self):
        return self.value

    def __str__(self):
        return self.get_name()

    def get_name(self):
        return self.name

    def get_short_name(self):
        """Return the IEC/SI abbreviation; block units have none.

        Raises:
            ValueError: for members without an abbreviation (Blocks512/4096).
        """
        short_names = {
            Unit.Byte: "B",
            Unit.KibiByte: "KiB",
            Unit.KiloByte: "KB",
            Unit.MebiByte: "MiB",
            Unit.MegaByte: "MB",
            Unit.GibiByte: "GiB",
            Unit.GigaByte: "GB",
            Unit.TebiByte: "TiB",
            Unit.TeraByte: "TB",
        }
        if self in short_names:
            return short_names[self]
        raise ValueError(f"Unable to get short unit name for {self}.")
class UnitPerSecond:
    """Wraps a unit, exposing the same value under a '/s'-suffixed name."""

    def __init__(self, unit):
        self.value = unit.get_value()
        self.name = f"{unit.name}/s"

    def get_value(self):
        return self.value
class Size:
    """Non-negative byte size with unit bookkeeping, comparisons and arithmetic.

    The value is always stored internally in bytes (`self.value`); `self.unit`
    only remembers the unit used for display.
    """

    def __init__(self, value: float, unit: Unit = Unit.Byte):
        if value < 0:
            raise ValueError("Size has to be positive.")
        self.value = value * unit.value
        self.unit = unit

    def __str__(self):
        return f"{self.get_value(self.unit)} {self.unit}"

    def __hash__(self):
        return self.value.__hash__()

    def __int__(self):
        # truncated byte count
        return int(self.get_value())

    def __add__(self, other):
        return Size(self.get_value() + other.get_value())

    def __lt__(self, other):
        return self.get_value() < other.get_value()

    def __le__(self, other):
        return self.get_value() <= other.get_value()

    def __eq__(self, other):
        return self.get_value() == other.get_value()

    def __ne__(self, other):
        return self.get_value() != other.get_value()

    def __gt__(self, other):
        return self.get_value() > other.get_value()

    def __ge__(self, other):
        return self.get_value() >= other.get_value()

    def __radd__(self, other):
        return Size(other + self.get_value())

    def __sub__(self, other):
        if self < other:
            raise ValueError("Subtracted value is too big. Result size cannot be negative.")
        return Size(self.get_value() - other.get_value())

    # @multimethod registers each overload by its annotation, so the repeated
    # method names below are intentional (int and float variants).
    @multimethod
    def __mul__(self, other: int):
        return Size(math.ceil(self.get_value() * other))

    @multimethod
    def __rmul__(self, other: int):
        return Size(math.ceil(self.get_value() * other))

    @multimethod
    def __mul__(self, other: float):
        return Size(math.ceil(self.get_value() * other))

    @multimethod
    def __rmul__(self, other: float):
        return Size(math.ceil(self.get_value() * other))

    # Size / Size -> float ratio; Size / int -> scaled Size (ceil)
    @multimethod
    def __truediv__(self, other):
        if other.get_value() == 0:
            raise ValueError("Divisor must not be equal to 0.")
        return self.get_value() / other.get_value()

    @multimethod
    def __truediv__(self, other: int):
        if other == 0:
            raise ValueError("Divisor must not be equal to 0.")
        return Size(math.ceil(self.get_value() / other))

    def set_unit(self, new_unit: Unit):
        """Re-express this size in new_unit; the conversion must be exact.

        Raises:
            ValueError: if the value is not exactly representable in new_unit.
        """
        new_size = Size(self.get_value(target_unit=new_unit), unit=new_unit)
        if new_size != self:
            raise ValueError(f"{new_unit} is not precise enough for {self}")
        self.value = new_size.value
        self.unit = new_size.unit
        return self

    def get_value(self, target_unit: Unit = Unit.Byte):
        """Return the size expressed in target_unit (bytes by default)."""
        return self.value / target_unit.value

    def is_zero(self):
        if self.value == 0:
            return True
        else:
            return False

    def align_up(self, alignment):
        """Round up to the nearest multiple of alignment (a power of two, in bytes)."""
        if self == self.align_down(alignment):
            return Size(int(self))
        return Size(int(self.align_down(alignment)) + alignment)

    def align_down(self, alignment):
        """Round down to the nearest multiple of alignment.

        Raises:
            ValueError: if alignment is not a positive power of two.
        """
        if alignment <= 0:
            raise ValueError("Alignment must be a positive value!")
        if alignment & (alignment - 1):
            raise ValueError("Alignment must be a power of two!")
        return Size(int(self) & ~(alignment - 1))

    @staticmethod
    def zero():
        return Size(0)

25
test_utils/systemd.py Normal file
View File

@ -0,0 +1,25 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from pathlib import Path
from core.test_run import TestRun
systemd_service_directory = Path("/usr/lib/systemd/system/")
def enable_service(name):
    """Enable the given systemd unit; fails the test on error."""
    TestRun.executor.run_expect_success(f"systemctl enable {name}")
def disable_service(name):
    """Disable the given systemd unit; fails the test on error."""
    TestRun.executor.run_expect_success(f"systemctl disable {name}")
def reload_daemon():
    """Reload systemd manager configuration (picks up unit file changes)."""
    TestRun.executor.run_expect_success("systemctl daemon-reload")
def restart_service(name):
    """Restart the given systemd unit; fails the test on error."""
    TestRun.executor.run_expect_success(f"systemctl restart {name}")

14
test_utils/time.py Normal file
View File

@ -0,0 +1,14 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from attotime import attotimedelta
class Time(attotimedelta):
    """Nanosecond-precision time delta with convenience unit accessors."""

    def total_microseconds(self):
        return self.total_nanoseconds() / 1_000

    def total_milliseconds(self):
        return self.total_nanoseconds() / 1_000_000