tests: Embed test framework within OCL repository

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
Robert Baldyga
2022-12-23 12:50:17 +01:00
parent bc0c8c1bf5
commit 849f59855c
91 changed files with 9930 additions and 2 deletions

View File

@@ -104,7 +104,7 @@ The complete documentation for Open CAS Linux is available in the
Before running tests make sure you have a platform with at least 2 disks (one for cache and one for core). Be careful as these devices will be most likely overwritten with random data during tests. Tests can be either executed locally or on a remote platform (via ssh) specified in the dut_config.
1. Go to test directory `cd test/functional`.
1. Install dependencies with command `pip3 install -r test-framework/requirements.txt`.
1. Install dependencies with command `pip3 install -r requirements.txt`.
1. Create DUT config. See example [here](test/functional/config/example_dut_config.yml).
a) Set disks params. You need at least two disks, of which at least one is an SSD drive.
b) For remote execution uncomment and set the `ip`, `user` and `password` fields.

View File

@@ -1,5 +1,19 @@
attotime>=0.2.0
pytest>=4.4.0,<=6.2.5
multimethod>=1.1
paramiko>=2.7.2
IPy>=1.00
aenum>=2.2.1
packaging>=20.3
typing>=3.7.4.1
pyyaml>=5.4
lxml>=4.6.3
wget>=3.2
gitpython>=3.1.7
cryptography>=3.4.6
psutil>=5.8.0
py==1.10.0
portalocker>=2.3.1
pytest-asyncio>=0.14.0
recordclass>=0.8.4
schema==0.7.2

View File

@@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

View File

@@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

View File

@@ -0,0 +1,85 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import time
from datetime import timedelta
from core.test_run import TestRun
from test_utils.output import CmdException
class BaseExecutor:
    """Abstract command executor.

    Subclasses implement ``_execute``/``_rsync``; this base provides the
    public ``run*``/``rsync*`` API, command logging and process helpers.
    """

    def _execute(self, command, timeout):
        """Execute ``command`` with ``timeout`` and return its output. Must be overridden."""
        raise NotImplementedError()

    def _rsync(self, src, dst, delete, symlinks, checksum, exclude_list, timeout,
               dut_to_controller):
        """Copy ``src`` to ``dst`` with rsync semantics. Must be overridden."""
        raise NotImplementedError()

    def rsync_to(self, src, dst, delete=False, symlinks=False, checksum=False, exclude_list=None,
                 timeout: timedelta = timedelta(seconds=90)):
        """Copy files controller -> DUT.

        BUG FIX: ``exclude_list`` previously defaulted to a shared mutable
        list (``[]``); it now defaults to ``None`` and is normalized here.
        """
        return self._rsync(src, dst, delete, symlinks, checksum, exclude_list or [],
                           timeout, False)

    def rsync_from(self, src, dst, delete=False, symlinks=False, checksum=False, exclude_list=None,
                   timeout: timedelta = timedelta(seconds=90)):
        """Copy files DUT -> controller (see rsync_to for the mutable-default fix)."""
        return self._rsync(src, dst, delete, symlinks, checksum, exclude_list or [],
                           timeout, True)

    def is_remote(self):
        """Return True when commands run on a remote machine. Base default: local."""
        return False

    def is_active(self):
        """Return True when the executor can currently accept commands."""
        return True

    def wait_for_connection(self, timeout: timedelta = None):
        """Block until the executor is reachable. No-op for local execution."""
        pass

    def run(self, command, timeout: timedelta = timedelta(minutes=30)):
        """Run ``command`` (prefixed with the DUT env, when set), log it, return its output."""
        if TestRun.dut and TestRun.dut.env:
            command = f"{TestRun.dut.env} && {command}"
        command_id = TestRun.LOGGER.get_new_command_id()
        # Tag the log entry with the DUT's IP only in multi-DUT runs.
        ip_info = TestRun.dut.ip if len(TestRun.duts) > 1 else ""
        TestRun.LOGGER.write_command_to_command_log(command, command_id, info=ip_info)
        output = self._execute(command, timeout)
        TestRun.LOGGER.write_output_to_command_log(output, command_id)
        return output

    def run_in_background(self,
                          command,
                          stdout_redirect_path="/dev/null",
                          stderr_redirect_path="/dev/null"):
        """Start ``command`` detached from the session and return its PID (or None)."""
        command += f" > {stdout_redirect_path} 2> {stderr_redirect_path} &echo $!"
        output = self.run(command)
        if output is not None:
            return int(output.stdout)

    def wait_cmd_finish(self, pid: int, timeout: timedelta = timedelta(minutes=30)):
        """Block until the process with ``pid`` exits (via ``tail --pid``)."""
        self.run(f"tail --pid={pid} -f /dev/null", timeout)

    def check_if_process_exists(self, pid: int):
        """Return True when a process with exactly ``pid`` is alive."""
        output = self.run(f"ps aux | awk '{{print $2 }}' | grep ^{pid}$", timedelta(seconds=10))
        return output.exit_code == 0

    def kill_process(self, pid: int):
        """Terminate ``pid``: polite SIGTERM first, SIGKILL after a grace period."""
        # TERM signal should be used in preference to the KILL signal, since a
        # process may install a handler for the TERM signal in order to perform
        # clean-up steps before terminating in an orderly fashion.
        self.run(f"kill -s SIGTERM {pid} &> /dev/null")
        time.sleep(3)
        self.run(f"kill -s SIGKILL {pid} &> /dev/null")

    def run_expect_success(self, command, timeout: timedelta = timedelta(minutes=30)):
        """Run ``command``; raise CmdException unless it exits with 0."""
        output = self.run(command, timeout)
        if output.exit_code != 0:
            raise CmdException(f"Exception occurred while trying to execute '{command}' command.",
                               output)
        return output

    def run_expect_fail(self, command, timeout: timedelta = timedelta(minutes=30)):
        """Run ``command``; raise CmdException if it unexpectedly exits with 0."""
        output = self.run(command, timeout)
        if output.exit_code == 0:
            raise CmdException(f"Command '{command}' executed properly but error was expected.",
                               output)
        return output

View File

@@ -0,0 +1,15 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from connection.base_executor import BaseExecutor
class DummyExecutor(BaseExecutor):
    """Executor stub: echoes what would be executed instead of doing it.

    Useful for dry runs and debugging test logic without a DUT.
    """

    def _execute(self, command, timeout=None):
        # No execution happens; the command is printed for inspection.
        print(command)

    def _rsync(self, src, dst, delete, symlinks, checksum, exclude_list, timeout,
               dut_to_controller):
        # Describe the would-be transfer instead of performing it.
        transfer_description = f'COPY FROM "{src}" TO "{dst}"'
        print(transfer_description)

View File

@@ -0,0 +1,48 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import subprocess
from datetime import timedelta
from connection.base_executor import BaseExecutor
from test_utils.output import Output
class LocalExecutor(BaseExecutor):
    """Executor running commands on the controller machine via a local shell."""

    def _execute(self, command, timeout):
        """Run ``command`` through the shell; return Output(stdout, stderr, exit_code)."""
        completed_process = subprocess.run(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout.total_seconds())
        return Output(completed_process.stdout,
                      completed_process.stderr,
                      completed_process.returncode)

    def _rsync(self, src, dst, delete=False, symlinks=False, checksum=False, exclude_list=None,
               timeout: timedelta = timedelta(seconds=90), dut_to_controller=False):
        """Copy ``src`` to ``dst`` locally with rsync.

        ``dut_to_controller`` is ignored — both endpoints are local paths.
        Raises Exception when rsync exits non-zero.

        BUG FIX: ``exclude_list`` previously defaulted to a shared mutable
        list (``[]``); it now defaults to ``None``.
        """
        options = []
        if delete:
            options.append("--delete")
        if symlinks:
            options.append("--links")
        if checksum:
            options.append("--checksum")
        for exclude in exclude_list or []:
            options.append(f"--exclude {exclude}")
        completed_process = subprocess.run(
            f'rsync -r {src} {dst} {" ".join(options)}',
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout.total_seconds())
        if completed_process.returncode:
            raise Exception(f"rsync failed:\n{completed_process}")

View File

@@ -0,0 +1,142 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import socket
import subprocess
import paramiko
from datetime import timedelta, datetime
from connection.base_executor import BaseExecutor
from core.test_run import TestRun
from test_utils.output import Output
class SshExecutor(BaseExecutor):
    """Executor running commands on a remote DUT over SSH (paramiko).

    File transfer is delegated to the local ``rsync`` binary tunnelled
    through ssh. Connection parameters come from the DUT config.
    """

    def __init__(self, ip, username, port=22):
        self.ip = ip
        self.user = username
        self.port = port
        self.ssh = paramiko.SSHClient()
        self._check_config_for_reboot_timeout()

    def __del__(self):
        # Best-effort cleanup; paramiko's close() is safe to call repeatedly.
        self.ssh.close()

    def connect(self, user=None, port=None,
                timeout: timedelta = timedelta(seconds=30)):
        """Open the SSH session; raise on authentication or connectivity failure."""
        user = user or self.user
        port = port or self.port
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            self.ssh.connect(self.ip, username=user,
                             port=port, timeout=timeout.total_seconds(),
                             banner_timeout=timeout.total_seconds())
        except paramiko.AuthenticationException as e:
            raise paramiko.AuthenticationException(
                f"Authentication exception occurred while trying to connect to DUT. "
                f"Please check your SSH key-based authentication.\n{e}") from e
        except (paramiko.SSHException, socket.timeout) as e:
            raise ConnectionError(f"An exception of type '{type(e)}' occurred while trying to "
                                  f"connect to {self.ip}.\n {e}") from e

    def disconnect(self):
        """Close the SSH session."""
        try:
            self.ssh.close()
        except Exception as e:
            raise Exception(
                f"An exception occurred while trying to disconnect from {self.ip}") from e

    def _execute(self, command, timeout):
        """Run a single command over the established SSH session."""
        try:
            (stdin, stdout, stderr) = self.ssh.exec_command(command,
                                                            timeout=timeout.total_seconds())
        except paramiko.SSHException as e:
            raise ConnectionError(f"An exception occurred while executing command '{command}' on"
                                  f" {self.ip}\n{e}") from e
        return Output(stdout.read(), stderr.read(), stdout.channel.recv_exit_status())

    def _rsync(self, src, dst, delete=False, symlinks=False, checksum=False, exclude_list=None,
               timeout: timedelta = timedelta(seconds=90), dut_to_controller=False):
        """Copy files between controller and DUT with rsync over ssh.

        ``dut_to_controller`` selects the transfer direction.

        BUG FIXES: ``exclude_list`` no longer uses a shared mutable default;
        a failure inside ``subprocess.run`` is now re-raised after logging —
        previously execution fell through to ``completed_process.returncode``
        and crashed with UnboundLocalError.
        """
        options = []
        if delete:
            options.append("--delete")
        if symlinks:
            options.append("--links")
        if checksum:
            options.append("--checksum")
        for exclude in exclude_list or []:
            options.append(f"--exclude {exclude}")
        src_to_dst = f"{self.user}@{self.ip}:{src} {dst} " if dut_to_controller else \
            f"{src} {self.user}@{self.ip}:{dst} "
        try:
            completed_process = subprocess.run(
                f'rsync -r -e "ssh -p {self.port} '
                f'-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" '
                + src_to_dst + f'{" ".join(options)}',
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                timeout=timeout.total_seconds())
        except Exception as e:
            TestRun.LOGGER.exception(f"Exception occurred during rsync process. "
                                     f"Please check your SSH key-based authentication.\n{e}")
            raise
        if completed_process.returncode:
            raise Exception(f"rsync failed:\n{completed_process}")

    def is_remote(self):
        return True

    def _check_config_for_reboot_timeout(self):
        """Read the optional ``reboot_timeout`` entry from the DUT config."""
        if "reboot_timeout" in TestRun.config.keys():
            self._parse_timeout_to_int()
        else:
            self.reboot_timeout = None

    def _parse_timeout_to_int(self):
        self.reboot_timeout = int(TestRun.config["reboot_timeout"])
        if self.reboot_timeout < 0:
            raise ValueError("Reboot timeout cannot be negative.")

    def reboot(self):
        """Reboot the DUT and wait until SSH drops and comes back."""
        self.run("reboot")
        self.wait_for_connection_loss()
        # Use the configured reboot timeout when present, otherwise the
        # wait_for_connection default.
        if self.reboot_timeout is not None:
            self.wait_for_connection(timedelta(seconds=self.reboot_timeout))
        else:
            self.wait_for_connection()

    def is_active(self):
        """Probe the session with an empty command; True when it is usable."""
        try:
            self.ssh.exec_command('', timeout=5)
            return True
        except Exception:
            return False

    def wait_for_connection(self, timeout: timedelta = timedelta(minutes=10)):
        """Retry connecting until success or ``timeout``; auth errors abort immediately."""
        start_time = datetime.now()
        with TestRun.group("Waiting for DUT ssh connection"):
            while start_time + timeout > datetime.now():
                try:
                    self.connect()
                    return
                except paramiko.AuthenticationException:
                    # Credentials won't get better by retrying.
                    raise
                except Exception:
                    continue
            raise ConnectionError("Timeout occurred while trying to establish ssh connection")

    def wait_for_connection_loss(self, timeout: timedelta = timedelta(minutes=1)):
        """Poll until a short reconnect attempt fails, i.e. the DUT went down."""
        with TestRun.group("Waiting for DUT ssh connection loss"):
            end_time = datetime.now() + timeout
            while end_time > datetime.now():
                self.disconnect()
                try:
                    self.connect(timeout=timedelta(seconds=5))
                except Exception:
                    return
            raise ConnectionError("Timeout occurred before ssh connection loss")

View File

@@ -0,0 +1,107 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
# The MIT License (MIT)
#
# Copyright (c) 2004-2020 Holger Krekel and others
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from itertools import product, combinations
import random
from core.test_run import TestRun
def testcase_id(param_set):
if len(param_set.values) == 1:
return param_set.values[0]
return "-".join([str(value) for value in param_set.values])
def generate_pair_testing_testcases(*argvals):
    """
    Generate test_cases from provided argument values lists in such way that each possible
    (argX, argY) pair will be used.

    BUG FIXES: the single-argument branch now returns instead of falling
    through into the pairwise machinery (which needlessly shuffled and
    consumed the random seed), and the caller's value lists are no longer
    mutated in place.
    """
    # if only one argument is used, yield from it
    if len(argvals) == 1:
        for val in argvals[0]:
            yield (val,)
        return
    # append argument index to argument values list to avoid confusion when there are two arguments
    # with the same type; build new lists instead of mutating the caller's
    indexed_args = [[(i, val) for val in arg] for i, arg in enumerate(argvals)]
    # generate all possible test cases
    all_test_cases = list(product(*indexed_args))
    random.seed(TestRun.random_seed)
    random.shuffle(all_test_cases)
    used_pairs = set()
    for tc in all_test_cases:
        current_pairs = set(combinations(tc, 2))
        # if cardinality of (current_pairs & used_pairs) is lesser than cardinality of
        # current_pairs it means not all argument pairs in this tc have been used. return
        # current tc and update used_pairs set
        if len(current_pairs & used_pairs) != len(current_pairs):
            used_pairs.update(current_pairs)
            # unpack testcase by deleting argument index
            yield list(list(zip(*tc))[1])
def register_testcases(metafunc, argnames, argvals):
    """
    Add custom parametrization test cases. Based on metafunc's parametrize method.

    NOTE(review): this reaches into pytest internals (CallSpec2, setmulti2,
    scope2index) — requirements pin pytest to <=6.2.5; revisit before any
    pytest upgrade.
    """
    from _pytest.python import CallSpec2, _find_parametrized_scope
    from _pytest.mark import ParameterSet
    from _pytest.fixtures import scope2index
    # One ParameterSet per generated test case; ids are derived from the values.
    parameter_sets = [ParameterSet(values=val, marks=[], id=None) for val in argvals]
    metafunc._validate_if_using_arg_names(argnames, False)
    arg_value_types = metafunc._resolve_arg_value_types(argnames, False)
    ids = [testcase_id(param_set) for param_set in parameter_sets]
    scope = _find_parametrized_scope(argnames, metafunc._arg2fixturedefs, False)
    scopenum = scope2index(scope, descr=f"parametrizex() call in {metafunc.function.__name__}")
    calls = []
    # Cross-product any pre-existing callspecs (from earlier parametrization)
    # with the new parameter sets, mirroring what Metafunc.parametrize does.
    for callspec in metafunc._calls or [CallSpec2(metafunc)]:
        for param_index, (param_id, param_set) in enumerate(zip(ids, parameter_sets)):
            newcallspec = callspec.copy()
            newcallspec.setmulti2(
                arg_value_types,
                argnames,
                param_set.values,
                param_id,
                param_set.marks,
                scopenum,
                param_index,
            )
            calls.append(newcallspec)
    metafunc._calls = calls

View File

@@ -0,0 +1,124 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
import sys
import importlib
import signal
from core.test_run import TestRun
class PluginManager:
    """Loads, configures and drives the lifecycle of test framework plugins.

    Plugin modules are looked up (see ``__import_plugin``) via an explicit
    ``provided_by`` module from config, then ``internal_plugins.<name>``,
    then ``external_plugins.<name>``. Required plugins come from config and
    ``@pytest.mark.require_plugin`` markers; optional plugins from config only.
    """

    def __init__(self, item, config):
        # Allow extra plugin packages to be importable from a configured dir.
        if 'plugins_dir' in config:
            sys.path.append(config['plugins_dir'])
        self.plugins = {}
        self.plugins_config = config.get('plugins', {})
        self.req_plugins = config.get('req_plugins', {})
        self.opt_plugins = config.get('opt_plugins', {})
        # Markers extend/override config: mark.args[0] is the plugin name,
        # the marker kwargs become its params.
        self.req_plugins.update(dict(map(lambda mark: (mark.args[0], mark.kwargs),
                                         item.iter_markers(name="require_plugin"))))
        req_plugin_mod = {}
        opt_plugin_mod = {}
        # Import everything first, instantiate later — a missing required
        # plugin skips the test before any plugin gets initialized.
        for name in self.req_plugins:
            try:
                req_plugin_mod[name] = self.__import_plugin(name)
            except ModuleNotFoundError:
                pytest.skip("Unable to find requested plugin!")
        for name in self.opt_plugins:
            try:
                opt_plugin_mod[name] = self.__import_plugin(name)
            except ModuleNotFoundError as e:
                # Optional plugins are best-effort: log and carry on.
                TestRun.LOGGER.debug(
                    f"Failed to import '{name}' - optional plugin. " f"Reason: {e}"
                )
                continue
        for name, mod in req_plugin_mod.items():
            try:
                self.plugins[name] = mod.plugin_class(
                    self.req_plugins[name],
                    self.plugins_config.get(name, {}).get("config", {}))
            except Exception:
                pytest.skip(f"Unable to initialize plugin '{name}'")
        for name, mod in opt_plugin_mod.items():
            try:
                self.plugins[name] = mod.plugin_class(
                    self.opt_plugins[name],
                    self.plugins_config.get(name, {}).get("config", {}))
            except Exception as e:
                TestRun.LOGGER.debug(
                    f"Failed to initialize '{name}' - optional plugin. " f"Reason: {e}"
                )
                continue

    def __import_plugin(self, name):
        # Resolution order: explicit provided_by module, internal_plugins,
        # external_plugins. Raises ModuleNotFoundError when nothing matches.
        provided_by = self.plugins_config.get(name, {}).get("provided_by")
        if provided_by:
            return importlib.import_module(provided_by)
        try:
            return importlib.import_module(f"internal_plugins.{name}")
        except ModuleNotFoundError:
            pass
        return importlib.import_module(f"external_plugins.{name}")

    def hook_pre_setup(self):
        # Fan the pre-setup hook out to every loaded plugin.
        for plugin in self.plugins.values():
            plugin.pre_setup()

    def hook_post_setup(self):
        for plugin in self.plugins.values():
            plugin.post_setup()

    def hook_teardown(self):
        for plugin in self.plugins.values():
            plugin.teardown()

    def get_plugin(self, name):
        """Return the loaded plugin instance for ``name``; KeyError when absent."""
        if name not in self.plugins:
            raise KeyError("Requested plugin does not exist")
        return self.plugins[name]

    def teardown_on_signal(self, sig_id, plugin_name):
        """Install a handler so the plugin's teardown() runs when ``sig_id`` arrives,
        then chain to whatever handler was installed before."""
        try:
            plugin = self.get_plugin(plugin_name)
        except Exception as e:
            TestRun.LOGGER.warning(
                f"Failed to setup teardown on signal for {plugin_name}. Reason: {e}")
            return
        old_sig_handler = None

        def signal_handler(sig, frame):
            plugin.teardown()
            if old_sig_handler is not None:
                if old_sig_handler == signal.SIG_DFL:
                    # In case of SIG_DFL the function pointer points to address 0,
                    # which is not a valid address.
                    # We have to reset the handler and raise the signal again
                    signal.signal(sig, signal.SIG_DFL)
                    signal.raise_signal(sig)
                    signal.signal(sig, signal_handler)
                elif old_sig_handler == signal.SIG_IGN:
                    # SIG_IGN has value 1 (also an invalid address).
                    # Here we can just return (do nothing)
                    return
                else:
                    # When we received neither SIG_IGN nor SIG_DFL, the received value is
                    # a valid function pointer and we can call the handler directly
                    # NOTE(review): called without (sig, frame) arguments — Python
                    # signal handlers expect both; likely a latent bug, confirm.
                    old_sig_handler()
                    signal.signal(sig, old_sig_handler)

        old_sig_handler = signal.signal(sig_id, signal_handler)

View File

@@ -0,0 +1,65 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from contextlib import contextmanager
import pytest
from log.logger import Log
class Blocked(Exception):
    """Raised via TestRun.block() to mark the current test as blocked."""
    pass
class TestRun:
    """Process-wide state of the currently executing test.

    The class attributes below are populated by the framework bootstrap code
    before the test body runs; tests access everything through this class.
    """
    dut = None              # currently active DUT
    executor = None         # command executor of the active DUT
    LOGGER: Log = None      # framework logger
    plugin_manager = None   # PluginManager of the active DUT
    duts = None             # all DUTs available to this run
    disks = None            # disks resolved for the active DUT

    @classmethod
    @contextmanager
    def use_dut(cls, dut):
        """Temporarily switch the global context to ``dut``.

        BUG FIX: cleanup now runs in a ``finally`` block, so the globals are
        restored even when the body raises.
        """
        cls.dut = dut
        cls.config = cls.dut.config
        cls.executor = cls.dut.executor
        cls.plugin_manager = cls.dut.plugin_manager
        cls.disks = cls.dut.req_disks
        try:
            yield cls.executor
        finally:
            cls.disks = None
            cls.plugin_manager = None
            cls.executor = None
            # setting cls.config to None omitted (causes problems in the teardown
            # stage of execution)
            cls.dut = None

    @classmethod
    def step(cls, message):
        """Open a logged test step context."""
        return cls.LOGGER.step(message)

    @classmethod
    def group(cls, message):
        """Open a logged test group context."""
        return cls.LOGGER.group(message)

    @classmethod
    def iteration(cls, iterable, group_name=None):
        """Yield items of ``iterable``, wrapping each in a logged iteration.

        NOTE(review): if the loop body raises, end_iteration/end_group are not
        reached — matches the original behavior.
        """
        cls.LOGGER.start_group(f"{group_name}" if group_name is not None else "Iteration list")
        items = list(iterable)
        for i, item in enumerate(items, start=1):
            cls.LOGGER.start_iteration(f"Iteration {i}/{len(items)}")
            yield item
            cls.LOGGER.end_iteration()
        cls.LOGGER.end_group()

    @classmethod
    def fail(cls, message):
        """Fail the current test immediately."""
        pytest.fail(message)

    @classmethod
    def block(cls, message):
        """Abort the current test, marking it as blocked."""
        raise Blocked(message)

View File

@@ -0,0 +1,272 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
import random
import sys
import traceback
import pytest
from IPy import IP
import core.test_run
from connection.local_executor import LocalExecutor
from connection.ssh_executor import SshExecutor
from core.pair_testing import generate_pair_testing_testcases, register_testcases
from core.plugins import PluginManager
from log.base_log import BaseLogResult
from storage_devices.disk import Disk
from test_utils import disk_finder
from test_utils.dut import Dut
# Shorthand alias: the helpers below attach themselves to the shared TestRun class.
TestRun = core.test_run.TestRun
@classmethod
def __configure(cls, config):
    """Register the framework's custom pytest markers and seed the RNG.

    The seed comes from --random-seed when given, otherwise it is drawn
    randomly, and is stored on the class for later re-seeding.
    """
    marker_descriptions = (
        "require_disk(name, type): require disk of specific type, otherwise skip",
        "require_plugin(name, *kwargs): require specific plugins, otherwise skip",
        "remote_only: run test only in case of remote execution, otherwise skip",
        "os_dependent: run test only if its OS dependent, otherwise skip",
        "multidut(number): test requires a number of different platforms to be executed",
        "parametrizex(argname, argvalues): sparse parametrized testing",
        "CI: marks test for continuous integration pipeline",
    )
    for description in marker_descriptions:
        config.addinivalue_line("markers", description)
    cls.random_seed = config.getoption("--random-seed") or random.randrange(sys.maxsize)
    random.seed(cls.random_seed)


TestRun.configure = __configure
@classmethod
def __prepare(cls, item, config):
    """Store the test item and DUT config; collect disk requirements from markers."""
    if not config:
        raise Exception("You need to specify DUT config!")
    cls.item = item
    cls.config = config
    requirements = [mark.args for mark in cls.item.iter_markers(name="require_disk")]
    cls.req_disks = dict(requirements)
    # dict() collapses duplicate names, so a length mismatch means a repeat.
    if len(cls.req_disks) != len(requirements):
        raise Exception("Disk name specified more than once!")


TestRun.prepare = __prepare
@classmethod
def __attach_log(cls, log_path, target_name=None):
    """Register an extra log file to be collected, under target_name
    (defaults to the log's basename)."""
    name = target_name if target_name is not None else posixpath.basename(log_path)
    if cls.config.get('extra_logs'):
        cls.config["extra_logs"][name] = log_path
    else:
        cls.config["extra_logs"] = {name: log_path}


TestRun.attach_log = __attach_log
@classmethod
def __setup_disk(cls, disk_name, disk_type):
    """Assign the first not-yet-used DUT disk matching disk_type to disk_name;
    skip the test when none is available."""
    candidates = (
        disk for disk in cls.dut.disks
        if disk.disk_type in disk_type.types() and disk not in cls.disks.values()
    )
    cls.disks[disk_name] = next(candidates, None)
    if not cls.disks[disk_name]:
        pytest.skip("Unable to find requested disk!")


TestRun.__setup_disk = __setup_disk
@classmethod
def __setup_disks(cls):
    """Resolve every disk requirement declared via @pytest.mark.require_disk.

    Requirements whose disk type reports resolved() are satisfied first;
    the loop repeats because assigning disks may make further types resolve.
    NOTE(review): if a type never resolves, this loop spins forever — confirm
    resolved() is guaranteed to converge.
    """
    cls.disks = {}
    items = list(cls.req_disks.items())
    while items:
        resolved, unresolved = [], []
        for disk_name, disk_type in items:
            # Boolean index trick: False -> resolved list, True -> unresolved.
            (resolved, unresolved)[not disk_type.resolved()].append((disk_name, disk_type))
        # Sort resolved requirements by their disk type (ordering defined by
        # the disk type class — presumably most constrained first; confirm).
        resolved.sort(
            key=lambda disk: (lambda disk_name, disk_type: disk_type)(*disk)
        )
        for disk_name, disk_type in resolved:
            cls.__setup_disk(disk_name, disk_type)
        items = unresolved
    cls.dut.req_disks = cls.disks


TestRun.__setup_disks = __setup_disks
@classmethod
def __presetup(cls):
    """Create the plugin manager, run pre-setup hooks, and build the executor
    (ssh or local) described by the DUT config."""
    cls.plugin_manager = PluginManager(cls.item, cls.config)
    cls.plugin_manager.hook_pre_setup()
    execution_type = cls.config['type']
    if execution_type == 'ssh':
        try:
            IP(cls.config['ip'])
        except ValueError:
            TestRun.block("IP address from config is in invalid format.")
        if 'user' not in cls.config:
            TestRun.block("There is no user given in config.")
        cls.executor = SshExecutor(
            cls.config['ip'],
            cls.config['user'],
            cls.config.get('port', 22)
        )
    elif execution_type == 'local':
        cls.executor = LocalExecutor()
    else:
        TestRun.block("Execution type (local/ssh) is missing in DUT config!")


TestRun.presetup = __presetup
@classmethod
def __setup(cls):
    """Build the Dut object, resolve required disks and run post-setup hooks.

    Assumes presetup() already created the executor and plugin manager.
    """
    # Honor the remote_only marker: skip when not executing over ssh.
    if list(cls.item.iter_markers(name="remote_only")):
        if not cls.executor.is_remote():
            pytest.skip()
    Disk.plug_all_disks()
    if cls.config.get('allow_disk_autoselect', False):
        cls.config["disks"] = disk_finder.find_disks()
    try:
        cls.dut = Dut(cls.config)
    except Exception as ex:
        raise Exception(f"Failed to setup DUT instance:\n"
                        f"{str(ex)}\n{traceback.format_exc()}")
    cls.__setup_disks()
    # Re-seed so randomness inside the test body is reproducible regardless of
    # how much randomness setup/parametrization consumed.
    TestRun.LOGGER.info(f"Re-seeding random number generator with seed: {cls.random_seed}")
    random.seed(cls.random_seed)
    cls.plugin_manager.hook_post_setup()


TestRun.setup = __setup
@classmethod
def __makereport(cls, item, call, res):
    """pytest makereport hook body: mirror the test outcome into the framework log.

    Stores per-phase result info on the item (rep_setup / rep_call / ...),
    logs failures with the appropriate severity, and syncs the final pytest
    outcome with the logger's accumulated result.
    """
    cls.outcome = res.outcome
    step_info = {
        'result': res.outcome,
        'exception': str(call.excinfo.value) if call.excinfo else None
    }
    setattr(item, "rep_" + res.when, step_info)
    from _pytest.outcomes import Failed
    from core.test_run import Blocked
    if res.when == "call" and res.failed:
        msg = f"{call.excinfo.type.__name__}: {call.excinfo.value}"
        if call.excinfo.type is Failed:
            # Explicit pytest.fail()/TestRun.fail() -> plain error entry.
            cls.LOGGER.error(msg)
        elif call.excinfo.type is Blocked:
            cls.LOGGER.blocked(msg)
        else:
            # Any unexpected exception type.
            cls.LOGGER.exception(msg)
    elif res.when == "setup" and res.failed:
        msg = f"{call.excinfo.type.__name__}: {call.excinfo.value}"
        cls.LOGGER.exception(msg)
        res.outcome = "failed"
    if res.outcome == "skipped":
        cls.LOGGER.skip("Test skipped.")
    # If anything degraded the log to FAILED during the call phase, make the
    # pytest outcome reflect it even if no exception escaped the test.
    if res.when == "call" and cls.LOGGER.get_result() == BaseLogResult.FAILED:
        res.outcome = "failed"
    # To print additional message in final test report, assign it to res.longrepr
    cls.LOGGER.generate_summary(item, cls.config.get('meta'))


TestRun.makereport = __makereport
@classmethod
def __generate_tests(cls, metafunc):
    """pytest generate_tests hook body: expand @pytest.mark.parametrizex marks
    into either full cartesian or sparse pairwise parametrization."""
    all_marks = getattr(metafunc.function, "pytestmark", [])
    parametrizex_marks = [mark for mark in all_marks if mark.name == "parametrizex"]
    if not parametrizex_marks:
        random.seed(TestRun.random_seed)
        return
    argnames = [mark.args[0] for mark in parametrizex_marks]
    argvals = [list(mark.args[1]) for mark in parametrizex_marks]
    parametrization = metafunc.config.getoption("--parametrization-type")
    if parametrization == "full":
        for name, values in zip(argnames, argvals):
            metafunc.parametrize(name, values)
    elif parametrization == "pair":
        register_testcases(metafunc, argnames, generate_pair_testing_testcases(*argvals))
    else:
        raise Exception("Not supported parametrization type")
    # Re-seed so test-collection randomness does not leak into test bodies.
    random.seed(TestRun.random_seed)


TestRun.generate_tests = __generate_tests
@classmethod
def __addoption(cls, parser):
    """Register the framework's command line options with pytest."""
    parser.addoption("--parametrization-type", choices=["pair", "full"], default="pair")
    parser.addoption("--random-seed", default=None, type=int)


TestRun.addoption = __addoption
@classmethod
def __teardown(cls):
    """Run plugin teardown hooks on every DUT of this run."""
    for dut in cls.duts:
        with cls.use_dut(dut):
            manager = cls.plugin_manager
            if manager:
                manager.hook_teardown()


TestRun.teardown = __teardown

View File

@@ -0,0 +1,4 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

View File

@@ -0,0 +1,22 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class ExamplePlugin:
    """Minimal reference implementation of the plugin interface.

    A plugin module must expose a ``plugin_class`` attribute pointing to a
    class with this constructor signature and the three hook methods below
    (they are invoked by PluginManager's hook_* methods).
    """

    def __init__(self, params, config):
        self.params = params
        # CONSISTENCY FIX: keep the plugin config instead of discarding it,
        # matching the other bundled plugins.
        self.config = config
        print(f"Example plugin initialized with params {self.params}")

    def pre_setup(self):
        # Called before DUT setup.
        print("Example plugin pre setup")

    def post_setup(self):
        # Called after DUT setup completes.
        print("Example plugin post setup")

    def teardown(self):
        # Called during test teardown.
        print("Example plugin teardown")


plugin_class = ExamplePlugin

View File

@@ -0,0 +1,48 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import timedelta
from connection.local_executor import LocalExecutor
from connection.ssh_executor import SshExecutor
from core.test_run import TestRun
class PowerControlPlugin:
    """Plugin power-cycling a libvirt guest DUT via ``virsh reset``."""

    def __init__(self, params, config):
        print("Power Control LibVirt Plugin initialization")
        # BUG FIX: config was never stored, yet pre_setup() and power_cycle()
        # read self.config — every use raised AttributeError.
        self.config = config
        self.params = params
        try:
            self.ip = config['ip']
            self.user = config['user']
        except Exception:
            raise Exception("Missing fields in config! ('ip' and 'user' required)")

    def pre_setup(self):
        """Create the executor used to reach the libvirt host."""
        print("Power Control LibVirt Plugin pre setup")
        if self.config['connection_type'] == 'ssh':
            self.executor = SshExecutor(
                self.ip,
                self.user,
                self.config.get('port', 22)
            )
        else:
            self.executor = LocalExecutor()

    def post_setup(self):
        pass

    def teardown(self):
        pass

    def power_cycle(self):
        """Hard-reset the guest domain and wait for the DUT to come back."""
        self.executor.run(f"virsh reset {self.config['domain']}")
        TestRun.executor.wait_for_connection_loss()
        timeout = TestRun.config.get('reboot_timeout')
        if timeout:
            TestRun.executor.wait_for_connection(timedelta(seconds=int(timeout)))
        else:
            TestRun.executor.wait_for_connection()


plugin_class = PowerControlPlugin

View File

@@ -0,0 +1,39 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from time import sleep
from core.test_run_utils import TestRun
from storage_devices.device import Device
from test_utils import os_utils
from test_utils.output import CmdException
class ScsiDebug:
    """Plugin managing the scsi_debug kernel module used to emulate disks."""

    def __init__(self, params, config):
        self.params = params
        self.module_name = "scsi_debug"

    def pre_setup(self):
        pass

    def post_setup(self):
        self.reload()

    def reload(self):
        """Unload (if loaded) then load scsi_debug with the configured params."""
        self.teardown()
        sleep(1)
        output = os_utils.load_kernel_module(self.module_name, self.params)
        if output.exit_code != 0:
            raise CmdException(f"Failed to load {self.module_name} module", output)
        TestRun.LOGGER.info(f"{self.module_name} loaded successfully.")
        # Give the kernel time to create the emulated devices before scanning.
        sleep(10)
        TestRun.scsi_debug_devices = Device.get_scsi_debug_devices()

    def teardown(self):
        if not os_utils.is_kernel_module_loaded(self.module_name):
            return
        os_utils.unload_kernel_module(self.module_name)


plugin_class = ScsiDebug

View File

@@ -0,0 +1,97 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import time
import posixpath
from datetime import timedelta
from core.test_run import TestRun
from test_tools import fs_utils
class Vdbench:
    """Plugin wrapping the vdbench I/O benchmark: install, configure, run, parse."""

    def __init__(self, params, config):
        print("VDBench plugin initialization")
        self.run_time = timedelta(seconds=60)
        try:
            self.working_dir = config["working_dir"]
            self.reinstall = config["reinstall"]
            self.source_dir = config["source_dir"]
        except Exception:
            raise Exception("Missing fields in config! ('working_dir', 'source_dir' and "
                            "'reinstall' required)")
        self.result_dir = posixpath.join(self.working_dir, 'result.tod')

    def pre_setup(self):
        pass

    def post_setup(self):
        """Install vdbench into the working dir unless already present and reinstall is off."""
        print("VDBench plugin post setup")
        if not self.reinstall and fs_utils.check_if_directory_exists(self.working_dir):
            return
        if fs_utils.check_if_directory_exists(self.working_dir):
            fs_utils.remove(self.working_dir, True, True)
        fs_utils.create_directory(self.working_dir)
        TestRun.LOGGER.info("Copying vdbench to working dir.")
        fs_utils.copy(posixpath.join(self.source_dir, "*"), self.working_dir, True, True)

    def teardown(self):
        pass

    def create_config(self, config, run_time: timedelta):
        """Write param.ini from the given config string, appending the elapsed time."""
        self.run_time = run_time
        if config[-1] != ",":
            config += ","
        config += f"elapsed={int(run_time.total_seconds())}"
        TestRun.LOGGER.info(f"Vdbench config:\n{config}")
        fs_utils.write_file(posixpath.join(self.working_dir, "param.ini"), config)

    def run(self):
        """Launch vdbench in a detached screen session and poll until it finishes."""
        cmd = f"{posixpath.join(self.working_dir, 'vdbench')} " \
              f"-f {posixpath.join(self.working_dir, 'param.ini')} " \
              f"-vr -o {self.result_dir}"
        TestRun.executor.run(f"screen -dmS vdbench {cmd}")
        deadline = time.time() + (self.run_time * 1.5).total_seconds()
        while True:
            ps_check = TestRun.executor.run(f"ps aux | grep '{cmd}' | grep -v grep")
            if ps_check.exit_code != 0:
                return self.analyze_log()
            if time.time() > deadline:
                TestRun.LOGGER.error("Vdbench timeout.")
                return False
            time.sleep(1)

    def analyze_log(self):
        """Inspect the newest result directory's logfile.html for the verdict."""
        output = TestRun.executor.run(
            f"ls -1td {self.result_dir[0:len(self.result_dir) - 3]}* | head -1")
        newest_result = output.stdout if output.exit_code == 0 else self.result_dir
        log_file = fs_utils.read_file(posixpath.join(newest_result, "logfile.html"))
        if "Vdbench execution completed successfully" in log_file:
            TestRun.LOGGER.info("Vdbench execution completed successfully.")
            return True
        if "Data Validation error" in log_file or "data_errors=1" in log_file:
            TestRun.LOGGER.error("Data corruption occurred!")
        elif "Heartbeat monitor:" in log_file:
            TestRun.LOGGER.error("Vdbench: heartbeat.")
        else:
            TestRun.LOGGER.error("Vdbench unknown result.")
        return False


plugin_class = Vdbench

View File

@@ -0,0 +1,78 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from enum import Enum
from re import sub
class BaseLogResult(Enum):
    """Log result severity, ordered: higher value means a worse outcome.

    BaseLog keeps the maximum value reported, so a single FAILED entry
    outweighs any number of PASSED/WARNING entries.
    """
    DEBUG = 10
    PASSED = 11
    WORKAROUND = 12
    WARNING = 13
    SKIPPED = 14
    FAILED = 15
    EXCEPTION = 16
    BLOCKED = 17
    CRITICAL = 18
def escape(msg):
    """Strip characters that are not allowed in XML 1.0 documents from msg."""
    invalid_xml_chars = u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+'
    return sub(invalid_xml_chars, '', msg)
class BaseLog():
    """Result-accumulating log base class.

    Tracks the worst result reported so far (ordered by BaseLogResult value);
    message formatting/output is left to subclasses, which override the
    per-severity methods and call super() to keep the result bookkeeping.
    """

    def __init__(self, begin_message=None):
        self.__begin_msg = begin_message
        self.__result = BaseLogResult.PASSED

    def __enter__(self):
        if self.__begin_msg is not None:
            self.begin(self.__begin_msg)
        else:
            self.begin("Start BaseLog ...")
        # BUG FIX: __enter__ previously returned None, so
        # `with BaseLog(...) as log:` did not expose the log object.
        return self

    def __exit__(self, *args):
        self.end()

    def __try_to_set_new_result(self, new_result):
        # Only ever escalate: keep the worst (highest-valued) result seen.
        if new_result.value > self.__result.value:
            self.__result = new_result

    def begin(self, message):
        pass

    def debug(self, message):
        pass

    def info(self, message):
        pass

    def workaround(self, message):
        self.__try_to_set_new_result(BaseLogResult.WORKAROUND)

    def warning(self, message):
        self.__try_to_set_new_result(BaseLogResult.WARNING)

    def skip(self, message):
        self.__try_to_set_new_result(BaseLogResult.SKIPPED)

    def error(self, message):
        self.__try_to_set_new_result(BaseLogResult.FAILED)

    def blocked(self, message):
        self.__try_to_set_new_result(BaseLogResult.BLOCKED)

    def exception(self, message):
        self.__try_to_set_new_result(BaseLogResult.EXCEPTION)

    def critical(self, message):
        self.__try_to_set_new_result(BaseLogResult.CRITICAL)

    def end(self):
        """Close the log; return the accumulated (worst) result."""
        return self.__result

    def get_result(self):
        """Return the accumulated (worst) result without closing the log."""
        return self.__result

View File

@@ -0,0 +1,43 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLogResult, BaseLog
from log.group.html_group_log import HtmlGroupLog
from datetime import datetime
class HtmlChapterGroupLog(HtmlGroupLog):
    """Group log for the chapter (table-of-contents) section of a page.

    Mirrors the structure of the main iteration group log; chapter entries
    are cross-linked to the corresponding body groups.
    """

    # Maps an aggregated result onto the matching unbound BaseLog handler;
    # entries are invoked as SET_RESULT[result](self, message).
    SET_RESULT = {
        BaseLogResult.PASSED: BaseLog.info,
        BaseLogResult.WORKAROUND: BaseLog.workaround,
        BaseLogResult.WARNING: BaseLog.warning,
        BaseLogResult.SKIPPED: BaseLog.skip,
        BaseLogResult.FAILED: BaseLog.error,
        BaseLogResult.BLOCKED: BaseLog.blocked,
        BaseLogResult.EXCEPTION: BaseLog.exception,
        BaseLogResult.CRITICAL: BaseLog.critical}

    def __init__(self, html_base, cfg, begin_msg=None, id='ch0'):
        super().__init__(HtmlChapterGroupLog._factory, html_base, cfg, begin_msg, id)

    @staticmethod
    def _factory(html_base, cfg, begin_msg, id):
        # Used by HtmlGroupLog to spawn nested chapter groups of this type.
        return HtmlChapterGroupLog(html_base, cfg, begin_msg, id)

    def end_dir_group(self, ref_group):
        # Close the innermost chapter group and make its header double-click
        # jump to the container of the referenced body group (by element id).
        group = super().end_group()
        ref_container_id = ref_group._container.get('id')
        group._header.set('ondblclick', f"chapterClick('{ref_container_id}')")

    def set_result(self, result):
        # Propagate the result down the chain of open sub-groups, escalating
        # each level via the matching BaseLog handler.
        if self._successor is not None:
            self._successor.set_result(result)
        HtmlChapterGroupLog.SET_RESULT[result](self, "set result")

    def end(self):
        # Close this group and let the config render its execution time and
        # result styling on the chapter header/container.
        result = super().end()
        exe_time = (datetime.now() - self._start_time).seconds
        self._cfg.group_chapter_end(exe_time, self._header, self._container, result)
        return result

View File

@@ -0,0 +1,139 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import datetime
from log.base_log import BaseLog, BaseLogResult
class HtmlGroupLog(BaseLog):
    """BaseLog that renders nested message groups into an HTML element tree.

    Groups form a chain of responsibility: while a sub-group is open
    (``self._successor is not None``) every call is forwarded to it;
    otherwise the message is rendered into this group's own container.
    """

    def __init__(self, constructor, html_base_element, cfg, begin_message, id_):
        super().__init__(begin_message)
        # Innermost open sub-group, or None when this group is the deepest.
        self._successor = None
        # Factory producing sub-groups of the same concrete type.
        self.__factory = constructor
        self.__log_main_store = html_base_element
        self._id = id_
        self._container = None
        self._header = None
        # Per-group message counter; used to build unique step/sub-group ids.
        self.__msg_idx = 0
        self._start_time = datetime.now()
        self._cfg = cfg
        self._header_msg_type = type(begin_message)

    def begin(self, message):
        # The presentation policy is selected by the message's type.
        policy = self._cfg.get_policy(type(message))
        self._header, self._container = policy.group_begin(self._id, message, self.__log_main_store)
        super().begin(message)

    def get_step_id(self):
        # Id of the next step of the innermost open group, e.g. 'step.ch0.3.1'.
        if self._successor is not None:
            return self._successor.get_step_id()
        else:
            return f'step.{self._id}.{self.__msg_idx}'

    def __add_test_step(self, message, result=BaseLogResult.PASSED):
        # Render one step into this group's container via the type's policy.
        policy = self._cfg.get_policy(type(message))
        policy.standard(self.get_step_id(), message, result, self._container)
        self.__msg_idx += 1

    def get_main_log_store(self):
        return self.__log_main_store

    def start_group(self, message):
        # Open a new sub-group at the innermost level and return it.
        self._header_msg_type = type(message)
        if self._successor is not None:
            result = self._successor.start_group(message)
        else:
            new_id = f"{self._id}.{self.__msg_idx}"
            self.__msg_idx += 1
            self._successor = self.__factory(self._container, self._cfg, message, new_id)
            self._successor.begin(message)
            result = self._successor
        return result

    def end_group(self):
        # Close the innermost open sub-group (or this group itself when none
        # is open) and return the group that was closed.
        if self._successor is not None:
            if self._successor._successor is None:
                self._successor.end()
                result = self._successor
                self._successor = None
            else:
                result = self._successor.end_group()
        else:
            self.end()
            result = self
        return result

    def debug(self, message):
        if self._successor is not None:
            self._successor.debug(message)
        else:
            self.__add_test_step(message, BaseLogResult.DEBUG)
        return super().debug(message)

    def info(self, message):
        if self._successor is not None:
            self._successor.info(message)
        else:
            self.__add_test_step(message)
        super().info(message)

    def workaround(self, message):
        if self._successor is not None:
            self._successor.workaround(message)
        else:
            self.__add_test_step(message, BaseLogResult.WORKAROUND)
        super().workaround(message)

    def warning(self, message):
        if self._successor is not None:
            self._successor.warning(message)
        else:
            self.__add_test_step(message, BaseLogResult.WARNING)
        super().warning(message)

    def skip(self, message):
        if self._successor is not None:
            self._successor.skip(message)
        else:
            self.__add_test_step(message, BaseLogResult.SKIPPED)
        super().skip(message)

    def error(self, message):
        if self._successor is not None:
            self._successor.error(message)
        else:
            self.__add_test_step(message, BaseLogResult.FAILED)
        super().error(message)

    def blocked(self, message):
        if self._successor is not None:
            self._successor.blocked(message)
        else:
            self.__add_test_step(message, BaseLogResult.BLOCKED)
        super().blocked(message)

    def critical(self, message):
        if self._successor is not None:
            self._successor.critical(message)
        else:
            self.__add_test_step(message, BaseLogResult.CRITICAL)
        super().critical(message)

    def exception(self, message):
        if self._successor is not None:
            self._successor.exception(message)
        else:
            self.__add_test_step(message, BaseLogResult.EXCEPTION)
        super().exception(message)

    def end(self):
        return super().end()

    def get_current_group(self):
        # Innermost open group (self when no sub-group is open).
        if self._successor is not None:
            result = self._successor.get_current_group()
        else:
            result = self
        return result

View File

@@ -0,0 +1,20 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.group.html_group_log import HtmlGroupLog
class HtmlIterationGroupLog(HtmlGroupLog):
    """Group log used for the main body of a single iteration's HTML page."""

    def __init__(self, html_base, cfg, begin_msg, id='itg0'):
        super().__init__(HtmlIterationGroupLog._factory, html_base, cfg, begin_msg, id)

    @staticmethod
    def _factory(html_base, cfg, begin_msg, id):
        # Called by the base class to spawn nested sub-groups of this type.
        return HtmlIterationGroupLog(html_base, cfg, begin_msg, id)

    def end(self):
        # Close the group, then let the config style header and container
        # according to the aggregated result.
        outcome = super().end()
        self._cfg.group_end(self._id, self._header, self._container, outcome)
        return outcome

View File

@@ -0,0 +1,102 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_log import HtmlFileLog
from log.group.html_chapter_group_log import HtmlChapterGroupLog
from log.group.html_iteration_group_log import HtmlIterationGroupLog
from datetime import datetime
from lxml.etree import Element
class HtmlFileItemLog(HtmlFileLog):
    """HTML log file for a single item (one iteration or the setup summary).

    Maintains two parallel group trees - the chapter list (table of
    contents) and the main body - plus a dropdown of severe messages used
    for quick navigation to failures.
    """

    def __init__(self, html_file_path, test_title, cfg, iteration_title="Test summary"):
        super().__init__(html_file_path, test_title)
        root = self.get_root()
        self._log_items_store = root.xpath('/html/body')[0]
        self._idx = 0
        self._log_chapters_store = root.xpath('/html/body/section[@id="iteration-chapters"]')[0]
        # The chapter (TOC) tree mirrors the structure of the body tree.
        self._chapter_group = HtmlChapterGroupLog(self._log_chapters_store, cfg, test_title)
        self._main_group = HtmlIterationGroupLog(self._log_items_store, cfg, test_title)
        self._start_time = datetime.now()
        iteration_title_node = root.xpath('/html/body/a/h1')[0]
        iteration_title_node.text = iteration_title
        self._config = cfg
        # <select> element listing fail/blocked/exception/critical steps.
        self._fail_container = root.xpath('/html/body/div/select[@id="error-list-selector"]')[0]

    def __add_error(self, msg_idx, msg, error_class):
        # Register a severe message in the error selector, keyed by step id
        # so the UI can jump to the corresponding entry.
        fail_element = Element('option', value=msg_idx)
        fail_element.set('class', error_class)
        fail_element.text = msg
        self._fail_container.append(fail_element)

    def start_iteration(self, message):
        super().begin(message)

    def get_result(self):
        # Overall result is tracked by the main body group tree.
        return self._main_group.get_result()

    def begin(self, message):
        self._chapter_group.begin(message)
        self._main_group.begin(message)

    def debug(self, message):
        self._main_group.debug(message)

    def info(self, message):
        self._main_group.info(message)

    def workaround(self, message):
        self._main_group.workaround(message)

    def warning(self, message):
        self._main_group.warning(message)

    def skip(self, message):
        self._main_group.skip(message)

    def error(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "fail")
        self._main_group.error(message)

    def blocked(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "blocked")
        self._main_group.blocked(message)

    def exception(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "exception")
        self._main_group.exception(message)

    def critical(self, message):
        msg_idx = self._main_group.get_step_id()
        self.__add_error(msg_idx, message, "critical")
        self._main_group.critical(message)

    def start_group(self, message):
        # Open a group in both the chapter (TOC) tree and the body tree.
        self._chapter_group.start_group(message)
        self._main_group.start_group(message)

    def end_group(self):
        # Close the innermost group in both trees, propagating the body
        # group's result to its chapter entry and cross-linking them.
        ref_group = self._main_group.get_current_group()
        self._chapter_group.set_result(ref_group.get_result())
        self._main_group.end_group()
        self._chapter_group.end_dir_group(ref_group)

    def end_all_groups(self):
        while self._main_group._successor is not None:
            self.end_group()

    def end(self):
        # Close any remaining groups (including the root pair), stamp the
        # execution time and final status, then serialize the file to disk.
        while self._main_group._successor is not None:
            self.end_group()
        self.end_group()
        time_result = datetime.now() - self._start_time
        time_node = self.get_root().xpath('/html/body/div[@class="iteration-execution-time"]')[0]
        status_node = self.get_root().xpath('/html/body/div[@class="iteration-status"]')[0]
        self._config.end_iteration_func(
            time_node, status_node, time_result.total_seconds(), self.get_result())
        super().end()

View File

@@ -0,0 +1,29 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLog
from lxml.html import fromstring
from lxml.html import tostring
class HtmlFileLog(BaseLog):
    """BaseLog backed by an HTML file parsed with lxml.

    The template is loaded on construction, mutated in memory through the
    lxml element tree, and serialized back over the same path in end().
    """

    def __init__(self, file_path, title):
        super().__init__(title)
        self.__path = file_path
        # The templates declare <meta charset="UTF-8"/>; read them explicitly
        # as UTF-8 instead of relying on the platform default encoding.
        with open(file_path, encoding="utf-8") as file_stream:
            self.__root = fromstring(file_stream.read())
        node_list = self.__root.xpath('/html/head/title')
        node_list[0].text = title

    def get_path(self):
        return self.__path

    def get_root(self):
        # lxml root element of the parsed HTML document.
        return self.__root

    def end(self):
        # Serialize the (possibly modified) tree back over the original file;
        # tostring() returns bytes, hence the binary mode.
        with open(self.__path, "wb") as file:
            file.write(tostring(self.__root))

View File

@@ -0,0 +1,13 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_item_log import HtmlFileItemLog
class HtmlIterationLog(HtmlFileItemLog):
    """Per-iteration HTML log file."""

    def __init__(self, test_title, iteration_title, config):
        # Flipped to True by the log manager once the iteration is ended.
        self.iteration_closed: bool = False
        iteration_file = config.create_iteration_file()
        super().__init__(iteration_file, test_title, config, iteration_title)

View File

@@ -0,0 +1,204 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
from os import path, environ, makedirs
from datetime import datetime
from shutil import copyfile
from lxml.etree import Element
from log.base_log import BaseLogResult
from log.presentation_policy import null_policy
def convert_seconds_to_str(time_in_sec):
    """Format a duration in seconds as ``HH:MM:SS [s]``, prefixed with
    ``Nd `` for durations of one day or longer.

    Fixes the original off-by-one at the day boundary: exactly 86400 s used
    to render as "00:00:00 [s]" because the day prefix was only added for
    durations *strictly greater* than one day.
    """
    days, remainder = divmod(int(time_in_sec), 86400)
    h, remainder = divmod(remainder, 3600)
    m, s = divmod(remainder, 60)
    time_msg = f"{h:02}:{m:02}:{s:02} [s]"
    if days > 0:
        time_msg = f"{days}d {time_msg}"
    return time_msg
class HtmlLogConfig:
    """Configuration and HTML-rendering helpers for the log file tree.

    Owns the on-disk layout (main page, per-iteration files, dut_info
    folder), maps message types to presentation policies, and styles
    finished iterations and groups according to their BaseLogResult.
    """

    # CSS class suffix applied for each result severity ('' = passed).
    STYLE = {
        BaseLogResult.DEBUG: 'debug',
        BaseLogResult.PASSED: '',
        BaseLogResult.WORKAROUND: 'workaround',
        BaseLogResult.WARNING: 'warning',
        BaseLogResult.SKIPPED: 'skip',
        BaseLogResult.FAILED: 'fail',
        BaseLogResult.BLOCKED: 'blocked',
        BaseLogResult.CRITICAL: 'critical',
        BaseLogResult.EXCEPTION: 'exception'}

    __MAIN = 'main'
    __SETUP = 'setup'
    __T_ITERATION = 'iteration'
    # Folder (next to this module) holding the HTML/CSS/JS templates.
    __FRAMEWORK_T_FOLDER = 'template'
    MAIN = __MAIN + '.html'
    CSS = __MAIN + '.css'
    JS = __MAIN + '.js'
    ITERATION_FOLDER = 'iterations'
    SETUP = __SETUP + ".html"

    def iteration(self):
        # File name of the current iteration, e.g. 'iteration_001.html'.
        return f'{HtmlLogConfig.__T_ITERATION}_{str(self._iteration_id).zfill(3)}.html'

    def __init__(self, base_dir=None, presentation_policy=null_policy):
        self._log_base_dir = base_dir
        if base_dir is None:
            if os.name == 'nt':
                self._log_base_dir = 'c:\\History'
            else:
                if environ["USER"] == 'root':
                    self._log_base_dir = '/root/history'
                else:
                    # NOTE(review): unlike the root branch this is the bare
                    # home directory rather than a 'history' subdirectory -
                    # confirm the asymmetry is intended.
                    self._log_base_dir = f'/home/{environ["USER"]}'
        self._log_dir = None
        # Message type -> PresentationPolicy; str is registered by default.
        self._presentation_policy = {}
        self.register_presentation_policy(str, presentation_policy)
        self._iteration_id = 0

    def get_iteration_id(self):
        return self._iteration_id

    def get_policy(self, type):
        # Parameter deliberately keeps its original (builtin-shadowing) name
        # for compatibility with any keyword-argument callers.
        return self._presentation_policy[type]

    def get_policy_collection(self):
        # Yields the registered policies (keys are ignored).
        for type, policy in self._presentation_policy.items():
            yield policy

    def register_presentation_policy(self, type, presentation_policy):
        self._presentation_policy[type] = presentation_policy

    def __find_template_file(self, name, relative_path=None):
        # Resolve a template shipped next to this module; raises if missing.
        base_dir = path.dirname(path.abspath(__file__))
        file_path = path.join(base_dir, HtmlLogConfig.__FRAMEWORK_T_FOLDER)
        if relative_path is not None:
            file_path = path.join(file_path, relative_path)
        file_path = path.join(file_path, name)
        if path.isfile(file_path):
            return file_path
        else:
            raise Exception(
                f"Unable to find file: {name} in location: {os.path.dirname(file_path)}")

    def __get_main_template_file_path(self):
        return self.__find_template_file(HtmlLogConfig.MAIN)

    def _get_setup_template_file_path(self):
        return self.__find_template_file(HtmlLogConfig.SETUP, HtmlLogConfig.ITERATION_FOLDER)

    def __get_iteration_template_path(self):
        return self.__find_template_file(HtmlLogConfig.__T_ITERATION + '.html',
                                         HtmlLogConfig.ITERATION_FOLDER)

    def create_html_test_log(self, test_title):
        # Create <base>/<test_title>/<timestamp>/ with iteration and dut_info
        # subfolders, copy the static templates in; returns the log dir path.
        now = datetime.now()
        time_stamp = f"{now.year}_{str(now.month).zfill(2)}_{str(now.day).zfill(2)}_" \
                     f"{str(now.hour).zfill(2)}_{str(now.minute).zfill(2)}_{str(now.second).zfill(2)}"
        self._log_dir = path.join(self._log_base_dir, test_title, time_stamp)
        makedirs(self._log_dir)
        additional_location = path.join(self._log_dir, HtmlLogConfig.ITERATION_FOLDER)
        makedirs(additional_location)
        dut_info_folder = path.join(self._log_dir, 'dut_info')
        makedirs(dut_info_folder)
        main_html = self.__get_main_template_file_path()
        # NOTE(review): replaces every occurrence of 'html' in the whole
        # path, not just the extension - fragile if any directory name
        # contains 'html'.
        main_css = main_html.replace('html', 'css')
        main_js = main_html.replace('html', 'js')
        copyfile(main_html, path.join(self._log_dir, HtmlLogConfig.MAIN))
        copyfile(main_css, path.join(self._log_dir, HtmlLogConfig.CSS))
        copyfile(main_js, path.join(self._log_dir, HtmlLogConfig.JS))
        copyfile(self._get_setup_template_file_path(), path.join(additional_location,
                                                                 HtmlLogConfig.SETUP))
        return self._log_dir

    def get_main_file_path(self):
        return path.join(self._log_dir, HtmlLogConfig.MAIN)

    def get_setup_file_path(self):
        return path.join(self._log_dir, HtmlLogConfig.ITERATION_FOLDER, HtmlLogConfig.SETUP)

    def create_iteration_file(self):
        # Allocate the next iteration id and instantiate its HTML file from
        # the template; returns the new file's path.
        self._iteration_id += 1
        template_file = self.__get_iteration_template_path()
        new_file_name = self.iteration()
        result = path.join(self._log_dir, HtmlLogConfig.ITERATION_FOLDER, new_file_name)
        copyfile(template_file, result)
        return result

    def end_iteration(self,
                      iteration_selector_div,
                      iteration_selector_select,
                      iteration_id,
                      iteration_result):
        # Add the finished iteration to the sidebar: a numbered link in the
        # selector div (line break every 8 entries) plus a dropdown option,
        # both styled by the iteration's result. `iteration_selector_div` is
        # an xpath result list; element [0] is the target node.
        style = "iteration-selector"
        if iteration_result != BaseLogResult.PASSED:
            style = f'{style} {HtmlLogConfig.STYLE[iteration_result]}'
        if iteration_id and iteration_id % 8 == 0:
            new_element = Element("br")
            iteration_selector_div[0].append(new_element)
        new_element = Element("a")
        new_element.set('class', style)
        new_element.set('onclick', f"selectIteration('{iteration_id}')")
        new_element.text = str(iteration_id)
        iteration_selector_div[0].append(new_element)
        new_element = Element('option', value=f"{iteration_id}")
        new_element.text = 'iteration_' + str(iteration_id).zfill(3)
        if iteration_result != BaseLogResult.PASSED:
            new_element.set('class', HtmlLogConfig.STYLE[iteration_result])
        iteration_selector_select.append(new_element)

    def end_setup_iteration(self, iteration_selector_div, iteration_selector_select, log_result):
        # Style the setup entry in the sidebar when setup did not pass.
        if log_result != BaseLogResult.PASSED:
            a_element = iteration_selector_div[0]
            select_element = iteration_selector_select[0]
            a_element.set('class', f'iteration-selector {HtmlLogConfig.STYLE[log_result]}')
            select_element.set('class', HtmlLogConfig.STYLE[log_result])

    def end_iteration_func(self, time_node, status_node, time_in_sec, log_result):
        # Stamp execution time and final status onto an iteration page.
        time_node.text = f"Execution time: {convert_seconds_to_str(time_in_sec)}"
        status_node.text = f"Iteration status: {log_result.name}"
        if log_result != BaseLogResult.PASSED:
            status_node.set('class', f'iteration-status {HtmlLogConfig.STYLE[log_result]}')

    def end_main_log(self, test_status_div, log_result):
        # Write the overall test status into the sidebar of the main page.
        if log_result != BaseLogResult.PASSED:
            test_status_div[0].set('class',
                                   f"sidebar-test-status {HtmlLogConfig.STYLE[log_result]}")
        test_status_div[0].text = f"Test status: {log_result.name}"

    def group_end(self, msg_id, html_header, html_container, log_result):
        # Finalize a body group: clickable header toggling its <ul>, a [TOP]
        # link, and result styling; passed groups start collapsed.
        html_header.set('onclick', f"showHide('ul_{msg_id}')")
        sub_element = Element('a', href="#top")
        sub_element.text = "[TOP]"
        sub_element.set('class', "top-time-marker")
        html_header.append(sub_element)
        div_style = 'test-group-step'
        ul_style = 'iteration-content'
        if log_result == BaseLogResult.PASSED:
            html_container.set('style', "display: none;")
        else:
            div_style = f"{div_style} {HtmlLogConfig.STYLE[log_result]}"
            ul_style = f"{ul_style} {HtmlLogConfig.STYLE[log_result]}"
        html_header.set('class', div_style)
        html_container.set('class', ul_style)

    def group_chapter_end(self, time_in_sec, html_header, html_container, log_result):
        # Finalize a chapter (TOC) group: execution-time marker plus result
        # styling on header and container.
        sub_element = Element('a')
        sub_element.text = convert_seconds_to_str(time_in_sec)
        sub_element.set('class', 'top-marker')
        html_header.append(sub_element)
        div_style = 'test-group-step'
        ul_style = 'iteration-content'
        if log_result != BaseLogResult.PASSED:
            div_style = f"{div_style} {HtmlLogConfig.STYLE[log_result]}"
            ul_style = f"{ul_style} {HtmlLogConfig.STYLE[log_result]}"
        html_header.set('class', div_style)
        html_container.set('class', ul_style)

View File

@@ -0,0 +1,126 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLog, escape
from log.html_iteration_log import HtmlIterationLog
from log.html_log_config import HtmlLogConfig
from log.html_main_log import HtmlMainLog
from log.html_setup_log import HtmlSetupLog
class HtmlLogManager(BaseLog):
    """Top-level HTML log coordinator.

    Owns the main (sidebar) log, the setup log and one iteration log per
    test iteration, and routes every log call to the currently active one.
    """

    def __init__(self, begin_message=None, log_config=None):
        super().__init__(begin_message)
        self._config = HtmlLogConfig() if log_config is None else log_config
        self._main = None
        self._log_setup = None
        self._log_iterations = []
        # Log currently receiving messages: the setup log between
        # iterations, the newest iteration log inside one.
        self._current_log = None
        self._files_path = None

    def __add(self, msg):
        # No-op hook for an internal trace of manager operations.
        pass

    def begin(self, message):
        # Create the on-disk log structure and open the main + setup logs.
        self._files_path = self._config.create_html_test_log(message)
        self._main = HtmlMainLog(message, self._config)
        self._log_setup = HtmlSetupLog(message, config=self._config)
        self._current_log = self._log_setup
        self._main.begin(message)
        self._current_log.begin(message)
        self.__add("begin: " + message)

    @property
    def base_dir(self):
        # Root directory of this test's log files.
        return self._files_path

    def get_result(self):
        # Overall result: the worst of setup and all iterations.
        log_result = self._log_setup.get_result()
        for iteration in self._log_iterations:
            if log_result.value < iteration.get_result().value:
                log_result = iteration.get_result()
        return log_result

    def end(self):
        self._log_setup.end()
        self._main.end_setup_iteration(self._log_setup.get_result())
        log_result = self.get_result()
        self._main.end(log_result)
        self.__add("end")

    def add_build_info(self, message):
        self._main.add_build_info(escape(message))

    def start_iteration(self, message):
        message = escape(message)
        self._log_iterations.append(HtmlIterationLog(message, message, self._config))
        self._main.start_iteration(self._config.get_iteration_id())
        self._current_log = self._log_iterations[-1]
        self._current_log.begin(message)
        self._log_setup.start_iteration(message)
        self.__add("start_iteration: " + message)

    def end_iteration(self):
        # Close the active iteration, report its result to the main and
        # setup logs, and fall back to the setup log (which is returned).
        self._current_log.end()
        self._main.end_iteration(self._current_log.get_result())
        self._log_setup.end_iteration(self._current_log.get_result())
        self._current_log.iteration_closed = True
        self._current_log = self._log_setup
        self.__add("end_iteration: ")
        return self._current_log

    def debug(self, message):
        self._current_log.debug(escape(message))
        self.__add("debug: " + message)

    def info(self, message):
        self._current_log.info(escape(message))
        self.__add("info: " + message)

    def workaround(self, message):
        self._current_log.workaround(escape(message))
        # Trace labels fixed for consistency: these used to pass ": ",
        # ": " and "warning: " for workaround/warning/skip respectively.
        self.__add("workaround: " + message)

    def warning(self, message):
        self._current_log.warning(escape(message))
        self.__add("warning: " + message)

    def skip(self, message):
        self._current_log.skip(escape(message))
        self.__add("skip: " + message)

    def error(self, message):
        self._current_log.error(escape(message))
        self.__add("error: " + message)

    def blocked(self, message):
        self._current_log.blocked(escape(message))
        self.__add(f'blocked: {message}')
        # Severe results close all open groups so the HTML stays well-formed.
        self.end_all_groups()

    def exception(self, message):
        self._current_log.exception(escape(message))
        self.__add("exception: " + message)
        self.end_all_groups()

    def critical(self, message):
        self._current_log.critical(escape(message))
        self.__add("critical: " + message)
        self.end_all_groups()

    def start_group(self, message):
        self._current_log.start_group(escape(message))
        self.__add("start_group: " + message)

    def end_group(self):
        self._current_log.end_group()
        self.__add("end_group")

    def end_all_groups(self):
        # Close any still-open iterations (newest first), then every group
        # left open in the resulting current log.
        for iteration in reversed(self._log_iterations):
            if not iteration.iteration_closed:
                self.end_iteration()
        self._current_log.end_all_groups()

View File

@@ -0,0 +1,53 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_log import HtmlFileLog
from lxml.etree import Element
class HtmlMainLog(HtmlFileLog):
    """Main (sidebar) HTML log: test title, build info, iteration selector
    widgets and the overall test status.

    Note: a dead zero-argument ``end_iteration(self)`` stub that was
    immediately shadowed by the real definition has been removed.
    """

    def __init__(self, title, config):
        super().__init__(config.get_main_file_path(), title)
        self._config = config
        # Id of the iteration currently in progress (set by start_iteration).
        self.__current_iteration_id = None
        root = self.get_root()
        test_title_div = root.xpath('/html/body/div/div/div/div[@class="sidebar-test-title"]')[0]
        test_title_div.text = title
        self.__build_information_set = root.xpath(
            '/html/body/div/div/div/div[@id="sidebar-tested-build"]')[0]

    def add_build_info(self, message):
        # Append one line of build information to the sidebar.
        build_info = Element("div")
        build_info.text = message
        self.__build_information_set.append(build_info)

    def start_iteration(self, iteration_id):
        self.__current_iteration_id = iteration_id

    def end_iteration(self, iteration_result):
        # Add the finished iteration to the sidebar selector widgets.
        root = self.get_root()
        # Deliberately passed as the xpath result list; the config helper
        # indexes element [0] itself.
        iteration_selector_div = root.xpath('/html/body/div/div/div[@id="iteration-selector"]')
        iteration_selector_select = root.xpath(
            '/html/body/div/div/select[@id="sidebar-iteration-list"]')[0]
        self._config.end_iteration(iteration_selector_div,
                                   iteration_selector_select,
                                   self.__current_iteration_id,
                                   iteration_result)

    def end_setup_iteration(self, result):
        # Style the setup entry in the sidebar according to its result.
        root = self.get_root()
        iteration_selector_div = root.xpath('/html/body/div/div/div[@id="iteration-selector"]')[0]
        iteration_selector_select = root.xpath(
            '/html/body/div/div/select[@id="sidebar-iteration-list"]')[0]
        self._config.end_setup_iteration(iteration_selector_div, iteration_selector_select, result)

    def end(self, result):
        # Write the overall status into the sidebar and serialize the file.
        root = self.get_root()
        test_status_div = root.xpath('/html/body/div/div/div/div[@class="sidebar-test-status"]')
        self._config.end_main_log(test_status_div, result)
        super().end()

View File

@@ -0,0 +1,45 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.base_log import BaseLogResult
from lxml.etree import Element
from datetime import datetime
from log.presentation_policy import PresentationPolicy
from log.html_log_config import HtmlLogConfig
def std_log_entry(msg_id, msg, log_result, html_node):
    """Render one standard log step as an <li> appended to *html_node*.

    The <li> carries a result-specific CSS class; the timestamp wraps an
    <a name=msg_id> anchor so the error selector can jump to the step.
    """
    test_step = Element('li')
    style = 'test-step'
    if log_result != BaseLogResult.PASSED:
        style = f"{style} {HtmlLogConfig.STYLE[log_result]}"
    test_step.set('class', style)
    test_time = Element('div')
    test_time.set('class', 'ts-time')
    test_time_txt = Element('a', name=msg_id)
    # Wall-clock HH:MM:SS stamp of when the entry was rendered.
    time = datetime.now()
    test_time_txt.text = f"{str(time.hour).zfill(2)}:" \
                         f"{str(time.minute).zfill(2)}:{str(time.second).zfill(2)}"
    test_time.append(test_time_txt)
    test_step.append(test_time)
    test_msg = Element('div')
    test_msg.set('class', 'ts-msg')
    test_msg.text = msg
    test_step.append(test_msg)
    html_node.append(test_step)
def group_log_begin(msg_id, msg, html_node):
    """Open a log group: header <div> (with an <a name=msg_id> anchor)
    followed by a <ul> container, both appended to *html_node*.

    Returns (header_element, container_ul); subsequent steps of the group
    are rendered into the returned <ul>.
    """
    element = Element("div")
    sub_element = Element('a', name=msg_id)
    sub_element.text = msg
    element.append(sub_element)
    html_node.append(element)
    ul_set = Element('ul', id=f'ul_{msg_id}')
    html_node.append(ul_set)
    return element, ul_set


# Presentation policy producing the real HTML output.
html_policy = PresentationPolicy(std_log_entry, group_log_begin)

View File

@@ -0,0 +1,34 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from log.html_file_item_log import HtmlFileItemLog
from log.base_log import BaseLogResult
class HtmlSetupLog(HtmlFileItemLog):
    """Setup/summary HTML log; also records one summary line per iteration."""

    # Maps an iteration's final result onto the unbound HtmlFileItemLog
    # handler used to report it; entries are invoked as f(self, message).
    LOG_RESULT = {
        BaseLogResult.PASSED: HtmlFileItemLog.info,
        BaseLogResult.WORKAROUND: HtmlFileItemLog.workaround,
        BaseLogResult.WARNING: HtmlFileItemLog.warning,
        BaseLogResult.SKIPPED: HtmlFileItemLog.skip,
        BaseLogResult.FAILED: HtmlFileItemLog.error,
        BaseLogResult.BLOCKED: HtmlFileItemLog.blocked,
        BaseLogResult.EXCEPTION: HtmlFileItemLog.exception,
        BaseLogResult.CRITICAL: HtmlFileItemLog.critical}

    def __init__(self, test_title, config, iteration_title="Test summary"):
        html_file_path = config.get_setup_file_path()
        super().__init__(html_file_path, test_title, config, iteration_title)
        # Title of the iteration most recently started; logged on its end.
        self._last_iteration_title = ''

    def start_iteration(self, message):
        # Only remember the title; the summary line is written on end.
        self._last_iteration_title = message

    def end_iteration(self, iteration_result):
        # Write one summary line for the finished iteration, styled by the
        # result via the LOG_RESULT dispatch table.
        HtmlSetupLog.LOG_RESULT[iteration_result](self, self._last_iteration_title)

    def end(self):
        super().end()

View File

@@ -0,0 +1,220 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
import os
import sys
from contextlib import contextmanager
from datetime import datetime
from threading import Lock
import portalocker
from log.html_log_config import HtmlLogConfig
from log.html_log_manager import HtmlLogManager
from log.html_presentation_policy import html_policy
from test_utils.output import Output
from test_utils.singleton import Singleton
def create_log(log_base_path, test_module, additional_args=None):
    """Set up console logging and open the HTML log for a test module.

    Returns the initialized Log singleton. Any problem while composing the
    test name is reported into the log as an exception entry after the log
    has been opened.
    """
    Log.setup()
    html_cfg = HtmlLogConfig(base_dir=log_base_path,
                             presentation_policy=html_policy)
    log = Log(log_config=html_cfg)
    test_name = 'TestNameError'
    error_msg = None
    try:
        test_name = test_module
        if additional_args:
            test_name += f"__{'_'.join(additional_args)}"
    except Exception as ex:
        error_msg = f'Detected some problems during calculating test name: {ex}'
    finally:
        log.begin(test_name)
    main_log_path = os.path.join(log.base_dir, 'main.html')
    print(f"\n<LogFile>{main_log_path}</LogFile>")
    if error_msg:
        log.exception(error_msg)
    return log
class Log(HtmlLogManager, metaclass=Singleton):
    """Process-wide logging facade (singleton).

    Extends HtmlLogManager with console logging, command/output tracing to
    dut_info/commands.log, and helpers for gathering DUT logs and writing
    the machine-readable test summary.
    """

    logger = None
    LOG_FORMAT = '%(asctime)s %(levelname)s:\t%(message)s'
    DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
    # Monotonically increasing id correlating commands with their outputs.
    command_id = 0
    lock = Lock()

    @classmethod
    def destroy(cls):
        # Drop the singleton instance so the next Log(...) builds a new one.
        del cls._instances[cls]

    @classmethod
    def setup(cls):
        """Configure the root logger to stream DEBUG output to stdout."""
        # Get handle to root logger.
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        # Set paramiko log level to warning
        logging.getLogger('paramiko').setLevel(logging.WARNING)
        # Create Handlers.
        stdout_handler = logging.StreamHandler(sys.stdout)
        # Set logging level on handlers.
        stdout_handler.setLevel(logging.DEBUG)
        # Set log formatting on each handler.
        formatter = logging.Formatter(Log.LOG_FORMAT, Log.DATE_FORMAT)
        stdout_handler.setFormatter(formatter)
        # Attach handlers to root logger, replacing any existing ones.
        logger.handlers = []
        logger.addHandler(stdout_handler)
        cls.logger = logger
        logger.info("Logger successfully initialized.")

    @contextmanager
    def step(self, message):
        """Group log entries under a named step, echoed to serial/console.

        NOTE(review): the group is not closed when the body raises; severe
        results close groups via end_all_groups() instead - confirm that
        interaction before adding try/finally here.
        """
        self.step_info(message)
        super(Log, self).start_group(message)
        if Log.logger:
            Log.logger.info(message)
        yield
        super(Log, self).end_group()

    @contextmanager
    def group(self, message):
        """Group log entries without the serial/step announcement."""
        self.start_group(message)
        yield
        self.end_group()

    def add_build_info(self, msg):
        super(Log, self).add_build_info(msg)
        if Log.logger:
            Log.logger.info(msg)

    def info(self, msg):
        super(Log, self).info(msg)
        if Log.logger:
            Log.logger.info(msg)

    def debug(self, msg):
        super(Log, self).debug(msg)
        if Log.logger:
            Log.logger.debug(msg)

    def error(self, msg):
        super(Log, self).error(msg)
        if Log.logger:
            Log.logger.error(msg)

    def blocked(self, msg):
        super(Log, self).blocked(msg)
        if Log.logger:
            Log.logger.fatal(msg)

    def exception(self, msg):
        super(Log, self).exception(msg)
        if Log.logger:
            Log.logger.exception(msg)

    def critical(self, msg):
        super(Log, self).critical(msg)
        if Log.logger:
            Log.logger.fatal(msg)

    def workaround(self, msg):
        super(Log, self).workaround(msg)
        if Log.logger:
            Log.logger.warning(msg)

    def warning(self, msg):
        super(Log, self).warning(msg)
        if Log.logger:
            Log.logger.warning(msg)

    def get_new_command_id(self):
        # Hand out the next command id atomically. Using the lock as a
        # context manager guarantees release even if an exception hits the
        # critical section (the original acquire()/release() pair could
        # leak the lock).
        with self.lock:
            command_id = self.command_id
            self.command_id += 1
        return command_id

    def write_to_command_log(self, message):
        # Mirror to the HTML debug log, then append (under an inter-process
        # file lock) to dut_info/commands.log.
        super(Log, self).debug(message)
        command_log_path = os.path.join(self.base_dir, "dut_info", 'commands.log')
        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S:%f')
        with portalocker.Lock(command_log_path, "ab+") as command_log:
            line_to_write = f"[{timestamp}] {message}\n"
            command_log.write(line_to_write.encode())

    def write_command_to_command_log(self, command, command_id, info=None):
        added_info = "" if info is None else f"[{info}] "
        self.write_to_command_log(f"{added_info}Command id: {command_id}\n{command}")

    def write_output_to_command_log(self, output: Output, command_id):
        # Record the command's exit code / stdout / stderr, matched by id.
        if output is not None:
            line_to_write = f"Command id: {command_id}\n\texit code: {output.exit_code}\n" \
                            f"\tstdout: {output.stdout}\n" \
                            f"\tstderr: {output.stderr}\n\n\n"
            self.write_to_command_log(line_to_write)
        else:
            self.write_to_command_log(f"Command id: {command_id}\n\tNone output.")

    def step_info(self, step_name):
        # Announce a step on the DUT's serial console (best effort) and in
        # the command log.
        from core.test_run import TestRun
        decorator = "// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"
        message = f"\n\n\n{decorator}{step_name}\n\n{decorator}\n"
        try:
            serial_monitor = TestRun.plugin_manager.get_plugin("serial_monitor")
            serial_monitor.send_to_serial(message)
        except (KeyError, AttributeError):
            # Serial monitor plugin not configured - skip the serial echo.
            pass
        self.write_to_command_log(message)

    def get_additional_logs(self):
        # Collect system logs (plus any configured extras) from the DUT into
        # this test's dut_info directory; failures are logged, not raised.
        from core.test_run import TestRun
        from test_tools.fs_utils import check_if_file_exists
        messages_log = "/var/log/messages"
        if not check_if_file_exists(messages_log):
            messages_log = "/var/log/syslog"
        log_files = {"messages.log": messages_log,
                     "dmesg.log": "/tmp/dmesg"}
        extra_logs = TestRun.config.get("extra_logs", {})
        log_files.update(extra_logs)
        TestRun.executor.run(f"dmesg > {log_files['dmesg.log']}")
        for log_name, log_source_path in log_files.items():
            try:
                log_destination_path = os.path.join(
                    self.base_dir, "dut_info", TestRun.dut.ip, log_name
                )
                TestRun.executor.rsync_from(log_source_path, log_destination_path)
            except Exception as e:
                TestRun.LOGGER.warning(
                    f"There was a problem during gathering {log_name} log.\n{str(e)}"
                )

    def generate_summary(self, item, meta):
        # Dump a machine-readable run summary (info.json) for the pytest item.
        import json
        summary_path = os.path.join(self.base_dir, 'info.json')
        with open(summary_path, "w+") as summary:
            data = {
                'module': os.path.relpath(item.fspath, os.getcwd()),
                'function': item.name,
                'meta': meta,
                'status': self.get_result().name,
                'path': os.path.normpath(self.base_dir),
                'stage_status': {
                    'setup': getattr(item, "rep_setup", {}),
                    'call': getattr(item, "rep_call", {}),
                    'teardown': getattr(item, "rep_teardown", {})
                }
            }
            json.dump(data, summary)

View File

@@ -0,0 +1,21 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
class PresentationPolicy:
    """Pair of callbacks deciding how log entries are rendered.

    ``standard`` renders a single step; ``group_begin`` opens a group and
    returns its (header, container) pair.
    """

    def __init__(self, standard_log, group_begin_func):
        self.standard = standard_log
        self.group_begin = group_begin_func


def std_log_entry(msg_id, msg, log_result, html_node):
    """Default step renderer: discard the entry."""
    pass


def group_log_begin(msg_id, msg, html_node):
    """Default group opener: reuse the parent node as header and container."""
    return html_node, html_node


# Policy that renders nothing - used when no HTML output is wanted.
null_policy = PresentationPolicy(std_log_entry, group_log_begin)

View File

@@ -0,0 +1,35 @@
<!--
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
-->
<html>
<head>
<title>[title]</title>
<link rel="stylesheet" type="text/css" href="../main.css"/>
<script src="../main.js"></script>
<meta charset="UTF-8"/>
</head>
<body onload="onLoadDocument();">
<div class="floating">
<b>View: </b>
<select id="mode-selector" onchange="selectMode();">
<option style="background-color: white; color: black;" value="info">Info</option>
<option style="background-color: white; color: black" value="debug">Debug</option>
</select>
<b>Errors: </b>
<button onclick="previousError()">&lt;</button>
<select id="error-list-selector" onchange="errorSelected('error-list-selector')">
<option value="top" class="empty">-empty-</option>
</select>
<button onclick="nextError()">&gt;</button>
</div>
<br/>
<a name="top"><h1 class="iteration-title" style="border-bottom: 4px solid rgba(255, 0, 0, 1)">[title]</h1></a>
<div class="iteration-status">Iteration status: [status]</div>
<div class="iteration-execution-time">Execution time: [time] [s]</div>
<section class="iteration-chapters" id="iteration-chapters">
<h2>Groups:</h2>
</section>
</body>
</html>

View File

@@ -0,0 +1,37 @@
<!--
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
-->
<html>
<head>
<title>Setup</title>
<link rel="stylesheet" type="text/css" href="../main.css"/>
<meta charset="UTF-8"/>
<script src="../main.js"></script>
</head>
<body onload="onLoadDocument();">
<div class="floating">
    <b>View: </b>
    <select id="mode-selector" onchange="selectMode();">
        <option style="background-color: white; color: black;" value="info">Info</option>
        <option style="background-color: white; color: black" value="debug">Debug</option>
    </select>
    <b>Errors: </b>
    <!-- arrow labels must be escaped; a raw "<" inside text is invalid HTML -->
    <button onclick="previousError()">&lt;</button>
    <select id="error-list-selector" onchange="errorSelected('error-list-selector')">
        <option value="top">-empty-</option>
    </select>
    <button onclick="nextError()">&gt;</button>
</div>
<br/>
<a name="top">
    <h1 class="iteration-title" style="border-bottom: 4px solid rgba(255, 0, 0, 1)">Test summary</h1>
</a>
<div class="iteration-status">Iteration status: [STATUS]</div>
<div class="iteration-execution-time">Execution time: [time] [s]</div>
<section class="iteration-chapters" id="iteration-chapters">
    <h2>Groups:</h2>
</section>
</body>
</html>

View File

@@ -0,0 +1,383 @@
/*
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
*/
html, body {
margin: 0;
padding: 0;
background-color: #F0F0F0;
font-family: Calibri;
color: black;
}
div { display: block; }
h2 { margin: 0; padding: 0; }
h4 { margin: 0; padding: 0; }
div.meta-container {
margin-left: 502px;
min-width: 500px;
height: 100vh;
}
div.main-layaut {
float: right;
width: 100%;
background-color: #FDFDFD;
height: 100vh;
overflow-y: scroll;
overflow-x: hidden;
}
div.sidebar {
    float: left;
    width: 500px;
    height: 100vh;
    margin-left: -502px;
    border: 4px;
    background-color: #F0F0F0;
    text-align: center;
    color: white;
    /* overflow was declared twice with conflicting values; the later
       (winning) pair was hidden/hidden, which is kept here. */
    overflow-x: hidden;
    overflow-y: hidden;
}
div.sidebar-hide {
padding: 3px;
height: 20px;
margin: 5px auto;
font-family: Consolas;
font-weight: normal;
font-size: 15px;
color: white;
text-shadow: 1px 1px 3px black;
background-color: rgb(40,80,180);
cursor: default;
border: 2px solid silver;
border-radius: 25px;
}
/* fixed typo: "balck" -> "black" */
div.sidebar-show { color: black; height: 50%; }
div.sidebar-test { overflow-x: hidden; overflow-y: hidden;}
div.sidebar-test-title {
padding: 10px;
height: 40px;
margin: 5px auto;
background-color: rgb(40,80,180);
font-size: 100%;
border: 2px solid silver;
border-radius: 25px;
}
div.sidebar-test-status {
padding: 3px;
height: 20px;
background-color: green;
border: 2px solid silver;
border-radius: 25px;
}
div.sidebar-tested-build {
color: black;
border-radius: 25px;
width: 80%;
margin: 5px auto;
padding: 25px;
background-color: #F7F7F7;
border: 1px solid silver;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
text-align: left;
}
div.sidebar-test-iteration {
padding: 3px;
height: 20px;
margin: 5px auto;
font-family: Consolas;
font-weight: normal;
font-size: 15px;
color: white;
text-shadow: 1px 1px 3px black;
background-color: rgb(40,80,180);
cursor: default;
border: 2px solid silver;
border-radius: 25px;
}
.debug { display: none; }
select.sidebar-iteration-list {
margin: 5px auto;
background-color: white;
color: black;
width: 90%;
}
select.warning { background-color: #ff0; color: black; }
select.workaround { background-color: #fff8dc; color: black; }
select.skip { background-color: silver; color: black; }
select.fail { background-color: red; color: white; }
select.blocked { background-color: #7030a0; color: white; }
select.exception { background-color: #e29517; color: white; }
select.critical { background-color: #002060; color: white; }
option {
    background-color: green;
    color: white;
    margin: 2px;
}
option.warning { background-color: #ff0; color: black; }
option.workaround { background-color: #fff8dc; color: black; }
option.skip { background-color: silver; color: black; }
option.error { background-color: red; color: white; }
option.blocked { background-color: #7030a0; color: white; }
option.exception { background-color: #e29517; color: white; }
/* was "select.critical" - a duplicate of the rule defined earlier in the
   select.* group; this slot in the option.* group was clearly meant to
   style critical options. */
option.critical { background-color: #002060; color: white; }
a.iteration-selector {
border: 2px solid silver;
border-radius: 40px;
width: 36px;
height: 36px;
margin: 0;
padding: 0;
vertical-align: middle;
display: table-cell;
color: white;
background-color: green;
text-shadow: 0 0 3px black;
font-size: 20px;
font-weight: bold;
line-height: 1em;
text-align: center;
cursor: pointer;
}
a.warning { background-color: #ff0; }
a.workaround { background-color: #fff8dc; }
a.skip { background-color: silver; }
a.fail { background-color: red; }
a.exception { background-color: #e29517; }
a.blocked { background-color: #7030a0; }
a.critical { background-color: #002060; }
a.selected { border: 2px solid black; }
select.error-list-selector { background-color: silver; }
div.test-chapter-step {
margin: 4px auto;
border-style: solid;
border-color: #8CB9AE;
border-radius: 10px;
padding-left: 10px;
padding-right: 10px;
cursor: pointer;
}
div.sidebar-copyright {
position: absolute;
background-color: #DDD;
text-align: center;
padding: 4px;
color: #888;
bottom: 0;
font-size: 12px;
font-family: Consolas;
}
div.floating {
right: 0;
border: 3px solid silver;
width: 40%;
text-align: center;
vertical-align: top;
position: fixed;
background-color : #F0F0F0;
border-bottom: 1px solid #999;
z-index: 999;
color: #333;
box-shadow: 0 0px 6px gray;
}
h1 {
display: block;
font-size: 2em;
font-weight: bold;
}
div.iteration-selector {
margin: 5px auto;
}
div.iteration-status {
padding: 3px;
height: 20px;
background-color: green;
border: 2px solid silver;
border-radius: 25px;
color: white;
text-align: center;
}
h1.iteration-title { text-align: center; }
div.iteration-execution-time { text-align: center; }
section.iteration-chapters {
border-radius: 25px;
width: 80%;
margin: 10px auto;
padding: 25px;
background-color: #F7F7F7;
border: 1px solid silver;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
}
ul.iteration-content {
list-style-type: none;
border-left-color: green;
border-left-style: solid;
margin: 0px;
}
ul.warning { border-left-color: #ff0; }
ul.workaround { border-left-color: #fff8dc; }
ul.skip { border-left-color: silver; }
ul.fail { border-left-color: red; }
ul.blocked { border-left-color: #7030a0; }
ul.critical { border-left-color: #002060; }
ul.exception { border-left-color: #e29517; }
li.iteration-content {
border-color: rgba(192, 192, 192, 1);
background-color: rgba(238, 238, 238, 1);
display: block;
margin: 2px auto;
border: 1px solid #C0C0C0;
padding: 3px 6px;
font-family: Calibri;
font-size: 16px;
line-height: 1.15em;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
border-left-color: green;
border-left-style: solid;
word-break: break-all;
}
div.test-group-step {
color: black;
background-color: #8CB9AE;
border: 1px solid #5C8880;
font-size: 18px;
letter-spacing: 2px;
cursor: pointer;
margin: 4px;
border-radius: 10px;
padding-left: 10px;
padding-right: 10px;
overflow-wrap: break-word;
word-wrap: break-word;
word-break: break-all;
}
div.warning { background-color: #ff0; color: black; }
div.workaround { background-color: #fff8dc; color: black; }
div.skip { background-color: silver; color: black; }
div.fail { background-color: red; color: white; }
div.blocked { background-color: #7030a0; color: white; }
div.critical { background-color: #002060; color: white; }
div.exception { background-color: #e29517; color: white; }
a.top-marker { cursor: pointer; float: right; }
a.top-time-marker {
word-wrap: break-word;
float: right;
}
li.test-step {
color: black;
border-color: rgba(192, 192, 192, 1);
background-color: rgba(238, 238, 238, 1);
display: block;
margin: 4px auto;
border: 1px solid #C0C0C0;
padding: 3px 6px;
font-family: Calibri;
font-size: 16px;
line-height: 1.15em;
word-wrap: break-word;
word-break: break-all;
overflow: hidden;
border-left-color: green;
border-left-style: solid;
border-radius: 10px;
padding-left: 10px;
padding-right: 10px
}
li.warning { background-color: #ff0; border-left-color: #ff0; }
li.workaround { background-color: #fff8dc; border-left-color: #fff8dc; }
li.skip { background-color: silver; border-left-color: silver; }
li.fail {
background-color: red;
border-left-color: red;
color: white;
}
li.blocked {
background-color: #7030a0;
border-left-color: #7030a0;
color: white;
}
li.exception {
background-color: #e29517;
border-left-color: #e29517;
color: white;
}
li.critical {
background-color: #002060;
border-left-color: #002060;
color: white;
}
div.ts-iteration {
float: left;
margin: 2px auto;
border: 1px solid silver;
padding: 3px 3px;
text-align: center;
}
div.ts-total-time {
margin: 2px auto;
border: 1px solid silver;
padding: 3px 3px;
text-align: right;
}
div.ts-time {
float: left;
font-size: 12px;
margin: 2px auto;
border: 1px solid #A7A7A7;
padding: 3px 3px;
}
div.ts-msg {
font-size: 16px;
font-family: Courier;
margin: 2px auto;
border: 1px solid #A7A7A7;
padding: 3px 3px;
white-space: pre-wrap;
word-break: break-all;
}

View File

@@ -0,0 +1,44 @@
<!--
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
-->
<html>
<head>
<title>[test title]</title>
<link rel="stylesheet" type="text/css" href="main.css">
<meta charset="UTF-8"/>
</head>
<body>
<div class="meta-container">
    <div class="sidebar">
        <div class="sidebar-test" id="sidebar-test">
            <div class="sidebar-show" style="display:none" onclick="sidebarCtrl('sidebar-hide', 'sidebar-show')">&gt;&gt;</div>
            <div class="sidebar-test-title">Test title: </div>
            <div class="sidebar-test-status">Test status: </div>
            <div class="sidebar-tested-build" id="sidebar-tested-build">
                <h2>Build:</h2>
            </div>
            <!-- fixed: id attribute was missing its opening quote (id=sidebar-hide") -->
            <div class="sidebar-hide" id="sidebar-hide" onclick="sidebarCtrl('sidebar-hide', 'sidebar-show')">&lt;&lt;</div>
            <div class="sidebar-show" style="display:none" onclick="sidebarCtrl('sidebar-hide', 'sidebar-show')">&gt;&gt;</div>
        </div>
        <div class="sidebar-test-iteration">Executed iterations:</div>
        <select id="sidebar-iteration-list" class="sidebar-iteration-list" onchange="selectIterationFromSelect()" onclick="clickSelectIteration()">
            <option value="M">Setup</option>
        </select>
        <div id="iteration-selector">
            <a class="iteration-selector" onclick="selectIteration('M')">M</a>
        </div>
        <div class="sidebar-copyright" id="sidebar-copyright">
            SPDX-License-Identifier: BSD-3-Clause
            <br>
            <!-- fixed: entity was missing its terminating semicolon -->
            Copyright &#xa9; 2015-2021 Intel Corporation
        </div>
    </div>
    <div class="main-layaut">
        <iframe id="main-view" src="iterations/setup.html" width="100%" height="99%"></iframe>
    </div>
</div>
<script src="main.js"></script>
</body>
</html>

View File

@@ -0,0 +1,223 @@
/*
Copyright(c) 2019-2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
*/
/* Entry point wired to <body onload>: start every page in info-only view. */
function onLoadDocument() {
    hideDebug();
}
/* Switch between the info-only and full debug views according to the
   value chosen in the mode drop-down. */
function selectMode() {
    var mode = document.getElementById('mode-selector').value;
    if (mode.includes('info')) {
        hideDebug();
    } else {
        showDebug();
    }
}
/* Hide every <li> entry carrying the "debug" class.
   Fix: the loop index was an implicit global; declared with var. */
function hideDebug() {
    var debugTestStepArray = document.getElementsByTagName('li');
    for (var i = 0; i < debugTestStepArray.length; i++) {
        if (debugTestStepArray[i].className.includes('debug')) {
            debugTestStepArray[i].style.display = 'none';
        }
    }
}
/* Restore visibility of every <li> entry carrying the "debug" class.
   Fix: the loop index was an implicit global; declared with var. */
function showDebug() {
    var debugTestStepArray = document.getElementsByTagName('li');
    for (var i = 0; i < debugTestStepArray.length; i++) {
        if (debugTestStepArray[i].className.includes('debug')) {
            debugTestStepArray[i].style.display = '';
        }
    }
}
/* Toggle the sidebar between expanded and collapsed state.
   The collapsed state is detected by the 15px width set in hideSidebar(). */
function sidebarCtrl(ctrlHideId, ctrlShowClass) {
    var metaContainer = document.getElementsByClassName("meta-container")[0];
    var sidebar = document.getElementsByClassName('sidebar')[0];
    var sidebarTest = document.getElementById('sidebar-test');
    var ctrlHide = document.getElementById(ctrlHideId);
    var ctrlShowSet = document.getElementsByClassName(ctrlShowClass);
    if(sidebar.style.width.includes('15px')) {
        showSidebar(metaContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest);
    } else {
        hideSidebar(metaContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest);
    }
}
/* Expand the sidebar: clear all inline overrides set by hideSidebar() so
   the stylesheet values apply again, re-show sidebar children, and hide
   the ">>" expand controls. */
function showSidebar(mContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest) {
    sidebar.style.cursor = 'default';
    mContainer.style.marginLeft = '';
    sidebarTest.style.width = '';
    sidebarTest.style.height = '';
    sidebar.style.height = '';
    sidebar.style.marginLeft = '';
    sidebar.style.width = '';
    var i;
    for (i = 0; i < sidebarTest.children.length; i++) {
        sidebarTest.children[i].style.display = '';
    }
    document.getElementById('iteration-selector').style.display = '';
    document.getElementById('sidebar-iteration-list').style.display = '';
    document.getElementById('sidebar-copyright').style.display = '';
    for(i = 0; i < ctrlShowSet.length; i ++) {
        ctrlShowSet[i].style.display = 'none';
    }
}
/* Collapse the sidebar to a 15px strip: hide all of its content, show the
   ">>" expand controls, and shift the main container to reclaim the space.
   showSidebar() reverses every inline style set here. */
function hideSidebar(mContainer, sidebar, ctrlHide, ctrlShowSet, sidebarTest) {
    document.getElementById('iteration-selector').style.display = 'none';
    document.getElementById('sidebar-iteration-list').style.display = 'none';
    document.getElementById('sidebar-copyright').style.display = 'none';
    var i;
    for (i = 0; i < sidebarTest.children.length; i++) {
        sidebarTest.children[i].style.display = 'none';
    }
    sidebarTest.style.display = '';
    for(i = 0; i < ctrlShowSet.length; i ++) {
        ctrlShowSet[i].style.display = '';
        ctrlShowSet[i].style.color = 'black';
    }
    sidebar.style.width = '15px';
    sidebar.style.marginLeft = '-15px';
    sidebar.style.height = '100%';
    sidebarTest.style.height = '100%';
    sidebarTest.style.width = '100%';
    mContainer.style.marginLeft = '16px';
    sidebar.style.cursor = 'pointer';
}
/* Move the error selector to the previous error anchor, skipping the
   placeholder at index 0 and wrapping to the last entry. */
function previousError() {
    var selector = document.getElementById("error-list-selector");
    if (selector.length <= 1) {
        return;
    }
    var current = selector.selectedIndex;
    selector.selectedIndex = current > 1 ? current - 1 : selector.length - 1;
    errorSelected('error-list-selector');
}
/* Move the error selector to the next error anchor, wrapping back to the
   first real entry (index 1; index 0 is the placeholder). */
function nextError() {
    var selector = document.getElementById("error-list-selector");
    if (selector.length <= 1) {
        return;
    }
    var current = selector.selectedIndex;
    selector.selectedIndex = current + 1 < selector.length ? current + 1 : 1;
    errorSelected('error-list-selector');
}
/* onchange handler of the iteration drop-down: load the chosen iteration
   page and recolor the select to match its status. */
function selectIterationFromSelect() {
    var element = document.getElementById("sidebar-iteration-list");
    loadDocument(element.value);
    updateIterationSelector(element);
}
/* onclick handler of the iteration drop-down: recolor each option
   according to its status class so the open list is readable.
   Fix: `i` and `option` were implicit globals; declared with var.
   Also removed a stray trailing semicolon after the for block. */
function clickSelectIteration() {
    var element = document.getElementById("sidebar-iteration-list");
    for (var i = 0; i < element.length; i++) {
        var option = element[i];
        var cls = option.getAttribute('class');
        switch (cls) {
            case "warning":
                option.style.backgroundColor = "yellow";
                option.style.color = "black";
                break;
            case "skip":
                option.style.backgroundColor = "silver";
                option.style.color = "black";
                break;
            case "fail":
                option.style.backgroundColor = "red";
                option.style.color = "white";
                break;
            case "exception":
                option.style.backgroundColor = "blueviolet";
                option.style.color = "white";
                break;
            default:
                option.style.backgroundColor = "white";
                option.style.color = "black";
                break;
        }
    }
}
/* Programmatic iteration switch (used by the round selector buttons):
   load the iteration page and sync the drop-down to it. */
function selectIteration(iteration) {
    var selectElement = document.getElementById("sidebar-iteration-list");
    var docId = loadDocument(iteration);
    selectElement.selectedIndex = docId;
    updateIterationSelector(selectElement);
}
/* Point the main iframe at setup.html (fileId 'M') or at
   iterations/iteration_<id>.html with the id zero-padded to 3 digits.
   Returns the numeric iteration index (0 for setup) so callers can sync
   the iteration drop-down. */
function loadDocument(fileId) {
    var result = 0;
    if(fileId == 'M') {
        document.getElementById("main-view").src = "iterations/setup.html";
    } else {
        var id = pad(fileId, 3);
        document.getElementById("main-view").src = "iterations/iteration_" + id + ".html";
        result = parseInt(fileId);
    }
    return result;
}
/* Mirror the selected option's status class onto the <select> element so
   the closed drop-down shows the iteration's status color. */
function updateIterationSelector(element) {
    var optionClass = element[element.selectedIndex].getAttribute('class');
    var classes = "sidebar-iteration-list";
    if (optionClass != null) {
        classes += " " + optionClass;
    }
    element.setAttribute('class', classes);
}
/* Jump the page to the anchor stored in the selected option's value. */
function errorSelected(selectorId) {
    var selector = document.getElementById(selectorId);
    window.location.hash = selector.value;
}
/* Left-pad a string with zeros until it is `padding` characters long.
   Strings already at least that long are returned unchanged. */
function pad(strNumber, padding) {
    while (strNumber.length < padding) {
        strNumber = "0" + strNumber;
    }
    return strNumber;
}
/* Toggle the display of the element with the given id between hidden
   and its stylesheet default. */
function showHide(id) {
    var element = document.getElementById(id);
    element.style.display = (element.style.display == 'none') ? '' : 'none';
}
/* Toggle a nested chapter identified by a dotted id (e.g. "1.2.3").
   When expanding, every ancestor level ("1", "1.2", "1.2.3") is made
   visible first, then the page scrolls to the chapter anchor; when the
   chapter is already visible it is simply collapsed. */
function chapterClick(id) {
    var id_array = id.split('.');
    var node_id = "";
    var i = 0;
    var destinationElement = document.getElementById(id);
    if (destinationElement.style.display == 'none') {
        do {
            node_id += id_array[i];
            var ele = document.getElementById(node_id);
            ele.style.display = '';
            node_id += '.';
            i += 1;
        } while (i < id_array.length);
        window.location = '#' + id;
    } else {
        destinationElement.style.display = 'none';
    }
}

View File

@@ -0,0 +1,117 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from core.test_run import TestRun
from test_tools import disk_utils, fs_utils
from test_tools.disk_utils import get_device_filesystem_type, get_sysfs_path
from test_utils.io_stats import IoStats
from test_utils.size import Size, Unit
class Device:
    """Base wrapper around a block device addressed by a /dev path.

    All operations are delegated to disk_utils/fs_utils helpers, which run
    shell commands on the device under test via TestRun.executor.
    """
    def __init__(self, path):
        # Raises if the path is not an acceptable device path.
        disk_utils.validate_dev_path(path)
        self.path = path
        self.size = Size(disk_utils.get_size(self.get_device_id()), Unit.Byte)
        self.filesystem = get_device_filesystem_type(self.get_device_id())
        # Updated by mount()/is_mounted(); None while unmounted.
        self.mount_point = None
    def create_filesystem(self, fs_type: disk_utils.Filesystem, force=True, blocksize=None):
        """Create a filesystem on the device and record its type."""
        disk_utils.create_filesystem(self, fs_type, force, blocksize)
        self.filesystem = fs_type
    def wipe_filesystem(self, force=True):
        """Destroy the filesystem signature and clear the recorded type."""
        disk_utils.wipe_filesystem(self, force)
        self.filesystem = None
    def is_mounted(self):
        """Return True if findmnt knows the device; caches the mount point."""
        output = TestRun.executor.run(f"findmnt {self.path}")
        if output.exit_code != 0:
            return False
        else:
            # First line of findmnt output is the header; second is the entry.
            mount_point_line = output.stdout.split('\n')[1]
            device_path = fs_utils.readlink(self.path)
            # Mount point is everything before the resolved device path.
            self.mount_point = mount_point_line[0:mount_point_line.find(device_path)].strip()
            return True
    def mount(self, mount_point, options: [str] = None):
        """Mount the device, or raise if it is already mounted."""
        if not self.is_mounted():
            if disk_utils.mount(self, mount_point, options):
                self.mount_point = mount_point
        else:
            raise Exception(f"Device is already mounted! Actual mount point: {self.mount_point}")
    def unmount(self):
        """Unmount the device; logs instead of raising when not mounted."""
        if not self.is_mounted():
            TestRun.LOGGER.info("Device is not mounted.")
        elif disk_utils.unmount(self):
            self.mount_point = None
    def get_device_link(self, directory: str):
        """Return the first symlink to this device found under directory."""
        items = self.get_all_device_links(directory)
        return next(i for i in items if i.full_path.startswith(directory))
    def get_device_id(self):
        """Return the kernel device name (e.g. 'sda') behind self.path."""
        return fs_utils.readlink(self.path).split('/')[-1]
    def get_all_device_links(self, directory: str):
        """List every path under directory that resolves to this device."""
        # NOTE(review): local import shadows the module-level fs_utils import
        # and appears redundant - confirm before removing.
        from test_tools import fs_utils
        output = fs_utils.ls(f"$(find -L {directory} -samefile {self.path})")
        return fs_utils.parse_ls_output(output, self.path)
    def get_io_stats(self):
        """Return kernel I/O statistics for this device."""
        return IoStats.get_io_stats(self.get_device_id())
    def get_sysfs_property(self, property_name):
        """Read <sysfs>/queue/<property_name> for this device."""
        path = posixpath.join(disk_utils.get_sysfs_path(self.get_device_id()),
                              "queue", property_name)
        return TestRun.executor.run_expect_success(f"cat {path}").stdout
    def set_sysfs_property(self, property_name, value):
        """Write value into <sysfs>/queue/<property_name> for this device."""
        TestRun.LOGGER.info(
            f"Setting {property_name} for device {self.get_device_id()} to {value}.")
        path = posixpath.join(disk_utils.get_sysfs_path(self.get_device_id()), "queue",
                              property_name)
        fs_utils.write_file(path, str(value))
    def set_max_io_size(self, new_max_io_size: Size):
        """Set max_sectors_kb (value expressed in KiB)."""
        self.set_sysfs_property("max_sectors_kb",
                                int(new_max_io_size.get_value(Unit.KibiByte)))
    def get_max_io_size(self):
        """Return current max_sectors_kb as a Size."""
        return Size(int(self.get_sysfs_property("max_sectors_kb")), Unit.KibiByte)
    def get_max_hw_io_size(self):
        """Return hardware limit max_hw_sectors_kb as a Size."""
        return Size(int(self.get_sysfs_property("max_hw_sectors_kb")), Unit.KibiByte)
    def get_discard_granularity(self):
        """Return raw sysfs discard_granularity string."""
        return self.get_sysfs_property("discard_granularity")
    def get_discard_max_bytes(self):
        """Return raw sysfs discard_max_bytes string."""
        return self.get_sysfs_property("discard_max_bytes")
    def get_discard_zeroes_data(self):
        """Return raw sysfs discard_zeroes_data string."""
        return self.get_sysfs_property("discard_zeroes_data")
    def get_numa_node(self):
        """Return the NUMA node the device is attached to."""
        return int(TestRun.executor.run_expect_success(
            f"cat {get_sysfs_path(self.get_device_id())}/device/numa_node").stdout)
    def __str__(self):
        return (
            f'system path: {self.path}, short link: /dev/{self.get_device_id()},'
            f' filesystem: {self.filesystem}, mount point: {self.mount_point}, size: {self.size}'
        )
    def __repr__(self):
        return str(self)
    @staticmethod
    def get_scsi_debug_devices():
        """Return Device objects for all scsi_debug (fault-injection) devices."""
        scsi_debug_devices = TestRun.executor.run_expect_success(
            "lsscsi --scsi_id | grep scsi_debug").stdout
        return [Device(f'/dev/disk/by-id/scsi-{device.split()[-1]}')
                for device in scsi_debug_devices.splitlines()]

View File

@@ -0,0 +1,237 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import itertools
import json
import re
from datetime import timedelta
from enum import IntEnum
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools import disk_utils, fs_utils, nvme_cli
from test_utils import disk_finder
from test_utils.os_utils import wait
from test_utils.size import Unit
from test_tools.disk_utils import get_pci_address
class DiskType(IntEnum):
    # Integer values define the ordering used by DiskTypeSetBase comparison
    # operators and by DiskTypeLowerThan (types strictly below a reference).
    hdd = 0
    hdd4k = 1
    sata = 2
    nand = 3
    optane = 4
class DiskTypeSetBase:
    """Abstract requirement describing a set of acceptable disk types.

    Subclasses implement resolved() and types(). Two requirements compare
    by the minimal (slowest) disk type each one accepts.
    """

    def resolved(self):
        raise NotImplementedError()

    def types(self):
        raise NotImplementedError()

    def json(self):
        """Serialize the requirement as a JSON 'set' descriptor."""
        return json.dumps({
            "type": "set",
            "values": [t.name for t in self.types()]
        })

    def __lt__(self, other):
        mine, theirs = min(self.types()), min(other.types())
        return mine < theirs

    def __le__(self, other):
        mine, theirs = min(self.types()), min(other.types())
        return mine <= theirs

    def __eq__(self, other):
        mine, theirs = min(self.types()), min(other.types())
        return mine == theirs

    def __ne__(self, other):
        mine, theirs = min(self.types()), min(other.types())
        return mine != theirs

    def __gt__(self, other):
        mine, theirs = min(self.types()), min(other.types())
        return mine > theirs

    def __ge__(self, other):
        mine, theirs = min(self.types()), min(other.types())
        return mine >= theirs
class DiskTypeSet(DiskTypeSetBase):
    """Fixed, already-resolved collection of acceptable disk types."""

    def __init__(self, *args):
        # set(*args) keeps the original call convention: a single iterable
        # of DiskType values is expected.
        self.__types = set(*args)

    def resolved(self):
        # Nothing to look up - the set is given at construction time.
        return True

    def types(self):
        return self.__types
class DiskTypeLowerThan(DiskTypeSetBase):
    """Lazily-resolved requirement: every disk type strictly slower than the
    type of an already-configured disk, looked up by name in TestRun.disks."""

    def __init__(self, disk_name):
        self.__disk_name = disk_name

    def resolved(self):
        # Resolvable only once the referenced disk exists in the DUT config.
        return self.__disk_name in TestRun.disks

    def types(self):
        if not self.resolved():
            raise LookupError("Disk type not resolved!")
        reference_type = TestRun.disks[self.__disk_name].disk_type
        return {t for t in DiskType if t < reference_type}

    def json(self):
        """Serialize as a JSON 'lt' operator descriptor."""
        return json.dumps({
            "type": "operator",
            "name": "lt",
            "args": [self.__disk_name]
        })
class Disk(Device):
    """Physical disk: a Device with a type, serial number and partitions,
    plus hotplug support implemented by the NvmeDisk/SataDisk subclasses."""
    def __init__(
        self,
        path,
        disk_type: DiskType,
        serial_number,
        block_size,
    ):
        Device.__init__(self, path)
        self.serial_number = serial_number
        self.block_size = Unit(block_size)
        self.disk_type = disk_type
        # Populated by partition helpers; see remove_partition(s) below.
        self.partitions = []
    def create_partitions(
            self,
            sizes: [],
            partition_table_type=disk_utils.PartitionTable.gpt):
        """Create one partition per entry in sizes (GPT by default)."""
        disk_utils.create_partitions(self, sizes, partition_table_type)
    def remove_partition(self, part):
        """Remove a single partition object from the disk and from self.partitions."""
        part_number = int(part.path.split("part")[1])
        # (sic) "remove_parition" matches the helper's name in disk_utils.
        disk_utils.remove_parition(self, part_number)
        self.partitions.remove(part)
    def umount_all_partitions(self):
        """Lazy-unmount every partition node of this disk."""
        TestRun.LOGGER.info(
            f"Umounting all partitions from: {self.path}")
        cmd = f'umount -l {fs_utils.readlink(self.path)}*?'
        TestRun.executor.run(cmd)
    def remove_partitions(self):
        """Unmount and remove all partitions from the disk."""
        for part in self.partitions:
            if part.is_mounted():
                part.unmount()
        if disk_utils.remove_partitions(self):
            self.partitions.clear()
    def is_detected(self):
        """Return True if the OS currently sees this disk (by serial or path)."""
        if self.serial_number:
            serial_numbers = disk_finder.get_all_serial_numbers()
            return self.serial_number in serial_numbers
        elif self.path:
            output = fs_utils.ls_item(f"{self.path}")
            return fs_utils.parse_ls_output(output)[0] is not None
        raise Exception("Couldn't check if device is detected by the system")
    def wait_for_plug_status(self, should_be_visible):
        """Poll up to 1 minute until the disk reaches the requested visibility."""
        if not wait(lambda: should_be_visible == self.is_detected(),
                    timedelta(minutes=1),
                    timedelta(seconds=1)):
            raise Exception(f"Timeout occurred while trying to "
                            f"{'plug' if should_be_visible else 'unplug'} disk.")
    def plug(self):
        """Re-attach the disk using the subclass-specific plug command."""
        if self.is_detected():
            return
        TestRun.executor.run_expect_success(self.plug_command)
        self.wait_for_plug_status(True)
    def unplug(self):
        """Detach the disk using the subclass-specific unplug command."""
        if not self.is_detected():
            return
        TestRun.executor.run_expect_success(self.unplug_command)
        self.wait_for_plug_status(False)
    @staticmethod
    def plug_all_disks():
        """Rescan both NVMe (PCI) and SATA (SCSI hosts) buses."""
        TestRun.executor.run_expect_success(NvmeDisk.plug_all_command)
        TestRun.executor.run_expect_success(SataDisk.plug_all_command)
    def __str__(self):
        disk_str = f'system path: {self.path}, type: {self.disk_type.name}, ' \
                   f'serial: {self.serial_number}, size: {self.size}, ' \
                   f'block size: {self.block_size}, partitions:\n'
        for part in self.partitions:
            disk_str += f'\t{part}'
        return disk_str
    @staticmethod
    def create_disk(path,
                    disk_type: DiskType,
                    serial_number,
                    block_size):
        """Factory: NVMe wrapper for nand/optane, SATA wrapper otherwise."""
        if disk_type is DiskType.nand or disk_type is DiskType.optane:
            return NvmeDisk(path, disk_type, serial_number, block_size)
        else:
            return SataDisk(path, disk_type, serial_number, block_size)
class NvmeDisk(Disk):
    """NVMe disk: plug/unplug through the PCI bus, plus nvme-cli helpers."""
    # Rescanning the PCI bus re-attaches all removed NVMe devices.
    plug_all_command = "echo 1 > /sys/bus/pci/rescan"
    def __init__(self, path, disk_type, serial_number, block_size):
        Disk.__init__(self, path, disk_type, serial_number, block_size)
        self.plug_command = NvmeDisk.plug_all_command
        # Second form covers kernels where the PCI device node is one level deeper.
        self.unplug_command = f"echo 1 > /sys/block/{self.get_device_id()}/device/remove || " \
                              f"echo 1 > /sys/block/{self.get_device_id()}/device/device/remove"
        self.pci_address = get_pci_address(self.get_device_id())
    def __str__(self):
        disk_str = super().__str__()
        disk_str = f"pci address: {self.pci_address}, " + disk_str
        return disk_str
    def format_disk(self, metadata_size=None, block_size=None,
                    force=True, format_params=None, reset=True):
        """Low-level format via nvme-cli."""
        nvme_cli.format_disk(self, metadata_size, block_size, force, format_params, reset)
    def get_lba_formats(self):
        """Return LBA formats supported by the drive (nvme-cli)."""
        return nvme_cli.get_lba_formats(self)
    def get_lba_format_in_use(self):
        """Return the currently active LBA format (nvme-cli)."""
        return nvme_cli.get_lba_format_in_use(self)
class SataDisk(Disk):
    """SATA/SCSI disk: plug/unplug through the SCSI host scan/delete interface."""
    # Rescanning every SCSI host re-attaches all removed SATA devices.
    plug_all_command = "for i in $(find -H /sys/devices/ -path '*/scsi_host/*/scan' -type f); " \
                       "do echo '- - -' > $i; done;"

    def __init__(self, path, disk_type, serial_number, block_size):
        Disk.__init__(self, path, disk_type, serial_number, block_size)
        self.plug_command = SataDisk.plug_all_command
        self.unplug_command = \
            f"echo 1 > {self.get_sysfs_properties(self.get_device_id()).full_path}/device/delete"

    def get_sysfs_properties(self, device_id):
        """Locate the disk's sysfs node and return its parsed ls entry.

        Side effect: narrows self.plug_command from the rescan-everything
        command to a targeted scan of this disk's SCSI host.
        Raises Exception when the sysfs node or its SCSI address cannot be
        found/parsed (previously an unparseable address caused an opaque
        TypeError on subscripting a None match object).
        """
        ls_command = f"$(find -H /sys/devices/ -name {device_id} -type d)"
        output = fs_utils.ls_item(f"{ls_command}")
        sysfs_addr = fs_utils.parse_ls_output(output)[0]
        if not sysfs_addr:
            raise Exception(f"Failed to find sysfs address: ls -l {ls_command}")
        dirs = sysfs_addr.full_path.split('/')
        # Third-from-last path component is the SCSI address (H:C:T:L).
        scsi_address = dirs[-3]
        matches = re.search(
            r"^(?P<controller>\d+)[-:](?P<port>\d+)[-:](?P<target>\d+)[-:](?P<lun>\d+)$",
            scsi_address)
        if matches is None:
            raise Exception(
                f"Failed to parse SCSI address '{scsi_address}' for device {device_id}")
        controller_id = matches["controller"]
        port_id = matches["port"]
        target_id = matches["target"]
        lun = matches["lun"]
        host_path = "/".join(itertools.takewhile(lambda x: not x.startswith("host"), dirs))
        self.plug_command = f"echo '{port_id} {target_id} {lun}' > " \
                            f"{host_path}/host{controller_id}/scsi_host/host{controller_id}/scan"
        return sysfs_addr

View File

@@ -0,0 +1,66 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import posixpath
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.drbdadm import Drbdadm
from test_utils.filesystem.symlink import Symlink
from test_utils.output import CmdException
class Drbd(Device):
    """Wrapper around a DRBD resource managed through the drbdadm CLI.

    NOTE(review): Device.__init__ is deliberately not called here -
    self.path and the backing Device are only available after up(); confirm
    no Device attribute is accessed before that.
    """
    def __init__(self, config):
        # Validate that the resource exists in the DRBD configuration.
        if Drbdadm.dump_config(config.name).exit_code != 0:
            raise ValueError(f"Resource {config.name} not found")
        self.config = config
    def create_metadata(self, force):
        """Initialize DRBD metadata on the backing device."""
        return Drbdadm.create_metadata(self.config.name, force)
    def up(self):
        """Bring the resource up and return the resulting block Device."""
        output = Drbdadm.up(self.config.name)
        if output.exit_code != 0:
            raise CmdException(f"Failed to create {self.config.name} drbd instance")
        # Address the DRBD device through its stable by-id symlink.
        self.path = posixpath.join("/dev/disk/by-id/", posixpath.basename(self.config.device))
        self.symlink = Symlink.get_symlink(self.path, self.config.device, True)
        self.device = Device(self.path)
        return self.device
    def wait_for_sync(self):
        """Block until resynchronization finishes."""
        return Drbdadm.wait_for_sync(self.config.name)
    def is_in_sync(self):
        return Drbdadm.in_sync(self.config.name)
    def get_status(self):
        return Drbdadm.get_status(self.config.name)
    def set_primary(self, force=False):
        """Promote this node to primary for the resource."""
        return Drbdadm.set_node_primary(self.config.name, force)
    def down(self):
        """Stop the resource and remove the by-id symlink created in up()."""
        output = Drbdadm.down(self.config.name)
        if output.exit_code != 0:
            raise CmdException(f"Failed to stop {self.config.name} drbd instance")
        self.device = None
        self.symlink.remove(True, True)
    @staticmethod
    def down_all():
        """Stop all resources; 'no resources defined' is not an error."""
        try:
            Drbdadm.down_all()
        except CmdException as e:
            if "no resources defined" not in str(e):
                raise e
    @staticmethod
    def is_installed():
        """Return True when both the drbdadm tool and drbd module are present."""
        return TestRun.executor.run("which drbdadm && modinfo drbd").exit_code == 0

View File

@@ -0,0 +1,531 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import threading
from typing import Union
from api.cas.core import Core
from core.test_run import TestRun
from storage_devices.device import Device
from storage_devices.disk import Disk, NvmeDisk
from storage_devices.partition import Partition
from test_tools.fs_utils import readlink
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.filesystem.symlink import Symlink
from test_utils.size import Size
lvm_config_path = "/etc/lvm/lvm.conf"
filter_prototype_regex = r"^\sfilter\s=\s\["
types_prototype_regex = r"^\stypes\s=\s\["
global_filter_prototype_regex = r"^\sglobal_filter\s=\s\["
tab = "\\\\t"
class LvmConfiguration:
def __init__(
self,
lvm_filters: [] = None,
pv_num: int = None,
vg_num: int = None,
lv_num: int = None,
cache_num: int = None,
cas_dev_num: int = None
):
self.lvm_filters = lvm_filters
self.pv_num = pv_num
self.vg_num = vg_num
self.lv_num = lv_num
self.cache_num = cache_num
self.cas_dev_num = cas_dev_num
@staticmethod
def __read_definition_from_lvm_config(
prototype_regex: str
):
cmd = f"grep '{prototype_regex}' {lvm_config_path}"
output = TestRun.executor.run(cmd).stdout
return output
@classmethod
def __add_block_dev_to_lvm_config(
cls,
block_device_type: str,
number_of_partitions: int = 16
):
types_definition = cls.read_types_definition_from_lvm_config()
if types_definition:
if block_device_type in types_definition:
TestRun.LOGGER.info(f"Device type '{block_device_type}' already present in config")
return
TestRun.LOGGER.info(f"Add block device type to existing list")
new_type_prefix = f"types = [\"{block_device_type}\", {number_of_partitions}, "
config_update_cmd = f"sed -i 's/{types_prototype_regex}/\t{new_type_prefix}/g'" \
f" {lvm_config_path}"
else:
TestRun.LOGGER.info(f"Create new types variable")
new_types = f"types = [\"{block_device_type}\", {number_of_partitions}]"
characteristic_line = f"# Configuration option devices\\/sysfs_scan."
config_update_cmd = f"sed -i /'{characteristic_line}'/i\\ '{tab}{new_types}' " \
f"{lvm_config_path}"
TestRun.LOGGER.info(f"Adding {block_device_type} ({number_of_partitions} partitions) "
f"to supported types in {lvm_config_path}")
TestRun.executor.run(config_update_cmd)
@classmethod
def __add_filter_to_lvm_config(
cls,
filter: str
):
if filter is None:
TestRun.LOGGER.error(f"Lvm filter for lvm config not provided.")
filters_definition = cls.read_filter_definition_from_lvm_config()
if filters_definition:
if filter in filters_definition:
TestRun.LOGGER.info(f"Filter definition '{filter}' already present in config")
return
new_filter_formatted = filter.replace("/", "\\/")
new_filter_prefix = f"filter = [ \"{new_filter_formatted}\", "
TestRun.LOGGER.info(f"Adding filter to existing list")
config_update_cmd = f"sed -i 's/{filter_prototype_regex}/\t{new_filter_prefix}/g'" \
f" {lvm_config_path}"
else:
TestRun.LOGGER.info(f"Create new filter variable")
new_filter = f"filter = [\"{filter}\"]"
characteristic_line = f"# Configuration option devices\\/global_filter."
config_update_cmd = f"sed -i /'{characteristic_line}'/i\\ '{tab}{new_filter}' " \
f"{lvm_config_path}"
TestRun.LOGGER.info(f"Adding filter '{filter}' to {lvm_config_path}")
TestRun.executor.run(config_update_cmd)
@classmethod
def read_types_definition_from_lvm_config(cls):
return cls.__read_definition_from_lvm_config(types_prototype_regex)
@classmethod
def read_filter_definition_from_lvm_config(cls):
    """Return the current 'filter' definition from the LVM config file (if any)."""
    return cls.__read_definition_from_lvm_config(filter_prototype_regex)
@classmethod
def read_global_filter_definition_from_lvm_config(cls):
    """Return the current 'global_filter' definition from the LVM config file (if any)."""
    return cls.__read_definition_from_lvm_config(global_filter_prototype_regex)
@classmethod
def add_block_devices_to_lvm_config(
        cls,
        device_type: str
):
    """Whitelist a block device type (e.g. "cas", "nvme") in the LVM config.

    :param device_type: device type name to add to the 'types' variable
    """
    if device_type is None:
        TestRun.LOGGER.error(f"No device provided.")
        # Skip the update instead of passing None further down.
        return
    cls.__add_block_dev_to_lvm_config(device_type)
@classmethod
def add_filters_to_lvm_config(
        cls,
        filters: list
):
    """Add multiple device filters to the LVM config, one by one.

    :param filters: list of filter expressions in lvm.conf syntax
    """
    if filters is None:
        TestRun.LOGGER.error(f"Lvm filters for lvm config not provided.")
        # Iterating None would raise TypeError - nothing to do.
        return
    for f in filters:
        cls.__add_filter_to_lvm_config(f)
@classmethod
def configure_dev_types_in_config(
        cls,
        devices: ([Device], Device)
):
    """Whitelist the device types backing the given device(s) in the LVM config.

    Partitions are resolved to their parent devices; "cas" is registered
    when any device is a Core, "nvme" when any device is an NvmeDisk.
    """
    device_list = devices if isinstance(devices, list) else [devices]
    parents = [
        item.parent_device if isinstance(item, Partition) else item
        for item in device_list
    ]
    if any(isinstance(parent, Core) for parent in parents):
        cls.add_block_devices_to_lvm_config("cas")
    if any(isinstance(parent, NvmeDisk) for parent in parents):
        cls.add_block_devices_to_lvm_config("nvme")
@classmethod
def configure_filters(
        cls,
        lvm_filters: list,
        devices: ([Device], Device)
):
    """Apply the given LVM filters (if any), then whitelist the devices' types."""
    if lvm_filters:
        TestRun.LOGGER.info(f"Preparing configuration for LVMs - filters.")
        LvmConfiguration.add_filters_to_lvm_config(lvm_filters)

    cls.configure_dev_types_in_config(devices)
@staticmethod
def remove_global_filter_from_config():
    """Delete the 'global_filter' definition line from the LVM config file."""
    cmd = f"sed -i '/{global_filter_prototype_regex}/d' {lvm_config_path}"
    TestRun.executor.run(cmd)
@staticmethod
def remove_filters_from_config():
    """Delete the 'filter' definition line from the LVM config file."""
    cmd = f"sed -i '/{filter_prototype_regex}/d' {lvm_config_path}"
    TestRun.executor.run(cmd)
class VolumeGroup:
    """Represents an LVM volume group (VG) on the DUT."""

    # Counter used to generate unique VG names across the whole test run.
    __unique_vg_id = 0
    __lock = threading.Lock()

    def __init__(self, name: str = None):
        self.name = name

    def __eq__(self, other):
        # Equality is by VG name only.
        try:
            return self.name == other.name
        except AttributeError:
            # 'other' has no 'name' attribute - not comparable.
            return False

    @classmethod
    def __get_vg_name(cls, prefix: str = "vg"):
        # Thread-safe generation of a unique VG name (vg1, vg2, ...).
        with cls.__lock:
            cls.__unique_vg_id += 1
            return f"{prefix}{cls.__unique_vg_id}"

    @staticmethod
    def get_all_volume_groups():
        """Parse 'pvscan' output into a {vg_name: [pv_path, ...]} mapping.

        PVs that do not belong to any VG are grouped under the '' key.
        """
        output_lines = TestRun.executor.run(f"pvscan").stdout.splitlines()

        volume_groups = {}
        for line in output_lines:
            if "PV" not in line:
                continue

            line_elements = line.split()
            pv = line_elements[line_elements.index("PV") + 1]

            vg = ""
            if "VG" in line:
                vg = line_elements[line_elements.index("VG") + 1]

            if vg not in volume_groups:
                volume_groups[vg] = []
            volume_groups[vg].append(pv)

        return volume_groups

    @staticmethod
    def create_vg(vg_name: str, device_paths: str):
        """Create a VG with the given name on the given devices.

        :param device_paths: space-separated list of device paths
        :raises ValueError: when name or device paths are missing
        """
        if not vg_name:
            raise ValueError("Name needed for VG creation.")
        if not device_paths:
            raise ValueError("Device paths needed for VG creation.")

        cmd = f"vgcreate --yes {vg_name} {device_paths} "
        TestRun.executor.run_expect_success(cmd)

    @classmethod
    def is_vg_already_present(cls, dev_number: int, device_paths: str = None):
        """Return an existing VG made of exactly these devices, otherwise False.

        NOTE(review): membership uses a substring test ('pv in device_paths'),
        which may match partial path names (e.g. /dev/sda in /dev/sda1) - confirm.
        """
        if not device_paths:
            TestRun.LOGGER.exception("No devices provided.")

        volume_groups = cls.get_all_volume_groups()

        for vg in volume_groups:
            for pv in volume_groups[vg]:
                if len(volume_groups[vg]) == dev_number and pv in device_paths:
                    return cls(vg)

        # No exact match - warn if any of the devices is used by another VG.
        for vg in volume_groups:
            for pv in volume_groups[vg]:
                if pv in device_paths:
                    TestRun.LOGGER.warning(f"Some devices are used in other LVM volume group")
        return False

    @classmethod
    def create(cls, device_paths: str = None):
        """Create a VG with an auto-generated name and return it (None on failure)."""
        vg_name = cls.__get_vg_name()
        VolumeGroup.create_vg(vg_name, device_paths)

        volume_groups = VolumeGroup.get_all_volume_groups()
        if vg_name in volume_groups:
            return cls(vg_name)
        else:
            TestRun.LOGGER.error("Had not found newly created VG.")

    @staticmethod
    def remove(vg_name: str):
        """Remove the given VG; returns the command output.

        :raises ValueError: when the VG name is missing
        """
        if not vg_name:
            raise ValueError("Name needed for VG remove operation.")
        cmd = f"vgremove {vg_name}"
        return TestRun.executor.run(cmd)

    @staticmethod
    def get_logical_volumes_path(vg_name: str):
        """Return device-mapper paths of all LVs belonging to the given VG."""
        cmd = f"lvdisplay | grep /dev/{vg_name}/ | awk '{{print $3}}'"
        paths = TestRun.executor.run(cmd).stdout.splitlines()

        return paths
class Lvm(Disk):
    """Represents a single LVM logical volume (LV) on the DUT."""

    # Counter used to generate unique LV names across the whole test run.
    __unique_lv_id = 0
    __lock = threading.Lock()

    def __init__(
            self,
            path_dm: str,  # device mapper path
            volume_group: VolumeGroup,
            volume_name: str = None
    ):
        Device.__init__(self, resolve_to_by_id_link(path_dm))
        self.device_name = path_dm.split('/')[-1]
        self.volume_group = volume_group
        self.volume_name = volume_name

    def __eq__(self, other):
        # Two LVs are equal when device name, VG and LV name all match.
        try:
            return self.device_name == other.device_name and \
                self.volume_group == other.volume_group and \
                self.volume_name == other.volume_name
        except AttributeError:
            return False

    @classmethod
    def __get_unique_lv_name(cls, prefix: str = "lv"):
        # Thread-safe generation of a unique LV name (lv1, lv2, ...).
        with cls.__lock:
            cls.__unique_lv_id += 1
            return f"{prefix}{cls.__unique_lv_id}"

    @classmethod
    def __create(
            cls,
            name: str,
            volume_size_cmd: str,
            volume_group: VolumeGroup
    ):
        """Run lvcreate and return the newly discovered Lvm (None when not found)."""
        TestRun.LOGGER.info(f"Creating LV '{name}'.")
        cmd = f"lvcreate {volume_size_cmd} --name {name} {volume_group.name} --yes"
        TestRun.executor.run_expect_success(cmd)

        volumes = cls.discover_logical_volumes()
        for volume in volumes:
            if name == volume.volume_name:
                return volume

    @classmethod
    def configure_global_filter(
            cls,
            dev_first: Device,
            lv_amount: int,
            pv_devs: ([Device], Device)
    ):
        """Update lvm.conf 'global_filter' with 'r|<by-id link>|' entries for the PVs.

        Only done when more than one LV exists and the first device is a Core.
        """
        device_first = dev_first.parent_device if isinstance(dev_first, Partition) else dev_first
        if lv_amount > 1 and isinstance(device_first, Core):
            global_filter_def = LvmConfiguration.read_global_filter_definition_from_lvm_config()

            if not isinstance(pv_devs, list):
                pv_devs = [pv_devs]

            if global_filter_def:
                TestRun.LOGGER.info(f"Configure 'global filter' variable")
                links = []
                for pv_dev in pv_devs:
                    link = pv_dev.get_device_link("/dev/disk/by-id")
                    links.append(str(link))

                for link in links:
                    if link in global_filter_def:
                        TestRun.LOGGER.info(f"Global filter definition already contains '{link}'")
                        continue

                    # Escape '/' for use inside the sed substitution below.
                    new_link_formatted = link.replace("/", "\\/")
                    new_global_filter_prefix = f"global_filter = [ \"r|{new_link_formatted}|\", "

                    TestRun.LOGGER.info(f"Adding global filter '{link}' to existing list")
                    config_update_cmd = f"sed -i 's/{global_filter_prototype_regex}/\t" \
                                        f"{new_global_filter_prefix}/g' {lvm_config_path}"
                    TestRun.executor.run(config_update_cmd)
            else:
                # NOTE(review): 'global_filter' is reassigned on every loop pass,
                # so only the last PV's link ends up in the new variable - confirm
                # whether all PV links were intended to be accumulated here.
                for pv_dev in pv_devs:
                    link = pv_dev.get_device_link("/dev/disk/by-id")
                    global_filter = f"\"r|{link}|\""
                    global_filter += ", "
                global_filter = global_filter[:-2]

                TestRun.LOGGER.info(f"Create new 'global filter' variable")
                new_global = f"global_filter = [{global_filter}]"
                # Insert right before the documented 'types' option in lvm.conf.
                characteristic_line = f"# Configuration option devices\\/types."
                config_update_cmd = f"sed -i /'{characteristic_line}'/i\\ " \
                                    f"'{tab}{new_global}' {lvm_config_path}"
                TestRun.LOGGER.info(f"Adding global filter '{global_filter}' to {lvm_config_path}")
                TestRun.executor.run(config_update_cmd)

            TestRun.LOGGER.info(f"Remove 'filter' in order to 'global_filter' to be used")
            if LvmConfiguration.read_filter_definition_from_lvm_config():
                LvmConfiguration.remove_filters_from_config()

    @classmethod
    def create_specific_lvm_configuration(
            cls,
            devices: ([Device], Device),
            lvm_configuration: LvmConfiguration,
            lvm_as_core: bool = False
    ):
        """Create VGs/LVs according to the given configuration and return the LVs.

        Devices are divided evenly between VGs; each LV takes an equal
        percentage of its VG.
        """
        pv_per_vg = int(lvm_configuration.pv_num / lvm_configuration.vg_num)
        lv_per_vg = int(lvm_configuration.lv_num / lvm_configuration.vg_num)
        lv_size_percentage = int(100 / lv_per_vg)

        LvmConfiguration.configure_filters(lvm_configuration.lvm_filters, devices)

        logical_volumes = []

        for vg_iter in range(lvm_configuration.vg_num):
            if isinstance(devices, list):
                # Slice out this VG's share of the PV devices.
                pv_devs = []
                start_range = vg_iter * pv_per_vg
                end_range = start_range + pv_per_vg
                for i in range(start_range, end_range):
                    pv_devs.append(devices[i])
                device_first = devices[0]
            else:
                pv_devs = devices
                device_first = devices

            for j in range(lv_per_vg):
                lv = cls.create(lv_size_percentage, pv_devs)
                logical_volumes.append(lv)

            if lvm_as_core:
                cls.configure_global_filter(device_first, lv_per_vg, pv_devs)

        return logical_volumes

    @classmethod
    def create(
            cls,
            volume_size_or_percent: Union[Size, int],
            devices: ([Device], Device),
            name: str = None
    ):
        """Create an LV of the given absolute Size or VG percentage (int).

        Reuses an existing VG made of exactly these devices, otherwise
        creates a new one.
        """
        if isinstance(volume_size_or_percent, Size):
            size_cmd = f"--size {volume_size_or_percent.get_value()}B"
        elif isinstance(volume_size_or_percent, int):
            size_cmd = f"--extents {volume_size_or_percent}%VG"
        else:
            # NOTE(review): execution continues after this error and 'size_cmd'
            # stays unbound, which would raise NameError below - confirm intended.
            TestRun.LOGGER.error(f"Incorrect type of the first argument (volume_size_or_percent).")

        if not name:
            name = cls.__get_unique_lv_name()

        devices_paths = cls.get_devices_path(devices)
        dev_number = len(devices) if isinstance(devices, list) else 1

        vg = VolumeGroup.is_vg_already_present(dev_number, devices_paths)

        if not vg:
            vg = VolumeGroup.create(devices_paths)

        return cls.__create(name, size_cmd, vg)

    @staticmethod
    def get_devices_path(devices: ([Device], Device)):
        """Return resolved (symlink target) path(s), space-separated for a list."""
        if isinstance(devices, list):
            return " ".join([Symlink(dev.path).get_target() for dev in devices])
        else:
            return Symlink(devices.path).get_target()

    @classmethod
    def discover_logical_volumes(cls):
        """Return Lvm objects for all LVs in all VGs, activating them if needed."""
        vol_groups = VolumeGroup.get_all_volume_groups()
        volumes = []
        for vg in vol_groups:
            lv_discovered = VolumeGroup.get_logical_volumes_path(vg)
            if lv_discovered:
                for lv_path in lv_discovered:
                    cls.make_sure_lv_is_active(lv_path)
                    lv_name = lv_path.split('/')[-1]
                    volumes.append(
                        cls(
                            readlink(lv_path),
                            VolumeGroup(vg),
                            lv_name
                        )
                    )
            else:
                TestRun.LOGGER.info(f"No LVMs present in the system.")

        return volumes

    @classmethod
    def discover(cls):
        """Discover and return all LVs present on the system."""
        TestRun.LOGGER.info("Discover LVMs in system...")
        return cls.discover_logical_volumes()

    @staticmethod
    def remove(lv_name: str, vg_name: str):
        """Force-remove the given LV; returns the command output.

        :raises ValueError: when LV or VG name is missing
        """
        if not lv_name:
            raise ValueError("LV name needed for LV remove operation.")
        if not vg_name:
            raise ValueError("VG name needed for LV remove operation.")

        cmd = f"lvremove -f {vg_name}/{lv_name}"
        return TestRun.executor.run(cmd)

    @staticmethod
    def remove_pv(pv_name: str):
        """Remove the given physical volume; returns the command output.

        :raises ValueError: when the PV name is missing
        """
        if not pv_name:
            raise ValueError("Name needed for PV remove operation.")

        cmd = f"pvremove {pv_name}"
        return TestRun.executor.run(cmd)

    @classmethod
    def remove_all(cls):
        """Tear down all LVM objects: LVs first, then VGs, then PVs."""
        cmd = f"lvdisplay | grep 'LV Path' | awk '{{print $3}}'"
        lvm_paths = TestRun.executor.run(cmd).stdout.splitlines()
        for lvm_path in lvm_paths:
            lv_name = lvm_path.split('/')[-1]
            vg_name = lvm_path.split('/')[-2]
            cls.remove(lv_name, vg_name)

        cmd = f"vgdisplay | grep 'VG Name' | awk '{{print $3}}'"
        vg_names = TestRun.executor.run(cmd).stdout.splitlines()
        for vg_name in vg_names:
            # Deactivate the VG before removing it.
            TestRun.executor.run(f"vgchange -an {vg_name}")
            VolumeGroup.remove(vg_name)

        cmd = f"pvdisplay | grep 'PV Name' | awk '{{print $3}}'"
        pv_names = TestRun.executor.run(cmd).stdout.splitlines()
        for pv_name in pv_names:
            cls.remove_pv(pv_name)

        TestRun.LOGGER.info(f"Successfully removed all LVMs.")

    @staticmethod
    def make_sure_lv_is_active(lv_path: str):
        """Activate the LV (lvchange -ay) when lvscan reports it as inactive."""
        cmd = f"lvscan"
        output_lines = TestRun.executor.run_expect_success(cmd).stdout.splitlines()

        for line in output_lines:
            if "inactive " in line and lv_path in line:
                cmd = f"lvchange -ay {lv_path}"
                TestRun.executor.run_expect_success(cmd)

View File

@@ -0,0 +1,22 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from storage_devices.device import Device
from test_tools import disk_utils
from test_utils.size import Size
class Partition(Device):
    """A single partition, addressed through its parent device and number."""

    def __init__(self, parent_dev, type, number, begin: Size, end: Size):
        partition_path = disk_utils.get_partition_path(parent_dev.path, number)
        Device.__init__(self, partition_path)
        self.parent_device = parent_dev
        self.number = number
        self.type = type
        self.begin = begin
        self.end = end

    def __str__(self):
        return (f"\tsystem path: {self.path}, size: {self.size}, type: {self.type}, "
                f"parent device: {self.parent_device.path}\n")

View File

@@ -0,0 +1,182 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import threading
from enum import IntEnum, Enum
from core.test_run import TestRun
from storage_devices.device import Device
from storage_devices.disk import Disk
from test_tools.fs_utils import readlink
from test_tools.mdadm import Mdadm
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.size import Size, Unit
def get_devices_paths_string(devices: [Device]):
    """Return the devices' paths joined into one space-separated string."""
    paths = (device.path for device in devices)
    return " ".join(paths)
class Level(IntEnum):
    """Supported RAID levels; value is the md RAID level number."""
    Raid0 = 0
    Raid1 = 1
    Raid4 = 4
    Raid5 = 5
    Raid6 = 6
    Raid10 = 10
class StripSize(IntEnum):
    """RAID strip sizes; value expressed in KiB (Strip1M == 1024)."""
    Strip4K = 4
    Strip8K = 8
    Strip16K = 16
    Strip32K = 32
    Strip64K = 64
    Strip128K = 128
    Strip256K = 256
    Strip1M = 1024
class MetadataVariant(Enum):
    """RAID metadata format; non-legacy variants require a container (see Raid.create)."""
    Legacy = "legacy"
    Imsm = "imsm"
class RaidConfiguration:
    """Parameters describing a RAID volume to be created."""

    def __init__(
        self,
        level: Level = None,
        metadata: MetadataVariant = MetadataVariant.Imsm,
        number_of_devices: int = 0,
        size: Size = None,
        strip_size: StripSize = None,
        name: str = None,
    ):
        self.level = level
        self.metadata = metadata
        # 0 means "use all provided devices" (resolved in Raid.create()).
        self.number_of_devices = number_of_devices
        self.size = size
        self.strip_size = strip_size
        self.name = name
class Raid(Disk):
    """Represents an MD RAID volume (optionally nested in a metadata container)."""

    # Counter used to generate unique RAID names across the whole test run.
    __unique_id = 0
    __lock = threading.Lock()

    def __init__(
        self,
        path: str,
        level: Level,
        uuid: str,
        container_uuid: str = None,
        container_path: str = None,
        metadata: MetadataVariant = MetadataVariant.Imsm,
        array_devices: [Device] = None,
        volume_devices: [Device] = None,
    ):
        """
        :param path: RAID device path (e.g. /dev/md127)
        :param level: RAID level of the volume
        :param uuid: UUID identifying the array
        :param container_uuid: UUID of the enclosing container, if any
        :param container_path: path of the enclosing container, if any
        :param metadata: metadata variant the array was created with
        :param array_devices: devices of the whole array (defaults to volume devices)
        :param volume_devices: devices backing this volume
        """
        # Use None defaults instead of mutable [] defaults: a shared default
        # list would be stored on self and could leak state between instances.
        if volume_devices is None:
            volume_devices = []
        Device.__init__(self, resolve_to_by_id_link(path.replace("/dev/", "")))
        self.device_name = path.split('/')[-1]
        self.level = level
        self.uuid = uuid
        self.container_uuid = container_uuid
        self.container_path = container_path
        self.metadata = metadata
        self.array_devices = array_devices if array_devices else volume_devices.copy()
        self.volume_devices = volume_devices
        self.partitions = []
        self.__block_size = None

    def __eq__(self, other):
        # RAID identity is determined by UUID alone.
        try:
            return self.uuid == other.uuid
        except AttributeError:
            return False

    @property
    def block_size(self):
        # Lazily read and cache the logical block size from sysfs.
        if not self.__block_size:
            self.__block_size = Unit(int(self.get_sysfs_property("logical_block_size")))
        return self.__block_size

    def stop(self):
        """Stop this RAID volume and its container (if any)."""
        Mdadm.stop(self.path)
        if self.container_path:
            Mdadm.stop(self.container_path)

    @classmethod
    def discover(cls):
        """Return Raid objects for all RAID volumes found on the system."""
        TestRun.LOGGER.info("Discover RAIDs in system...")
        raids = []
        for raid in Mdadm.examine_result():
            raids.append(
                cls(
                    raid["path"],
                    Level[raid["level"]],
                    raid["uuid"],
                    raid["container"]["uuid"] if "container" in raid else None,
                    raid["container"]["path"] if "container" in raid else None,
                    MetadataVariant(raid["metadata"]),
                    [Device(d) for d in raid["array_devices"]],
                    [Device(d) for d in raid["devices"]]
                )
            )
        return raids

    @classmethod
    def create(
            cls,
            raid_configuration: RaidConfiguration,
            devices: [Device]
    ):
        """Create a RAID volume per the configuration and return a Raid object.

        :raises ValueError: when fewer devices are given than the config requires
        """
        import copy
        # Work on a copy so the caller's configuration object is not mutated.
        raid_conf = copy.deepcopy(raid_configuration)

        if not raid_conf.number_of_devices:
            raid_conf.number_of_devices = len(devices)
        elif len(devices) < raid_conf.number_of_devices:
            raise ValueError("RAID configuration requires at least "
                             f"{raid_conf.number_of_devices} devices")

        md_dir_path = "/dev/md/"
        array_devices = devices
        volume_devices = devices[:raid_conf.number_of_devices]

        if raid_conf.metadata != MetadataVariant.Legacy:
            # Non-legacy metadata (e.g. IMSM) needs a container created first.
            container_conf = RaidConfiguration(
                name=cls.__get_unique_name(raid_conf.metadata.value),
                metadata=raid_conf.metadata,
                number_of_devices=len(array_devices)
            )
            Mdadm.create(container_conf, get_devices_paths_string(array_devices))

        if not raid_conf.name:
            raid_conf.name = cls.__get_unique_name()

        Mdadm.create(raid_conf, get_devices_paths_string(volume_devices))

        raid_link = md_dir_path + raid_conf.name
        # Resolve symlinks to find the newly created array among examine results.
        raid = [r for r in Mdadm.examine_result() if readlink(r["path"]) == readlink(raid_link)][0]
        return cls(
            raid["path"],
            raid_conf.level,
            raid["uuid"],
            raid["container"]["uuid"] if "container" in raid else None,
            raid["container"]["path"] if "container" in raid else None,
            raid_conf.metadata,
            array_devices,
            volume_devices
        )

    @staticmethod
    def remove_all():
        """Stop all MD arrays on the system."""
        Mdadm.stop()

    @classmethod
    def __get_unique_name(cls, prefix: str = "Raid"):
        # Thread-safe generation of a unique array name (Raid1, Raid2, ...).
        with cls.__lock:
            cls.__unique_id += 1
            return f"{prefix}{cls.__unique_id}"

View File

@@ -0,0 +1,80 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools import disk_utils
from test_tools.fs_utils import ls, parse_ls_output
from test_utils.filesystem.symlink import Symlink
from test_utils.os_utils import reload_kernel_module, unload_kernel_module, is_kernel_module_loaded
from test_utils.size import Size, Unit
class RamDisk(Device):
    """Manages brd-based RAM block devices (/dev/ram*)."""

    # Kernel module providing the RAM disks.
    _module = "brd"

    @classmethod
    def create(cls, disk_size: Size, disk_count: int = 1):
        """(Re)load brd so that 'disk_count' RAM disks of 'disk_size' exist.

        :raises ValueError: when disk_count < 1
        :raises EnvironmentError: when the resulting configuration is wrong
        :return: list of RamDisk devices
        """
        if disk_count < 1:
            raise ValueError("Wrong number of RAM disks requested")

        TestRun.LOGGER.info("Configure RAM disks...")
        params = {
            # brd's rd_size module parameter is expressed in KiB.
            "rd_size": int(disk_size.get_value(Unit.KiB)),
            "rd_nr": disk_count
        }
        reload_kernel_module(cls._module, params)

        if not cls._is_configured(disk_size, disk_count):
            raise EnvironmentError(f"Wrong RAM disk configuration after loading '{cls._module}' "
                                   "module")

        return cls.list()

    @classmethod
    def remove_all(cls):
        """Unmount, unlink and unload all RAM disks (no-op when brd is not loaded)."""
        if not is_kernel_module_loaded(cls._module):
            return
        for ram_disk in cls._list_devices():
            TestRun.executor.run(f"umount {ram_disk.full_path}")
            link_path = posixpath.join("/dev/disk/by-id", ram_disk.name)
            try:
                link = Symlink.get_symlink(link_path=link_path, target=ram_disk.full_path)
                link.remove(force=True)
            except FileNotFoundError:
                # Symlink already gone - nothing to clean up.
                pass
        TestRun.LOGGER.info("Removing RAM disks...")
        unload_kernel_module(cls._module)

    @classmethod
    def list(cls):
        """Return RamDisk objects; by-id symlinks are created when missing."""
        ram_disks = []
        for ram_disk in cls._list_devices():
            link_path = posixpath.join("/dev/disk/by-id", ram_disk.name)
            link = Symlink.get_symlink(
                link_path=link_path, target=ram_disk.full_path, create=True
            )
            ram_disks.append(cls(link.full_path))

        return ram_disks

    @classmethod
    def _is_configured(cls, disk_size: Size, disk_count: int):
        # Verify the count and (MiB-aligned) size of the first present RAM disk
        # against the requested configuration.
        ram_disks = cls._list_devices()
        return (
            len(ram_disks) >= disk_count
            and Size(disk_utils.get_size(ram_disks[0].name), Unit.Byte).align_down(Unit.MiB.value)
            == disk_size.align_down(Unit.MiB.value)
        )

    @staticmethod
    def _list_devices():
        # 'ls /dev/ram*' - empty list when no RAM disk nodes exist.
        ls_ram_disks = ls("/dev/ram*")
        if "No such file or directory" in ls_ram_disks:
            return []
        return parse_ls_output(ls_ram_disks)

View File

@@ -0,0 +1,225 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import math
from aenum import IntFlag, Enum
from datetime import timedelta
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.filesystem.directory import Directory
from test_utils.os_utils import is_mounted, drop_caches, DropCachesMode
from test_utils.size import Size, Unit
# Mount point of the kernel debugfs, required by blktrace.
DEBUGFS_MOUNT_POINT = "/sys/kernel/debug"
# Prefix for blktrace output files passed via --output/--input.
PREFIX = "trace_"
# blkparse --format string; fields are split on '|' by Header.parse():
# action|command|rwbs|error|blocks|bytes|sector|seconds.nanoseconds
HEADER_FORMAT = "%a|%C|%d|%e|%n|%N|%S|%5T.%9t\\n"
class BlkTraceMask(IntFlag):
    """Event filters passed to blktrace as '-a <name>' arguments (see BlkTrace)."""
    read = 1
    write = 1 << 1
    flush = 1 << 2
    sync = 1 << 3
    queue = 1 << 4
    requeue = 1 << 5
    issue = 1 << 6
    complete = 1 << 7
    fs = 1 << 8
    pc = 1 << 9
    notify = 1 << 10
    ahead = 1 << 11
    meta = 1 << 12
    discard = 1 << 13
    drv_data = 1 << 14
    fua = 1 << 15
class ActionKind(Enum):
    """Single-character trace action codes parsed from the blkparse '%a' field."""
    IoDeviceRemap = "A"
    IoBounce = "B"
    IoCompletion = "C"
    IoToDriver = "D"
    IoFrontMerge = "F"
    GetRequest = "G"
    IoInsert = "I"
    IoMerge = "M"
    PlugRequest = "P"
    IoHandled = "Q"
    RequeueRequest = "R"
    SleepRequest = "S"
    TimeoutUnplug = "T"  # old version of TimerUnplug
    UnplugRequest = "U"
    TimerUnplug = "UT"
    Split = "X"
class RwbsKind(IntFlag):
    """Flags decoded from the RWBS field of a blktrace header."""
    Undefined = 0
    R = 1  # Read
    W = 1 << 1  # Write
    D = 1 << 2  # Discard
    F = 1 << 3  # Flush
    S = 1 << 4  # Synchronous
    M = 1 << 5  # Metadata
    A = 1 << 6  # Read Ahead
    N = 1 << 7  # None of the above

    def __str__(self):
        # Keep the historical label order: read, write, discard, flush,
        # sync, metadata, readahead, none.
        labels = (
            (RwbsKind.R, "read"),
            (RwbsKind.W, "write"),
            (RwbsKind.D, "discard"),
            (RwbsKind.F, "flush"),
            (RwbsKind.S, "sync"),
            (RwbsKind.M, "metadata"),
            (RwbsKind.A, "readahead"),
            (RwbsKind.N, "none"),
        )
        return "|".join(label for flag, label in labels if self & flag)
class BlkTrace:
    """Wrapper around blktrace/blkparse for monitoring I/O on a single device."""

    def __init__(self, device: Device, *masks: BlkTraceMask):
        """
        :param device: device to trace (required)
        :param masks: optional blktrace '-a' event filters
        :raises Exception: when no device is given
        """
        self._mount_debugfs()
        if device is None:
            raise Exception("Device not provided")
        self.device = device
        # Rendered as repeated '-a <mask>' blktrace arguments.
        self.masks = "" if not masks else f' -a {" -a ".join([m.name for m in masks])}'
        # -1 means "not currently monitoring".
        self.blktrace_pid = -1
        self.__outputDirectoryPath = None

    @staticmethod
    def _mount_debugfs():
        # blktrace requires debugfs to be mounted.
        if not is_mounted(DEBUGFS_MOUNT_POINT):
            TestRun.executor.run_expect_success(f"mount -t debugfs none {DEBUGFS_MOUNT_POINT}")

    def start_monitoring(self, buffer_size: Size = None, number_of_subbuffers: int = None):
        """Start blktrace in the background.

        :raises Exception: when monitoring is already running
        """
        if self.blktrace_pid != -1:
            raise Exception(f"blktrace already running with PID: {self.blktrace_pid}")

        self.__outputDirectoryPath = Directory.create_temp_directory().full_path

        drop_caches(DropCachesMode.ALL)

        number_of_subbuffers = ("" if number_of_subbuffers is None
                                else f" --num-sub-buffers={number_of_subbuffers}")
        buffer_size = ("" if buffer_size is None
                       else f" --buffer-size={buffer_size.get_value(Unit.KibiByte)}")
        command = (f"blktrace{number_of_subbuffers}{buffer_size} --dev={self.device.path}"
                   f"{self.masks} --output={PREFIX} --output-dir={self.__outputDirectoryPath}")
        echo_output = TestRun.executor.run_expect_success(
            f"nohup {command} </dev/null &>{self.__outputDirectoryPath}/out & echo $!"
        )
        # 'echo $!' prints the PID of the backgrounded blktrace process.
        self.blktrace_pid = int(echo_output.stdout)
        TestRun.LOGGER.info(f"blktrace monitoring for device {self.device.path} started"
                            f" (PID: {self.blktrace_pid}, output dir: {self.__outputDirectoryPath}")

    def stop_monitoring(self):
        """Stop blktrace and return the parsed, timestamp-sorted headers.

        :raises Exception: when monitoring was never started
        """
        if self.blktrace_pid == -1:
            raise Exception("PID for blktrace is not set - has monitoring been started?")

        drop_caches(DropCachesMode.ALL)

        # SIGINT makes blktrace flush and terminate cleanly.
        TestRun.executor.run_expect_success(f"kill -s SIGINT {self.blktrace_pid}")
        self.blktrace_pid = -1

        # dummy command for swallowing output of killed command
        TestRun.executor.run("sleep 2 && echo dummy")
        TestRun.LOGGER.info(f"blktrace monitoring for device {self.device.path} stopped")

        return self.__parse_blktrace_output()

    def __parse_blktrace_output(self):
        # Run blkparse over the collected traces and convert each line
        # into a Header object.
        TestRun.LOGGER.info(f"Parsing blktrace headers from {self.__outputDirectoryPath}... "
                            "Be patient")
        command = (f'blkparse --input-dir={self.__outputDirectoryPath} --input={PREFIX} '
                   f'--format="{HEADER_FORMAT}"')
        blkparse_output = TestRun.executor.run_expect_success(
            command, timeout=timedelta(minutes=60)
        )
        parsed_headers = []
        for line in blkparse_output.stdout.splitlines():
            # At the end per-cpu summary is posted - there is no need for it now
            if line.startswith('CPU'):
                break

            header = Header.parse(line)
            if header is None:
                continue
            parsed_headers.append(header)
        TestRun.LOGGER.info(
            f"Parsed {len(parsed_headers)} blktrace headers from {self.__outputDirectoryPath}"
        )
        parsed_headers.sort(key=lambda x: x.timestamp)
        return parsed_headers
class Header:
    """One I/O event parsed from a blkparse output line (see HEADER_FORMAT)."""

    def __init__(self):
        # Fields stay None/Undefined until filled in by parse().
        self.action = None
        self.block_count = None
        self.byte_count = None
        self.command = None
        self.error_value = None
        self.rwbs = RwbsKind.Undefined
        self.sector_number = None
        self.timestamp = None

    @staticmethod
    def parse(header_line: str):
        """Parse one blkparse line; return a Header, or None for unusable lines."""
        # messages/notifies are not formatted according to --format
        # so should be ignored (or parsed using standard format):
        if "m N" in header_line:
            return None

        header_fields = header_line.split('|')
        if len(header_fields) != 8:
            return None

        # Last field is "seconds.nanoseconds"; nanoseconds default to 0.
        timestamp_fields = header_fields[7].split('.')
        timestamp_nano = int(timestamp_fields[-1]) if len(timestamp_fields) == 2 else 0

        header = Header()
        header.action = ActionKind(header_fields[0])
        header.command = header_fields[1]
        if len(header_fields[2]):
            # RWBS letters (e.g. "WS") are mapped onto RwbsKind flags by name.
            header.rwbs = RwbsKind['|'.join(list(header_fields[2]))]
        header.error_value = int(header_fields[3])
        header.block_count = int(header_fields[4])
        header.byte_count = int(header_fields[5])
        header.sector_number = int(header_fields[6])
        # Timestamp in nanoseconds (float, due to math.pow).
        header.timestamp = int(timestamp_fields[0]) * math.pow(10, 9) + timestamp_nano
        return header

    def __str__(self):
        # Include only the fields carrying a truthy value.
        ret = []
        if self.action:
            ret.append(f"action: {self.action.name}")
        if self.block_count:
            ret.append(f"block_count: {self.block_count}")
        if self.byte_count:
            ret.append(f"byte_count: {self.byte_count}")
        if self.command:
            ret.append(f"command: {self.command}")
        if self.error_value:
            ret.append(f"error_value: {self.error_value}")
        if self.rwbs:
            ret.append(f"rwbs: {self.rwbs}")
        if self.sector_number:
            ret.append(f"sector_number: {self.sector_number}")
        if self.timestamp:
            ret.append(f"timestamp: {self.timestamp}")
        return " ".join(ret)

View File

@@ -0,0 +1,882 @@
#!/bin/bash
#
# The BSD License (http://www.opensource.org/licenses/bsd-license.php)
# specifies the terms and conditions of use for checksec.sh:
#
# Copyright (c) 2009-2011, Tobias Klein.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Tobias Klein nor the name of trapkit.de may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# Name : checksec.sh
# Version : 1.5
# Author : Tobias Klein
# Date : November 2011
# Download: http://www.trapkit.de/tools/checksec.html
# Changes : http://www.trapkit.de/tools/checksec_changes.txt
#
# Description:
#
# Modern Linux distributions offer some mitigation techniques to make it
# harder to exploit software vulnerabilities reliably. Mitigations such
# as RELRO, NoExecute (NX), Stack Canaries, Address Space Layout
# Randomization (ASLR) and Position Independent Executables (PIE) have
# made reliably exploiting any vulnerabilities that do exist far more
# challenging. The checksec.sh script is designed to test what *standard*
# Linux OS and PaX (http://pax.grsecurity.net/) security features are being
# used.
#
# As of version 1.3 the script also lists the status of various Linux kernel
# protection mechanisms.
#
# Credits:
#
# Thanks to Brad Spengler (grsecurity.net) for the PaX support.
# Thanks to Jon Oberheide (jon.oberheide.org) for the kernel support.
# Thanks to Ollie Whitehouse (Research In Motion) for rpath/runpath support.
#
# Others that contributed to checksec.sh (in no particular order):
#
# Simon Ruderich, Denis Scherbakov, Stefan Kuttler, Radoslaw Madej,
# Anthony G. Basile, Martin Vaeth and Brian Davis.
#
# global vars
have_readelf=1   # presumably cleared when readelf is unavailable - verify against later checks
verbose=false

# FORTIFY_SOURCE vars
FS_end=_chk      # suffix of fortified libc symbols (e.g. __strcpy_chk) - TODO confirm usage
FS_cnt_total=0   # counters filled in by the FORTIFY_SOURCE checks below
FS_cnt_checked=0
FS_cnt_unchecked=0
FS_chk_func_libc=0
FS_functions=0
FS_libc=0
# version information
# Prints the banner line followed by a blank line.
version() {
  cat <<'EOF'
checksec v1.5, Tobias Klein, www.trapkit.de, November 2011

EOF
}
# help
# Prints the usage text listing all supported command-line options.
help() {
  echo "Usage: checksec [OPTION]"
  echo
  echo "Options:"
  echo
  echo " --file <executable-file>"
  echo " --dir <directory> [-v]"
  echo " --proc <process name>"
  echo " --proc-all"
  echo " --proc-libs <process ID>"
  echo " --kernel"
  echo " --fortify-file <executable-file>"
  echo " --fortify-proc <process ID>"
  echo " --version"
  echo " --help"
  echo
  echo "For more information, see:"
  echo " http://www.trapkit.de/tools/checksec.html"
  echo
}
# check if command exists
# Returns 0 when $1 resolves to a command/builtin/function, non-zero otherwise.
command_exists () {
  # quote "$1" so empty or space-containing arguments behave predictably
  type "$1" > /dev/null 2>&1;
}
# check if directory exists
# check if directory exists
# Returns 0 when $1 names an existing directory.
# Fix: quote the argument — unquoted, an empty $1 collapsed the test to
# '[ -d ]', which is true; paths with spaces also broke the test.
dir_exists () {
  [ -d "$1" ]
}
# check user privileges
# check user privileges: succeed (0) only when the effective UID is root
root_privs () {
  [ "$(/usr/bin/id -u)" -eq 0 ]
}
# check if input is numeric
# check if input consists only of digits (note: empty input passes)
isNumeric () {
  printf '%s\n' "$*" | grep -qv '[^0-9]'
}
# check if input is a string
# check if input consists only of ASCII letters (note: empty input passes)
isString () {
  printf '%s\n' "$*" | grep -qv '[^A-Za-z]'
}
# check file(s)
filecheck() {
# For the ELF file in $1, print one colored status column each for:
# RELRO, stack canary, NX, PIE, RPATH, RUNPATH (no trailing newline).
# check for RELRO support
if readelf -l $1 2>/dev/null | grep -q 'GNU_RELRO'; then
if readelf -d $1 2>/dev/null | grep -q 'BIND_NOW'; then
echo -n -e '\033[32mFull RELRO   \033[m   '
else
echo -n -e '\033[33mPartial RELRO\033[m   '
fi
else
echo -n -e '\033[31mNo RELRO     \033[m   '
fi
# check for stack canary support
if readelf -s $1 2>/dev/null | grep -q '__stack_chk_fail'; then
echo -n -e '\033[32mCanary found   \033[m   '
else
echo -n -e '\033[31mNo canary found\033[m   '
fi
# check for NX support
# an executable GNU_STACK segment (RWE) means NX is off for this binary
if readelf -W -l $1 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
echo -n -e '\033[31mNX disabled\033[m   '
else
echo -n -e '\033[32mNX enabled \033[m   '
fi
# check for PIE support
# heuristic: ET_DYN with a (DEBUG) dynamic entry is a PIE executable,
# ET_DYN without it is treated as a plain shared object (DSO)
if readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
echo -n -e '\033[31mNo PIE       \033[m   '
elif readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
if readelf -d $1 2>/dev/null | grep -q '(DEBUG)'; then
echo -n -e '\033[32mPIE enabled  \033[m   '
else
echo -n -e '\033[33mDSO          \033[m   '
fi
else
echo -n -e '\033[33mNot an ELF file\033[m   '
fi
# check for rpath / run path
if readelf -d $1 2>/dev/null | grep -q 'rpath'; then
echo -n -e '\033[31mRPATH    \033[m  '
else
echo -n -e '\033[32mNo RPATH \033[m  '
fi
if readelf -d $1 2>/dev/null | grep -q 'runpath'; then
echo -n -e '\033[31mRUNPATH  \033[m  '
else
echo -n -e '\033[32mNo RUNPATH\033[m  '
fi
}
# check process(es)
proccheck() {
# For the process whose /proc/<pid> directory is $1, print colored status
# columns for RELRO, stack canary, PaX/NX and PIE of its /proc/<pid>/exe.
# check for RELRO support
# 'Program Headers' missing usually means readelf could not read the exe
# (insufficient privileges), so bail out with an explanation
if readelf -l $1/exe 2>/dev/null | grep -q 'Program Headers'; then
if readelf -l $1/exe 2>/dev/null | grep -q 'GNU_RELRO'; then
if readelf -d $1/exe 2>/dev/null | grep -q 'BIND_NOW'; then
echo -n -e '\033[32mFull RELRO   \033[m   '
else
echo -n -e '\033[33mPartial RELRO  \033[m  '
fi
else
echo -n -e '\033[31mNo RELRO     \033[m   '
fi
else
echo -n -e '\033[31mPermission denied (please run as root)\033[m\n'
exit 1
fi
# check for stack canary support
if readelf -s $1/exe 2>/dev/null | grep -q 'Symbol table'; then
if readelf -s $1/exe 2>/dev/null | grep -q '__stack_chk_fail'; then
echo -n -e '\033[32mCanary found   \033[m   '
else
echo -n -e '\033[31mNo canary found  \033[m '
fi
else
# PID 1 is assumed readable; otherwise missing symbols likely means
# a permission problem rather than a stripped binary
if [ "$1" != "1" ] ; then
echo -n -e '\033[33mPermission denied  \033[m '
else
echo -n -e '\033[33mNo symbol table found\033[m '
fi
fi
# first check for PaX support
# single flag characters are cut from the 'PaX:' line of /proc/<pid>/status;
# upper case = enabled, lower case = disabled (PaX convention)
if cat $1/status 2> /dev/null | grep -q 'PaX:'; then
pageexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b6) )
segmexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b10) )
mprotect=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b8) )
randmmap=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b9) )
if [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[32mPaX enabled\033[m   '
elif [[ "$pageexec" = "p" && "$segmexec" = "s" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[33mPaX ASLR only\033[m '
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[33mPaX mprot off \033[m'
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "r" ]] ; then
echo -n -e '\033[33mPaX ASLR off\033[m  '
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "r" ]] ; then
echo -n -e '\033[33mPaX NX only\033[m   '
else
echo -n -e '\033[31mPaX disabled\033[m  '
fi
# fallback check for NX support
elif readelf -W -l $1/exe 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
echo -n -e '\033[31mNX disabled\033[m   '
else
echo -n -e '\033[32mNX enabled \033[m   '
fi
# check for PIE support
if readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
echo -n -e '\033[31mNo PIE               \033[m   '
elif readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
if readelf -d $1/exe 2>/dev/null | grep -q '(DEBUG)'; then
echo -n -e '\033[32mPIE enabled          \033[m   '
else
echo -n -e '\033[33mDynamic Shared Object\033[m   '
fi
else
echo -n -e '\033[33mNot an ELF file      \033[m   '
fi
}
# check mapped libraries
libcheck() {
# List every distinct ELF file mapped by process $1 (PID) and run
# filecheck on each of them.
# column 6 of /proc/<pid>/maps is the mapped pathname; 'grep /' drops
# anonymous mappings, 'file | grep ELF' drops non-ELF files
libs=( $(awk '{ print $6 }' /proc/$1/maps | grep '/' | sort -u | xargs file | grep ELF | awk '{ print $1 }' | sed 's/:/ /') )
printf "\n* Loaded libraries (file information, # of mapped files: ${#libs[@]}):\n\n"
for element in $(seq 0 $((${#libs[@]} - 1)))
do
echo "  ${libs[$element]}:"
echo -n "    "
filecheck ${libs[$element]}
printf "\n\n"
done
}
# check for system-wide ASLR support
aslrcheck() {
# Report system-wide ASLR status: prefer the PaX flags of PID 1 when
# available, otherwise fall back to kernel.randomize_va_space.
# PaX ASLR support
# if even the 'Name:' line of /proc/1/status is unreadable, we lack the
# privileges for the PaX check and say so
if !(cat /proc/1/status 2> /dev/null | grep -q 'Name:') ; then
echo -n -e ':\033[33m insufficient privileges for PaX ASLR checks\033[m\n'
echo -n -e '  Fallback to standard Linux ASLR check'
fi
if cat /proc/1/status 2> /dev/null | grep -q 'PaX:'; then
printf ": "
if cat /proc/1/status 2> /dev/null | grep 'PaX:' | grep -q 'R'; then
echo -n -e '\033[32mPaX ASLR enabled\033[m\n\n'
else
echo -n -e '\033[31mPaX ASLR disabled\033[m\n\n'
fi
else
# standard Linux 'kernel.randomize_va_space' ASLR support
# (see the kernel file 'Documentation/sysctl/kernel.txt' for a detailed description)
printf " (kernel.randomize_va_space): "
if /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 1'; then
echo -n -e '\033[33mOn (Setting: 1)\033[m\n\n'
printf "  Description - Make the addresses of mmap base, stack and VDSO page randomized.\n"
printf "  This, among other things, implies that shared libraries will be loaded to \n"
printf "  random addresses. Also for PIE-linked binaries, the location of code start\n"
printf "  is randomized. Heap addresses are *not* randomized.\n\n"
elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 2'; then
echo -n -e '\033[32mOn (Setting: 2)\033[m\n\n'
printf "  Description - Make the addresses of mmap base, heap, stack and VDSO page randomized.\n"
printf "  This, among other things, implies that shared libraries will be loaded to random \n"
printf "  addresses. Also for PIE-linked binaries, the location of code start is randomized.\n\n"
elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 0'; then
echo -n -e '\033[31mOff (Setting: 0)\033[m\n'
else
echo -n -e '\033[31mNot supported\033[m\n'
fi
printf "  See the kernel file 'Documentation/sysctl/kernel.txt' for more details.\n\n"
fi
}
# check cpu nx flag
# report whether the CPU advertises the NX (no-execute) flag
nxcheck() {
  grep -q nx /proc/cpuinfo \
    && echo -n -e '\033[32mYes\033[m\n\n' \
    || echo -n -e '\033[31mNo\033[m\n\n'
}
# check for kernel protection mechanisms
kernelcheck() {
# Report hardening-related options of the running kernel. The config is
# located via /proc/config.gz, /boot/config-$(uname -r) or
# $KBUILD_OUTPUT/.config and then grepped for individual CONFIG_* switches.
printf "  Description - List the status of kernel protection mechanisms. Rather than\n"
printf "  inspect kernel mechanisms that may aid in the prevention of exploitation of\n"
printf "  userspace processes, this option lists the status of kernel configuration\n"
printf "  options that harden the kernel itself against attack.\n\n"
printf "  Kernel config: "
# 'kconfig' is set to a command that streams the config to stdout
if [ -f /proc/config.gz ] ; then
kconfig="zcat /proc/config.gz"
printf "\033[32m/proc/config.gz\033[m\n\n"
elif [ -f /boot/config-`uname -r` ] ; then
kconfig="cat /boot/config-`uname -r`"
printf "\033[33m/boot/config-`uname -r`\033[m\n\n"
printf "  Warning: The config on disk may not represent running kernel config!\n\n";
elif [ -f "${KBUILD_OUTPUT:-/usr/src/linux}"/.config ] ; then
kconfig="cat ${KBUILD_OUTPUT:-/usr/src/linux}/.config"
printf "\033[33m%s\033[m\n\n" "${KBUILD_OUTPUT:-/usr/src/linux}/.config"
printf "  Warning: The config on disk may not represent running kernel config!\n\n";
else
printf "\033[31mNOT FOUND\033[m\n\n"
exit 0
fi
printf "  GCC stack protector support:            "
if $kconfig | grep -qi 'CONFIG_CC_STACKPROTECTOR=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Strict user copy checks:                "
if $kconfig | grep -qi 'CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Enforce read-only kernel data:          "
if $kconfig | grep -qi 'CONFIG_DEBUG_RODATA=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Restrict /dev/mem access:               "
if $kconfig | grep -qi 'CONFIG_STRICT_DEVMEM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Restrict /dev/kmem access:              "
# note the inverted sense: CONFIG_DEVKMEM=y means /dev/kmem exists,
# i.e. the restriction is Disabled
if $kconfig | grep -qi 'CONFIG_DEVKMEM=y'; then
printf "\033[31mDisabled\033[m\n"
else
printf "\033[32mEnabled\033[m\n"
fi
printf "\n"
printf "* grsecurity / PaX:                       "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC=y'; then
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIGH=y'; then
printf "\033[32mHigh GRKERNSEC\033[m\n\n"
elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_MEDIUM=y'; then
printf "\033[33mMedium GRKERNSEC\033[m\n\n"
elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_LOW=y'; then
printf "\033[31mLow GRKERNSEC\033[m\n\n"
else
printf "\033[33mCustom GRKERNSEC\033[m\n\n"
fi
printf "  Non-executable kernel pages:            "
if $kconfig | grep -qi 'CONFIG_PAX_KERNEXEC=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Prevent userspace pointer deref:        "
if $kconfig | grep -qi 'CONFIG_PAX_MEMORY_UDEREF=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Prevent kobject refcount overflow:      "
if $kconfig | grep -qi 'CONFIG_PAX_REFCOUNT=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Bounds check heap object copies:        "
if $kconfig | grep -qi 'CONFIG_PAX_USERCOPY=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Disable writing to kmem/mem/port:       "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_KMEM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Disable privileged I/O:                 "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_IO=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Harden module auto-loading:             "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_MODHARDEN=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf "  Hide kernel symbols:                    "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIDESYM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
else
printf "\033[31mNo GRKERNSEC\033[m\n\n"
printf "  The grsecurity / PaX patchset is available here:\n"
printf "    http://grsecurity.net/\n"
fi
printf "\n"
printf "* Kernel Heap Hardening:                  "
if $kconfig | grep -qi 'CONFIG_KERNHEAP=y'; then
if $kconfig | grep -qi 'CONFIG_KERNHEAP_FULLPOISON=y'; then
printf "\033[32mFull KERNHEAP\033[m\n\n"
else
printf "\033[33mPartial KERNHEAP\033[m\n\n"
fi
else
printf "\033[31mNo KERNHEAP\033[m\n\n"
printf "  The KERNHEAP hardening patchset is available here:\n"
printf "    https://www.subreption.com/kernheap/\n\n"
fi
}
# --- FORTIFY_SOURCE subfunctions (start) ---
# is FORTIFY_SOURCE supported by libc?
# succeed when libc exports at least one fortified (__*_chk) function;
# otherwise report and terminate, since the FORTIFY checks are pointless
FS_libc_check() {
  printf "* FORTIFY_SOURCE support available (libc)    : "
  if [ "${#FS_chk_func_libc[@]}" = "0" ] ; then
    printf "\033[31mNo\033[m\n"
    exit 1
  fi
  printf "\033[32mYes\033[m\n"
}
# was the binary compiled with FORTIFY_SOURCE?
# succeed when any symbol of the inspected binary carries the _chk suffix
# (i.e. it was built with FORTIFY_SOURCE); otherwise report and terminate
FS_binary_check() {
  printf "* Binary compiled with FORTIFY_SOURCE support: "
  for FS_candidate in "${FS_functions[@]}"
  do
    if [[ $FS_candidate =~ _chk ]] ; then
      printf "\033[32mYes\033[m\n"
      return
    fi
  done
  printf "\033[31mNo\033[m\n"
  exit 1
}
FS_comparison() {
# Cross-match every symbol of the binary (FS_functions) against libc's
# fortifiable functions (FS_chk_func_libc): an exact name match counts as
# unchecked (red), a <name>_chk match as checked (green). Updates the
# FS_cnt_* counters consumed by FS_summary.
echo
printf     " ------ EXECUTABLE-FILE ------- . -------- LIBC --------\n"
printf     " FORTIFY-able library functions | Checked function names\n"
printf     " -------------------------------------------------------\n"
for FS_elem_libc in $(seq 0 $((${#FS_chk_func_libc[@]} - 1)))
do
for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
do
FS_tmp_func=${FS_functions[$FS_elem_functions]}
FS_tmp_libc=${FS_chk_func_libc[$FS_elem_libc]}
if [[ $FS_tmp_func =~ ^$FS_tmp_libc$ ]] ; then
printf " \033[31m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
let FS_cnt_total++
let FS_cnt_unchecked++
elif [[ $FS_tmp_func =~ ^$FS_tmp_libc(_chk) ]] ; then
printf " \033[32m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
let FS_cnt_total++
let FS_cnt_checked++
fi
done
done
}
# print the totals gathered by FS_comparison
FS_summary() {
  echo
  printf "SUMMARY:\n\n"
  printf "* Number of checked functions in libc                : %s\n" "${#FS_chk_func_libc[@]}"
  printf "* Total number of library functions in the executable: %s\n" "${#FS_functions[@]}"
  printf "* Number of FORTIFY-able functions in the executable : %s\n" $FS_cnt_total
  printf "* Number of checked functions in the executable      : \033[32m%s\033[m\n" $FS_cnt_checked
  printf "* Number of unchecked functions in the executable    : \033[31m%s\033[m\n" $FS_cnt_unchecked
  echo
}
# --- FORTIFY_SOURCE subfunctions (end) ---
# top-level entry point: warn (once) if readelf is missing, then dispatch
# on the first command-line argument
if !(command_exists readelf) ; then
printf "\033[31mWarning: 'readelf' not found! It's required for most checks.\033[m\n\n"
have_readelf=0
fi
# parse command-line arguments
case "$1" in
--version)
version
exit 0
;;
--help)
help
exit 0
;;
--dir)
# run filecheck on every readable ELF file directly inside the directory
if [ "$3" = "-v" ] ; then
verbose=true
fi
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid directory.\033[m\n\n"
exit 1
fi
# remove trailing slashes
tempdir=`echo $2 | sed -e "s/\/*$//"`
if [ ! -d $tempdir ] ; then
printf "\033[31mError: The directory '$tempdir' does not exist.\033[m\n\n"
exit 1
fi
cd $tempdir
printf "RELRO           STACK CANARY      NX            PIE             RPATH      RUNPATH      FILE\n"
for N in [A-Za-z]*; do
if [ "$N" != "[A-Za-z]*" ]; then
# read permissions?
if [ ! -r $N ]; then
printf "\033[31mError: No read permissions for '$tempdir/$N' (run as root).\033[m\n"
else
# ELF executable?
out=`file $N`
if [[ ! $out =~ ELF ]] ; then
if [ "$verbose" = "true" ] ; then
printf "\033[34m*** Not an ELF file: $tempdir/"
file $N
printf "\033[m"
fi
else
filecheck $N
# setuid/setgid files are highlighted with a red background
if [ `find $tempdir/$N \( -perm -004000 -o -perm -002000 \) -type f -print` ]; then
printf "\033[37;41m%s%s\033[m" $2 $N
else
printf "%s%s" $tempdir/ $N
fi
echo
fi
fi
fi
done
exit 0
;;
--file)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid file.\033[m\n\n"
exit 1
fi
# does the file exist?
if [ ! -e $2 ] ; then
printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
exit 1
fi
# read permissions?
if [ ! -r $2 ] ; then
printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
exit 1
fi
# ELF executable?
out=`file $2`
if [[ ! $out =~ ELF ]] ; then
printf "\033[31mError: Not an ELF file: "
file $2
printf "\033[m\n"
exit 1
fi
printf "RELRO           STACK CANARY      NX            PIE             RPATH      RUNPATH      FILE\n"
filecheck $2
# NOTE(review): $N is never set in this arm (copy/paste from --dir), so
# "%s%s" ... $2 $N effectively prints only $2 — confirm and clean up.
if [ `find $2 \( -perm -004000 -o -perm -002000 \) -type f -print` ] ; then
printf "\033[37;41m%s%s\033[m" $2 $N
else
printf "%s" $2
fi
echo
exit 0
;;
--proc-all)
# check every process with a readable /proc/<pid>/exe (skips this shell)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf "         COMMAND    PID RELRO             STACK CANARY           NX/PaX        PIE\n"
for N in [1-9]*; do
if [ $N != $$ ] && readlink -q $N/exe > /dev/null; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
proccheck $N
echo
fi
done
if [ ! -e /usr/bin/id ] ; then
printf "\n\033[33mNote: If you are running 'checksec.sh' as an unprivileged user, you\n"
printf "      will not see all processes. Please run the script as root.\033[m\n\n"
else
if !(root_privs) ; then
printf "\n\033[33mNote: You are running 'checksec.sh' as an unprivileged user.\n"
printf "      Too see all processes, please run the script as root.\033[m\n\n"
fi
fi
exit 0
;;
--proc)
# check every process whose command name matches $2
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
exit 1
fi
if !(isString "$2") ; then
printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf "         COMMAND    PID RELRO             STACK CANARY           NX/PaX        PIE\n"
for N in `ps -Ao pid,comm | grep $2 | cut -b1-6`; do
if [ -d $N ] ; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
proccheck $N
echo
fi
done
exit 0
;;
--proc-libs)
# check one PID plus every ELF file it has mapped
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
if !(isNumeric "$2") ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf "* Process information:\n\n"
printf "         COMMAND    PID RELRO             STACK CANARY           NX/PaX        PIE\n"
N=$2
if [ -d $N ] ; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
proccheck $N
echo
libcheck $N
fi
exit 0
;;
--kernel)
cd /proc
printf "* Kernel protection information:\n\n"
kernelcheck
exit 0
;;
--fortify-file)
# compare the symbols of an ELF file against libc's fortified functions
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid file.\033[m\n\n"
exit 1
fi
# does the file exist?
if [ ! -e $2 ] ; then
printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
exit 1
fi
# read permissions?
if [ ! -r $2 ] ; then
printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
exit 1
fi
# ELF executable?
out=`file $2`
if [[ ! $out =~ ELF ]] ; then
printf "\033[31mError: Not an ELF file: "
file $2
printf "\033[m\n"
exit 1
fi
# probe the common libc locations (32/64-bit, Debian multiarch)
if [ -e /lib/libc.so.6 ] ; then
FS_libc=/lib/libc.so.6
elif [ -e /lib64/libc.so.6 ] ; then
FS_libc=/lib64/libc.so.6
elif [ -e /lib/i386-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/i386-linux-gnu/libc.so.6
elif [ -e /lib/x86_64-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/x86_64-linux-gnu/libc.so.6
else
printf "\033[31mError: libc not found.\033[m\n\n"
exit 1
fi
FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
FS_functions=( $(readelf -s $2 | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
FS_libc_check
FS_binary_check
FS_comparison
FS_summary
exit 0
;;
--fortify-proc)
# same as --fortify-file, but for the exe of a running PID
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
if !(isNumeric "$2") ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
cd /proc
N=$2
if [ -d $N ] ; then
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
if [ -e /lib/libc.so.6 ] ; then
FS_libc=/lib/libc.so.6
elif [ -e /lib64/libc.so.6 ] ; then
FS_libc=/lib64/libc.so.6
elif [ -e /lib/i386-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/i386-linux-gnu/libc.so.6
elif [ -e /lib/x86_64-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/x86_64-linux-gnu/libc.so.6
else
printf "\033[31mError: libc not found.\033[m\n\n"
exit 1
fi
printf "* Process name (PID)                         : %s (%d)\n" `head -1 $N/status | cut -b 7-` $N
FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
FS_functions=( $(readelf -s $2/exe | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
FS_libc_check
FS_binary_check
FS_comparison
FS_summary
fi
exit 0
;;
*)
if [ "$#" != "0" ] ; then
printf "\033[31mError: Unknown option '$1'.\033[m\n\n"
fi
help
exit 1
;;
esac

View File

@@ -0,0 +1,40 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import test_utils.linux_command as linux_comm
import test_utils.size as size
from core.test_run import TestRun
class Dd(linux_comm.LinuxCommand):
    """Fluent builder for 'dd' command lines (each setter returns self)."""

    def __init__(self):
        super().__init__(TestRun.executor, 'dd')

    def block_size(self, value: size.Size):
        """Set the 'bs' operand, converted to a whole number of bytes."""
        return self.set_param('bs', int(value.get_value()))

    def count(self, value):
        """Set the 'count' operand (number of blocks to copy)."""
        return self.set_param('count', value)

    def input(self, value):
        """Set the 'if' operand (input file or device path)."""
        return self.set_param('if', value)

    def iflag(self, *values):
        """Set one or more 'iflag' values."""
        return self.set_param('iflag', *values)

    def oflag(self, *values):
        """Set one or more 'oflag' values."""
        return self.set_param('oflag', *values)

    def conv(self, *values):
        """Set one or more 'conv' values."""
        return self.set_param('conv', *values)

    def output(self, value):
        """Set the 'of' operand (output file or device path)."""
        return self.set_param('of', value)

    def seek(self, value):
        """Set the 'seek' operand (blocks skipped on output)."""
        return self.set_param('seek', value)

    def skip(self, value):
        """Set the 'skip' operand (blocks skipped on input)."""
        return self.set_param('skip', value)

View File

@@ -0,0 +1,47 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import test_utils.linux_command as linux_comm
import test_utils.size as size
from core.test_run import TestRun
class Ddrescue(linux_comm.LinuxCommand):
    """Fluent builder for 'ddrescue' command lines.

    Unlike dd, ddrescue takes source and destination as positional
    arguments, so they are kept aside and appended in __str__.
    """

    def __init__(self):
        super().__init__(TestRun.executor, 'ddrescue')
        self.source_path = None
        self.destination_path = None
        # ddrescue uses GNU-style long options
        self.param_name_prefix = "--"

    def source(self, value):
        """Remember the source (first positional argument)."""
        self.source_path = value
        return self

    def destination(self, value):
        """Remember the destination (second positional argument)."""
        self.destination_path = value
        return self

    def reverse(self):
        return self.set_flags("reverse")

    def synchronous(self):
        return self.set_flags("synchronous")

    def direct(self):
        return self.set_flags("direct")

    def force(self):
        return self.set_flags("force")

    def block_size(self, value: size.Size):
        """Set '--sector-size' as a whole number of bytes."""
        return self.set_param('sector-size', int(value.get_value()))

    def size(self, value: size.Size):
        """Set '--size' as a whole number of bytes."""
        return self.set_param('size', int(value.get_value()))

    def __str__(self):
        return f"{super().__str__()} {self.source_path} {self.destination_path}"

View File

@@ -0,0 +1,329 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from enum import Enum
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size, Unit
class DmTarget(Enum):
    """Device-mapper target types.

    Each member's value is the tuple of constructor types expected for the
    target's table parameters; DmTable.TableEntry.validate() uses it to
    coerce parameters.
    """
    # NOTE(review): Enum members with identical values are aliases — all the
    # empty-tuple members (ERROR, ZERO, CRYPT, MIRROR, ...) resolve to the
    # same member, so e.g. DmTarget.ZERO is DmTarget.ERROR. Confirm this is
    # acceptable for the targets actually used.
    # Fill argument types for other targets if you need them
    LINEAR = (str, int)
    STRIPED = (int, int, list)
    ERROR = ()
    ZERO = ()
    CRYPT = ()
    DELAY = (str, int, int, str, int, int)
    FLAKEY = (str, int, int, int)
    MIRROR = ()
    MULTIPATH = ()
    RAID = ()
    SNAPSHOT = ()

    def __str__(self):
        # dmsetup expects lower-case target names on the table line
        return self.name.lower()
# NOTE(review): stub forward-declaration. The real DmTable defined right
# below references 'DmTable.TableEntry' in a method annotation while its
# own class body is still being evaluated; this stub makes that name
# resolvable at definition time. Confirm before removing.
class DmTable:
    class TableEntry:
        pass
class DmTable:
    """Builder/container for a device-mapper table (list of TableEntry rows,
    each describing one LBA range in 512-byte sectors)."""

    class TableEntry:
        """One dm table line: '<offset> <length> <target> [params...]'."""

        def __init__(self, offset: int, length: int, target: DmTarget, *params):
            self.offset = int(offset)
            self.length = int(length)
            self.target = DmTarget(target)
            self.params = list(params)
            self.validate()

        def validate(self):
            # coerce each parameter to the type declared in the DmTarget
            # enum value; extra parameters are rejected
            if self.target.value:
                for i in range(len(self.params)):
                    try:
                        self.params[i] = self.target.value[i](self.params[i])
                    except IndexError:
                        raise ValueError("invalid dm target parameter")

        def __str__(self):
            ret = f"{self.offset} {self.length} {self.target}"
            for param in self.params:
                ret += f" {param}"
            return ret

    def __init__(self):
        self.table = []

    @classmethod
    def uniform_error_table(
        cls, start_lba: int, stop_lba: int, num_error_zones: int, error_zone_size: Size
    ):
        """Build a table with `num_error_zones` equally spaced error zones
        in [start_lba, stop_lba); the gaps are left to fill_gaps()."""
        table = cls()
        increment = (stop_lba - start_lba) // num_error_zones
        for zone_start in range(start_lba, stop_lba, increment):
            table.add_entry(
                DmTable.TableEntry(
                    zone_start,
                    error_zone_size.get_value(Unit.Blocks512),
                    DmTarget.ERROR,
                )
            )
        return table

    @classmethod
    def passthrough_table(cls, device: Device):
        """Build a table mapping the whole device 1:1 (linear, no changes)."""
        table = cls()
        table.add_entry(
            DmTable.TableEntry(
                0,
                device.size.get_value(Unit.Blocks512),
                DmTarget.LINEAR,
                device.path,
                0,
            )
        )
        return table

    @classmethod
    def error_table(cls, offset: int, size: Size):
        """Build a table with a single error range at `offset`."""
        table = cls()
        table.add_entry(
            DmTable.TableEntry(offset, size.get_value(Unit.Blocks512), DmTarget.ERROR)
        )
        return table

    def fill_gaps(self, device: Device, fill_end=True):
        """Fill every hole between entries with linear passthrough to
        `device`; optionally also map the tail up to the device size."""
        gaps = self.get_gaps()
        # the last element of get_gaps() is the open-ended tail, handled below
        for gap in gaps[:-1]:
            self.add_entry(
                DmTable.TableEntry(
                    gap[0], gap[1], DmTarget.LINEAR, device.path, int(gap[0])
                )
            )
        table_end = gaps[-1][0]
        if fill_end and (Size(table_end, Unit.Blocks512) < device.size):
            self.add_entry(
                DmTable.TableEntry(
                    table_end,
                    device.size.get_value(Unit.Blocks512) - table_end,
                    DmTarget.LINEAR,
                    device.path,
                    table_end,
                )
            )
        return self

    def add_entry(self, entry: DmTable.TableEntry):
        """Append an entry (no overlap checking here; see validate())."""
        self.table.append(entry)
        return self

    def get_gaps(self):
        """Return [(offset, length), ...] for every unmapped hole; the last
        tuple is the open-ended tail with length -1."""
        if not self.table:
            return [(0, -1)]
        gaps = []
        self.table.sort(key=lambda entry: entry.offset)
        if self.table[0].offset != 0:
            gaps.append((0, self.table[0].offset))
        for e1, e2 in zip(self.table, self.table[1:]):
            if e1.offset + e1.length != e2.offset:
                gaps.append(
                    (e1.offset + e1.length, e2.offset - (e1.offset + e1.length))
                )
        # e2 still holds the last entry after the loop (table has >= 2 rows)
        if len(self.table) > 1:
            gaps.append((e2.offset + e2.length, -1))
        else:
            gaps.append((self.table[0].offset + self.table[0].length, -1))
        return gaps

    def validate(self):
        """Raise ValueError unless entries start at 0 and tile contiguously."""
        self.table.sort(key=lambda entry: entry.offset)
        if self.table[0].offset != 0:
            raise ValueError(f"dm table should start at LBA 0: {self.table[0]}")
        for e1, e2 in zip(self.table, self.table[1:]):
            if e1.offset + e1.length != e2.offset:
                raise ValueError(
                    f"dm table should not have any holes or overlaps: {e1} -> {e2}"
                )

    def get_size(self):
        """Total mapped size (end of the last entry) as a Size."""
        self.table.sort(key=lambda entry: entry.offset)
        return Size(self.table[-1].offset + self.table[-1].length, Unit.Blocks512)

    def __str__(self):
        output = ""
        for entry in self.table:
            output += f"{entry}\n"
        return output
class DeviceMapper(LinuxCommand):
    """Thin wrapper around the 'dmsetup' CLI for one named dm device."""

    @classmethod
    def remove_all(cls, force=True):
        """Remove every device-mapper device on the DUT."""
        TestRun.LOGGER.info("Removing all device mapper devices")
        cmd = "dmsetup remove_all"
        if force:
            cmd += " --force"
        return TestRun.executor.run_expect_success(cmd)

    def __init__(self, name: str):
        LinuxCommand.__init__(self, TestRun.executor, "dmsetup")
        self.name = name

    @staticmethod
    def wrap_table(table: DmTable):
        # feed the multi-line table to dmsetup via a shell here-document
        return f"<< ENDHERE\n{str(table)}ENDHERE\n"

    def get_path(self):
        """Path under which the mapped device appears."""
        return f"/dev/mapper/{self.name}"

    def clear(self):
        return TestRun.executor.run_expect_success(f"{self.command_name} clear {self.name}")

    def create(self, table: DmTable):
        """Create the dm device from `table`; the table is validated first
        and dumped to the error log if invalid."""
        try:
            table.validate()
        except ValueError:
            for entry in table.table:
                TestRun.LOGGER.error(f"{entry}")
            raise
        TestRun.LOGGER.info(f"Creating device mapper device '{self.name}'")
        for entry in table.table:
            TestRun.LOGGER.debug(f"{entry}")
        return TestRun.executor.run_expect_success(
            f"{self.command_name} create {self.name} {self.wrap_table(table)}"
        )

    def remove(self):
        TestRun.LOGGER.info(f"Removing device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} remove {self.name}")

    def suspend(self):
        TestRun.LOGGER.info(f"Suspending device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} suspend {self.name}")

    def resume(self):
        TestRun.LOGGER.info(f"Resuming device mapper device '{self.name}'")
        return TestRun.executor.run_expect_success(f"{self.command_name} resume {self.name}")

    def reload(self, table: DmTable):
        """Replace the table of an existing (suspended) dm device."""
        table.validate()
        TestRun.LOGGER.info(f"Reloading table for device mapper device '{self.name}'")
        for entry in table.table:
            TestRun.LOGGER.debug(f"{entry}")
        return TestRun.executor.run_expect_success(
            f"{self.command_name} reload {self.name} {self.wrap_table(table)}"
        )
class ErrorDevice(Device):
    """Device-mapper backed device that injects I/O errors into chosen LBA
    ranges of a base device (dm 'error' target); defaults to passthrough."""

    def __init__(self, name: str, base_device: Device, table: DmTable = None):
        self.device = base_device
        self.mapper = DeviceMapper(name)
        self.name = name
        # without an explicit table start as a 1:1 passthrough (no errors)
        self.table = DmTable.passthrough_table(base_device) if not table else table
        self.active = False
        self.start()
        self.path = resolve_to_by_id_link(self.mapper.get_path().replace('/dev/', ''))

    @property
    def system_path(self):
        # resolved real path of the mapping; only defined while active
        if self.active:
            output = TestRun.executor.run_expect_success(f"realpath {self.mapper.get_path()}")
            return output.stdout
        return None

    @property
    def size(self):
        # the exposed size is defined by the dm table, not the base device
        if self.active:
            return self.table.get_size()
        return None

    def start(self):
        """Create the dm mapping and mark the device active."""
        self.mapper.create(self.table)
        self.active = True

    def stop(self):
        """Remove the dm mapping and mark the device inactive."""
        self.mapper.remove()
        self.active = False

    def change_table(self, table: DmTable, permanent=True):
        """Load a new table; with permanent=True remember it as this
        device's table (so resume_errors() restores it)."""
        if self.active:
            self.mapper.suspend()
        self.mapper.reload(table)
        self.mapper.resume()
        if permanent:
            self.table = table

    def suspend_errors(self):
        """Temporarily swap in a passthrough table (stop injecting errors)."""
        empty_table = DmTable.passthrough_table(self.device)
        TestRun.LOGGER.info(f"Suspending issuing errors for error device '{self.name}'")
        self.change_table(empty_table, False)

    def resume_errors(self):
        """Restore the remembered (error-injecting) table."""
        TestRun.LOGGER.info(f"Resuming issuing errors for error device '{self.name}'")
        self.change_table(self.table, False)

    def suspend(self):
        # bug fix: the warning fired when the device was NOT active but
        # claimed "It's already running" — report the actual state instead
        if not self.active:
            TestRun.LOGGER.warning(
                f"cannot suspend error device '{self.name}'! It's already suspended"
            )
        self.mapper.suspend()
        self.active = False

    def resume(self):
        if self.active:
            TestRun.LOGGER.warning(
                f"cannot resume error device '{self.name}'! It's already running"
            )
        self.mapper.resume()
        self.active = True

View File

@@ -0,0 +1,397 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
import re
import time
from enum import Enum
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.fs_utils import readlink, parse_ls_output, ls
from test_utils.output import CmdException
from test_utils.size import Size, Unit
SECTOR_SIZE = 512
class Filesystem(Enum):
    """Filesystem types supported by create_filesystem (names are passed to mkfs.<name>)."""
    xfs = 0
    ext3 = 1
    ext4 = 2
class PartitionTable(Enum):
    """Partition table (disk label) types; names are passed to 'parted mklabel'."""
    msdos = 0
    gpt = 1
class PartitionType(Enum):
    """Partition types; names are passed to 'parted mkpart'."""
    efi = 0
    primary = 1
    extended = 2
    logical = 3
    lvm = 4
    msr = 5
    swap = 6
    standard = 7
    unknown = 8
def create_filesystem(device, filesystem: Filesystem, force=True, blocksize=None):
    """Run mkfs of the given type on the device.

    force adds '-f' (xfs) or '-F' (ext*); blocksize, when given, is passed
    as '-b size=<n>' (xfs) or '-b <n>' (ext*).
    """
    TestRun.LOGGER.info(
        f"Creating filesystem ({filesystem.name}) on device: {device.path}")
    is_xfs = filesystem == Filesystem.xfs
    force_param = ''
    if force:
        force_param = ' -f ' if is_xfs else ' -F '
    block_size_param = ''
    if blocksize:
        block_size_param = f' -b size={blocksize}' if is_xfs else f' -b {blocksize}'
    # collapse duplicated spaces left by empty optional params
    cmd = re.sub(
        ' +', ' ',
        f'mkfs.{filesystem.name} {force_param} {device.path} {block_size_param}')
    TestRun.executor.run_expect_success(cmd)
    TestRun.LOGGER.info(
        f"Successfully created filesystem on device: {device.path}")
def create_partition_table(device, partition_table_type: PartitionTable = PartitionTable.gpt):
    """Label the device with the given partition table type (via parted)
    and record it on the device object."""
    TestRun.LOGGER.info(
        f"Creating partition table ({partition_table_type.name}) for device: {device.path}")
    TestRun.executor.run_expect_success(
        f'parted --script {device.path} mklabel {partition_table_type.name}')
    device.partition_table = partition_table_type
    TestRun.LOGGER.info(
        f"Successfully created {partition_table_type.name} "
        f"partition table on device: {device.path}")
def get_partition_path(parent_dev, number):
    """Return the device path of partition `number` on `parent_dev`.

    CAS exported objects use a bare 'p' separator (e.g. "cas1-1p1");
    everything else is assumed to be a by-id link using '-part'.
    """
    # TODO: change this to be less specific hw dependent (kernel)
    separator = 'p' if "dev/cas" in parent_dev else '-part'
    return f'{parent_dev}{separator}{number}'
def remove_parition(device, part_number):
    """Delete partition `part_number` from the device.

    (Misspelled name kept intentionally — it is the public API.)
    """
    TestRun.LOGGER.info(f"Removing part {part_number} from {device.path}")
    output = TestRun.executor.run(f'parted --script {device.path} rm {part_number}')
    if output.exit_code != 0:
        # parted failures are often stale kernel state; re-read the tables
        TestRun.executor.run_expect_success("partprobe")
def create_partition(
        device,
        part_size,
        part_number,
        part_type: PartitionType = PartitionType.primary,
        unit=Unit.MebiByte,
        aligned: bool = True):
    """Create a single partition on a device and register it in device.partitions.

    :param part_size: requested size; Size.zero() means "use all remaining space"
    :param part_number: expected partition number (used for path verification)
    :param part_type: parted partition type
    :param unit: unit used when rendering offsets for parted
    :param aligned: start on a 1 MiB boundary when True
    :raises Exception: when the partition cannot be found after creation
    """
    TestRun.LOGGER.info(
        f"Creating {part_type.name} partition on device: {device.path}")
    begin = get_first_partition_offset(device, aligned)
    for part in device.partitions:
        begin += part.size
        if part.type == PartitionType.logical:
            # logical partitions need extra room for their EBR metadata
            begin += Size(1, Unit.MebiByte if not aligned else device.block_size)
    if part_type == PartitionType.logical:
        begin += Size(1, Unit.MebiByte if not aligned else device.block_size)
    # BUGFIX: 'end' was only assigned in the non-zero-size branch, yet was read
    # unconditionally below -> NameError when part_size == Size.zero().
    end = None  # None means the partition extends to the end of the device
    if part_size != Size.zero():
        end = begin + part_size
        end_cmd = f'{end.get_value(unit)}{unit_to_string(unit)}'
    else:
        end_cmd = '100%'
    cmd = f'parted --script {device.path} mkpart ' \
          f'{part_type.name} ' \
          f'{begin.get_value(unit)}{unit_to_string(unit)} ' \
          f'{end_cmd}'
    output = TestRun.executor.run(cmd)
    if output.exit_code != 0:
        # parted sometimes fails transiently; re-read the table and re-check
        TestRun.executor.run_expect_success("partprobe")
    TestRun.executor.run_expect_success("udevadm settle")
    if not check_partition_after_create(
            part_size,
            part_number,
            device.path,
            part_type,
            aligned):
        raise Exception("Could not create partition!")
    if part_type != PartitionType.extended:
        from storage_devices.partition import Partition
        new_part = Partition(device,
                             part_type,
                             part_number,
                             begin,
                             end if isinstance(end, Size) else device.size)
        # zero the first 4 KiB so leftover filesystem signatures do not confuse tools
        dd = Dd().input("/dev/zero") \
            .output(new_part.path) \
            .count(1) \
            .block_size(Size(1, Unit.Blocks4096)) \
            .oflag("direct")
        dd.run()
        device.partitions.append(new_part)
    TestRun.LOGGER.info(f"Successfully created {part_type.name} partition on {device.path}")
def available_disk_size(device):
    """Return the size of the unpartitioned tail of the device.

    Computed from 'fdisk -l' output as total sector count minus the last
    sector occupied by an existing partition.
    NOTE(review): relies on fdisk's column layout ($7 / $3) - fragile
    against fdisk output changes; verify on target distros.
    """
    dev = f"/dev/{device.get_device_id()}"
    # get number of device's sectors
    disk_sectors = int(TestRun.executor.run(f"fdisk -l {dev} | grep {dev} | grep sectors "
                                            f"| awk '{{print $7 }}' ").stdout)
    # get last occupied sector
    last_occupied_sector = int(TestRun.executor.run(f"fdisk -l {dev} | grep {dev} "
                                                    f"| awk '{{print $3 }}' | tail -1").stdout)
    available_disk_sectors = disk_sectors - last_occupied_sector
    return Size(available_disk_sectors, Unit(get_block_size(device)))
def create_partitions(device, sizes: [], partition_table_type=PartitionTable.gpt):
    """Create a partition table and one partition per entry of 'sizes'.

    For msdos tables with more than 4 requested partitions, slot 4 is used
    for an extended partition and the remaining ones are created as logical
    partitions inside it.
    """
    create_partition_table(device, partition_table_type)
    partition_type = PartitionType.primary
    partition_number_offset = 0
    # msdos extended partition is capped at 2 TB here
    msdos_part_max_size = Size(2, Unit.TeraByte)
    for s in sizes:
        # shrink each request by one block so begin/end offsets fit on disk
        size = Size(
            s.get_value(device.block_size) - device.block_size.value, device.block_size)
        if partition_table_type == PartitionTable.msdos and \
                len(sizes) > 4 and len(device.partitions) == 3:
            if available_disk_size(device) > msdos_part_max_size:
                part_size = msdos_part_max_size
            else:
                part_size = Size.zero()  # zero -> extended spans the remaining space
            create_partition(device,
                             part_size,
                             4,
                             PartitionType.extended)
            partition_type = PartitionType.logical
            # slot 4 is now taken by the extended partition
            partition_number_offset = 1
        partition_number = len(device.partitions) + 1 + partition_number_offset
        create_partition(device,
                         size,
                         partition_number,
                         partition_type,
                         Unit.MebiByte,
                         True)
def get_block_size(device):
    """Return the device's hardware sector size in bytes (512 when unreadable)."""
    raw = TestRun.executor.run(
        f"cat {get_sysfs_path(device)}/queue/hw_sector_size").stdout
    try:
        return float(raw)
    except ValueError:
        # sysfs attribute missing or unparsable - assume classic 512 B sectors
        return Unit.Blocks512.value
def get_size(device):
    """Return device capacity in bytes, read from the sysfs sector count."""
    sectors = int(TestRun.executor.run_expect_success(
        f"cat {get_sysfs_path(device)}/size").stdout)
    # sysfs 'size' is always reported in 512-byte sectors
    return sectors * SECTOR_SIZE
def get_sysfs_path(device):
    """Return the sysfs directory for a block device, preferring /sys/class/block."""
    preferred = f"/sys/class/block/{device}"
    if TestRun.executor.run(f"test -d {preferred}").exit_code == 0:
        return preferred
    # older layout fallback
    return f"/sys/block/{device}"
def get_pci_address(device):
    """Read the PCI address of a block device from sysfs."""
    return TestRun.executor.run(f"cat /sys/block/{device}/device/address").stdout
def check_partition_after_create(size, part_number, parent_dev_path, part_type, aligned):
    """Verify that a freshly created partition is visible in the system.

    Returns True when the partition node exists (a size mismatch only logs
    a warning). When the node does not show up, retries with 'hdparm -z'
    and polls for up to ~20 seconds.
    """
    partition_path = get_partition_path(parent_dev_path, part_number)
    if "dev/cas" not in partition_path:
        # regular devices are located via their by-id symlink
        cmd = f"find {partition_path} -type l"
    else:
        cmd = f"find {partition_path}"
    output = TestRun.executor.run_expect_success(cmd).stdout
    if partition_path not in output:
        TestRun.LOGGER.info(
            "Partition created, but could not find it in system, trying 'hdparm -z'")
        TestRun.executor.run_expect_success(f"hdparm -z {parent_dev_path}")
        output_after_hdparm = TestRun.executor.run_expect_success(
            f"parted --script {parent_dev_path} print").stdout
        TestRun.LOGGER.info(output_after_hdparm)
        counter = 0
        # poll up to 10 times, 2 s apart, for the node to appear
        while partition_path not in output and counter < 10:
            time.sleep(2)
            output = TestRun.executor.run(cmd).stdout
            counter += 1
    # more than one line means 'find' matched something unexpected
    if len(output.split('\n')) > 1 or partition_path not in output:
        return False
    if aligned and part_type != PartitionType.extended \
            and size.get_value(Unit.Byte) % Unit.Blocks4096.value != 0:
        TestRun.LOGGER.warning(
            f"Partition {partition_path} is not 4k aligned: {size.get_value(Unit.KibiByte)}KiB")
    partition_size = get_size(readlink(partition_path).split('/')[-1])
    if part_type == PartitionType.extended or \
            partition_size == size.get_value(Unit.Byte):
        return True
    TestRun.LOGGER.warning(
        f"Partition size {partition_size} does not match expected {size.get_value(Unit.Byte)} size."
    )
    # size mismatch is tolerated (warning only) - the partition does exist
    return True
def get_first_partition_offset(device, aligned: bool):
    """Return the offset at which the first partition should start.

    Aligned partitions start at 1 MiB. Unaligned GPT starts at sector 34
    (the primary GPT occupies sectors 0-33), msdos at one block.

    BUGFIX: the unaligned branch read 'device.blocksize', which does not
    match the attribute used everywhere else in this module
    ('device.block_size', see create_partition / create_partitions).
    """
    if aligned:
        return Size(1, Unit.MebiByte)
    # 33 sectors are reserved for the backup GPT
    return Size(34, Unit(device.block_size)) \
        if device.partition_table == PartitionTable.gpt else Size(1, device.block_size)
def remove_partitions(device):
    """Unmount everything on the device and wipe it; True when no partitions remain."""
    from test_utils.os_utils import Udev
    if device.is_mounted():
        device.unmount()
    for partition in device.partitions:
        unmount(partition)
    TestRun.LOGGER.info(f"Removing partitions from device: {device.path} "
                        f"({device.get_device_id()}).")
    device.wipe_filesystem()
    # let udev recreate/remove device nodes before checking
    Udev.trigger()
    Udev.settle()
    leftover = TestRun.executor.run(f"ls {device.path}* -1").stdout.split('\n')
    if len(leftover) <= 1:
        return True
    TestRun.LOGGER.error(f"Could not remove partitions from device {device.path}")
    return False
def mount(device, mount_point, options: [str] = None):
    """Mount a device at mount_point (created if missing), with optional mount options.

    :raises Exception: when the mount command fails
    """
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestRun.LOGGER.info(f"Mounting device {device.path} ({device.get_device_id()}) "
                        f"to {mount_point}.")
    cmd = f"mount {device.path} {mount_point}"
    if options:
        cmd = f"{cmd} -o {','.join(options)}"
    if TestRun.executor.run(cmd).exit_code != 0:
        raise Exception(f"Failed to mount {device.path} to {mount_point}")
    device.mount_point = mount_point
def unmount(device):
    """Unmount the device if mounted; return False only when umount fails."""
    TestRun.LOGGER.info(f"Unmounting device {device.path} ({device.get_device_id()}).")
    if device.mount_point is None:
        TestRun.LOGGER.info("Device is not mounted.")
        return True
    if TestRun.executor.run(f"umount {device.mount_point}").exit_code != 0:
        TestRun.LOGGER.error("Could not unmount device.")
        return False
    return True
def unit_to_string(unit):
    """Translate a Unit into the size suffix understood by parted.

    Block units (512/4096) are expressed in sectors ('s'). Unknown units
    yield the string "Invalid unit." instead of raising.
    """
    unit_string = {
        Unit.Byte: 'B',
        Unit.Blocks512: 's',
        Unit.Blocks4096: 's',
        Unit.KibiByte: 'KiB',
        Unit.MebiByte: 'MiB',
        Unit.GibiByte: 'GiB',
        Unit.TebiByte: 'TiB',
        Unit.KiloByte: 'kB',
        Unit.MegaByte: 'MB',
        Unit.GigaByte: 'GB',
        Unit.TeraByte: 'TB'
    }
    return unit_string.get(unit, "Invalid unit.")
def wipe_filesystem(device, force=True):
    """Erase all filesystem signatures from the device using wipefs."""
    TestRun.LOGGER.info(f"Erasing the device: {device.path}")
    flags = '-a -f' if force else '-a'
    TestRun.executor.run_expect_success(f'wipefs {flags} {device.path}')
    TestRun.LOGGER.info(
        f"Successfully wiped device: {device.path}")
def check_if_device_supports_trim(device):
    """NVMe devices always support TRIM; others are probed via hdparm identify data."""
    if device.get_device_id().startswith("nvme"):
        return True
    probe = TestRun.executor.run(
        f'hdparm -I {device.path} | grep "TRIM supported"')
    return probe.exit_code == 0
def get_device_filesystem_type(device_id):
    """Return the Filesystem on a device according to lsblk, or None.

    None is returned when the device has no filesystem, is not listed by
    lsblk at all (but is still a valid block device), or carries an
    unrecognized filesystem name.
    """
    cmd = f'lsblk -l -o NAME,FSTYPE | sort | uniq | grep "{device_id} "'
    try:
        stdout = TestRun.executor.run_expect_success(cmd).stdout
    except CmdException:
        # unusual devices might not be listed in output (i.e. RAID containers)
        if TestRun.executor.run(f"test -b /dev/{device_id}").exit_code != 0:
            raise
        else:
            return None
    split_stdout = stdout.strip().split()
    if len(split_stdout) <= 1:
        # no FSTYPE column -> no filesystem on the device
        return None
    else:
        try:
            return Filesystem[split_stdout[1]]
        except KeyError:
            TestRun.LOGGER.warning(f"Unrecognized filesystem: {split_stdout[1]}")
            return None
def _is_by_id_path(path: str):
    """check if given path already is proper by-id path"""
    dev_by_id_dir = "/dev/disk/by-id"
    listed = parse_ls_output(ls(dev_by_id_dir), dev_by_id_dir)
    known_links = {posixpath.join(dev_by_id_dir, entry.full_path) for entry in listed}
    return path in known_links
def _is_dev_path_whitelisted(path: str):
"""check if given path is whitelisted"""
whitelisted_paths = [r"cas\d+-\d+", r"/dev/dm-\d+"]
for whitelisted_path in whitelisted_paths:
if re.search(whitelisted_path, path) is not None:
return True
return False
def validate_dev_path(path: str):
    """Return path if it is an absolute whitelisted or valid by-id path.

    :raises ValueError: when the path is relative or not a usable device link
    """
    if not posixpath.isabs(path):
        raise ValueError(f'Given path "{path}" is not absolute.')
    if _is_dev_path_whitelisted(path) or _is_by_id_path(path):
        return path
    raise ValueError(f'By-id device link {path} is broken.')

View File

@@ -0,0 +1,67 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run import TestRun
class Drbdadm:
    """Thin wrapper around the drbdadm cluster administration utility."""

    @staticmethod
    def create_metadata(resource_name: str, force: bool):
        """Initialize DRBD on-disk metadata for a resource."""
        force_opt = " --force" if force else ""
        return TestRun.executor.run_expect_success(
            f"drbdadm create-md{force_opt} {resource_name}")

    @staticmethod
    def up(resource_name: str):
        """Bring the resource up (attach backing device, establish connection)."""
        return TestRun.executor.run_expect_success(f"drbdadm up {resource_name}")

    @staticmethod
    def down_all():
        """Take down every configured resource."""
        return TestRun.executor.run_expect_success("drbdadm down all")

    @staticmethod
    def down(resource_name):
        """Take down a single resource."""
        return TestRun.executor.run_expect_success(f"drbdadm down {resource_name}")

    @staticmethod
    def set_node_primary(resource_name: str, force=False):
        """Promote this node to primary for the resource."""
        force_opt = " --force" if force else ""
        return TestRun.executor.run_expect_success(
            f"drbdadm primary {resource_name}{force_opt}")

    @staticmethod
    def set_node_secondary(resource_name: str):
        """Demote this node to secondary for the resource."""
        return TestRun.executor.run_expect_success(f"drbdadm secondary {resource_name}")

    @staticmethod
    def get_status(resource_name: str = ""):
        """Show status of one resource, or of all when no name is given."""
        return TestRun.executor.run_expect_success(f"drbdadm status {resource_name}")

    @staticmethod
    def in_sync(resource_name: str):
        """True when no device of the resource reports Inconsistent data."""
        grep_result = TestRun.executor.run(
            f"drbdadm status {resource_name} | grep Inconsistent")
        return grep_result.exit_code == 1

    @staticmethod
    def wait_for_sync(resource_name: str = ""):
        """Block until resync finishes.

        NOTE: ssh connection might timeout in case of a long sync.
        """
        return TestRun.executor.run_expect_success(f"drbdadm wait-sync {resource_name}")

    @staticmethod
    def dump_config(resource_name: str):
        """Dump the parsed configuration of the resource (does not raise on failure)."""
        return TestRun.executor.run(f"drbdadm dump {resource_name}")

View File

@@ -0,0 +1,105 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import uuid
import test_tools.fio.fio_param
import test_tools.fs_utils
from core.test_run import TestRun
from test_tools import fs_utils
from test_utils import os_utils
class Fio:
    """Builder and runner for fio workloads (optionally on a remote executor).

    Fixes:
    - install() previously built fio via TestRun.executor even when a custom
      executor was injected through __init__, so fio could be installed on
      the wrong machine; it now uses self.executor consistently.
    - fio_file is initialized in __init__ instead of appearing only after
      create_command() (avoids AttributeError on early access).
    """

    def __init__(self, executor_obj=None):
        self.fio_version = "fio-3.30"
        self.default_run_time = datetime.timedelta(hours=1)
        self.jobs = []
        # executor used for all fio commands; defaults to the test-run executor
        self.executor = executor_obj if executor_obj is not None else TestRun.executor
        self.base_cmd_parameters: test_tools.fio.fio_param.FioParam = None
        self.global_cmd_parameters: test_tools.fio.fio_param.FioParam = None
        # path of the fio output file; set by create_command()
        self.fio_file = None

    def create_command(self, output_type=test_tools.fio.fio_param.FioOutput.json):
        """Initialize base and global parameter sets; returns the global section."""
        self.base_cmd_parameters = test_tools.fio.fio_param.FioParamCmd(self, self.executor)
        self.global_cmd_parameters = test_tools.fio.fio_param.FioParamConfig(self, self.executor)
        # unique output file name per run
        self.fio_file = f'fio_run_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_{uuid.uuid4().hex}'
        self.base_cmd_parameters\
            .set_param('eta', 'always')\
            .set_param('output-format', output_type.value)\
            .set_param('output', self.fio_file)
        self.global_cmd_parameters.set_flags('group_reporting')
        return self.global_cmd_parameters

    def is_installed(self):
        """Check whether the exact pinned fio version is present on the executor."""
        return self.executor.run("fio --version").stdout.strip() == self.fio_version

    def install(self):
        """Download, build and install the pinned fio version from source."""
        fio_url = f"http://brick.kernel.dk/snaps/{self.fio_version}.tar.bz2"
        fio_package = os_utils.download_file(fio_url)
        fs_utils.uncompress_archive(fio_package)
        # use self.executor (was TestRun.executor) so a custom executor
        # installs fio on the machine it actually runs commands on
        self.executor.run_expect_success(f"cd {fio_package.parent_dir}/{self.fio_version}"
                                         f" && ./configure && make -j && make install")

    def calculate_timeout(self):
        """Derive the command timeout from runtime (+ramp_time) for time-based jobs.

        :raises ValueError: when runtime/ramp_time are not configured as single values
        """
        if "time_based" not in self.global_cmd_parameters.command_flags:
            return self.default_run_time
        total_time = self.global_cmd_parameters.get_parameter_value("runtime")
        if len(total_time) != 1:
            raise ValueError("Wrong fio 'runtime' parameter configuration")
        total_time = int(total_time[0])
        ramp_time = self.global_cmd_parameters.get_parameter_value("ramp_time")
        if ramp_time is not None:
            if len(ramp_time) != 1:
                raise ValueError("Wrong fio 'ramp_time' parameter configuration")
            total_time += int(ramp_time[0])
        return datetime.timedelta(seconds=total_time)

    def run(self, timeout: datetime.timedelta = None):
        """Run fio synchronously; timeout defaults to the calculated job duration."""
        if timeout is None:
            timeout = self.calculate_timeout()
        self.prepare_run()
        return self.executor.run(str(self), timeout)

    def run_in_background(self):
        """Start fio without waiting for completion."""
        self.prepare_run()
        return self.executor.run_in_background(str(self))

    def prepare_run(self):
        """Install fio if needed and log the command/config about to be executed."""
        if not self.is_installed():
            self.install()
        if len(self.jobs) > 0:
            # str(self) ends with " -" in job-file mode; appending "-showcmd -"
            # yields "--showcmd -", dumping the effective command into fio_file
            self.executor.run(f"{str(self)}-showcmd -")
            TestRun.LOGGER.info(self.executor.run(f"cat {self.fio_file}").stdout)
        TestRun.LOGGER.info(str(self))

    def execution_cmd_parameters(self):
        """Return the fio config file content: global section plus job sections."""
        if len(self.jobs) > 0:
            separator = "\n\n"
            return f"{str(self.global_cmd_parameters)}\n" \
                   f"{separator.join(str(job) for job in self.jobs)}"
        else:
            return str(self.global_cmd_parameters)

    def __str__(self):
        if len(self.jobs) > 0:
            # job-file mode: pipe the generated config into fio via stdin
            command = f"echo '{self.execution_cmd_parameters()}' |" \
                      f" {str(self.base_cmd_parameters)} -"
        else:
            # single-job mode: flatten base + global parameters into one command line
            fio_parameters = test_tools.fio.fio_param.FioParamCmd(self, self.executor)
            fio_parameters.command_env_var.update(self.base_cmd_parameters.command_env_var)
            fio_parameters.command_param.update(self.base_cmd_parameters.command_param)
            fio_parameters.command_param.update(self.global_cmd_parameters.command_param)
            fio_parameters.command_flags.extend(self.global_cmd_parameters.command_flags)
            fio_parameters.set_param('name', 'fio')
            command = str(fio_parameters)
        return command

View File

@@ -0,0 +1,388 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import json
import secrets
from enum import Enum
from types import SimpleNamespace as Namespace
from connection.base_executor import BaseExecutor
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.fio.fio_result import FioResult
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size
class CpusAllowedPolicy(Enum):
    """fio cpus_allowed_policy values (only .name is used when rendering)."""
    # NOTE(review): the trailing comma makes this member's value the tuple (0,);
    # harmless since only the name is emitted, but worth confirming intent.
    shared = 0,
    split = 1
class ErrorFilter(Enum):
    """fio continue_on_error categories (only .name is used when rendering)."""
    none = 0,
    read = 1,
    write = 2,
    io = 3,
    verify = 4,
    all = 5
class FioOutput(Enum):
    """fio --output-format values; member values are passed verbatim to fio."""
    normal = 'normal'
    terse = 'terse'
    json = 'json'
    jsonplus = 'json+'
class IoEngine(Enum):
    """fio ioengine choices (only .name is used when rendering)."""
    # Basic read or write I/O. fseek is used to position the I/O location.
    sync = 0,
    # Linux native asynchronous I/O.
    libaio = 1,
    # Basic pread or pwrite I/O.
    psync = 2,
    # Basic readv or writev I/O.
    # Will emulate queuing by coalescing adjacent IOs into a single submission.
    vsync = 3,
    # Basic preadv or pwritev I/O.
    pvsync = 4,
    # POSIX asynchronous I/O using aio_read and aio_write.
    posixaio = 5,
    # File is memory mapped with mmap and data copied using memcpy.
    mmap = 6,
    # RADOS Block Device
    rbd = 7,
    # SPDK Block Device
    spdk_bdev = 8
class ReadWrite(Enum):
    """fio readwrite (I/O pattern) values (only .name is used when rendering)."""
    randread = 0,
    randrw = 1,
    randwrite = 2,
    read = 3,
    readwrite = 4,
    write = 5,
    trim = 6,
    randtrim = 7,
    trimwrite = 8
class VerifyMethod(Enum):
    """fio verify checksum/pattern methods (only .name is used when rendering)."""
    # Use an md5 sum of the data area and store it in the header of each block.
    md5 = 0,
    # Use an experimental crc64 sum of the data area and store it in the header of each block.
    crc64 = 1,
    # Use optimized sha1 as the checksum function.
    sha1 = 2,
    # Verify a strict pattern.
    # Normally fio includes a header with some basic information and a checksum, but if this
    # option is set, only the specific pattern set with verify_pattern is verified.
    pattern = 3,
    # Write extra information about each I/O (timestamp, block number, etc.).
    # The block number is verified.
    meta = 4
class RandomGenerator(Enum):
    """fio random_generator values (only .name is used when rendering)."""
    tausworthe = 0,
    lfsr = 1,
    tausworthe64 = 2
class FioParam(LinuxCommand):
    """Chainable builder for fio options; every setter returns self.

    Subclasses differ only in how options are rendered (command line vs
    config-file section) via param_name_prefix - see FioParamCmd and
    FioParamConfig below.
    """
    def __init__(self, fio, command_executor: BaseExecutor, command_name):
        LinuxCommand.__init__(self, command_executor, command_name)
        self.verification_pattern = ''
        self.fio = fio
    def get_verification_pattern(self):
        # lazily generate a random 64-hex-digit pattern; stable per instance
        if not self.verification_pattern:
            self.verification_pattern = f'0x{secrets.token_hex(32)}'
        return self.verification_pattern
    def allow_mounted_write(self, value: bool = True):
        return self.set_param('allow_mounted_write', int(value))
    # example: "bs=8k,32k" => 8k for reads, 32k for writes and trims
    def block_size(self, *sizes: Size):
        return self.set_param('blocksize', *[int(size) for size in sizes])
    def blocksize_range(self, ranges):
        # ranges: iterable of (min, max) Size pairs, rendered as "min-max,..."
        value = []
        for bs_range in ranges:
            str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
            value.append(str_range)
        return self.set_param('blocksize_range', ",".join(value))
    def bs_split(self, value):
        return self.set_param('bssplit', value)
    def buffer_pattern(self, pattern):
        return self.set_param('buffer_pattern', pattern)
    def continue_on_error(self, value: ErrorFilter):
        return self.set_param('continue_on_error', value.name)
    def cpus_allowed(self, value):
        return self.set_param('cpus_allowed', ",".join(value))
    def cpus_allowed_policy(self, value: CpusAllowedPolicy):
        return self.set_param('cpus_allowed_policy', value.name)
    def direct(self, value: bool = True):
        # 'direct' and 'buffered' are mutually exclusive in fio
        if 'buffered' in self.command_param:
            self.remove_param('buffered')
        return self.set_param('direct', int(value))
    def directory(self, directory):
        return self.set_param('directory', directory)
    def do_verify(self, value: bool = True):
        return self.set_param('do_verify', int(value))
    def exit_all_on_error(self, value: bool = True):
        return self.set_flags('exitall_on_error') if value \
            else self.remove_flag('exitall_on_error')
    def group_reporting(self, value: bool = True):
        return self.set_flags('group_reporting') if value else self.remove_flag('group_reporting')
    def file_name(self, path):
        return self.set_param('filename', path)
    def file_size(self, size: Size):
        return self.set_param('filesize', int(size))
    def file_size_range(self, ranges):
        # ranges: iterable of (min, max) Size pairs, rendered as "min-max,..."
        value = []
        for bs_range in ranges:
            str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
            value.append(str_range)
        return self.set_param('filesize', ",".join(value))
    def fsync(self, value: int):
        return self.set_param('fsync', value)
    def ignore_errors(self, read_errors, write_errors, verify_errors):
        # each list becomes a colon-separated errno group: read:write:verify
        separator = ':'
        return self.set_param(
            'ignore_error',
            separator.join(str(err) for err in read_errors),
            separator.join(str(err) for err in write_errors),
            separator.join(str(err) for err in verify_errors))
    def io_depth(self, value: int):
        # iodepth > 1 is meaningless with synchronous engines - warn, still set
        if value != 1:
            if 'ioengine' in self.command_param and \
                    self.command_param['ioengine'] == 'sync':
                TestRun.LOGGER.warning("Setting iodepth will have no effect with "
                                       "'ioengine=sync' setting")
        return self.set_param('iodepth', value)
    def io_engine(self, value: IoEngine):
        # mirror of io_depth(): warn when switching to sync with iodepth set
        if value == IoEngine.sync:
            if 'iodepth' in self.command_param and self.command_param['iodepth'] != 1:
                TestRun.LOGGER.warning("Setting 'ioengine=sync' will cause iodepth setting "
                                       "to be ignored")
        return self.set_param('ioengine', value.name)
    def io_size(self, value: Size):
        return self.set_param('io_size', int(value.get_value()))
    def loops(self, value: int):
        return self.set_param('loops', value)
    def no_random_map(self, value: bool = True):
        # fio forbids norandommap together with verify
        if 'verify' in self.command_param:
            raise ValueError("'NoRandomMap' parameter is mutually exclusive with verify")
        if value:
            return self.set_flags('norandommap')
        else:
            return self.remove_flag('norandommap')
    def nr_files(self, value: int):
        return self.set_param('nrfiles', value)
    def num_ios(self, value: int):
        return self.set_param('number_ios', value)
    def num_jobs(self, value: int):
        return self.set_param('numjobs', value)
    def offset(self, value: Size):
        return self.set_param('offset', int(value.get_value()))
    def offset_increment(self, value: Size):
        return self.set_param('offset_increment', f"{value.value}{value.unit.get_short_name()}")
    def percentage_random(self, value: int):
        if value <= 100:
            return self.set_param('percentage_random', value)
        raise ValueError("Argument out of range. Should be 0-100.")
    def pool(self, value):
        return self.set_param('pool', value)
    def ramp_time(self, value: datetime.timedelta):
        return self.set_param('ramp_time', int(value.total_seconds()))
    def random_distribution(self, value):
        return self.set_param('random_distribution', value)
    def rand_repeat(self, value: int):
        return self.set_param('randrepeat', value)
    def rand_seed(self, value: int):
        return self.set_param('randseed', value)
    def read_write(self, rw: ReadWrite):
        return self.set_param('readwrite', rw.name)
    def run_time(self, value: datetime.timedelta):
        if value.total_seconds() == 0:
            raise ValueError("Runtime parameter must not be set to 0.")
        return self.set_param('runtime', int(value.total_seconds()))
    def serialize_overlap(self, value: bool = True):
        return self.set_param('serialize_overlap', int(value))
    def size(self, value: Size):
        return self.set_param('size', int(value.get_value()))
    def stonewall(self, value: bool = True):
        return self.set_flags('stonewall') if value else self.remove_param('stonewall')
    def sync(self, value: bool = True):
        return self.set_param('sync', int(value))
    def time_based(self, value: bool = True):
        return self.set_flags('time_based') if value else self.remove_flag('time_based')
    def thread(self, value: bool = True):
        return self.set_flags('thread') if value else self.remove_param('thread')
    def lat_percentiles(self, value: bool):
        return self.set_param('lat_percentiles', int(value))
    def scramble_buffers(self, value: bool):
        return self.set_param('scramble_buffers', int(value))
    def slat_percentiles(self, value: bool):
        return self.set_param('slat_percentiles', int(value))
    def spdk_core_mask(self, value: str):
        return self.set_param('spdk_core_mask', value)
    def spdk_json_conf(self, path):
        return self.set_param('spdk_json_conf', path)
    def clat_percentiles(self, value: bool):
        return self.set_param('clat_percentiles', int(value))
    def percentile_list(self, value: []):
        # empty list -> only the 100th percentile
        val = ':'.join(value) if len(value) > 0 else '100'
        return self.set_param('percentile_list', val)
    def verification_with_pattern(self, pattern=None):
        # convenience: enables pattern verify + do_verify in one call
        if pattern is not None and pattern != '':
            self.verification_pattern = pattern
        return self.verify(VerifyMethod.pattern) \
            .set_param('verify_pattern', self.get_verification_pattern()) \
            .do_verify()
    def verify(self, value: VerifyMethod):
        return self.set_param('verify', value.name)
    def create_only(self, value: bool = False):
        return self.set_param('create_only', int(value))
    def verify_pattern(self, pattern=None):
        return self.set_param('verify_pattern', pattern or self.get_verification_pattern())
    def verify_backlog(self, value: int):
        return self.set_param('verify_backlog', value)
    def verify_dump(self, value: bool = True):
        return self.set_param('verify_dump', int(value))
    def verify_fatal(self, value: bool = True):
        return self.set_param('verify_fatal', int(value))
    def verify_only(self, value: bool = True):
        return self.set_flags('verify_only') if value else self.remove_param('verify_only')
    def write_hint(self, value: str):
        return self.set_param('write_hint', value)
    def write_percentage(self, value: int):
        if value <= 100:
            return self.set_param('rwmixwrite', value)
        raise ValueError("Argument out of range. Should be 0-100.")
    def random_generator(self, value: RandomGenerator):
        return self.set_param('random_generator', value.name)
    def target(self, target):
        # accepts either a Device object or a plain path/file name
        if isinstance(target, Device):
            return self.file_name(target.path)
        return self.file_name(target)
    def add_job(self, job_name=None):
        # append a new [job] section to the owning Fio object and return it
        if not job_name:
            job_name = f'job{len(self.fio.jobs)}'
        new_job = FioParamConfig(self.fio, self.command_executor, f'[{job_name}]')
        self.fio.jobs.append(new_job)
        return new_job
    def clear_jobs(self):
        self.fio.jobs = []
        return self
    def edit_global(self):
        return self.fio.global_cmd_parameters
    def run(self, fio_timeout: datetime.timedelta = None):
        # Execute fio and parse its JSON output file into FioResult objects.
        # per-job logs would split the output and break JSON parsing
        if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
            self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
        fio_output = self.fio.run(fio_timeout)
        if fio_output.exit_code != 0:
            raise Exception(f"Exception occurred while trying to execute fio, exit_code:"
                            f"{fio_output.exit_code}.\n"
                            f"stdout: {fio_output.stdout}\nstderr: {fio_output.stderr}")
        # NOTE(review): warning removal uses TestRun.executor while the cat below
        # uses self.command_executor - confirm both always target the same host
        TestRun.executor.run(f"sed -i '/^[[:alnum:]]/d' {self.fio.fio_file}")  # Remove warnings
        out = self.command_executor.run_expect_success(f"cat {self.fio.fio_file}").stdout
        return self.get_results(out)
    def run_in_background(self):
        if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
            self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
        return self.fio.run_in_background()
    @staticmethod
    def get_results(result):
        """Parse fio JSON output into a list of FioResult (one per job)."""
        data = json.loads(result, object_hook=lambda d: Namespace(**d))
        jobs_list = []
        if hasattr(data, 'jobs'):
            jobs = data.jobs
            for job in jobs:
                job_result = FioResult(data, job)
                jobs_list.append(job_result)
        return jobs_list
class FioParamCmd(FioParam):
    """FioParam rendered as command-line options (--key=value)."""
    def __init__(self, fio, command_executor: BaseExecutor, command_name='fio'):
        FioParam.__init__(self, fio, command_executor, command_name)
        self.param_name_prefix = "--"
class FioParamConfig(FioParam):
    """FioParam rendered as a job-file section (one option per line)."""
    def __init__(self, fio, command_executor: BaseExecutor, command_name='[global]'):
        FioParam.__init__(self, fio, command_executor, command_name)
        self.param_name_prefix = "\n"

View File

@@ -0,0 +1,19 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import secrets
from aenum import Enum
class Pattern(Enum):
    """Data patterns for I/O verification, as hex strings fio accepts."""
    # repeating 10-byte cycle
    cyclic = "0x00336699ccffcc996633"
    # all byte values 0x00..0xff in ascending order
    sequential = "0x" + "".join([f"{i:02x}" for i in range(0, 256)])
    high = "0xaa"
    low = "0x84210"  # NOTE(review): odd nibble count - confirm intended value
    zeroes = "0x00"
    ones = "0xff"
    bin_1 = high  # alias of 'high'
    bin_2 = "0x55"
    # fixed random pattern, generated once per interpreter session
    random = "0x" + secrets.token_hex()

View File

@@ -0,0 +1,164 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from test_utils.size import Size, Unit, UnitPerSecond
from test_utils.time import Time
class FioResult:
    """Accessors over one parsed fio JSON job result.

    :param result: the whole parsed fio JSON output (SimpleNamespace tree)
    :param job: a single entry of result.jobs

    Fix: 'write_completion_latency_average' and
    'trim_completion_latency_average' were each defined twice; the later
    identical definitions silently shadowed the earlier ones and were removed.
    """
    def __init__(self, result, job):
        self.result = result
        self.job = job

    def __str__(self):
        result_dict = {
            "Total read I/O": self.read_io(),
            "Total read bandwidth ": self.read_bandwidth(),
            "Read bandwidth average ": self.read_bandwidth_average(),
            "Read bandwidth deviation ": self.read_bandwidth_deviation(),
            "Read IOPS": self.read_iops(),
            "Read runtime": self.read_runtime(),
            "Read average completion latency": self.read_completion_latency_average(),
            "Total write I/O": self.write_io(),
            "Total write bandwidth ": self.write_bandwidth(),
            "Write bandwidth average ": self.write_bandwidth_average(),
            "Write bandwidth deviation ": self.write_bandwidth_deviation(),
            "Write IOPS": self.write_iops(),
            "Write runtime": self.write_runtime(),
            "Write average completion latency": self.write_completion_latency_average(),
        }
        disks_name = self.disks_name()
        if disks_name:
            result_dict.update({"Disk name": ",".join(disks_name)})
        result_dict.update({"Total number of errors": self.total_errors()})
        s = ""
        for key in result_dict.keys():
            s += f"{key}: {result_dict[key]}\n"
        return s

    def total_errors(self):
        # 'total_err' is absent from the JSON when fio reports no errors
        return getattr(self.job, "total_err", 0)

    def disks_name(self):
        disks_name = []
        if hasattr(self.result, "disk_util"):
            for disk in self.result.disk_util:
                disks_name.append(disk.name)
        return disks_name

    # --- read statistics ---
    def read_io(self):
        return Size(self.job.read.io_kbytes, Unit.KibiByte)

    def read_bandwidth(self):
        return Size(self.job.read.bw, UnitPerSecond(Unit.KibiByte))

    def read_bandwidth_average(self):
        return Size(self.job.read.bw_mean, UnitPerSecond(Unit.KibiByte))

    def read_bandwidth_deviation(self):
        return Size(self.job.read.bw_dev, UnitPerSecond(Unit.KibiByte))

    def read_iops(self):
        return self.job.read.iops

    def read_runtime(self):
        return Time(microseconds=self.job.read.runtime)

    def read_completion_latency_min(self):
        return Time(nanoseconds=self.job.read.lat_ns.min)

    def read_completion_latency_max(self):
        return Time(nanoseconds=self.job.read.lat_ns.max)

    def read_completion_latency_average(self):
        return Time(nanoseconds=self.job.read.lat_ns.mean)

    def read_completion_latency_percentile(self):
        return self.job.read.lat_ns.percentile.__dict__

    def read_requests_number(self):
        return self.result.disk_util[0].read_ios

    # --- write statistics ---
    def write_io(self):
        return Size(self.job.write.io_kbytes, Unit.KibiByte)

    def write_bandwidth(self):
        return Size(self.job.write.bw, UnitPerSecond(Unit.KibiByte))

    def write_bandwidth_average(self):
        return Size(self.job.write.bw_mean, UnitPerSecond(Unit.KibiByte))

    def write_bandwidth_deviation(self):
        return Size(self.job.write.bw_dev, UnitPerSecond(Unit.KibiByte))

    def write_iops(self):
        return self.job.write.iops

    def write_runtime(self):
        return Time(microseconds=self.job.write.runtime)

    def write_completion_latency_min(self):
        return Time(nanoseconds=self.job.write.lat_ns.min)

    def write_completion_latency_max(self):
        return Time(nanoseconds=self.job.write.lat_ns.max)

    def write_completion_latency_average(self):
        return Time(nanoseconds=self.job.write.lat_ns.mean)

    def write_completion_latency_percentile(self):
        return self.job.write.lat_ns.percentile.__dict__

    def write_requests_number(self):
        return self.result.disk_util[0].write_ios

    # --- trim statistics ---
    def trim_io(self):
        return Size(self.job.trim.io_kbytes, Unit.KibiByte)

    def trim_bandwidth(self):
        return Size(self.job.trim.bw, UnitPerSecond(Unit.KibiByte))

    def trim_bandwidth_average(self):
        return Size(self.job.trim.bw_mean, UnitPerSecond(Unit.KibiByte))

    def trim_bandwidth_deviation(self):
        return Size(self.job.trim.bw_dev, UnitPerSecond(Unit.KibiByte))

    def trim_iops(self):
        return self.job.trim.iops

    def trim_runtime(self):
        return Time(microseconds=self.job.trim.runtime)

    def trim_completion_latency_min(self):
        return Time(nanoseconds=self.job.trim.lat_ns.min)

    def trim_completion_latency_max(self):
        return Time(nanoseconds=self.job.trim.lat_ns.max)

    def trim_completion_latency_average(self):
        return Time(nanoseconds=self.job.trim.lat_ns.mean)

    def trim_completion_latency_percentile(self):
        return self.job.trim.lat_ns.percentile.__dict__

    @staticmethod
    def result_list_to_dict(results):
        """Map job name -> raw job object for a list of FioResult."""
        result_dict = {}
        for result in results:
            result_dict[result.job.jobname] = result.job
        return result_dict

View File

@@ -0,0 +1,378 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import base64
import math
import textwrap
from aenum import IntFlag, Enum
from collections import namedtuple
from datetime import datetime
from core.test_run import TestRun
from test_tools.dd import Dd
from test_utils.size import Size, Unit
class Permissions(IntFlag):
    """Single-entity rwx permission bits (one octal digit of a mode)."""
    r = 4
    w = 2
    x = 1

    def __str__(self):
        # concatenate the names of all bits present, in r, w, x order
        return "".join(p.name for p in Permissions if p in self)
class PermissionsUsers(IntFlag):
    """chmod user classes: user, group, others."""
    u = 4
    g = 2
    o = 1

    def __str__(self):
        # concatenate the names of all classes present, in u, g, o order
        return "".join(p.name for p in PermissionsUsers if p in self)
class PermissionSign(Enum):
    """chmod symbolic-mode operators."""
    add = '+'
    remove = '-'
    set = '='
class FilesPermissions():
    """Validate permissions of a set of files against expected defaults.

    Per-file expected permissions overriding the defaults can be registered
    with add_exceptions().

    Fix: 'perms_exceptions' was a class-level mutable dict, so exceptions
    added on one instance leaked into every other instance; it is now
    initialized per instance in __init__.
    """

    def __init__(self, files_list: list):
        self.files_list = files_list
        self.perms_exceptions = {}

    def add_exceptions(self, perms: dict):
        """Register expected permissions for specific files (path -> perm)."""
        self.perms_exceptions.update(perms)

    def check(self, file_perm: int = 644, dir_perm: int = 755):
        """Return a list of (file, current_perm, expected_perm) for all mismatches.

        :raises Exception: when a path is neither a regular file nor a directory
        """
        failed_perms = []
        FailedPerm = namedtuple("FailedPerm", ["file", "current_perm", "expected_perm"])
        for file in self.files_list:
            perm = get_permissions(file)
            if file in self.perms_exceptions:
                if perm != self.perms_exceptions[file]:
                    failed_perms.append(FailedPerm(file, perm, self.perms_exceptions[file]))
                continue
            if check_if_regular_file_exists(file):
                if perm != file_perm:
                    failed_perms.append(FailedPerm(file, perm, file_perm))
            elif check_if_directory_exists(file):
                if perm != dir_perm:
                    failed_perms.append(FailedPerm(file, perm, dir_perm))
            else:
                raise Exception(f"{file}: File type not recognized.")
        return failed_perms
def create_directory(path, parents: bool = False):
    """Create a directory; with parents=True also create missing ancestors (mkdir -p)."""
    parents_flag = "--parents " if parents else ""
    return TestRun.executor.run_expect_success(f"mkdir {parents_flag}\"{path}\"")
def check_if_directory_exists(path):
    """Return True if `path` exists and is a directory on the DUT."""
    result = TestRun.executor.run(f"test -d \"{path}\"")
    return result.exit_code == 0
def check_if_file_exists(path):
    """Return True if `path` exists on the DUT (any file type)."""
    result = TestRun.executor.run(f"test -e \"{path}\"")
    return result.exit_code == 0
def check_if_regular_file_exists(path):
    """Return True if `path` exists and is a regular file on the DUT."""
    result = TestRun.executor.run(f"test -f \"{path}\"")
    return result.exit_code == 0
def check_if_symlink_exists(path):
    """Return True if `path` exists and is a symbolic link on the DUT."""
    result = TestRun.executor.run(f"test -L \"{path}\"")
    return result.exit_code == 0
def copy(source: str,
         destination: str,
         force: bool = False,
         recursive: bool = False,
         dereference: bool = False):
    """Copy source to destination via cp, raising on failure.

    force/recursive/dereference map to the matching cp long options.
    """
    options = "".join([
        " --force" if force else "",
        " --recursive" if recursive else "",
        " --dereference" if dereference else "",
    ])
    cmd = f"cp{options} \"{source}\" \"{destination}\""
    return TestRun.executor.run_expect_success(cmd)
def move(source, destination, force: bool = False):
    """Move (rename) source to destination via mv, raising on failure."""
    force_flag = ' --force' if force else ''
    return TestRun.executor.run_expect_success(
        f"mv{force_flag} \"{source}\" \"{destination}\"")
def remove(path, force: bool = False, recursive: bool = False, ignore_errors: bool = False):
    """Remove path via rm; raise on failure unless ignore_errors is True.

    Returns the command output either way so callers can inspect it.
    """
    flags = f"{' --force' if force else ''}{' --recursive' if recursive else ''}"
    output = TestRun.executor.run(f"rm{flags} \"{path}\"")
    if not ignore_errors and output.exit_code != 0:
        raise Exception(f"Could not remove file {path}."
                        f"\nstdout: {output.stdout}\nstderr: {output.stderr}")
    return output
def get_permissions(path, dereference: bool = True):
    """Return the octal permission digits of path as an int (e.g. 644).

    With dereference=True (default), symlinks are followed (stat --dereference).
    """
    follow_flag = '--dereference' if dereference else ''
    output = TestRun.executor.run_expect_success(
        f"stat --format='%a' {follow_flag} \"{path}\"")
    return int(output.stdout)
def chmod(path, permissions: Permissions, users: PermissionsUsers,
          sign: PermissionSign = PermissionSign.set, recursive: bool = False):
    """Apply a symbolic chmod expression (e.g. 'ug+rw') to path.

    Returns the command output without checking the exit code.
    """
    recursive_flag = ' --recursive' if recursive else ''
    cmd = f"chmod{recursive_flag} " \
          f"{str(users)}{sign.value}{str(permissions)} \"{path}\""
    return TestRun.executor.run(cmd)
def chmod_numerical(path, permissions: int, recursive: bool = False):
    """Apply a numeric chmod (e.g. 644) to path, raising on failure."""
    recursive_flag = ' --recursive' if recursive else ''
    return TestRun.executor.run_expect_success(
        f"chmod{recursive_flag} {permissions} \"{path}\"")
def chown(path, owner, group, recursive: bool = False):
    """Change owner and group of path, raising on failure.

    recursive now defaults to False for consistency with the other helpers
    in this module (copy/remove/chmod); existing callers that pass it
    positionally are unaffected.
    """
    cmd = f"chown {'--recursive ' if recursive else ''}{owner}:{group} \"{path}\""
    return TestRun.executor.run_expect_success(cmd)
def create_file(path):
    """Create an empty file via touch; path must not be blank.

    Raises ValueError for an empty/whitespace path before touching the DUT.
    """
    if not path.strip():
        raise ValueError("Path cannot be empty or whitespaces.")
    return TestRun.executor.run_expect_success(f"touch \"{path}\"")
def compare(file, other_file):
    """Return True if the two files are byte-identical, False if they differ.

    Raises when cmp itself fails (exit code > 1, e.g. a missing file).
    """
    output = TestRun.executor.run(
        f"cmp --silent \"{file}\" \"{other_file}\"")
    if output.exit_code > 1:
        raise Exception(f"Compare command execution failed. {output.stdout}\n{output.stderr}")
    # cmp exits 0 when identical, 1 when different.
    return output.exit_code == 0
def diff(file, other_file):
    """Return the diff text if the files differ, None if they are identical.

    Raises when diff itself fails (exit code > 1, e.g. a missing file).
    """
    output = TestRun.executor.run(
        f"diff \"{file}\" \"{other_file}\"")
    if output.exit_code == 0:
        return None
    elif output.exit_code > 1:
        raise Exception(f"Diff command execution failed. {output.stdout}\n{output.stderr}")
    else:
        # diff(1) writes the differences to stdout; exit code 1 only means
        # "files differ". The original returned output.stderr, which is
        # empty in that case, so callers never saw the actual diff.
        return output.stdout
# For some reason separators other than '/' don't work when using sed on system paths
# This requires escaping '/' in pattern and target string
def escape_sed_string(string: str, sed_replace: bool = False):
    """Escape a string for use inside a '/'-delimited sed expression.

    Single quotes become the \\x27 escape and '/' is backslash-escaped.
    With sed_replace=True, '&' (special in sed replacement text) is
    escaped as well.
    """
    translation = {ord("'"): r"\x27", ord("/"): r"\/"}
    if sed_replace:
        translation[ord("&")] = r"\&"
    return string.translate(translation)
def insert_line_before_pattern(file, pattern, new_line):
    """Insert new_line before every line in file matching pattern (in-place sed)."""
    escaped_pattern = escape_sed_string(pattern)
    escaped_line = escape_sed_string(new_line)
    return TestRun.executor.run_expect_success(
        f"sed -i '/{escaped_pattern}/i {escaped_line}' \"{file}\"")
def replace_first_pattern_occurrence(file, pattern, new_string):
    """Replace only the first occurrence of pattern in file (in-place sed)."""
    escaped_pattern = escape_sed_string(pattern)
    replacement = escape_sed_string(new_string, sed_replace=True)
    return TestRun.executor.run_expect_success(
        f"sed -i '0,/{escaped_pattern}/s//{replacement}/' \"{file}\"")
def replace_in_lines(file, pattern, new_string, regexp=False):
    """Replace every occurrence of pattern in file; regexp enables sed -r."""
    escaped_pattern = escape_sed_string(pattern)
    replacement = escape_sed_string(new_string, sed_replace=True)
    regexp_flag = ' -r' if regexp else ''
    return TestRun.executor.run_expect_success(
        f"sed -i{regexp_flag} 's/{escaped_pattern}/{replacement}/g' \"{file}\"")
def append_line(file, string):
    """Append string as a new line at the end of file (shell echo redirection)."""
    command = f"echo '{string}' >> \"{file}\""
    return TestRun.executor.run_expect_success(command)
def remove_lines(file, pattern, regexp=False):
    """Delete every line in file matching pattern; regexp enables sed -r."""
    escaped_pattern = escape_sed_string(pattern)
    regexp_flag = ' -r' if regexp else ''
    return TestRun.executor.run_expect_success(
        f"sed -i{regexp_flag} '/{escaped_pattern}/d' \"{file}\"")
def read_file(file):
    """Return the whole content of file as a string.

    Raises ValueError for an empty/whitespace path before touching the DUT.
    """
    if not file.strip():
        raise ValueError("File path cannot be empty or whitespace.")
    return TestRun.executor.run_expect_success(f"cat \"{file}\"").stdout
def write_file(file, content, overwrite: bool = True, unix_line_end: bool = True):
    """Write content to file on the DUT, transferring it base64-encoded.

    With overwrite=False the content is appended. With unix_line_end=True,
    carriage returns are stripped before writing. Raises ValueError for a
    blank path or empty content.
    """
    if not file.strip():
        raise ValueError("File path cannot be empty or whitespace.")
    if not content:
        raise ValueError("Content cannot be empty.")
    if unix_line_end:
        # str.replace returns a new string; the original discarded the
        # result, so CR characters were never actually removed.
        content = content.replace('\r', '')
    content += '\n'
    # Send the content in chunks so the remote command line stays within limits.
    max_length = 60000
    split_content = textwrap.TextWrapper(width=max_length, replace_whitespace=False).wrap(content)
    split_content[-1] += '\n'
    for chunk in split_content:
        # Only the first chunk may truncate the file; later chunks append.
        redirection_char = '>' if overwrite else '>>'
        overwrite = False
        encoded_content = base64.b64encode(chunk.encode("utf-8"))
        cmd = f"printf '{encoded_content.decode('utf-8')}' " \
              f"| base64 --decode {redirection_char} \"{file}\""
        TestRun.executor.run_expect_success(cmd)
def uncompress_archive(file, destination=None):
    """Extract an archive (zip via unzip, anything else via tar) on the DUT.

    destination defaults to the archive's parent directory.
    """
    from test_utils.filesystem.file import File

    if not isinstance(file, File):
        file = File(file)
    destination = destination or file.parent_dir

    if str(file).endswith(".zip"):
        command = f"unzip -u {file.full_path} -d {destination}"
    else:
        command = f"tar --extract --file={file.full_path} --directory={destination}"
    TestRun.executor.run_expect_success(command)
def ls(path, options=''):
    """Run ls -lA with a fixed time-style and return its stdout.

    The fixed time format keeps the output parseable by parse_ls_output().
    """
    default_options = "-lA --time-style=+'%Y-%m-%d %H:%M:%S'"
    result = TestRun.executor.run(f"ls {default_options} {options} \"{path}\"")
    return result.stdout
def ls_item(path):
    """Return the single ls line describing path itself (-d), or None if empty."""
    listing = ls(path, '-d')
    if not listing:
        return None
    return listing.splitlines()[0]
def parse_ls_output(ls_output, dir_path=''):
    """Parse the output of ls() into a list of FsItem-derived objects.

    Expects `ls -lA --time-style=+'%Y-%m-%d %H:%M:%S'` formatting (see ls()).
    For each entry, fills in permissions (user/group/other), owner, group,
    size (None for device nodes), and modification time. `dir_path`, when
    given, is prepended to non-symlink names to build full paths.
    """
    split_output = ls_output.split('\n')
    fs_items = []
    for line in split_output:
        if not line.strip():
            continue
        line_fields = line.split()
        # Skip lines too short to be full entries (e.g. the "total N" header).
        if len(line_fields) < 8:
            continue
        # First character of the mode string encodes the file type.
        file_type = line[0]
        if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
            continue
        # Drop the type character; the '.' removed here is presumably the
        # SELinux context marker ls appends after the mode bits - confirm.
        permissions = line_fields[0][1:].replace('.', '')
        owner = line_fields[2]
        group = line_fields[3]
        # A comma in field 4 indicates a device "major, minor" pair instead
        # of a size; in that case the pair spans two fields, so one extra
        # field is popped to realign the date/time/name columns.
        has_size = ',' not in line_fields[4]
        if has_size:
            size = Size(float(line_fields[4]), Unit.Byte)
        else:
            size = None
            line_fields.pop(4)
        # Fields 5 and 6 are the date and time in the fixed format above.
        split_date = line_fields[5].split('-')
        split_time = line_fields[6].split(':')
        modification_time = datetime(int(split_date[0]), int(split_date[1]), int(split_date[2]),
                                     int(split_time[0]), int(split_time[1]), int(split_time[2]))
        # Symlinks keep their raw name (field 7 is followed by "-> target").
        if dir_path and file_type != 'l':
            full_path = '/'.join([dir_path, line_fields[7]])
        else:
            full_path = line_fields[7]

        # Imported here to avoid a circular import with the filesystem modules.
        from test_utils.filesystem.file import File, FsItem
        from test_utils.filesystem.directory import Directory
        from test_utils.filesystem.symlink import Symlink

        if file_type == '-':
            fs_item = File(full_path)
        elif file_type == 'd':
            fs_item = Directory(full_path)
        elif file_type == 'l':
            fs_item = Symlink(full_path)
        else:
            fs_item = FsItem(full_path)

        # Mode string is rwxrwxrwx; '-' marks an absent bit. Build an IntFlag
        # value per 3-character class, or Permissions(0) when all are absent.
        fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))] \
            if permissions[:3] != '---' else Permissions(0)
        fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))] \
            if permissions[3:6] != '---' else Permissions(0)
        fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))] \
            if permissions[6:] != '---' else Permissions(0)

        fs_item.owner = owner
        fs_item.group = group
        fs_item.size = size
        fs_item.modification_time = modification_time
        fs_items.append(fs_item)
    return fs_items
def find_all_files(path: str, recursive: bool = True):
    """Return paths of all regular files and symlinks under path.

    With recursive=False only the top level is searched (-maxdepth 1).
    Raises ValueError for an empty/whitespace path.
    """
    if not path.strip():
        raise ValueError("No path given.")
    depth_limit = '-maxdepth 1' if not recursive else ''
    output = TestRun.executor.run_expect_success(
        f"find \"{path}\" {depth_limit} \( -type f -o -type l \) -print")
    return output.stdout.splitlines()
def find_all_dirs(path: str, recursive: bool = True):
    """Return paths of all directories under path (including path itself).

    With recursive=False only the top level is searched (-maxdepth 1).
    Raises ValueError for an empty/whitespace path.
    """
    if not path.strip():
        raise ValueError("No path given.")
    depth_limit = '-maxdepth 1' if not recursive else ''
    output = TestRun.executor.run_expect_success(
        f"find \"{path}\" {depth_limit} -type d -print")
    return output.stdout.splitlines()
def find_all_items(path: str, recursive: bool = True):
    """Return all files/symlinks followed by all directories under path."""
    files = find_all_files(path, recursive)
    dirs = find_all_dirs(path, recursive)
    return [*files, *dirs]
def readlink(link: str, options="--canonicalize-existing"):
    """Resolve a symlink via readlink; by default canonicalizes and requires
    every path component to exist."""
    cmd = f"readlink {options} \"{link}\""
    return TestRun.executor.run_expect_success(cmd).stdout
def create_random_test_file(target_file_path: str,
                            file_size: Size = Size(1, Unit.MebiByte),
                            random: bool = True):
    """Create a test file filled via dd from /dev/urandom (or /dev/zero).

    Writes in 512 KiB direct-I/O blocks and returns a refreshed File object
    for the created path.
    """
    from test_utils.filesystem.file import File
    bs = Size(512, Unit.KibiByte)
    # NOTE(review): this divides the raw .value fields of two Size objects
    # that may be expressed in different units (default file_size is MebiByte,
    # bs is KibiByte) - confirm Size.value semantics; if .value is
    # unit-relative, the block count (and thus the file size) is wrong.
    cnt = math.ceil(file_size.value / bs.value)
    file = File.create_file(target_file_path)
    dd = Dd().output(target_file_path) \
        .input("/dev/urandom" if random else "/dev/zero") \
        .block_size(bs) \
        .count(cnt) \
        .oflag("direct")
    dd.run()
    file.refresh_item()
    return file

View File

@@ -0,0 +1,179 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun