tests: Embed test framework within OCL repository

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga
2022-12-23 12:50:17 +01:00
parent bc0c8c1bf5
commit 849f59855c
91 changed files with 9930 additions and 2 deletions


@@ -0,0 +1,225 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import math
from aenum import IntFlag, Enum
from datetime import timedelta
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.filesystem.directory import Directory
from test_utils.os_utils import is_mounted, drop_caches, DropCachesMode
from test_utils.size import Size, Unit
DEBUGFS_MOUNT_POINT = "/sys/kernel/debug"
PREFIX = "trace_"
HEADER_FORMAT = "%a|%C|%d|%e|%n|%N|%S|%5T.%9t\\n"
class BlkTraceMask(IntFlag):
read = 1
write = 1 << 1
flush = 1 << 2
sync = 1 << 3
queue = 1 << 4
requeue = 1 << 5
issue = 1 << 6
complete = 1 << 7
fs = 1 << 8
pc = 1 << 9
notify = 1 << 10
ahead = 1 << 11
meta = 1 << 12
discard = 1 << 13
drv_data = 1 << 14
fua = 1 << 15
class ActionKind(Enum):
IoDeviceRemap = "A"
IoBounce = "B"
IoCompletion = "C"
IoToDriver = "D"
IoFrontMerge = "F"
GetRequest = "G"
IoInsert = "I"
IoMerge = "M"
PlugRequest = "P"
IoHandled = "Q"
RequeueRequest = "R"
SleepRequest = "S"
TimeoutUnplug = "T" # old version of TimerUnplug
UnplugRequest = "U"
TimerUnplug = "UT"
Split = "X"
class RwbsKind(IntFlag):
Undefined = 0
R = 1 # Read
W = 1 << 1 # Write
D = 1 << 2 # Discard
F = 1 << 3 # Flush
S = 1 << 4 # Synchronous
M = 1 << 5 # Metadata
A = 1 << 6 # Read Ahead
N = 1 << 7 # None of the above
def __str__(self):
ret = []
if self & RwbsKind.R:
ret.append("read")
if self & RwbsKind.W:
ret.append("write")
if self & RwbsKind.D:
ret.append("discard")
if self & RwbsKind.F:
ret.append("flush")
if self & RwbsKind.S:
ret.append("sync")
if self & RwbsKind.M:
ret.append("metadata")
if self & RwbsKind.A:
ret.append("readahead")
if self & RwbsKind.N:
ret.append("none")
return "|".join(ret)
class BlkTrace:
def __init__(self, device: Device, *masks: BlkTraceMask):
self._mount_debugfs()
if device is None:
raise Exception("Device not provided")
self.device = device
self.masks = "" if not masks else f' -a {" -a ".join([m.name for m in masks])}'
self.blktrace_pid = -1
self.__outputDirectoryPath = None
@staticmethod
def _mount_debugfs():
if not is_mounted(DEBUGFS_MOUNT_POINT):
TestRun.executor.run_expect_success(f"mount -t debugfs none {DEBUGFS_MOUNT_POINT}")
def start_monitoring(self, buffer_size: Size = None, number_of_subbuffers: int = None):
if self.blktrace_pid != -1:
raise Exception(f"blktrace already running with PID: {self.blktrace_pid}")
self.__outputDirectoryPath = Directory.create_temp_directory().full_path
drop_caches(DropCachesMode.ALL)
number_of_subbuffers = ("" if number_of_subbuffers is None
else f" --num-sub-buffers={number_of_subbuffers}")
buffer_size = ("" if buffer_size is None
else f" --buffer-size={buffer_size.get_value(Unit.KibiByte)}")
command = (f"blktrace{number_of_subbuffers}{buffer_size} --dev={self.device.path}"
f"{self.masks} --output={PREFIX} --output-dir={self.__outputDirectoryPath}")
echo_output = TestRun.executor.run_expect_success(
f"nohup {command} </dev/null &>{self.__outputDirectoryPath}/out & echo $!"
)
self.blktrace_pid = int(echo_output.stdout)
TestRun.LOGGER.info(f"blktrace monitoring for device {self.device.path} started"
f" (PID: {self.blktrace_pid}, output dir: {self.__outputDirectoryPath}")
def stop_monitoring(self):
if self.blktrace_pid == -1:
raise Exception("PID for blktrace is not set - has monitoring been started?")
drop_caches(DropCachesMode.ALL)
TestRun.executor.run_expect_success(f"kill -s SIGINT {self.blktrace_pid}")
self.blktrace_pid = -1
# dummy command for swallowing output of killed command
TestRun.executor.run("sleep 2 && echo dummy")
TestRun.LOGGER.info(f"blktrace monitoring for device {self.device.path} stopped")
return self.__parse_blktrace_output()
def __parse_blktrace_output(self):
TestRun.LOGGER.info(f"Parsing blktrace headers from {self.__outputDirectoryPath}... "
"Be patient")
command = (f'blkparse --input-dir={self.__outputDirectoryPath} --input={PREFIX} '
f'--format="{HEADER_FORMAT}"')
blkparse_output = TestRun.executor.run_expect_success(
command, timeout=timedelta(minutes=60)
)
parsed_headers = []
for line in blkparse_output.stdout.splitlines():
            # A per-CPU summary is printed at the end - it is not needed here
if line.startswith('CPU'):
break
header = Header.parse(line)
if header is None:
continue
parsed_headers.append(header)
TestRun.LOGGER.info(
f"Parsed {len(parsed_headers)} blktrace headers from {self.__outputDirectoryPath}"
)
parsed_headers.sort(key=lambda x: x.timestamp)
return parsed_headers
class Header:
def __init__(self):
self.action = None
self.block_count = None
self.byte_count = None
self.command = None
self.error_value = None
self.rwbs = RwbsKind.Undefined
self.sector_number = None
self.timestamp = None
@staticmethod
def parse(header_line: str):
        # messages/notifies are not formatted according to --format,
        # so they should be ignored (or parsed using the standard format):
if "m N" in header_line:
return None
header_fields = header_line.split('|')
if len(header_fields) != 8:
return None
timestamp_fields = header_fields[7].split('.')
timestamp_nano = int(timestamp_fields[-1]) if len(timestamp_fields) == 2 else 0
header = Header()
header.action = ActionKind(header_fields[0])
header.command = header_fields[1]
if len(header_fields[2]):
header.rwbs = RwbsKind['|'.join(list(header_fields[2]))]
header.error_value = int(header_fields[3])
header.block_count = int(header_fields[4])
header.byte_count = int(header_fields[5])
header.sector_number = int(header_fields[6])
header.timestamp = int(timestamp_fields[0]) * math.pow(10, 9) + timestamp_nano
return header
def __str__(self):
ret = []
if self.action:
ret.append(f"action: {self.action.name}")
if self.block_count:
ret.append(f"block_count: {self.block_count}")
if self.byte_count:
ret.append(f"byte_count: {self.byte_count}")
if self.command:
ret.append(f"command: {self.command}")
if self.error_value:
ret.append(f"error_value: {self.error_value}")
if self.rwbs:
ret.append(f"rwbs: {self.rwbs}")
if self.sector_number:
ret.append(f"sector_number: {self.sector_number}")
if self.timestamp:
ret.append(f"timestamp: {self.timestamp}")
return " ".join(ret)


@@ -0,0 +1,882 @@
#!/bin/bash
#
# The BSD License (http://www.opensource.org/licenses/bsd-license.php)
# specifies the terms and conditions of use for checksec.sh:
#
# Copyright (c) 2009-2011, Tobias Klein.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Tobias Klein nor the name of trapkit.de may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# Name : checksec.sh
# Version : 1.5
# Author : Tobias Klein
# Date : November 2011
# Download: http://www.trapkit.de/tools/checksec.html
# Changes : http://www.trapkit.de/tools/checksec_changes.txt
#
# Description:
#
# Modern Linux distributions offer some mitigation techniques to make it
# harder to exploit software vulnerabilities reliably. Mitigations such
# as RELRO, NoExecute (NX), Stack Canaries, Address Space Layout
# Randomization (ASLR) and Position Independent Executables (PIE) have
# made reliably exploiting any vulnerabilities that do exist far more
# challenging. The checksec.sh script is designed to test what *standard*
# Linux OS and PaX (http://pax.grsecurity.net/) security features are being
# used.
#
# As of version 1.3 the script also lists the status of various Linux kernel
# protection mechanisms.
#
# Credits:
#
# Thanks to Brad Spengler (grsecurity.net) for the PaX support.
# Thanks to Jon Oberheide (jon.oberheide.org) for the kernel support.
# Thanks to Ollie Whitehouse (Research In Motion) for rpath/runpath support.
#
# Others that contributed to checksec.sh (in no particular order):
#
# Simon Ruderich, Denis Scherbakov, Stefan Kuttler, Radoslaw Madej,
# Anthony G. Basile, Martin Vaeth and Brian Davis.
#
# global vars
have_readelf=1
verbose=false
# FORTIFY_SOURCE vars
FS_end=_chk
FS_cnt_total=0
FS_cnt_checked=0
FS_cnt_unchecked=0
FS_chk_func_libc=0
FS_functions=0
FS_libc=0
# version information
version() {
echo "checksec v1.5, Tobias Klein, www.trapkit.de, November 2011"
echo
}
# help
help() {
echo "Usage: checksec [OPTION]"
echo
echo "Options:"
echo
echo " --file <executable-file>"
echo " --dir <directory> [-v]"
echo " --proc <process name>"
echo " --proc-all"
echo " --proc-libs <process ID>"
echo " --kernel"
echo " --fortify-file <executable-file>"
echo " --fortify-proc <process ID>"
echo " --version"
echo " --help"
echo
echo "For more information, see:"
echo " http://www.trapkit.de/tools/checksec.html"
echo
}
# check if command exists
command_exists () {
type $1 > /dev/null 2>&1;
}
# check if directory exists
dir_exists () {
if [ -d $1 ] ; then
return 0
else
return 1
fi
}
# check user privileges
root_privs () {
if [ $(/usr/bin/id -u) -eq 0 ] ; then
return 0
else
return 1
fi
}
# check if input is numeric
isNumeric () {
echo "$@" | grep -q -v "[^0-9]"
}
# check if input is a string
isString () {
echo "$@" | grep -q -v "[^A-Za-z]"
}
# check file(s)
filecheck() {
# check for RELRO support
if readelf -l $1 2>/dev/null | grep -q 'GNU_RELRO'; then
if readelf -d $1 2>/dev/null | grep -q 'BIND_NOW'; then
echo -n -e '\033[32mFull RELRO \033[m '
else
echo -n -e '\033[33mPartial RELRO\033[m '
fi
else
echo -n -e '\033[31mNo RELRO \033[m '
fi
# check for stack canary support
if readelf -s $1 2>/dev/null | grep -q '__stack_chk_fail'; then
echo -n -e '\033[32mCanary found \033[m '
else
echo -n -e '\033[31mNo canary found\033[m '
fi
# check for NX support
if readelf -W -l $1 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
echo -n -e '\033[31mNX disabled\033[m '
else
echo -n -e '\033[32mNX enabled \033[m '
fi
# check for PIE support
if readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
echo -n -e '\033[31mNo PIE \033[m '
elif readelf -h $1 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
if readelf -d $1 2>/dev/null | grep -q '(DEBUG)'; then
echo -n -e '\033[32mPIE enabled \033[m '
else
echo -n -e '\033[33mDSO \033[m '
fi
else
echo -n -e '\033[33mNot an ELF file\033[m '
fi
# check for rpath / run path
if readelf -d $1 2>/dev/null | grep -q 'rpath'; then
echo -n -e '\033[31mRPATH \033[m '
else
echo -n -e '\033[32mNo RPATH \033[m '
fi
if readelf -d $1 2>/dev/null | grep -q 'runpath'; then
echo -n -e '\033[31mRUNPATH \033[m '
else
echo -n -e '\033[32mNo RUNPATH \033[m '
fi
}
# check process(es)
proccheck() {
# check for RELRO support
if readelf -l $1/exe 2>/dev/null | grep -q 'Program Headers'; then
if readelf -l $1/exe 2>/dev/null | grep -q 'GNU_RELRO'; then
if readelf -d $1/exe 2>/dev/null | grep -q 'BIND_NOW'; then
echo -n -e '\033[32mFull RELRO \033[m '
else
echo -n -e '\033[33mPartial RELRO \033[m '
fi
else
echo -n -e '\033[31mNo RELRO \033[m '
fi
else
echo -n -e '\033[31mPermission denied (please run as root)\033[m\n'
exit 1
fi
# check for stack canary support
if readelf -s $1/exe 2>/dev/null | grep -q 'Symbol table'; then
if readelf -s $1/exe 2>/dev/null | grep -q '__stack_chk_fail'; then
echo -n -e '\033[32mCanary found \033[m '
else
echo -n -e '\033[31mNo canary found \033[m '
fi
else
if [ "$1" != "1" ] ; then
echo -n -e '\033[33mPermission denied \033[m '
else
echo -n -e '\033[33mNo symbol table found\033[m '
fi
fi
# first check for PaX support
if cat $1/status 2> /dev/null | grep -q 'PaX:'; then
pageexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b6) )
segmexec=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b10) )
mprotect=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b8) )
randmmap=( $(cat $1/status 2> /dev/null | grep 'PaX:' | cut -b9) )
if [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[32mPaX enabled\033[m '
elif [[ "$pageexec" = "p" && "$segmexec" = "s" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[33mPaX ASLR only\033[m '
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "R" ]] ; then
echo -n -e '\033[33mPaX mprot off \033[m'
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "M" && "$randmmap" = "r" ]] ; then
echo -n -e '\033[33mPaX ASLR off\033[m '
elif [[ "$pageexec" = "P" || "$segmexec" = "S" ]] && [[ "$mprotect" = "m" && "$randmmap" = "r" ]] ; then
echo -n -e '\033[33mPaX NX only\033[m '
else
echo -n -e '\033[31mPaX disabled\033[m '
fi
# fallback check for NX support
elif readelf -W -l $1/exe 2>/dev/null | grep 'GNU_STACK' | grep -q 'RWE'; then
echo -n -e '\033[31mNX disabled\033[m '
else
echo -n -e '\033[32mNX enabled \033[m '
fi
# check for PIE support
if readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*EXEC'; then
echo -n -e '\033[31mNo PIE \033[m '
elif readelf -h $1/exe 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
if readelf -d $1/exe 2>/dev/null | grep -q '(DEBUG)'; then
echo -n -e '\033[32mPIE enabled \033[m '
else
echo -n -e '\033[33mDynamic Shared Object\033[m '
fi
else
echo -n -e '\033[33mNot an ELF file \033[m '
fi
}
# check mapped libraries
libcheck() {
libs=( $(awk '{ print $6 }' /proc/$1/maps | grep '/' | sort -u | xargs file | grep ELF | awk '{ print $1 }' | sed 's/:/ /') )
printf "\n* Loaded libraries (file information, # of mapped files: ${#libs[@]}):\n\n"
for element in $(seq 0 $((${#libs[@]} - 1)))
do
echo " ${libs[$element]}:"
echo -n " "
filecheck ${libs[$element]}
printf "\n\n"
done
}
# check for system-wide ASLR support
aslrcheck() {
# PaX ASLR support
if !(cat /proc/1/status 2> /dev/null | grep -q 'Name:') ; then
echo -n -e ':\033[33m insufficient privileges for PaX ASLR checks\033[m\n'
echo -n -e ' Fallback to standard Linux ASLR check'
fi
if cat /proc/1/status 2> /dev/null | grep -q 'PaX:'; then
printf ": "
if cat /proc/1/status 2> /dev/null | grep 'PaX:' | grep -q 'R'; then
echo -n -e '\033[32mPaX ASLR enabled\033[m\n\n'
else
echo -n -e '\033[31mPaX ASLR disabled\033[m\n\n'
fi
else
# standard Linux 'kernel.randomize_va_space' ASLR support
# (see the kernel file 'Documentation/sysctl/kernel.txt' for a detailed description)
printf " (kernel.randomize_va_space): "
if /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 1'; then
echo -n -e '\033[33mOn (Setting: 1)\033[m\n\n'
printf " Description - Make the addresses of mmap base, stack and VDSO page randomized.\n"
printf " This, among other things, implies that shared libraries will be loaded to \n"
printf " random addresses. Also for PIE-linked binaries, the location of code start\n"
printf " is randomized. Heap addresses are *not* randomized.\n\n"
elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 2'; then
echo -n -e '\033[32mOn (Setting: 2)\033[m\n\n'
printf " Description - Make the addresses of mmap base, heap, stack and VDSO page randomized.\n"
printf " This, among other things, implies that shared libraries will be loaded to random \n"
printf " addresses. Also for PIE-linked binaries, the location of code start is randomized.\n\n"
elif /sbin/sysctl -a 2>/dev/null | grep -q 'kernel.randomize_va_space = 0'; then
echo -n -e '\033[31mOff (Setting: 0)\033[m\n'
else
echo -n -e '\033[31mNot supported\033[m\n'
fi
printf " See the kernel file 'Documentation/sysctl/kernel.txt' for more details.\n\n"
fi
}
# check cpu nx flag
nxcheck() {
if grep -q nx /proc/cpuinfo; then
echo -n -e '\033[32mYes\033[m\n\n'
else
echo -n -e '\033[31mNo\033[m\n\n'
fi
}
# check for kernel protection mechanisms
kernelcheck() {
printf " Description - List the status of kernel protection mechanisms. Rather than\n"
printf " inspect kernel mechanisms that may aid in the prevention of exploitation of\n"
printf " userspace processes, this option lists the status of kernel configuration\n"
printf " options that harden the kernel itself against attack.\n\n"
printf " Kernel config: "
if [ -f /proc/config.gz ] ; then
kconfig="zcat /proc/config.gz"
printf "\033[32m/proc/config.gz\033[m\n\n"
elif [ -f /boot/config-`uname -r` ] ; then
kconfig="cat /boot/config-`uname -r`"
printf "\033[33m/boot/config-`uname -r`\033[m\n\n"
printf " Warning: The config on disk may not represent running kernel config!\n\n";
elif [ -f "${KBUILD_OUTPUT:-/usr/src/linux}"/.config ] ; then
kconfig="cat ${KBUILD_OUTPUT:-/usr/src/linux}/.config"
printf "\033[33m%s\033[m\n\n" "${KBUILD_OUTPUT:-/usr/src/linux}/.config"
printf " Warning: The config on disk may not represent running kernel config!\n\n";
else
printf "\033[31mNOT FOUND\033[m\n\n"
exit 0
fi
printf " GCC stack protector support: "
if $kconfig | grep -qi 'CONFIG_CC_STACKPROTECTOR=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Strict user copy checks: "
if $kconfig | grep -qi 'CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Enforce read-only kernel data: "
if $kconfig | grep -qi 'CONFIG_DEBUG_RODATA=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Restrict /dev/mem access: "
if $kconfig | grep -qi 'CONFIG_STRICT_DEVMEM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Restrict /dev/kmem access: "
if $kconfig | grep -qi 'CONFIG_DEVKMEM=y'; then
printf "\033[31mDisabled\033[m\n"
else
printf "\033[32mEnabled\033[m\n"
fi
printf "\n"
printf "* grsecurity / PaX: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC=y'; then
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIGH=y'; then
printf "\033[32mHigh GRKERNSEC\033[m\n\n"
elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_MEDIUM=y'; then
printf "\033[33mMedium GRKERNSEC\033[m\n\n"
elif $kconfig | grep -qi 'CONFIG_GRKERNSEC_LOW=y'; then
printf "\033[31mLow GRKERNSEC\033[m\n\n"
else
printf "\033[33mCustom GRKERNSEC\033[m\n\n"
fi
printf " Non-executable kernel pages: "
if $kconfig | grep -qi 'CONFIG_PAX_KERNEXEC=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Prevent userspace pointer deref: "
if $kconfig | grep -qi 'CONFIG_PAX_MEMORY_UDEREF=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Prevent kobject refcount overflow: "
if $kconfig | grep -qi 'CONFIG_PAX_REFCOUNT=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Bounds check heap object copies: "
if $kconfig | grep -qi 'CONFIG_PAX_USERCOPY=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Disable writing to kmem/mem/port: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_KMEM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Disable privileged I/O: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_IO=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Harden module auto-loading: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_MODHARDEN=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
printf " Hide kernel symbols: "
if $kconfig | grep -qi 'CONFIG_GRKERNSEC_HIDESYM=y'; then
printf "\033[32mEnabled\033[m\n"
else
printf "\033[31mDisabled\033[m\n"
fi
else
printf "\033[31mNo GRKERNSEC\033[m\n\n"
printf " The grsecurity / PaX patchset is available here:\n"
printf " http://grsecurity.net/\n"
fi
printf "\n"
printf "* Kernel Heap Hardening: "
if $kconfig | grep -qi 'CONFIG_KERNHEAP=y'; then
if $kconfig | grep -qi 'CONFIG_KERNHEAP_FULLPOISON=y'; then
printf "\033[32mFull KERNHEAP\033[m\n\n"
else
printf "\033[33mPartial KERNHEAP\033[m\n\n"
fi
else
printf "\033[31mNo KERNHEAP\033[m\n\n"
printf " The KERNHEAP hardening patchset is available here:\n"
printf " https://www.subreption.com/kernheap/\n\n"
fi
}
# --- FORTIFY_SOURCE subfunctions (start) ---
# is FORTIFY_SOURCE supported by libc?
FS_libc_check() {
printf "* FORTIFY_SOURCE support available (libc) : "
if [ "${#FS_chk_func_libc[@]}" != "0" ] ; then
printf "\033[32mYes\033[m\n"
else
printf "\033[31mNo\033[m\n"
exit 1
fi
}
# was the binary compiled with FORTIFY_SOURCE?
FS_binary_check() {
printf "* Binary compiled with FORTIFY_SOURCE support: "
for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
do
if [[ ${FS_functions[$FS_elem_functions]} =~ _chk ]] ; then
printf "\033[32mYes\033[m\n"
return
fi
done
printf "\033[31mNo\033[m\n"
exit 1
}
FS_comparison() {
echo
printf " ------ EXECUTABLE-FILE ------- . -------- LIBC --------\n"
printf " FORTIFY-able library functions | Checked function names\n"
printf " -------------------------------------------------------\n"
for FS_elem_libc in $(seq 0 $((${#FS_chk_func_libc[@]} - 1)))
do
for FS_elem_functions in $(seq 0 $((${#FS_functions[@]} - 1)))
do
FS_tmp_func=${FS_functions[$FS_elem_functions]}
FS_tmp_libc=${FS_chk_func_libc[$FS_elem_libc]}
if [[ $FS_tmp_func =~ ^$FS_tmp_libc$ ]] ; then
printf " \033[31m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
let FS_cnt_total++
let FS_cnt_unchecked++
elif [[ $FS_tmp_func =~ ^$FS_tmp_libc(_chk) ]] ; then
printf " \033[32m%-30s\033[m | __%s%s\n" $FS_tmp_func $FS_tmp_libc $FS_end
let FS_cnt_total++
let FS_cnt_checked++
fi
done
done
}
FS_summary() {
echo
printf "SUMMARY:\n\n"
printf "* Number of checked functions in libc : ${#FS_chk_func_libc[@]}\n"
printf "* Total number of library functions in the executable: ${#FS_functions[@]}\n"
printf "* Number of FORTIFY-able functions in the executable : %s\n" $FS_cnt_total
printf "* Number of checked functions in the executable : \033[32m%s\033[m\n" $FS_cnt_checked
printf "* Number of unchecked functions in the executable : \033[31m%s\033[m\n" $FS_cnt_unchecked
echo
}
# --- FORTIFY_SOURCE subfunctions (end) ---
if !(command_exists readelf) ; then
printf "\033[31mWarning: 'readelf' not found! It's required for most checks.\033[m\n\n"
have_readelf=0
fi
# parse command-line arguments
case "$1" in
--version)
version
exit 0
;;
--help)
help
exit 0
;;
--dir)
if [ "$3" = "-v" ] ; then
verbose=true
fi
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid directory.\033[m\n\n"
exit 1
fi
# remove trailing slashes
tempdir=`echo $2 | sed -e "s/\/*$//"`
if [ ! -d $tempdir ] ; then
printf "\033[31mError: The directory '$tempdir' does not exist.\033[m\n\n"
exit 1
fi
cd $tempdir
printf "RELRO STACK CANARY NX PIE RPATH RUNPATH FILE\n"
for N in [A-Za-z]*; do
if [ "$N" != "[A-Za-z]*" ]; then
# read permissions?
if [ ! -r $N ]; then
printf "\033[31mError: No read permissions for '$tempdir/$N' (run as root).\033[m\n"
else
# ELF executable?
out=`file $N`
if [[ ! $out =~ ELF ]] ; then
if [ "$verbose" = "true" ] ; then
printf "\033[34m*** Not an ELF file: $tempdir/"
file $N
printf "\033[m"
fi
else
filecheck $N
if [ `find $tempdir/$N \( -perm -004000 -o -perm -002000 \) -type f -print` ]; then
printf "\033[37;41m%s%s\033[m" $2 $N
else
printf "%s%s" $tempdir/ $N
fi
echo
fi
fi
fi
done
exit 0
;;
--file)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid file.\033[m\n\n"
exit 1
fi
# does the file exist?
if [ ! -e $2 ] ; then
printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
exit 1
fi
# read permissions?
if [ ! -r $2 ] ; then
printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
exit 1
fi
# ELF executable?
out=`file $2`
if [[ ! $out =~ ELF ]] ; then
printf "\033[31mError: Not an ELF file: "
file $2
printf "\033[m\n"
exit 1
fi
printf "RELRO STACK CANARY NX PIE RPATH RUNPATH FILE\n"
filecheck $2
if [ `find $2 \( -perm -004000 -o -perm -002000 \) -type f -print` ] ; then
printf "\033[37;41m%s%s\033[m" $2 $N
else
printf "%s" $2
fi
echo
exit 0
;;
--proc-all)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
for N in [1-9]*; do
if [ $N != $$ ] && readlink -q $N/exe > /dev/null; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
proccheck $N
echo
fi
done
if [ ! -e /usr/bin/id ] ; then
printf "\n\033[33mNote: If you are running 'checksec.sh' as an unprivileged user, you\n"
printf " will not see all processes. Please run the script as root.\033[m\n\n"
else
if !(root_privs) ; then
printf "\n\033[33mNote: You are running 'checksec.sh' as an unprivileged user.\n"
printf " Too see all processes, please run the script as root.\033[m\n\n"
fi
fi
exit 0
;;
--proc)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
exit 1
fi
if !(isString "$2") ; then
printf "\033[31mError: Please provide a valid process name.\033[m\n\n"
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
for N in `ps -Ao pid,comm | grep $2 | cut -b1-6`; do
if [ -d $N ] ; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
proccheck $N
echo
fi
done
exit 0
;;
--proc-libs)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
if !(isNumeric "$2") ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
cd /proc
printf "* System-wide ASLR"
aslrcheck
printf "* Does the CPU support NX: "
nxcheck
printf "* Process information:\n\n"
printf " COMMAND PID RELRO STACK CANARY NX/PaX PIE\n"
N=$2
if [ -d $N ] ; then
printf "%16s" `head -1 $N/status | cut -b 7-`
printf "%7d " $N
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
proccheck $N
echo
libcheck $N
fi
exit 0
;;
--kernel)
cd /proc
printf "* Kernel protection information:\n\n"
kernelcheck
exit 0
;;
--fortify-file)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid file.\033[m\n\n"
exit 1
fi
# does the file exist?
if [ ! -e $2 ] ; then
printf "\033[31mError: The file '$2' does not exist.\033[m\n\n"
exit 1
fi
# read permissions?
if [ ! -r $2 ] ; then
printf "\033[31mError: No read permissions for '$2' (run as root).\033[m\n\n"
exit 1
fi
# ELF executable?
out=`file $2`
if [[ ! $out =~ ELF ]] ; then
printf "\033[31mError: Not an ELF file: "
file $2
printf "\033[m\n"
exit 1
fi
if [ -e /lib/libc.so.6 ] ; then
FS_libc=/lib/libc.so.6
elif [ -e /lib64/libc.so.6 ] ; then
FS_libc=/lib64/libc.so.6
elif [ -e /lib/i386-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/i386-linux-gnu/libc.so.6
elif [ -e /lib/x86_64-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/x86_64-linux-gnu/libc.so.6
else
printf "\033[31mError: libc not found.\033[m\n\n"
exit 1
fi
FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
FS_functions=( $(readelf -s $2 | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
FS_libc_check
FS_binary_check
FS_comparison
FS_summary
exit 0
;;
--fortify-proc)
if [ $have_readelf -eq 0 ] ; then
exit 1
fi
if [ -z "$2" ] ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
if !(isNumeric "$2") ; then
printf "\033[31mError: Please provide a valid process ID.\033[m\n\n"
exit 1
fi
cd /proc
N=$2
if [ -d $N ] ; then
# read permissions?
if [ ! -r $N/exe ] ; then
if !(root_privs) ; then
printf "\033[31mNo read permissions for '/proc/$N/exe' (run as root).\033[m\n\n"
exit 1
fi
if [ ! `readlink $N/exe` ] ; then
printf "\033[31mPermission denied. Requested process ID belongs to a kernel thread.\033[m\n\n"
exit 1
fi
exit 1
fi
if [ -e /lib/libc.so.6 ] ; then
FS_libc=/lib/libc.so.6
elif [ -e /lib64/libc.so.6 ] ; then
FS_libc=/lib64/libc.so.6
elif [ -e /lib/i386-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/i386-linux-gnu/libc.so.6
elif [ -e /lib/x86_64-linux-gnu/libc.so.6 ] ; then
FS_libc=/lib/x86_64-linux-gnu/libc.so.6
else
printf "\033[31mError: libc not found.\033[m\n\n"
exit 1
fi
printf "* Process name (PID) : %s (%d)\n" `head -1 $N/status | cut -b 7-` $N
FS_chk_func_libc=( $(readelf -s $FS_libc | grep _chk@@ | awk '{ print $8 }' | cut -c 3- | sed -e 's/_chk@.*//') )
FS_functions=( $(readelf -s $2/exe | awk '{ print $8 }' | sed 's/_*//' | sed -e 's/@.*//') )
FS_libc_check
FS_binary_check
FS_comparison
FS_summary
fi
exit 0
;;
*)
if [ "$#" != "0" ] ; then
printf "\033[31mError: Unknown option '$1'.\033[m\n\n"
fi
help
exit 1
;;
esac


@@ -0,0 +1,40 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import test_utils.linux_command as linux_comm
import test_utils.size as size
from core.test_run import TestRun
class Dd(linux_comm.LinuxCommand):
def __init__(self):
linux_comm.LinuxCommand.__init__(self, TestRun.executor, 'dd')
def block_size(self, value: size.Size):
return self.set_param('bs', int(value.get_value()))
def count(self, value):
return self.set_param('count', value)
def input(self, value):
return self.set_param('if', value)
def iflag(self, *values):
return self.set_param('iflag', *values)
def oflag(self, *values):
return self.set_param('oflag', *values)
def conv(self, *values):
return self.set_param('conv', *values)
def output(self, value):
return self.set_param('of', value)
def seek(self, value):
return self.set_param('seek', value)
def skip(self, value):
return self.set_param('skip', value)
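

def _example_zero_first_mebibyte(target_path):
    """Usage sketch (illustrative): zero the first MiB of a device or file
    with O_DIRECT. Assumes a configured TestRun executor; the target path
    is an assumption of this example."""
    dd = (
        Dd()
        .input("/dev/zero")
        .output(target_path)
        .block_size(size.Size(1, size.Unit.MebiByte))
        .count(1)
        .oflag("direct")
    )
    dd.run()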


@@ -0,0 +1,47 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import test_utils.linux_command as linux_comm
import test_utils.size as size
from core.test_run import TestRun
class Ddrescue(linux_comm.LinuxCommand):
def __init__(self):
linux_comm.LinuxCommand.__init__(self, TestRun.executor, 'ddrescue')
self.source_path = None
self.destination_path = None
self.param_name_prefix = "--"
def source(self, value):
self.source_path = value
return self
def destination(self, value):
self.destination_path = value
return self
def reverse(self):
return self.set_flags("reverse")
def synchronous(self):
return self.set_flags("synchronous")
def direct(self):
return self.set_flags("direct")
def force(self):
return self.set_flags("force")
def block_size(self, value: size.Size):
return self.set_param('sector-size', int(value.get_value()))
def size(self, value: size.Size):
return self.set_param('size', int(value.get_value()))
def __str__(self):
command = linux_comm.LinuxCommand.__str__(self)
command += f" {self.source_path} {self.destination_path}"
return command
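

def _example_rescue_copy(source_path, destination_path):
    """Usage sketch (illustrative): copy one device onto another with
    direct I/O, overwriting the destination. Assumes a configured
    TestRun executor and valid device paths."""
    ddrescue = (
        Ddrescue()
        .source(source_path)
        .destination(destination_path)
        .direct()
        .force()
    )
    ddrescue.run()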


@@ -0,0 +1,329 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from enum import Enum
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.disk_finder import resolve_to_by_id_link
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size, Unit
class DmTarget(Enum):
# Fill argument types for other targets if you need them
LINEAR = (str, int)
STRIPED = (int, int, list)
ERROR = ()
ZERO = ()
CRYPT = ()
DELAY = (str, int, int, str, int, int)
FLAKEY = (str, int, int, int)
MIRROR = ()
MULTIPATH = ()
RAID = ()
SNAPSHOT = ()
def __str__(self):
return self.name.lower()
# Forward declaration: method annotations in the full DmTable class below
# (e.g. add_entry) reference DmTable.TableEntry while that class body is
# still being evaluated, so the name must already be bound.
class DmTable:
    class TableEntry:
        pass
class DmTable:
class TableEntry:
def __init__(self, offset: int, length: int, target: DmTarget, *params):
self.offset = int(offset)
self.length = int(length)
self.target = DmTarget(target)
self.params = list(params)
self.validate()
def validate(self):
if self.target.value:
for i in range(len(self.params)):
try:
self.params[i] = self.target.value[i](self.params[i])
except IndexError:
raise ValueError("invalid dm target parameter")
def __str__(self):
ret = f"{self.offset} {self.length} {self.target}"
for param in self.params:
ret += f" {param}"
return ret
def __init__(self):
self.table = []
@classmethod
def uniform_error_table(
cls, start_lba: int, stop_lba: int, num_error_zones: int, error_zone_size: Size
):
table = cls()
increment = (stop_lba - start_lba) // num_error_zones
for zone_start in range(start_lba, stop_lba, increment):
table.add_entry(
DmTable.TableEntry(
zone_start,
error_zone_size.get_value(Unit.Blocks512),
DmTarget.ERROR,
)
)
return table
@classmethod
def passthrough_table(cls, device: Device):
table = cls()
table.add_entry(
DmTable.TableEntry(
0,
device.size.get_value(Unit.Blocks512),
DmTarget.LINEAR,
device.path,
0,
)
)
return table
@classmethod
def error_table(cls, offset: int, size: Size):
table = cls()
table.add_entry(
DmTable.TableEntry(offset, size.get_value(Unit.Blocks512), DmTarget.ERROR)
)
return table
def fill_gaps(self, device: Device, fill_end=True):
gaps = self.get_gaps()
for gap in gaps[:-1]:
self.add_entry(
DmTable.TableEntry(
gap[0], gap[1], DmTarget.LINEAR, device.path, int(gap[0])
)
)
table_end = gaps[-1][0]
if fill_end and (Size(table_end, Unit.Blocks512) < device.size):
self.add_entry(
DmTable.TableEntry(
table_end,
device.size.get_value(Unit.Blocks512) - table_end,
DmTarget.LINEAR,
device.path,
table_end,
)
)
return self
def add_entry(self, entry: DmTable.TableEntry):
self.table.append(entry)
return self
def get_gaps(self):
if not self.table:
return [(0, -1)]
gaps = []
self.table.sort(key=lambda entry: entry.offset)
if self.table[0].offset != 0:
gaps.append((0, self.table[0].offset))
for e1, e2 in zip(self.table, self.table[1:]):
if e1.offset + e1.length != e2.offset:
gaps.append(
(e1.offset + e1.length, e2.offset - (e1.offset + e1.length))
)
if len(self.table) > 1:
gaps.append((e2.offset + e2.length, -1))
else:
gaps.append((self.table[0].offset + self.table[0].length, -1))
return gaps
def validate(self):
self.table.sort(key=lambda entry: entry.offset)
if self.table[0].offset != 0:
raise ValueError(f"dm table should start at LBA 0: {self.table[0]}")
for e1, e2 in zip(self.table, self.table[1:]):
if e1.offset + e1.length != e2.offset:
raise ValueError(
f"dm table should not have any holes or overlaps: {e1} -> {e2}"
)
def get_size(self):
self.table.sort(key=lambda entry: entry.offset)
return Size(self.table[-1].offset + self.table[-1].length, Unit.Blocks512)
def __str__(self):
output = ""
for entry in self.table:
output += f"{entry}\n"
return output
class DeviceMapper(LinuxCommand):
@classmethod
def remove_all(cls, force=True):
TestRun.LOGGER.info("Removing all device mapper devices")
cmd = "dmsetup remove_all"
if force:
cmd += " --force"
return TestRun.executor.run_expect_success(cmd)
def __init__(self, name: str):
LinuxCommand.__init__(self, TestRun.executor, "dmsetup")
self.name = name
@staticmethod
def wrap_table(table: DmTable):
return f"<< ENDHERE\n{str(table)}ENDHERE\n"
def get_path(self):
return f"/dev/mapper/{self.name}"
def clear(self):
return TestRun.executor.run_expect_success(f"{self.command_name} clear {self.name}")
def create(self, table: DmTable):
try:
table.validate()
except ValueError:
for entry in table.table:
TestRun.LOGGER.error(f"{entry}")
raise
TestRun.LOGGER.info(f"Creating device mapper device '{self.name}'")
for entry in table.table:
TestRun.LOGGER.debug(f"{entry}")
return TestRun.executor.run_expect_success(
f"{self.command_name} create {self.name} {self.wrap_table(table)}"
)
def remove(self):
TestRun.LOGGER.info(f"Removing device mapper device '{self.name}'")
return TestRun.executor.run_expect_success(f"{self.command_name} remove {self.name}")
def suspend(self):
TestRun.LOGGER.info(f"Suspending device mapper device '{self.name}'")
return TestRun.executor.run_expect_success(f"{self.command_name} suspend {self.name}")
def resume(self):
TestRun.LOGGER.info(f"Resuming device mapper device '{self.name}'")
return TestRun.executor.run_expect_success(f"{self.command_name} resume {self.name}")
def reload(self, table: DmTable):
table.validate()
TestRun.LOGGER.info(f"Reloading table for device mapper device '{self.name}'")
for entry in table.table:
TestRun.LOGGER.debug(f"{entry}")
return TestRun.executor.run_expect_success(
f"{self.command_name} reload {self.name} {self.wrap_table(table)}"
)
class ErrorDevice(Device):
def __init__(self, name: str, base_device: Device, table: DmTable = None):
self.device = base_device
self.mapper = DeviceMapper(name)
self.name = name
self.table = DmTable.passthrough_table(base_device) if not table else table
self.active = False
self.start()
self.path = resolve_to_by_id_link(self.mapper.get_path().replace('/dev/', ''))
@property
def system_path(self):
if self.active:
output = TestRun.executor.run_expect_success(f"realpath {self.mapper.get_path()}")
return output.stdout
return None
@property
def size(self):
if self.active:
return self.table.get_size()
return None
def start(self):
self.mapper.create(self.table)
self.active = True
def stop(self):
self.mapper.remove()
self.active = False
def change_table(self, table: DmTable, permanent=True):
if self.active:
self.mapper.suspend()
self.mapper.reload(table)
self.mapper.resume()
if permanent:
self.table = table
def suspend_errors(self):
empty_table = DmTable.passthrough_table(self.device)
TestRun.LOGGER.info(f"Suspending issuing errors for error device '{self.name}'")
self.change_table(empty_table, False)
def resume_errors(self):
TestRun.LOGGER.info(f"Resuming issuing errors for error device '{self.name}'")
self.change_table(self.table, False)
def suspend(self):
if not self.active:
TestRun.LOGGER.warning(
f"cannot suspend error device '{self.name}'! It's already running"
)
self.mapper.suspend()
self.active = False
def resume(self):
if self.active:
TestRun.LOGGER.warning(
f"cannot resume error device '{self.name}'! It's already running"
)
self.mapper.resume()
self.active = True
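

def _example_error_device(disk: Device):
    """Usage sketch (illustrative): expose 'disk' through an error device
    with four evenly spaced error zones, toggle the errors, then tear it
    down. Assumes a configured TestRun executor."""
    table = DmTable.uniform_error_table(
        start_lba=0,
        stop_lba=int(disk.size.get_value(Unit.Blocks512)),
        num_error_zones=4,
        error_zone_size=Size(1, Unit.MebiByte),
    ).fill_gaps(disk)
    error_dev = ErrorDevice("error", disk, table)
    error_dev.suspend_errors()  # pass I/O through temporarily
    error_dev.resume_errors()   # re-enable the error zones
    error_dev.stop()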


@@ -0,0 +1,397 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import posixpath
import re
import time
from enum import Enum
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.fs_utils import readlink, parse_ls_output, ls
from test_utils.output import CmdException
from test_utils.size import Size, Unit
SECTOR_SIZE = 512
class Filesystem(Enum):
xfs = 0
ext3 = 1
ext4 = 2
class PartitionTable(Enum):
msdos = 0
gpt = 1
class PartitionType(Enum):
efi = 0
primary = 1
extended = 2
logical = 3
lvm = 4
msr = 5
swap = 6
standard = 7
unknown = 8
def create_filesystem(device, filesystem: Filesystem, force=True, blocksize=None):
TestRun.LOGGER.info(
f"Creating filesystem ({filesystem.name}) on device: {device.path}")
force_param = ' -f ' if filesystem == Filesystem.xfs else ' -F '
force_param = force_param if force else ''
block_size_param = f' -b size={blocksize}' if filesystem == Filesystem.xfs \
else f' -b {blocksize}'
block_size_param = block_size_param if blocksize else ''
cmd = f'mkfs.{filesystem.name} {force_param} {device.path} {block_size_param}'
cmd = re.sub(' +', ' ', cmd)
TestRun.executor.run_expect_success(cmd)
TestRun.LOGGER.info(
f"Successfully created filesystem on device: {device.path}")
def create_partition_table(device, partition_table_type: PartitionTable = PartitionTable.gpt):
TestRun.LOGGER.info(
f"Creating partition table ({partition_table_type.name}) for device: {device.path}")
cmd = f'parted --script {device.path} mklabel {partition_table_type.name}'
TestRun.executor.run_expect_success(cmd)
device.partition_table = partition_table_type
TestRun.LOGGER.info(
f"Successfully created {partition_table_type.name} "
f"partition table on device: {device.path}")
def get_partition_path(parent_dev, number):
    # TODO: make this less hardware/kernel specific
if "dev/cas" not in parent_dev:
id_separator = '-part'
else:
id_separator = 'p' # "cas1-1p1"
return f'{parent_dev}{id_separator}{number}'
def remove_partition(device, part_number):
TestRun.LOGGER.info(f"Removing part {part_number} from {device.path}")
cmd = f'parted --script {device.path} rm {part_number}'
output = TestRun.executor.run(cmd)
if output.exit_code != 0:
TestRun.executor.run_expect_success("partprobe")
def create_partition(
device,
part_size,
part_number,
part_type: PartitionType = PartitionType.primary,
unit=Unit.MebiByte,
aligned: bool = True):
TestRun.LOGGER.info(
f"Creating {part_type.name} partition on device: {device.path}")
begin = get_first_partition_offset(device, aligned)
for part in device.partitions:
begin += part.size
if part.type == PartitionType.logical:
begin += Size(1, Unit.MebiByte if not aligned else device.block_size)
if part_type == PartitionType.logical:
begin += Size(1, Unit.MebiByte if not aligned else device.block_size)
if part_size != Size.zero():
end = (begin + part_size)
end_cmd = f'{end.get_value(unit)}{unit_to_string(unit)}'
    else:
        end = None
        end_cmd = '100%'
cmd = f'parted --script {device.path} mkpart ' \
f'{part_type.name} ' \
f'{begin.get_value(unit)}{unit_to_string(unit)} ' \
f'{end_cmd}'
output = TestRun.executor.run(cmd)
if output.exit_code != 0:
TestRun.executor.run_expect_success("partprobe")
TestRun.executor.run_expect_success("udevadm settle")
if not check_partition_after_create(
part_size,
part_number,
device.path,
part_type,
aligned):
raise Exception("Could not create partition!")
if part_type != PartitionType.extended:
from storage_devices.partition import Partition
new_part = Partition(device,
part_type,
part_number,
begin,
                             end if isinstance(end, Size) else device.size)
dd = Dd().input("/dev/zero") \
.output(new_part.path) \
.count(1) \
.block_size(Size(1, Unit.Blocks4096)) \
.oflag("direct")
dd.run()
device.partitions.append(new_part)
TestRun.LOGGER.info(f"Successfully created {part_type.name} partition on {device.path}")
def available_disk_size(device):
dev = f"/dev/{device.get_device_id()}"
# get number of device's sectors
disk_sectors = int(TestRun.executor.run(f"fdisk -l {dev} | grep {dev} | grep sectors "
f"| awk '{{print $7 }}' ").stdout)
# get last occupied sector
last_occupied_sector = int(TestRun.executor.run(f"fdisk -l {dev} | grep {dev} "
f"| awk '{{print $3 }}' | tail -1").stdout)
available_disk_sectors = disk_sectors - last_occupied_sector
return Size(available_disk_sectors, Unit(get_block_size(device)))
def create_partitions(device, sizes: [], partition_table_type=PartitionTable.gpt):
create_partition_table(device, partition_table_type)
partition_type = PartitionType.primary
partition_number_offset = 0
msdos_part_max_size = Size(2, Unit.TeraByte)
for s in sizes:
size = Size(
s.get_value(device.block_size) - device.block_size.value, device.block_size)
if partition_table_type == PartitionTable.msdos and \
len(sizes) > 4 and len(device.partitions) == 3:
if available_disk_size(device) > msdos_part_max_size:
part_size = msdos_part_max_size
else:
part_size = Size.zero()
create_partition(device,
part_size,
4,
PartitionType.extended)
partition_type = PartitionType.logical
partition_number_offset = 1
partition_number = len(device.partitions) + 1 + partition_number_offset
create_partition(device,
size,
partition_number,
partition_type,
Unit.MebiByte,
True)
def get_block_size(device):
try:
block_size = float(TestRun.executor.run(
f"cat {get_sysfs_path(device)}/queue/hw_sector_size").stdout)
except ValueError:
block_size = Unit.Blocks512.value
return block_size
def get_size(device):
output = TestRun.executor.run_expect_success(f"cat {get_sysfs_path(device)}/size")
blocks_count = int(output.stdout)
return blocks_count * SECTOR_SIZE
def get_sysfs_path(device):
sysfs_path = f"/sys/class/block/{device}"
if TestRun.executor.run(f"test -d {sysfs_path}").exit_code != 0:
sysfs_path = f"/sys/block/{device}"
return sysfs_path
def get_pci_address(device):
pci_address = TestRun.executor.run(f"cat /sys/block/{device}/device/address").stdout
return pci_address
def check_partition_after_create(size, part_number, parent_dev_path, part_type, aligned):
partition_path = get_partition_path(parent_dev_path, part_number)
if "dev/cas" not in partition_path:
cmd = f"find {partition_path} -type l"
else:
cmd = f"find {partition_path}"
output = TestRun.executor.run_expect_success(cmd).stdout
if partition_path not in output:
TestRun.LOGGER.info(
"Partition created, but could not find it in system, trying 'hdparm -z'")
TestRun.executor.run_expect_success(f"hdparm -z {parent_dev_path}")
output_after_hdparm = TestRun.executor.run_expect_success(
f"parted --script {parent_dev_path} print").stdout
TestRun.LOGGER.info(output_after_hdparm)
counter = 0
while partition_path not in output and counter < 10:
time.sleep(2)
output = TestRun.executor.run(cmd).stdout
counter += 1
if len(output.split('\n')) > 1 or partition_path not in output:
return False
if aligned and part_type != PartitionType.extended \
and size.get_value(Unit.Byte) % Unit.Blocks4096.value != 0:
TestRun.LOGGER.warning(
f"Partition {partition_path} is not 4k aligned: {size.get_value(Unit.KibiByte)}KiB")
partition_size = get_size(readlink(partition_path).split('/')[-1])
if part_type == PartitionType.extended or \
partition_size == size.get_value(Unit.Byte):
return True
TestRun.LOGGER.warning(
f"Partition size {partition_size} does not match expected {size.get_value(Unit.Byte)} size."
)
return True
def get_first_partition_offset(device, aligned: bool):
if aligned:
return Size(1, Unit.MebiByte)
# 33 sectors are reserved for the backup GPT
    return Size(34, Unit(device.block_size)) \
        if device.partition_table == PartitionTable.gpt else Size(1, device.block_size)
def remove_partitions(device):
from test_utils.os_utils import Udev
if device.is_mounted():
device.unmount()
for partition in device.partitions:
unmount(partition)
TestRun.LOGGER.info(f"Removing partitions from device: {device.path} "
f"({device.get_device_id()}).")
device.wipe_filesystem()
Udev.trigger()
Udev.settle()
output = TestRun.executor.run(f"ls {device.path}* -1")
if len(output.stdout.split('\n')) > 1:
TestRun.LOGGER.error(f"Could not remove partitions from device {device.path}")
return False
return True
def mount(device, mount_point, options: [str] = None):
if not fs_utils.check_if_directory_exists(mount_point):
fs_utils.create_directory(mount_point, True)
TestRun.LOGGER.info(f"Mounting device {device.path} ({device.get_device_id()}) "
f"to {mount_point}.")
cmd = f"mount {device.path} {mount_point}"
if options:
cmd = f"{cmd} -o {','.join(options)}"
output = TestRun.executor.run(cmd)
if output.exit_code != 0:
raise Exception(f"Failed to mount {device.path} to {mount_point}")
device.mount_point = mount_point
def unmount(device):
TestRun.LOGGER.info(f"Unmounting device {device.path} ({device.get_device_id()}).")
if device.mount_point is not None:
output = TestRun.executor.run(f"umount {device.mount_point}")
if output.exit_code != 0:
TestRun.LOGGER.error("Could not unmount device.")
return False
return True
else:
TestRun.LOGGER.info("Device is not mounted.")
return True
def unit_to_string(unit):
unit_string = {
Unit.Byte: 'B',
Unit.Blocks512: 's',
Unit.Blocks4096: 's',
Unit.KibiByte: 'KiB',
Unit.MebiByte: 'MiB',
Unit.GibiByte: 'GiB',
Unit.TebiByte: 'TiB',
Unit.KiloByte: 'kB',
Unit.MegaByte: 'MB',
Unit.GigaByte: 'GB',
Unit.TeraByte: 'TB'
}
return unit_string.get(unit, "Invalid unit.")
def wipe_filesystem(device, force=True):
TestRun.LOGGER.info(f"Erasing the device: {device.path}")
force_param = ' -f' if force else ''
cmd = f'wipefs -a{force_param} {device.path}'
TestRun.executor.run_expect_success(cmd)
TestRun.LOGGER.info(
f"Successfully wiped device: {device.path}")
def check_if_device_supports_trim(device):
if device.get_device_id().startswith("nvme"):
return True
command_output = TestRun.executor.run(
f'hdparm -I {device.path} | grep "TRIM supported"')
return command_output.exit_code == 0
def get_device_filesystem_type(device_id):
cmd = f'lsblk -l -o NAME,FSTYPE | sort | uniq | grep "{device_id} "'
try:
stdout = TestRun.executor.run_expect_success(cmd).stdout
except CmdException:
        # unusual devices might not be listed in the output (e.g. RAID containers)
if TestRun.executor.run(f"test -b /dev/{device_id}").exit_code != 0:
raise
else:
return None
split_stdout = stdout.strip().split()
if len(split_stdout) <= 1:
return None
else:
try:
return Filesystem[split_stdout[1]]
except KeyError:
TestRun.LOGGER.warning(f"Unrecognized filesystem: {split_stdout[1]}")
return None
def _is_by_id_path(path: str):
"""check if given path already is proper by-id path"""
dev_by_id_dir = "/dev/disk/by-id"
by_id_paths = parse_ls_output(ls(dev_by_id_dir), dev_by_id_dir)
return path in [posixpath.join(dev_by_id_dir, id_path.full_path) for id_path in by_id_paths]
def _is_dev_path_whitelisted(path: str):
"""check if given path is whitelisted"""
whitelisted_paths = [r"cas\d+-\d+", r"/dev/dm-\d+"]
for whitelisted_path in whitelisted_paths:
if re.search(whitelisted_path, path) is not None:
return True
return False
def validate_dev_path(path: str):
if not posixpath.isabs(path):
raise ValueError(f'Given path "{path}" is not absolute.')
if _is_dev_path_whitelisted(path):
return path
if _is_by_id_path(path):
return path
raise ValueError(f'By-id device link {path} is broken.')
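

def _example_partition_and_mount(disk):
    """Usage sketch (illustrative): create a GPT layout with two
    partitions, put ext4 on the first one and mount it. Assumes a
    configured TestRun executor and a Device object; the mount point
    is an assumption of this example."""
    create_partitions(disk, [Size(1, Unit.GibiByte), Size(2, Unit.GibiByte)])
    part = disk.partitions[0]
    create_filesystem(part, Filesystem.ext4)
    mount(part, "/mnt/test", options=["noatime"])
    unmount(part)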


@@ -0,0 +1,67 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run import TestRun
class Drbdadm:
# create metadata for resource
@staticmethod
def create_metadata(resource_name: str, force: bool):
cmd = "drbdadm create-md" + (" --force" if force else "") + f" {resource_name}"
return TestRun.executor.run_expect_success(cmd)
# enable resource
@staticmethod
def up(resource_name: str):
cmd = f"drbdadm up {resource_name}"
return TestRun.executor.run_expect_success(cmd)
# disable resource
@staticmethod
def down_all():
cmd = f"drbdadm down all"
return TestRun.executor.run_expect_success(cmd)
@staticmethod
def down(resource_name):
cmd = f"drbdadm down {resource_name}"
return TestRun.executor.run_expect_success(cmd)
# promote resource to primary
@staticmethod
def set_node_primary(resource_name: str, force=False):
cmd = f"drbdadm primary {resource_name}"
cmd += " --force" if force else ""
return TestRun.executor.run_expect_success(cmd)
# demote resource to secondary
@staticmethod
def set_node_secondary(resource_name: str):
cmd = f"drbdadm secondary {resource_name}"
return TestRun.executor.run_expect_success(cmd)
# check status for all or for specified resource
@staticmethod
def get_status(resource_name: str = ""):
cmd = f"drbdadm status {resource_name}"
return TestRun.executor.run_expect_success(cmd)
@staticmethod
def in_sync(resource_name: str):
cmd = f"drbdadm status {resource_name} | grep Inconsistent"
return TestRun.executor.run(cmd).exit_code == 1
    # wait for resource synchronization to finish
@staticmethod
def wait_for_sync(resource_name: str = ""):
        # the SSH connection might time out during a long sync
cmd = f"drbdadm wait-sync {resource_name}"
return TestRun.executor.run_expect_success(cmd)
@staticmethod
def dump_config(resource_name: str):
cmd = f"drbdadm dump {resource_name}"
return TestRun.executor.run(cmd)
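

def _example_resource_lifecycle(resource_name: str):
    """Usage sketch (illustrative): bring a DRBD resource up as primary
    and wait for the initial sync. Assumes the resource is already
    defined in the DRBD configuration on this node."""
    Drbdadm.create_metadata(resource_name, force=True)
    Drbdadm.up(resource_name)
    Drbdadm.set_node_primary(resource_name, force=True)
    Drbdadm.wait_for_sync(resource_name)
    Drbdadm.down(resource_name)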


@@ -0,0 +1,105 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import uuid
import test_tools.fio.fio_param
import test_tools.fs_utils
from core.test_run import TestRun
from test_tools import fs_utils
from test_utils import os_utils
class Fio:
def __init__(self, executor_obj=None):
self.fio_version = "fio-3.30"
self.default_run_time = datetime.timedelta(hours=1)
self.jobs = []
self.executor = executor_obj if executor_obj is not None else TestRun.executor
self.base_cmd_parameters: test_tools.fio.fio_param.FioParam = None
self.global_cmd_parameters: test_tools.fio.fio_param.FioParam = None
def create_command(self, output_type=test_tools.fio.fio_param.FioOutput.json):
self.base_cmd_parameters = test_tools.fio.fio_param.FioParamCmd(self, self.executor)
self.global_cmd_parameters = test_tools.fio.fio_param.FioParamConfig(self, self.executor)
self.fio_file = f'fio_run_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_{uuid.uuid4().hex}'
self.base_cmd_parameters\
.set_param('eta', 'always')\
.set_param('output-format', output_type.value)\
.set_param('output', self.fio_file)
self.global_cmd_parameters.set_flags('group_reporting')
return self.global_cmd_parameters
def is_installed(self):
return self.executor.run("fio --version").stdout.strip() == self.fio_version
def install(self):
fio_url = f"http://brick.kernel.dk/snaps/{self.fio_version}.tar.bz2"
fio_package = os_utils.download_file(fio_url)
fs_utils.uncompress_archive(fio_package)
TestRun.executor.run_expect_success(f"cd {fio_package.parent_dir}/{self.fio_version}"
f" && ./configure && make -j && make install")
def calculate_timeout(self):
if "time_based" not in self.global_cmd_parameters.command_flags:
return self.default_run_time
total_time = self.global_cmd_parameters.get_parameter_value("runtime")
if len(total_time) != 1:
raise ValueError("Wrong fio 'runtime' parameter configuration")
total_time = int(total_time[0])
ramp_time = self.global_cmd_parameters.get_parameter_value("ramp_time")
if ramp_time is not None:
if len(ramp_time) != 1:
raise ValueError("Wrong fio 'ramp_time' parameter configuration")
ramp_time = int(ramp_time[0])
total_time += ramp_time
return datetime.timedelta(seconds=total_time)
def run(self, timeout: datetime.timedelta = None):
if timeout is None:
timeout = self.calculate_timeout()
self.prepare_run()
return self.executor.run(str(self), timeout)
def run_in_background(self):
self.prepare_run()
return self.executor.run_in_background(str(self))
def prepare_run(self):
if not self.is_installed():
self.install()
if len(self.jobs) > 0:
self.executor.run(f"{str(self)}-showcmd -")
TestRun.LOGGER.info(self.executor.run(f"cat {self.fio_file}").stdout)
TestRun.LOGGER.info(str(self))
def execution_cmd_parameters(self):
if len(self.jobs) > 0:
separator = "\n\n"
return f"{str(self.global_cmd_parameters)}\n" \
f"{separator.join(str(job) for job in self.jobs)}"
else:
return str(self.global_cmd_parameters)
def __str__(self):
if len(self.jobs) > 0:
command = f"echo '{self.execution_cmd_parameters()}' |" \
f" {str(self.base_cmd_parameters)} -"
else:
fio_parameters = test_tools.fio.fio_param.FioParamCmd(self, self.executor)
fio_parameters.command_env_var.update(self.base_cmd_parameters.command_env_var)
fio_parameters.command_param.update(self.base_cmd_parameters.command_param)
fio_parameters.command_param.update(self.global_cmd_parameters.command_param)
fio_parameters.command_flags.extend(self.global_cmd_parameters.command_flags)
fio_parameters.set_param('name', 'fio')
command = str(fio_parameters)
return command
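

def _example_direct_write(target_path):
    """Usage sketch (illustrative): build a simple direct-I/O fio command
    against a device or file and run it. Uses only parameters defined in
    fio_param; the target path and block size are assumptions of this
    example."""
    from test_utils.size import Size, Unit
    fio = Fio()
    (
        fio.create_command()
        .direct()
        .block_size(Size(4, Unit.KibiByte))
        .file_name(target_path)
    )
    return fio.run()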


@@ -0,0 +1,388 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import datetime
import json
import secrets
from enum import Enum
from types import SimpleNamespace as Namespace
from connection.base_executor import BaseExecutor
from core.test_run import TestRun
from storage_devices.device import Device
from test_tools.fio.fio_result import FioResult
from test_utils.linux_command import LinuxCommand
from test_utils.size import Size
class CpusAllowedPolicy(Enum):
shared = 0,
split = 1
class ErrorFilter(Enum):
none = 0,
read = 1,
write = 2,
io = 3,
verify = 4,
all = 5
class FioOutput(Enum):
normal = 'normal'
terse = 'terse'
json = 'json'
jsonplus = 'json+'
class IoEngine(Enum):
# Basic read or write I/O. fseek is used to position the I/O location.
sync = 0
# Linux native asynchronous I/O.
libaio = 1
# Basic pread or pwrite I/O.
psync = 2
# Basic readv or writev I/O.
# Will emulate queuing by coalescing adjacent IOs into a single submission.
vsync = 3
# Basic preadv or pwritev I/O.
pvsync = 4
# POSIX asynchronous I/O using aio_read and aio_write.
posixaio = 5
# File is memory mapped with mmap and data copied using memcpy.
mmap = 6
# RADOS Block Device
rbd = 7
# SPDK Block Device
spdk_bdev = 8
class ReadWrite(Enum):
randread = 0
randrw = 1
randwrite = 2
read = 3
readwrite = 4
write = 5
trim = 6
randtrim = 7
trimwrite = 8
class VerifyMethod(Enum):
# Use an md5 sum of the data area and store it in the header of each block.
md5 = 0
# Use an experimental crc64 sum of the data area and store it in the header of each block.
crc64 = 1
# Use optimized sha1 as the checksum function.
sha1 = 2
# Verify a strict pattern.
# Normally fio includes a header with some basic information and a checksum, but if this
# option is set, only the specific pattern set with verify_pattern is verified.
pattern = 3
# Write extra information about each I/O (timestamp, block number, etc.).
# The block number is verified.
meta = 4
class RandomGenerator(Enum):
tausworthe = 0
lfsr = 1
tausworthe64 = 2
class FioParam(LinuxCommand):
def __init__(self, fio, command_executor: BaseExecutor, command_name):
LinuxCommand.__init__(self, command_executor, command_name)
self.verification_pattern = ''
self.fio = fio
def get_verification_pattern(self):
if not self.verification_pattern:
self.verification_pattern = f'0x{secrets.token_hex(32)}'
return self.verification_pattern
def allow_mounted_write(self, value: bool = True):
return self.set_param('allow_mounted_write', int(value))
# example: "bs=8k,32k" => 8k for reads, 32k for writes and trims
def block_size(self, *sizes: Size):
return self.set_param('blocksize', *[int(size) for size in sizes])
def blocksize_range(self, ranges):
value = []
for bs_range in ranges:
str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
value.append(str_range)
return self.set_param('blocksize_range', ",".join(value))
def bs_split(self, value):
return self.set_param('bssplit', value)
def buffer_pattern(self, pattern):
return self.set_param('buffer_pattern', pattern)
def continue_on_error(self, value: ErrorFilter):
return self.set_param('continue_on_error', value.name)
def cpus_allowed(self, value):
return self.set_param('cpus_allowed', ",".join(value))
def cpus_allowed_policy(self, value: CpusAllowedPolicy):
return self.set_param('cpus_allowed_policy', value.name)
def direct(self, value: bool = True):
if 'buffered' in self.command_param:
self.remove_param('buffered')
return self.set_param('direct', int(value))
def directory(self, directory):
return self.set_param('directory', directory)
def do_verify(self, value: bool = True):
return self.set_param('do_verify', int(value))
def exit_all_on_error(self, value: bool = True):
return self.set_flags('exitall_on_error') if value \
else self.remove_flag('exitall_on_error')
def group_reporting(self, value: bool = True):
return self.set_flags('group_reporting') if value else self.remove_flag('group_reporting')
def file_name(self, path):
return self.set_param('filename', path)
def file_size(self, size: Size):
return self.set_param('filesize', int(size))
def file_size_range(self, ranges):
value = []
for bs_range in ranges:
str_range = str(int(bs_range[0])) + '-' + str(int(bs_range[1]))
value.append(str_range)
return self.set_param('filesize', ",".join(value))
def fsync(self, value: int):
return self.set_param('fsync', value)
def ignore_errors(self, read_errors, write_errors, verify_errors):
separator = ':'
return self.set_param(
'ignore_error',
separator.join(str(err) for err in read_errors),
separator.join(str(err) for err in write_errors),
separator.join(str(err) for err in verify_errors))
def io_depth(self, value: int):
if value != 1:
if 'ioengine' in self.command_param and \
self.command_param['ioengine'] == 'sync':
TestRun.LOGGER.warning("Setting iodepth will have no effect with "
"'ioengine=sync' setting")
return self.set_param('iodepth', value)
def io_engine(self, value: IoEngine):
if value == IoEngine.sync:
if 'iodepth' in self.command_param and self.command_param['iodepth'] != 1:
TestRun.LOGGER.warning("Setting 'ioengine=sync' will cause iodepth setting "
"to be ignored")
return self.set_param('ioengine', value.name)
def io_size(self, value: Size):
return self.set_param('io_size', int(value.get_value()))
def loops(self, value: int):
return self.set_param('loops', value)
def no_random_map(self, value: bool = True):
if 'verify' in self.command_param:
raise ValueError("'NoRandomMap' parameter is mutually exclusive with verify")
if value:
return self.set_flags('norandommap')
else:
return self.remove_flag('norandommap')
def nr_files(self, value: int):
return self.set_param('nrfiles', value)
def num_ios(self, value: int):
return self.set_param('number_ios', value)
def num_jobs(self, value: int):
return self.set_param('numjobs', value)
def offset(self, value: Size):
return self.set_param('offset', int(value.get_value()))
def offset_increment(self, value: Size):
return self.set_param('offset_increment', f"{value.value}{value.unit.get_short_name()}")
def percentage_random(self, value: int):
if value <= 100:
return self.set_param('percentage_random', value)
raise ValueError("Argument out of range. Should be 0-100.")
def pool(self, value):
return self.set_param('pool', value)
def ramp_time(self, value: datetime.timedelta):
return self.set_param('ramp_time', int(value.total_seconds()))
def random_distribution(self, value):
return self.set_param('random_distribution', value)
def rand_repeat(self, value: int):
return self.set_param('randrepeat', value)
def rand_seed(self, value: int):
return self.set_param('randseed', value)
def read_write(self, rw: ReadWrite):
return self.set_param('readwrite', rw.name)
def run_time(self, value: datetime.timedelta):
if value.total_seconds() == 0:
raise ValueError("Runtime parameter must not be set to 0.")
return self.set_param('runtime', int(value.total_seconds()))
def serialize_overlap(self, value: bool = True):
return self.set_param('serialize_overlap', int(value))
def size(self, value: Size):
return self.set_param('size', int(value.get_value()))
def stonewall(self, value: bool = True):
return self.set_flags('stonewall') if value else self.remove_param('stonewall')
def sync(self, value: bool = True):
return self.set_param('sync', int(value))
def time_based(self, value: bool = True):
return self.set_flags('time_based') if value else self.remove_flag('time_based')
def thread(self, value: bool = True):
return self.set_flags('thread') if value else self.remove_param('thread')
def lat_percentiles(self, value: bool):
return self.set_param('lat_percentiles', int(value))
def scramble_buffers(self, value: bool):
return self.set_param('scramble_buffers', int(value))
def slat_percentiles(self, value: bool):
return self.set_param('slat_percentiles', int(value))
def spdk_core_mask(self, value: str):
return self.set_param('spdk_core_mask', value)
def spdk_json_conf(self, path):
return self.set_param('spdk_json_conf', path)
def clat_percentiles(self, value: bool):
return self.set_param('clat_percentiles', int(value))
def percentile_list(self, value: list):
val = ':'.join(value) if len(value) > 0 else '100'
return self.set_param('percentile_list', val)
def verification_with_pattern(self, pattern=None):
if pattern is not None and pattern != '':
self.verification_pattern = pattern
return self.verify(VerifyMethod.pattern) \
.set_param('verify_pattern', self.get_verification_pattern()) \
.do_verify()
def verify(self, value: VerifyMethod):
return self.set_param('verify', value.name)
def create_only(self, value: bool = False):
return self.set_param('create_only', int(value))
def verify_pattern(self, pattern=None):
return self.set_param('verify_pattern', pattern or self.get_verification_pattern())
def verify_backlog(self, value: int):
return self.set_param('verify_backlog', value)
def verify_dump(self, value: bool = True):
return self.set_param('verify_dump', int(value))
def verify_fatal(self, value: bool = True):
return self.set_param('verify_fatal', int(value))
def verify_only(self, value: bool = True):
return self.set_flags('verify_only') if value else self.remove_param('verify_only')
def write_hint(self, value: str):
return self.set_param('write_hint', value)
def write_percentage(self, value: int):
if value <= 100:
return self.set_param('rwmixwrite', value)
raise ValueError("Argument out of range. Should be 0-100.")
def random_generator(self, value: RandomGenerator):
return self.set_param('random_generator', value.name)
def target(self, target):
if isinstance(target, Device):
return self.file_name(target.path)
return self.file_name(target)
def add_job(self, job_name=None):
if not job_name:
job_name = f'job{len(self.fio.jobs)}'
new_job = FioParamConfig(self.fio, self.command_executor, f'[{job_name}]')
self.fio.jobs.append(new_job)
return new_job
def clear_jobs(self):
self.fio.jobs = []
return self
def edit_global(self):
return self.fio.global_cmd_parameters
def run(self, fio_timeout: datetime.timedelta = None):
if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
fio_output = self.fio.run(fio_timeout)
if fio_output.exit_code != 0:
raise Exception(f"Exception occurred while trying to execute fio, exit_code:"
f"{fio_output.exit_code}.\n"
f"stdout: {fio_output.stdout}\nstderr: {fio_output.stderr}")
TestRun.executor.run(f"sed -i '/^[[:alnum:]]/d' {self.fio.fio_file}") # Remove warnings
out = self.command_executor.run_expect_success(f"cat {self.fio.fio_file}").stdout
return self.get_results(out)
def run_in_background(self):
if "per_job_logs" in self.fio.global_cmd_parameters.command_param:
self.fio.global_cmd_parameters.set_param("per_job_logs", '0')
return self.fio.run_in_background()
@staticmethod
def get_results(result):
data = json.loads(result, object_hook=lambda d: Namespace(**d))
jobs_list = []
if hasattr(data, 'jobs'):
jobs = data.jobs
for job in jobs:
job_result = FioResult(data, job)
jobs_list.append(job_result)
return jobs_list
class FioParamCmd(FioParam):
def __init__(self, fio, command_executor: BaseExecutor, command_name='fio'):
FioParam.__init__(self, fio, command_executor, command_name)
self.param_name_prefix = "--"
class FioParamConfig(FioParam):
def __init__(self, fio, command_executor: BaseExecutor, command_name='[global]'):
FioParam.__init__(self, fio, command_executor, command_name)
self.param_name_prefix = "\n"
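# Example of a multi-job configuration (sketch only; the device path and job
# names are placeholders, and Fio().create_command() is assumed to return the
# [global] FioParamConfig as in fio.py):
#
#   fio = Fio().create_command()
#   fio.io_engine(IoEngine.libaio).direct().target("/dev/sdb")
#   fio.add_job("precondition").read_write(ReadWrite.write)
#   fio.add_job("random_read").read_write(ReadWrite.randread)
#   results = fio.run()    # list of FioResult objects, one entry per job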

View File

@@ -0,0 +1,19 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import secrets
from aenum import Enum
class Pattern(Enum):
cyclic = "0x00336699ccffcc996633"
sequential = "0x" + "".join([f"{i:02x}" for i in range(0, 256)])
high = "0xaa"
low = "0x84210"
zeroes = "0x00"
ones = "0xff"
bin_1 = high
bin_2 = "0x55"
random = "0x" + secrets.token_hex()

View File

@@ -0,0 +1,164 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from test_utils.size import Size, Unit, UnitPerSecond
from test_utils.time import Time
class FioResult:
def __init__(self, result, job):
self.result = result
self.job = job
def __str__(self):
result_dict = {
"Total read I/O": self.read_io(),
"Total read bandwidth ": self.read_bandwidth(),
"Read bandwidth average ": self.read_bandwidth_average(),
"Read bandwidth deviation ": self.read_bandwidth_deviation(),
"Read IOPS": self.read_iops(),
"Read runtime": self.read_runtime(),
"Read average completion latency": self.read_completion_latency_average(),
"Total write I/O": self.write_io(),
"Total write bandwidth ": self.write_bandwidth(),
"Write bandwidth average ": self.write_bandwidth_average(),
"Write bandwidth deviation ": self.write_bandwidth_deviation(),
"Write IOPS": self.write_iops(),
"Write runtime": self.write_runtime(),
"Write average completion latency": self.write_completion_latency_average(),
}
disks_name = self.disks_name()
if disks_name:
result_dict.update({"Disk name": ",".join(disks_name)})
result_dict.update({"Total number of errors": self.total_errors()})
s = ""
for key in result_dict.keys():
s += f"{key}: {result_dict[key]}\n"
return s
def total_errors(self):
return getattr(self.job, "total_err", 0)
def disks_name(self):
disks_name = []
if hasattr(self.result, "disk_util"):
for disk in self.result.disk_util:
disks_name.append(disk.name)
return disks_name
def read_io(self):
return Size(self.job.read.io_kbytes, Unit.KibiByte)
def read_bandwidth(self):
return Size(self.job.read.bw, UnitPerSecond(Unit.KibiByte))
def read_bandwidth_average(self):
return Size(self.job.read.bw_mean, UnitPerSecond(Unit.KibiByte))
def read_bandwidth_deviation(self):
return Size(self.job.read.bw_dev, UnitPerSecond(Unit.KibiByte))
def read_iops(self):
return self.job.read.iops
def read_runtime(self):
return Time(microseconds=self.job.read.runtime)
def read_completion_latency_min(self):
return Time(nanoseconds=self.job.read.lat_ns.min)
def read_completion_latency_max(self):
return Time(nanoseconds=self.job.read.lat_ns.max)
def read_completion_latency_average(self):
return Time(nanoseconds=self.job.read.lat_ns.mean)
def read_completion_latency_percentile(self):
return self.job.read.lat_ns.percentile.__dict__
def read_requests_number(self):
return self.result.disk_util[0].read_ios
def write_io(self):
return Size(self.job.write.io_kbytes, Unit.KibiByte)
def write_bandwidth(self):
return Size(self.job.write.bw, UnitPerSecond(Unit.KibiByte))
def write_bandwidth_average(self):
return Size(self.job.write.bw_mean, UnitPerSecond(Unit.KibiByte))
def write_bandwidth_deviation(self):
return Size(self.job.write.bw_dev, UnitPerSecond(Unit.KibiByte))
def write_iops(self):
return self.job.write.iops
def write_runtime(self):
return Time(microseconds=self.job.write.runtime)
def write_completion_latency_average(self):
return Time(nanoseconds=self.job.write.lat_ns.mean)
def write_completion_latency_min(self):
return Time(nanoseconds=self.job.write.lat_ns.min)
def write_completion_latency_max(self):
return Time(nanoseconds=self.job.write.lat_ns.max)
def write_completion_latency_percentile(self):
return self.job.write.lat_ns.percentile.__dict__
def write_requests_number(self):
return self.result.disk_util[0].write_ios
def trim_io(self):
return Size(self.job.trim.io_kbytes, Unit.KibiByte)
def trim_bandwidth(self):
return Size(self.job.trim.bw, UnitPerSecond(Unit.KibiByte))
def trim_bandwidth_average(self):
return Size(self.job.trim.bw_mean, UnitPerSecond(Unit.KibiByte))
def trim_bandwidth_deviation(self):
return Size(self.job.trim.bw_dev, UnitPerSecond(Unit.KibiByte))
def trim_iops(self):
return self.job.trim.iops
def trim_runtime(self):
return Time(microseconds=self.job.trim.runtime)
def trim_completion_latency_average(self):
return Time(nanoseconds=self.job.trim.lat_ns.mean)
def trim_completion_latency_min(self):
return Time(nanoseconds=self.job.trim.lat_ns.min)
def trim_completion_latency_max(self):
return Time(nanoseconds=self.job.trim.lat_ns.max)
def trim_completion_latency_percentile(self):
return self.job.trim.lat_ns.percentile.__dict__
@staticmethod
def result_list_to_dict(results):
result_dict = {}
for result in results:
result_dict[result.job.jobname] = result.job
return result_dict
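# Example (sketch): summarizing results returned by FioParam.run():
#
#   for result in results:
#       TestRun.LOGGER.info(str(result))
#   jobs = FioResult.result_list_to_dict(results)
#   read_iops = jobs["job0"].read.iops    # "job0" is the default job name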

View File

@@ -0,0 +1,378 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import base64
import math
import textwrap
from aenum import IntFlag, Enum
from collections import namedtuple
from datetime import datetime
from core.test_run import TestRun
from test_tools.dd import Dd
from test_utils.size import Size, Unit
class Permissions(IntFlag):
r = 4
w = 2
x = 1
def __str__(self):
ret_string = ""
for p in Permissions:
if p in self:
ret_string += p.name
return ret_string
class PermissionsUsers(IntFlag):
u = 4
g = 2
o = 1
def __str__(self):
ret_string = ""
for p in PermissionsUsers:
if p in self:
ret_string += p.name
return ret_string
class PermissionSign(Enum):
add = '+'
remove = '-'
set = '='
class FilesPermissions:
def __init__(self, files_list: list):
self.files_list = files_list
self.perms_exceptions = {}
def add_exceptions(self, perms: dict):
self.perms_exceptions.update(perms)
def check(self, file_perm: int = 644, dir_perm: int = 755):
failed_perms = []
FailedPerm = namedtuple("FailedPerm", ["file", "current_perm", "expected_perm"])
for file in self.files_list:
perm = get_permissions(file)
if file in self.perms_exceptions:
if perm != self.perms_exceptions[file]:
failed_perms.append(FailedPerm(file, perm, self.perms_exceptions[file]))
continue
if check_if_regular_file_exists(file):
if perm != file_perm:
failed_perms.append(FailedPerm(file, perm, file_perm))
elif check_if_directory_exists(file):
if perm != dir_perm:
failed_perms.append(FailedPerm(file, perm, dir_perm))
else:
raise Exception(f"{file}: File type not recognized.")
return failed_perms
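# Example (sketch; the checked path and exception entry are placeholders):
#
#   perms = FilesPermissions(find_all_files("/etc/example"))
#   perms.add_exceptions({"/etc/example/private.conf": 600})
#   failed = perms.check()    # returns a list of FailedPerm namedtuples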
def create_directory(path, parents: bool = False):
cmd = f"mkdir {'--parents ' if parents else ''}\"{path}\""
return TestRun.executor.run_expect_success(cmd)
def check_if_directory_exists(path):
return TestRun.executor.run(f"test -d \"{path}\"").exit_code == 0
def check_if_file_exists(path):
return TestRun.executor.run(f"test -e \"{path}\"").exit_code == 0
def check_if_regular_file_exists(path):
return TestRun.executor.run(f"test -f \"{path}\"").exit_code == 0
def check_if_symlink_exists(path):
return TestRun.executor.run(f"test -L \"{path}\"").exit_code == 0
def copy(source: str,
destination: str,
force: bool = False,
recursive: bool = False,
dereference: bool = False):
cmd = f"cp{' --force' if force else ''}" \
f"{' --recursive' if recursive else ''}" \
f"{' --dereference' if dereference else ''} " \
f"\"{source}\" \"{destination}\""
return TestRun.executor.run_expect_success(cmd)
def move(source, destination, force: bool = False):
cmd = f"mv{' --force' if force else ''} \"{source}\" \"{destination}\""
return TestRun.executor.run_expect_success(cmd)
def remove(path, force: bool = False, recursive: bool = False, ignore_errors: bool = False):
cmd = f"rm{' --force' if force else ''}{' --recursive' if recursive else ''} \"{path}\""
output = TestRun.executor.run(cmd)
if output.exit_code != 0 and not ignore_errors:
raise Exception(f"Could not remove file {path}."
f"\nstdout: {output.stdout}\nstderr: {output.stderr}")
return output
def get_permissions(path, dereference: bool = True):
cmd = f"stat --format='%a' {'--dereference' if dereference else ''} \"{path}\""
return int(TestRun.executor.run_expect_success(cmd).stdout)
def chmod(path, permissions: Permissions, users: PermissionsUsers,
sign: PermissionSign = PermissionSign.set, recursive: bool = False):
cmd = f"chmod{' --recursive' if recursive else ''} " \
f"{str(users)}{sign.value}{str(permissions)} \"{path}\""
output = TestRun.executor.run(cmd)
return output
def chmod_numerical(path, permissions: int, recursive: bool = False):
cmd = f"chmod{' --recursive' if recursive else ''} {permissions} \"{path}\""
return TestRun.executor.run_expect_success(cmd)
def chown(path, owner, group, recursive):
cmd = f"chown {'--recursive ' if recursive else ''}{owner}:{group} \"{path}\""
return TestRun.executor.run_expect_success(cmd)
def create_file(path):
if not path.strip():
raise ValueError("Path cannot be empty or whitespaces.")
cmd = f"touch \"{path}\""
return TestRun.executor.run_expect_success(cmd)
def compare(file, other_file):
output = TestRun.executor.run(
f"cmp --silent \"{file}\" \"{other_file}\"")
if output.exit_code == 0:
return True
elif output.exit_code > 1:
raise Exception(f"Compare command execution failed. {output.stdout}\n{output.stderr}")
else:
return False
def diff(file, other_file):
output = TestRun.executor.run(
f"diff \"{file}\" \"{other_file}\"")
if output.exit_code == 0:
return None
elif output.exit_code > 1:
raise Exception(f"Diff command execution failed. {output.stdout}\n{output.stderr}")
else:
return output.stderr
# For some reason separators other than '/' don't work when using sed on system paths
# This requires escaping '/' in pattern and target string
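# e.g. escape_sed_string("/etc/fstab") returns "\/etc\/fstab"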
def escape_sed_string(string: str, sed_replace: bool = False):
string = string.replace("'", r"\x27").replace("/", r"\/")
# '&' has special meaning in sed replace and needs to be escaped
if sed_replace:
string = string.replace("&", r"\&")
return string
def insert_line_before_pattern(file, pattern, new_line):
pattern = escape_sed_string(pattern)
new_line = escape_sed_string(new_line)
cmd = f"sed -i '/{pattern}/i {new_line}' \"{file}\""
return TestRun.executor.run_expect_success(cmd)
def replace_first_pattern_occurrence(file, pattern, new_string):
pattern = escape_sed_string(pattern)
new_string = escape_sed_string(new_string, sed_replace=True)
cmd = f"sed -i '0,/{pattern}/s//{new_string}/' \"{file}\""
return TestRun.executor.run_expect_success(cmd)
def replace_in_lines(file, pattern, new_string, regexp=False):
pattern = escape_sed_string(pattern)
new_string = escape_sed_string(new_string, sed_replace=True)
cmd = f"sed -i{' -r' if regexp else ''} 's/{pattern}/{new_string}/g' \"{file}\""
return TestRun.executor.run_expect_success(cmd)
def append_line(file, string):
cmd = f"echo '{string}' >> \"{file}\""
return TestRun.executor.run_expect_success(cmd)
def remove_lines(file, pattern, regexp=False):
pattern = escape_sed_string(pattern)
cmd = f"sed -i{' -r' if regexp else ''} '/{pattern}/d' \"{file}\""
return TestRun.executor.run_expect_success(cmd)
def read_file(file):
if not file.strip():
raise ValueError("File path cannot be empty or whitespace.")
output = TestRun.executor.run_expect_success(f"cat \"{file}\"")
return output.stdout
def write_file(file, content, overwrite: bool = True, unix_line_end: bool = True):
if not file.strip():
raise ValueError("File path cannot be empty or whitespace.")
if not content:
raise ValueError("Content cannot be empty.")
if unix_line_end:
content = content.replace('\r', '')
content += '\n'
max_length = 60000
split_content = textwrap.TextWrapper(width=max_length, replace_whitespace=False).wrap(content)
split_content[-1] += '\n'
for s in split_content:
redirection_char = '>' if overwrite else '>>'
overwrite = False
encoded_content = base64.b64encode(s.encode("utf-8"))
cmd = f"printf '{encoded_content.decode('utf-8')}' " \
f"| base64 --decode {redirection_char} \"{file}\""
TestRun.executor.run_expect_success(cmd)
def uncompress_archive(file, destination=None):
from test_utils.filesystem.file import File
if not isinstance(file, File):
file = File(file)
if not destination:
destination = file.parent_dir
command = (f"unzip -u {file.full_path} -d {destination}"
if str(file).endswith(".zip")
else f"tar --extract --file={file.full_path} --directory={destination}")
TestRun.executor.run_expect_success(command)
def ls(path, options=''):
default_options = "-lA --time-style=+'%Y-%m-%d %H:%M:%S'"
output = TestRun.executor.run(
f"ls {default_options} {options} \"{path}\"")
return output.stdout
def ls_item(path):
output = ls(path, '-d')
return output.splitlines()[0] if output else None
def parse_ls_output(ls_output, dir_path=''):
split_output = ls_output.split('\n')
fs_items = []
for line in split_output:
if not line.strip():
continue
line_fields = line.split()
if len(line_fields) < 8:
continue
file_type = line[0]
if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
continue
permissions = line_fields[0][1:].replace('.', '')
owner = line_fields[2]
group = line_fields[3]
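# device nodes list "major, minor" instead of a size in this column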
has_size = ',' not in line_fields[4]
if has_size:
size = Size(float(line_fields[4]), Unit.Byte)
else:
size = None
line_fields.pop(4)
split_date = line_fields[5].split('-')
split_time = line_fields[6].split(':')
modification_time = datetime(int(split_date[0]), int(split_date[1]), int(split_date[2]),
int(split_time[0]), int(split_time[1]), int(split_time[2]))
if dir_path and file_type != 'l':
full_path = '/'.join([dir_path, line_fields[7]])
else:
full_path = line_fields[7]
from test_utils.filesystem.file import File, FsItem
from test_utils.filesystem.directory import Directory
from test_utils.filesystem.symlink import Symlink
if file_type == '-':
fs_item = File(full_path)
elif file_type == 'd':
fs_item = Directory(full_path)
elif file_type == 'l':
fs_item = Symlink(full_path)
else:
fs_item = FsItem(full_path)
fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))] \
if permissions[:3] != '---' else Permissions(0)
fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))] \
if permissions[3:6] != '---' else Permissions(0)
fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))] \
if permissions[6:] != '---' else Permissions(0)
fs_item.owner = owner
fs_item.group = group
fs_item.size = size
fs_item.modification_time = modification_time
fs_items.append(fs_item)
return fs_items
def find_all_files(path: str, recursive: bool = True):
if not path.strip():
raise ValueError("No path given.")
output = TestRun.executor.run_expect_success(f"find \"{path}\" {'-maxdepth 1' if not recursive else ''} \( -type f -o -type l \) -print")
return output.stdout.splitlines()
def find_all_dirs(path: str, recursive: bool = True):
if not path.strip():
raise ValueError("No path given.")
output = TestRun.executor.run_expect_success(f"find \"{path}\" {'-maxdepth 1' if not recursive else ''} -type d -print")
return output.stdout.splitlines()
def find_all_items(path: str, recursive: bool = True):
return [*find_all_files(path, recursive), *find_all_dirs(path, recursive)]
def readlink(link: str, options="--canonicalize-existing"):
return TestRun.executor.run_expect_success(
f"readlink {options} \"{link}\""
).stdout
def create_random_test_file(target_file_path: str,
file_size: Size = Size(1, Unit.MebiByte),
random: bool = True):
from test_utils.filesystem.file import File
bs = Size(512, Unit.KibiByte)
cnt = math.ceil(file_size.value / bs.value)
file = File.create_file(target_file_path)
dd = Dd().output(target_file_path) \
.input("/dev/urandom" if random else "/dev/zero") \
.block_size(bs) \
.count(cnt) \
.oflag("direct")
dd.run()
file.refresh_item()
return file

View File

@@ -0,0 +1,179 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.size import Size, Unit, UnitPerSecond
from test_utils.time import Time
import csv
class IOstatExtended:
iostat_option = "x"
def __init__(self, device_statistics: dict):
# Notes about params:
# the await param is displayed only with the -s flag
# avgrq-sz doesn't appear in newer versions of iostat -x
self.device_name = device_statistics["Device"]
# rrqm/s
self.read_requests_merged_per_sec = float(device_statistics["rrqm/s"])
# wrqm/s
self.write_requests_merged_per_sec = float(device_statistics["wrqm/s"])
# r/s
self.read_requests_per_sec = float(device_statistics["r/s"])
# w/s
self.write_requests_per_sec = float(device_statistics["w/s"])
# rkB/s
self.reads_per_sec = Size(float(device_statistics["rkB/s"]), UnitPerSecond(Unit.KiloByte))
# wkB/s
self.writes_per_sec = Size(float(device_statistics["wkB/s"]), UnitPerSecond(Unit.KiloByte))
# avgqu-sz - in newer versions is named aqu-sz
self.average_queue_length = float(
device_statistics["aqu-sz"]
if "aqu-sz" in device_statistics
else device_statistics.get("avgqu-sz", 0)
)
# r_await
self.read_average_service_time = Time(milliseconds=float(device_statistics["r_await"]))
# w_await
self.write_average_service_time = Time(milliseconds=float(device_statistics["w_await"]))
# iostat's documentation says to not trust 11th field
# util
self.utilization = float(device_statistics["%util"])
def __str__(self):
return (
f"\n=========={self.device_name} IO stats: ==========\n"
f"Read requests merged per second: {self.read_requests_merged_per_sec}\n"
f"Write requests merged per second: {self.write_requests_merged_per_sec}\n"
f"Read requests: {self.read_requests_per_sec}\n"
f"Write requests: {self.write_requests_per_sec}\n"
f"Reads per second: {self.reads_per_sec}\n"
f"Writes per second {self.writes_per_sec}\n"
f"Average queue length {self.average_queue_length}\n"
f"Read average service time {self.read_average_service_time}\n"
f"Write average service time: {self.write_average_service_time}\n"
f"Utilization: {self.utilization}\n"
f"=================================================\n"
)
def __repr__(self):
return str(self)
def __eq__(self, other):
if not other:
return False
return (
self.read_requests_merged_per_sec == other.read_requests_merged_per_sec
and self.write_requests_merged_per_sec == other.write_requests_merged_per_sec
and self.read_requests_per_sec == other.read_requests_per_sec
and self.write_requests_per_sec == other.write_requests_per_sec
and self.reads_per_sec == other.reads_per_sec
and self.writes_per_sec == other.writes_per_sec
and self.average_queue_length == other.average_queue_length
and self.read_average_service_time == other.read_average_service_time
and self.write_average_service_time == other.write_average_service_time
and self.utilization == other.utilization
)
@classmethod
def get_iostat_list(
cls,
devices_list: [Device],
since_boot: bool = True,
interval: int = 1,
):
"""
Returns list of IOstat objects containing extended statistics displayed
in kibibytes/kibibytes per second.
"""
return _get_iostat_list(cls, devices_list, since_boot, interval)
class IOstatBasic:
iostat_option = "d"
def __init__(self, device_statistics):
self.device_name = device_statistics["Device"]
# tps
self.transfers_per_second = float(device_statistics["tps"])
# kB_read/s
self.reads_per_second = Size(
float(device_statistics["kB_read/s"]), UnitPerSecond(Unit.KiloByte)
)
# kB_wrtn/s
self.writes_per_second = Size(
float(device_statistics["kB_wrtn/s"]), UnitPerSecond(Unit.KiloByte)
)
# kB_read
self.total_reads = Size(float(device_statistics["kB_read"]), Unit.KibiByte)
# kB_wrtn
self.total_writes = Size(float(device_statistics["kB_wrtn"]), Unit.KibiByte)
def __str__(self):
return (
f"\n=========={self.device_name} IO stats: ==========\n"
f"Transfers per second: {self.transfers_per_second}\n"
f"Kilobytes read per second: {self.reads_per_second}\n"
f"Kilobytes written per second: {self.writes_per_second}\n"
f"Kilobytes read: {self.total_reads}\n"
f"Kilobytes written: {self.total_writes}\n"
f"=================================================\n"
)
def __repr__(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, IOstatBasic):
return False
return vars(self) == vars(other)
@classmethod
def get_iostat_list(
cls,
devices_list: [Device],
since_boot: bool = True,
interval: int = 1,
):
"""
Returns list of IOstat objects containing basic statistics displayed
in kibibytes/kibibytes per second.
"""
return _get_iostat_list(cls, devices_list, since_boot, interval)
def _get_iostat_list(
class_type: type,
devices_list: [Device],
since_boot: bool,
interval: int,
):
if interval < 1:
raise ValueError("iostat interval must be positive!")
iostat_cmd = f"iostat -k -{class_type.iostat_option} "
if not since_boot:
iostat_cmd += f"-y {interval} 1 "
iostat_cmd += " ".join([name.get_device_id() for name in devices_list])
sed_cmd = "sed -n '/^$/d;s/\s\+/,/g;/^Device/,$p'"
cmd = f"{iostat_cmd} | {sed_cmd}"
lines = TestRun.executor.run(cmd).stdout.splitlines()
table_contents = csv.DictReader(lines, delimiter=",")
ret = []
for device in table_contents:
ret += [class_type(device)]
return ret
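# Example (sketch; assumes `device` is a Device instance):
#
#   stats = IOstatBasic.get_iostat_list([device])
#   TestRun.LOGGER.info(str(stats[0]))
#   ext = IOstatExtended.get_iostat_list([device], since_boot=False, interval=5)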

View File

@@ -0,0 +1,129 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import wget
import os
from enum import Enum
from core.test_run import TestRun
from test_tools import fs_utils
from test_utils.os_utils import DEBUGFS_MOUNT_POINT
KEDR_0_6_URL = "https://github.com/euspectre/kedr/archive/v0.6.tar.gz"
BUILD_DIR = "build"
LEAKS_LOGS_PATH = f"{DEBUGFS_MOUNT_POINT}/kedr_leak_check"
KMALLOC_FAULT_SIMULATION_PATH = "/sys/kernel/debug/kedr_fault_simulation"
class KedrProfile(Enum):
MEM_LEAK_CHECK = "leak_check.conf"
FAULT_SIM = "fsim.conf"
class Kedr:
@staticmethod
def is_installed():
return "KEDR version" in TestRun.executor.run("kedr --version").stdout.strip()
@classmethod
def install(cls):
if cls.is_installed():
TestRun.LOGGER.info("Kedr is already installed!")
return
# TODO check if cmake is installed before
# TODO consider using os_utils.download_file()
kedr_archive = wget.download(KEDR_0_6_URL)
TestRun.executor.rsync_to(
f"\"{kedr_archive}\"",
f"{TestRun.config['working_dir']}")
kedr_dir = TestRun.executor.run_expect_success(
f"cd {TestRun.config['working_dir']} && "
f"tar -ztf \"{kedr_archive}\" | sed -e 's@/.*@@' | uniq"
).stdout
TestRun.executor.run_expect_success(
f"cd {TestRun.config['working_dir']} && "
f"tar -xf \"{kedr_archive}\" && "
f"cd {kedr_dir} && "
f"mkdir -p {BUILD_DIR} && "
f"cd {BUILD_DIR} && "
f"cmake ../sources/ && "
f"make && "
f"make install"
)
os.remove(kedr_archive)
TestRun.LOGGER.info("Kedr installed succesfully")
@classmethod
def is_loaded(cls):
if not cls.is_installed():
raise Exception("Kedr is not installed!")
if "KEDR status: loaded" in TestRun.executor.run_expect_success("kedr status").stdout:
return True
else:
return False
@classmethod
def start(cls, module, profile: KedrProfile = KedrProfile.MEM_LEAK_CHECK):
if not cls.is_installed():
raise Exception("Kedr is not installed!")
TestRun.LOGGER.info(f"Starting kedr with {profile} profile")
start_cmd = f"kedr start {module} -f {profile.value}"
TestRun.executor.run_expect_success(start_cmd)
# TODO extend to scenarios other than kmalloc
@staticmethod
def setup_fault_injections(condition: str = "1"):
TestRun.executor.run_expect_success(
f'echo "kmalloc" > {KMALLOC_FAULT_SIMULATION_PATH}/points/kmalloc/current_indicator')
TestRun.executor.run_expect_success(
f'echo "{condition}" > {KMALLOC_FAULT_SIMULATION_PATH}/points/kmalloc/expression')
@classmethod
def fsim_show_last_fault(cls):
if not cls.is_installed():
raise Exception("Kedr is not installed!")
if not cls.is_loaded():
raise Exception("Kedr is not loaded!")
return fs_utils.read_file(f"{KMALLOC_FAULT_SIMULATION_PATH}/last_fault")
@classmethod
def stop(cls):
if not cls.is_installed():
raise Exception("Kedr is not installed!")
TestRun.executor.run_expect_success("kedr stop")
@classmethod
def check_for_mem_leaks(cls, module):
if not cls.is_installed():
raise Exception("Kedr is not installed!")
if not cls.is_loaded():
raise Exception("Kedr is not loaded!")
if fs_utils.check_if_directory_exists(f"{LEAKS_LOGS_PATH}/{module}"):
logs_path = f"{LEAKS_LOGS_PATH}/{module}"
elif fs_utils.check_if_directory_exists(f"{DEBUGFS_MOUNT_POINT}"):
logs_path = f"{LEAKS_LOGS_PATH}"
else:
raise Exception("Couldn't find kedr logs dir!")
leaks = fs_utils.read_file(f"{logs_path}/possible_leaks")
frees = fs_utils.read_file(f"{logs_path}/unallocated_frees")
summary = fs_utils.read_file(f"{logs_path}/info")
if leaks or frees:
raise Exception("Memory leaks found!\n"
f"Kedr summary: {summary}\n"
f"Possible memory leaks: {leaks}\n"
f"Unallocated frees: {frees}\n")

View File

@@ -0,0 +1,140 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from core.test_run import TestRun
from test_utils.size import Unit
class Mdadm:
@staticmethod
def assemble(device_paths: str = None):
cmd = f"mdadm --assemble " + (device_paths if device_paths else "--scan")
return TestRun.executor.run(cmd)
@staticmethod
def create(conf, device_paths: str):
if not conf.name:
raise ValueError("Name needed for RAID creation.")
if not device_paths:
raise ValueError("Device paths needed for RAID creation.")
cmd = f"mdadm --create --run /dev/md/{conf.name} "
if conf.metadata.value != "legacy":
cmd += f"--metadata={conf.metadata.value} "
if conf.level is not None:
cmd += f"--level={conf.level.value} "
if conf.number_of_devices:
cmd += f"--raid-devices={conf.number_of_devices} "
if conf.strip_size:
cmd += f"--chunk={conf.strip_size} "
if conf.size:
cmd += f"--size={int(conf.size.get_value(Unit.KibiByte))} "
cmd += device_paths
return TestRun.executor.run_expect_success(cmd)
@staticmethod
def detail(raid_device_paths: str):
if not raid_device_paths:
raise ValueError("Provide paths of RAID devices to show details for.")
cmd = f"mdadm --detail {raid_device_paths} --prefer=by-id"
return TestRun.executor.run_expect_success(cmd)
@classmethod
def detail_result(cls, raid_device_paths: str):
output = cls.detail(raid_device_paths)
details = {}
for device_details in re.split("^/dev/", output.stdout, flags=re.MULTILINE):
if not device_details:
continue
lines = device_details.splitlines()
key = "/dev/" + lines[0].rstrip(':')
details[key] = {}
details[key]["path"] = key
details[key]["devices"] = cls.__parse_devices(device_details)
details[key]["level"] = cls.__parse_level(device_details)
details[key]["uuid"] = cls.__parse_uuid(device_details)
metadata = cls.__parse_metadata(device_details)
if metadata:
details[key]["metadata"] = metadata
return details
@staticmethod
def examine(brief: bool = True, device_paths: str = None):
cmd = f"mdadm --examine "
if brief:
cmd += "--brief "
cmd += (device_paths if device_paths else "--scan")
return TestRun.executor.run_expect_success(cmd)
@classmethod
def examine_result(cls, device_paths: str = None):
output = cls.examine(device_paths=device_paths)
raids = []
uuid_path_prefix = "/dev/disk/by-id/md-uuid-"
# sometimes links for RAIDs are not properly created, force udev to create them
TestRun.executor.run("udevadm trigger && udevadm settle")
for line in output.stdout.splitlines():
split_line = line.split()
try:
uuid = [i for i in split_line if i.startswith("UUID=")][0].split("=")[-1]
except IndexError:
continue
raid_link = uuid_path_prefix + uuid
raid = Mdadm.detail_result(raid_link)[raid_link]
if raid["level"] == "Container":
continue
raid["metadata"], raid["array_devices"] = "legacy", []
container = (
[i for i in split_line if i.startswith("container=")][0]
if "container=" in line else None
)
if container:
container_link = uuid_path_prefix + container.split("=")[-1]
raid["container"] = cls.detail_result(container_link)[container_link]
raid["metadata"] = raid["container"]["metadata"]
raid["array_devices"] = raid["container"]["devices"]
raids.append(raid)
return raids
@staticmethod
def stop(device_paths: str = None):
cmd = f"mdadm --stop " + (device_paths if device_paths else "--scan")
return TestRun.executor.run_expect_success(cmd)
@staticmethod
def zero_superblock(device_paths: str):
cmd = f"mdadm --zero-superblock {device_paths}"
return TestRun.executor.run_expect_success(cmd)
@staticmethod
def __parse_devices(details: str):
devices = []
for detail in [d.strip() for d in details.splitlines() if " /dev/" in d]:
devices.append(detail.split()[-1])
return devices
@staticmethod
def __parse_level(details: str):
level = [line for line in details.splitlines() if "Raid Level" in line][0].split(" : ")[-1]
return level.capitalize()
@staticmethod
def __parse_uuid(details: str):
uuid = [line for line in details.splitlines() if "UUID" in line][0].split(" : ")[-1]
return uuid
@staticmethod
def __parse_metadata(details: str):
try:
return [
line.strip() for line in details.splitlines()
if line.strip().startswith("Version :")
][0].split(" : ")[-1]
except IndexError:
return None
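# Example (sketch; `conf` stands for a RAID configuration object exposing the
# name/metadata/level/number_of_devices/strip_size/size fields used above,
# and the device paths are placeholders):
#
#   Mdadm.create(conf, "/dev/sdb /dev/sdc")
#   details = Mdadm.detail_result(f"/dev/md/{conf.name}")
#   Mdadm.stop(f"/dev/md/{conf.name}")
#   Mdadm.zero_superblock("/dev/sdb /dev/sdc")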

View File

@@ -0,0 +1,60 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import json
from core.test_run import TestRun
def format_disk(device, metadata_size=None, block_size=None,
force=True, format_params=None, reset=True):
force_param = '-f' if force else ''
reset_param = '-r' if reset else ''
format_params = ' '.join(format_params) if format_params else ''
lbafs = get_lba_formats(device)
if metadata_size:
lbafs = [lbaf for lbaf in lbafs if lbaf['metadata_size'] == metadata_size]
if block_size:
lbafs = [lbaf for lbaf in lbafs if lbaf['block_size'] == block_size]
if len(lbafs) == 1:
TestRun.LOGGER.info(
f"Formatting device {device.path} with {metadata_size} metadata size "
f"and {lbafs[0]['block_size']} block size")
TestRun.executor.run_expect_success(
f"nvme format {device.path} -l {lbafs[0]['lba_format']} "
f"{force_param} {reset_param} {format_params}")
TestRun.LOGGER.info(f"Successfully format device: {device.path}")
else:
raise Exception(f"Wrong parameters to format device: {device.path}")
elif block_size:
lbafs = [lbaf for lbaf in lbafs if lbaf['block_size'] == block_size]
if len(lbafs) > 0:
TestRun.LOGGER.info(
f"Formatting device {device.path} with {block_size} block size")
TestRun.executor.run_expect_success(
f"nvme format {device.path} -b {block_size} "
f"{force_param} {reset_param} {format_params}")
TestRun.LOGGER.info(f"Successfully format device: {device.path}")
else:
raise Exception(f"Wrong parameters to format device: {device.path}")
else:
raise Exception("Cannot format device without specified parameters")
def get_lba_formats(device):
output = json.loads(TestRun.executor.run_expect_success(
f"nvme id-ns {device.path} -o json").stdout)
entries = output['lbafs']
lbafs = []
for entry in entries:
lbaf = {"lba_format": entries.index(entry),
"metadata_size": entry['ms'],
"block_size": 2 ** entry['ds'],
"in_use": entries.index(entry) == output['flbas']}
lbafs.append(lbaf)
return lbafs
def get_lba_format_in_use(device):
lbafs = get_lba_formats(device)
return next((lbaf for lbaf in lbafs if lbaf['in_use']))
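# Example (sketch; assumes `device` is an NVMe Device instance):
#
#   format_disk(device, block_size=4096)   # pick the LBA format with 4 KiB blocks
#   lbaf = get_lba_format_in_use(device)   # e.g. {'lba_format': 1, 'block_size': 4096, ...}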

View File

@@ -0,0 +1,121 @@
#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import re
from core.test_run import TestRun
from test_utils.output import CmdException
class RpmSet:
def __init__(self, packages_paths: list):
self.packages = packages_paths
def _get_package_names(self):
return " ".join([os.path.splitext(os.path.basename(pckg))[0] for pckg in self.packages])
def check_if_installed(self):
if not self.packages:
raise ValueError("No packages given.")
output = TestRun.executor.run(f"rpm --query {self._get_package_names()}")
return output.exit_code == 0
def install(self):
TestRun.LOGGER.info(f"Installing RPM packages")
if not self.packages:
raise ValueError("No packages given.")
output = TestRun.executor.run(
f"rpm --upgrade --verbose --replacepkgs {' '.join(self.packages)}"
)
if (
output.exit_code != 0
or re.search("error", output.stdout, re.IGNORECASE)
or re.search("error", output.stderr, re.IGNORECASE)
):
raise CmdException("Installation failed or errors found during the process.", output)
def uninstall(self):
TestRun.LOGGER.info(f"Uninstalling RPM packages")
if not self.check_if_installed():
raise FileNotFoundError("Could not uninstall - packages not installed yet.")
output = TestRun.executor.run(f"rpm --erase --verbose {self._get_package_names()}")
if (
output.exit_code != 0
or re.search("error", output.stdout, re.IGNORECASE)
or re.search("error", output.stderr, re.IGNORECASE)
):
raise CmdException("Uninstallation failed or errors found during the process.", output)
@staticmethod
def uninstall_all_matching(*packages_names: str):
for name in packages_names:
TestRun.LOGGER.info(f"Uninstalling all RPM packages matching '{name}'")
TestRun.executor.run_expect_success(
f"rpm --query --all | grep {name} | "
f"xargs --no-run-if-empty rpm --erase --verbose"
)
class DebSet:
def __init__(self, packages_paths: list):
self.packages = packages_paths
def _get_package_names(self):
return " ".join([os.path.basename(pckg).split("_")[0] for pckg in self.packages])
def check_if_installed(self):
if not self.packages:
raise ValueError("No packages given.")
output = TestRun.executor.run(f"dpkg --no-pager --list {self._get_package_names()}")
return output.exit_code == 0
def install(self):
TestRun.LOGGER.info(f"Installing DEB packages")
if not self.packages:
raise ValueError("No packages given.")
output = TestRun.executor.run(
f"dpkg --force-confdef --force-confold --install {' '.join(self.packages)}"
)
if (
output.exit_code != 0
or re.search("error", output.stdout, re.IGNORECASE)
or re.search("error", output.stderr, re.IGNORECASE)
):
raise CmdException("Installation failed or errors found during the process.", output)
def uninstall(self):
TestRun.LOGGER.info(f"Uninstalling DEB packages")
if not self.check_if_installed():
raise FileNotFoundError("Could not uninstall - packages not installed yet.")
output = TestRun.executor.run(f"dpkg --purge {self._get_package_names()}")
if (
output.exit_code != 0
or re.search("error", output.stdout, re.IGNORECASE)
or re.search("error", output.stderr, re.IGNORECASE)
):
raise CmdException("Uninstallation failed or errors found during the process.", output)
@staticmethod
def uninstall_all_matching(*packages_names: str):
for name in packages_names:
TestRun.LOGGER.info(f"Uninstalling all DEB packages matching '{name}'")
TestRun.executor.run_expect_success(
f"dpkg-query --no-pager --showformat='${{Package}}\n' --show | grep {name} | "
f"xargs --no-run-if-empty dpkg --purge"
)
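# Example (sketch; the package path and name pattern are placeholders):
#
#   rpms = RpmSet(["/tmp/example-package-1.0.rpm"])
#   if not rpms.check_if_installed():
#       rpms.install()
#   RpmSet.uninstall_all_matching("example-package")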

View File

@@ -0,0 +1,34 @@
<?xml version="1.0" encoding="utf-8"?>
<Peach xmlns="http://peachfuzzer.com/2012/Peach" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://peachfuzzer.com/2012/Peach /peach/peach.xsd">
<!-- The structure of a data block -->
<DataModel name="Value">
<!-- Place for an auto generated config -->
</DataModel>
<DataModel name="NewLine">
<String name="NewLine" value="\n" mutable="false"/>
</DataModel>
<!-- Basic state machine logic needed to test a protocol -->
<!-- Encoding fuzzed parameter to base64 and adding new line at the end -->
<StateModel name="TheState" initialState="Initial">
<State name="Initial">
<Action type="output">
<DataModel ref="Value">
<Transformer class="Base64Encode"/>
</DataModel>
</Action>
<Action type="output">
<DataModel ref="NewLine"/>
</Action>
<Action type="close"/>
</State>
</StateModel>
<!-- Write output to fuzzedParams.txt file -->
<Test name="Default">
<StateModel ref="TheState"/>
<Publisher class="File">
<Param name="FileName" value="fuzzedParams.txt"/>
<Param name="Append" value="true"/>
<Param name="Overwrite" value="false"/>
</Publisher>
</Test>
</Peach>

View File

@@ -0,0 +1,208 @@
#
# Copyright(c) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import wget
import base64
import posixpath
import random
import tempfile
import lxml.etree as etree
from collections import namedtuple
from core.test_run import TestRun
from test_tools import fs_utils
from test_tools.fs_utils import create_directory, check_if_file_exists, write_file
class PeachFuzzer:
"""
API to work with Peach Fuzzer tool in Test-Framework.
Peach Fuzzer is used only for generating fuzzed values that later are used in Test-Framework
in order to execute fuzzed CLI commands or to prepare fuzzed config files.
"""
peach_fuzzer_3_0_url = "https://sourceforge.net/projects/peachfuzz/files/Peach/3.0/" \
"peach-3.0.202-linux-x86_64-release.zip"
base_dir = "/root/Fuzzy"
peach_dir = "peach-3.0.202-linux-x86_64-release"
xml_config_template = posixpath.join(posixpath.dirname(__file__), "config_template.xml")
xml_config_file = posixpath.join(base_dir, "fuzzerConfig.xml")
xml_namespace = "http://peachfuzzer.com/2012/Peach"
fuzzy_output_file = posixpath.join(base_dir, "fuzzedParams.txt")
tested_param_placeholder = b"{param}"
# escape backslash first, so it doesn't interfere with escaping other characters
escape_chars = '\\\n"\'&|;()`<>$! '
@classmethod
def get_fuzzed_command(cls, command_template: bytes, count: int):
"""
Generate command with fuzzed parameter provided on command_template.
Command is ready to be executed with test executor
:param command_template: byte string with command to be executed.
parameter to be replaced with fuzzed string has to be tested_param_placeholder
:param count: amount of fuzzed commands to generate
:returns: named tuple with fuzzed param and CLI ready to be executed with Test-Framework
executors. Param is returned in order to implement correct values checkers in the tests
"""
TestRun.LOGGER.info(f"Try to get commands with fuzzed parameters")
FuzzedCommand = namedtuple('FuzzedCommand', ['param', 'command'])
if cls.tested_param_placeholder not in command_template:
TestRun.block("No param placeholder is found in command template!")
cmd_prefix = b"echo "
cmd_suffix = b" | base64 --decode | sh"
for fuzzed_parameter in cls.generate_peach_fuzzer_parameters(count):
yield FuzzedCommand(fuzzed_parameter,
cmd_prefix + base64.b64encode(command_template.replace(
cls.tested_param_placeholder, fuzzed_parameter)) + cmd_suffix)
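# Example (sketch; the command template is a placeholder, and an XML config
# must have been prepared first with generate_config() or copy_config()):
#
#   template = b"some_cli --set-param {param}"
#   for fuzzed in PeachFuzzer.get_fuzzed_command(template, count=100):
#       TestRun.executor.run(fuzzed.command)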
@classmethod
def generate_peach_fuzzer_parameters(cls, count: int):
"""
Generate fuzzed parameter according to Peach Fuzzer XML config
Fuzzed parameter later can be used for either generating cli command or config.
:param count: amount of fuzzed strings to generate
:returns: fuzzed value in byte string
"""
if not cls._is_installed():
TestRun.LOGGER.info("Try to install Peach Fuzzer")
cls._install()
if not cls._is_xml_config_prepared():
TestRun.block("No Peach Fuzzer XML config needed to generate fuzzed values was found!")
fs_utils.remove(cls.fuzzy_output_file, force=True, ignore_errors=True)
TestRun.LOGGER.info(f"Generate {count} unique fuzzed values")
cmd = f"cd {cls.base_dir}; {cls.peach_dir}/peach --range 0,{count - 1} " \
f"--seed {random.randrange(2 ** 32)} {cls.xml_config_file} > " \
f"{cls.base_dir}/peachOutput.log"
TestRun.executor.run_expect_success(cmd)
if not check_if_file_exists(cls.fuzzy_output_file):
TestRun.block("No expected fuzzy output file was found!")
# process fuzzy output file locally on the controller as it can be very big
local_fuzzy_file = tempfile.NamedTemporaryFile(delete=False)
local_fuzzy_file.close()
TestRun.executor.rsync_from(cls.fuzzy_output_file, local_fuzzy_file.name)
with open(local_fuzzy_file.name, "r") as fd:
for fuzzed_param_line in fd:
fuzzed_param_bytes = base64.b64decode(fuzzed_param_line)
fuzzed_param_bytes = cls._escape_special_chars(fuzzed_param_bytes)
yield fuzzed_param_bytes
@classmethod
def generate_config(cls, data_model_config: list):
"""
Generate Peach Fuzzer XML config based on template provided in xml_config_template
and data template passed as an argument.
:param data_model_config: dictionary with config that has to be used for generating
DataModel section in PeachFuzzer XML config. Config can be stored in test in more compact
form, e.g. in yaml, and can be converted to dict just before passing to this function.
Example of such config in yaml:
- name: String
attributes:
name: CacheId
value: '1'
size: '14'
mutable: 'true'
children:
- name: Hint
attributes:
name: NumericalString
value: 'true'
"""
if not posixpath.exists(cls.xml_config_template):
TestRun.block("Peach fuzzer xml config template not found!")
root = etree.parse(cls.xml_config_template)
data_model = root.find(f'{{{cls.xml_namespace}}}DataModel[@name="Value"]')
cls.__create_xml_nodes(data_model, data_model_config)
create_directory(cls.base_dir, True)
write_file(cls.xml_config_file, etree.tostring(root, encoding="unicode"))
@classmethod
def copy_config(cls, config_file: str):
"""
Instead of generating config with "generate_config" method, config can be prepared manually
and just passed as is to PeachFuzzer.
:param config_file: Peach Fuzzer XML config to be copied to the DUT
"""
if not posixpath.exists(config_file):
TestRun.block("Peach fuzzer xml config to be copied doesn't exist!")
create_directory(cls.base_dir, True)
TestRun.executor.rsync_to(config_file, cls.xml_config_file)
@classmethod
def __create_xml_nodes(cls, xml_node, config):
"""
Create XML code for Peach Fuzzer based on python dict config
"""
for element in config:
new_node = etree.Element(element["name"])
for attr_name, attr_value in element["attributes"].items():
new_node.set(attr_name, attr_value)
if element.get("children"):
cls.__create_xml_nodes(new_node, element.get("children"))
xml_node.append(new_node)
@classmethod
def _install(cls):
"""
Install Peach Fuzzer on the DUT
"""
peach_archive = wget.download(cls.peach_fuzzer_3_0_url)
create_directory(cls.base_dir, True)
TestRun.executor.rsync_to(f"\"{peach_archive}\"", f"{cls.base_dir}")
TestRun.executor.run_expect_success(
f'cd {cls.base_dir} && unzip -u "{peach_archive}"')
if cls._is_installed():
TestRun.LOGGER.info("Peach fuzzer installed successfully")
os.remove(peach_archive)
else:
TestRun.block("Peach fuzzer installation failed!")
@classmethod
def _is_installed(cls):
"""
Check if Peach Fuzzer is installed on the DUT
"""
if not cls._is_mono_installed():
TestRun.block("Mono is not installed, can't continue with Peach Fuzzer!")
if fs_utils.check_if_directory_exists(posixpath.join(cls.base_dir, cls.peach_dir)):
return "Peach" in TestRun.executor.run(
f"cd {cls.base_dir} && {cls.peach_dir}/peach --version").stdout.strip()
else:
return False
@classmethod
def _escape_special_chars(cls, fuzzed_str: bytes):
"""
Escape special chars provided in escape_chars list in the fuzzed string generated by
Peach Fuzzer
Escaping is done for example in order to make fuzzed string executable in Linux CLI
If fuzzed string will be used in other places, escape_chars list may be overwritten.
"""
for i in cls.escape_chars:
i = bytes(i, "utf-8")
if i in fuzzed_str[:]:
fuzzed_str = fuzzed_str.replace(i, b'\\' + i)
return fuzzed_str
@classmethod
def _is_xml_config_prepared(cls):
"""
Check if Peach Fuzzer XML config is present on the DUT
"""
if fs_utils.check_if_file_exists(cls.xml_config_file):
return True
else:
return False
@staticmethod
def _is_mono_installed():
"""
Check if Mono (.NET compatible framework) is installed on the DUT
If it's not, it has to be installed manually.
For RHEL-based OSes it's usually mono-complete package
"""
return TestRun.executor.run("which mono").exit_code == 0