Update TF and functional tests API

Signed-off-by: Slawomir Jankowski <slawomir.jankowski@intel.com>
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Authored by Slawomir Jankowski on 2020-12-07 16:52:36 +01:00; committed by Robert Baldyga
parent ecbd4fbe30
commit 17f440de10
56 changed files with 209 additions and 210 deletions


@@ -19,12 +19,12 @@ class Cache:
         self.__metadata_size = None
 
     def __get_cache_id(self):
-        cmd = f"{list_cmd()} | grep {self.cache_device.system_path}"
+        cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.short_path}"
         output = TestRun.executor.run(cmd)
         if output.exit_code == 0 and output.stdout.strip():
             return output.stdout.split()[1]
         else:
-            raise Exception(f"There is no cache started on {self.cache_device.system_path}.")
+            raise Exception(f"There is no cache started on {self.cache_device.path}.")
 
     def get_core_devices(self):
         return get_cores(self.cache_id)
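
Note: `list_cmd` is invoked here with `by_id_path=False`, so the listing prints short kernel names and the grep must match the device's `short_path`; the stable by-id link now lives in `Device.path`. A minimal sketch of the assumed attribute semantics (example values are hypothetical, not taken from this commit):

    device.path        # e.g. "/dev/disk/by-id/nvme-INTEL_..."  (stable by-id link)
    device.short_path  # e.g. "/dev/nvme0n1"                    (kernel device name)
    cmd = f"{list_cmd(by_id_path=False)} | grep {device.short_path}"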


@@ -35,7 +35,7 @@ def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
     _cache_id = None if cache_id is None else str(cache_id)
     _cache_mode = None if cache_mode is None else cache_mode.name.lower()
     output = TestRun.executor.run(start_cmd(
-        cache_dev=cache_dev.system_path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
+        cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
         cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to start cache.", output)
@@ -53,11 +53,11 @@ def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = Fals
 
 
 def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
     _core_id = None if core_id is None else str(core_id)
     output = TestRun.executor.run(
-        add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.system_path,
+        add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path,
                      core_id=_core_id, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to add core.", output)
-    core = Core(core_dev.system_path, cache.cache_id)
+    core = Core(core_dev.path, cache.cache_id)
     return core
@@ -71,18 +71,18 @@ def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool
 
 def remove_detached(core_device: Device, shortcut: bool = False):
     output = TestRun.executor.run(
-        remove_detached_cmd(core_device=core_device.system_path, shortcut=shortcut))
+        remove_detached_cmd(core_device=core_device.path, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to remove detached core.", output)
     return output
 
 
 def try_add(core_device: Device, cache_id: int, core_id: int = None):
-    output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.system_path,
+    output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path,
                                                      str(core_id) if core_id is not None else None))
     if output.exit_code != 0:
         raise CmdException("Failed to execute try add script command.", output)
-    return Core(core_device.system_path, cache_id)
+    return Core(core_device.path, cache_id)
 
 
 def purge_cache(cache_id: int):
@@ -128,16 +128,17 @@ def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
 
 def load_cache(device: Device, shortcut: bool = False):
     output = TestRun.executor.run(
-        load_cmd(cache_dev=device.system_path, shortcut=shortcut))
+        load_cmd(cache_dev=device.path, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to load cache.", output)
     return Cache(device)
 
 
-def list_caches(output_format: OutputFormat = None, shortcut: bool = False):
+def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
+                shortcut: bool = False):
     _output_format = None if output_format is None else output_format.name
     output = TestRun.executor.run(
-        list_cmd(output_format=_output_format, shortcut=shortcut))
+        list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to list caches.", output)
     return output
@@ -154,7 +155,7 @@ def print_version(output_format: OutputFormat = None, shortcut: bool = False):
 
 def zero_metadata(cache_dev: Device, shortcut: bool = False):
     output = TestRun.executor.run(
-        zero_metadata_cmd(cache_dev=cache_dev.system_path, shortcut=shortcut))
+        zero_metadata_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to wipe metadata.", output)
     return output
@@ -179,7 +180,8 @@ def remove_all_detached_cores():
 
 def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
                      io_class_id: int = None, filter: List[StatsFilter] = None,
-                     output_format: OutputFormat = None, shortcut: bool = False):
+                     output_format: OutputFormat = None, by_id_path: bool = True,
+                     shortcut: bool = False):
     _output_format = None if output_format is None else output_format.name
     _core_id = None if core_id is None else str(core_id)
     _io_class_id = None if io_class_id is None else str(io_class_id)
@@ -192,7 +194,8 @@ def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = Fa
         print_statistics_cmd(
             cache_id=str(cache_id), core_id=_core_id,
             per_io_class=per_io_class, io_class_id=_io_class_id,
-            filter=_filter, output_format=_output_format, shortcut=shortcut))
+            filter=_filter, output_format=_output_format,
+            by_id_path=by_id_path, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Printing statistics failed.", output)
     return output
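
Note: `list_caches` and `print_statistics` now default to `by_id_path=True`, matching the CLI default added below. A usage sketch (the cache id and filter values are illustrative only):

    from api.cas import casadm
    from api.cas.casadm_params import OutputFormat, StatsFilter

    # List caches with short /dev names instead of the default by-id links:
    caches_output = casadm.list_caches(output_format=OutputFormat.csv, by_id_path=False)

    # Statistics keep the new default (by-id paths) unless overridden:
    stats_output = casadm.print_statistics(cache_id=1, filter=[StatsFilter.usage],
                                           output_format=OutputFormat.csv)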


@@ -2,24 +2,20 @@
 # Copyright(c) 2019-2020 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
 import csv
 import json
 import re
-from api.cas import casadm
-from test_utils.output import CmdException
-from test_utils.size import parse_unit
-from storage_devices.device import Device
-from api.cas.cache_config import *
-from api.cas.casadm_params import *
-from api.cas.version import CasVersion
 from datetime import timedelta
 from typing import List
 from api.cas import casadm
 from api.cas.cache_config import *
 from api.cas.casadm_params import *
+from api.cas.version import CasVersion
+from storage_devices.device import Device
 from test_utils.size import parse_unit
+from test_utils.output import CmdException
 
 
 class Stats(dict):


@@ -95,7 +95,8 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = Non
 
 def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
                          io_class_id: str = None, filter: str = None,
-                         output_format: str = None, shortcut: bool = False):
+                         output_format: str = None, by_id_path: bool = True,
+                         shortcut: bool = False):
     command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
     if core_id is not None:
         command += (" -j " if shortcut else " --core-id ") + core_id
@@ -109,6 +110,8 @@ def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool
         command += (" -f " if shortcut else " --filter ") + filter
     if output_format is not None:
         command += (" -o " if shortcut else " --output-format ") + output_format
+    if by_id_path:
+        command += (" -b " if shortcut else " --by-id-path ")
     return casadm_bin + command
@@ -126,10 +129,12 @@ def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False)
     return casadm_bin + command
 
 
-def list_cmd(output_format: str = None, shortcut: bool = False):
+def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False):
     command = " -L" if shortcut else " --list-caches"
     if output_format == "table" or output_format == "csv":
         command += (" -o " if shortcut else " --output-format ") + output_format
+    if by_id_path:
+        command += (" -b " if shortcut else " --by-id-path ")
     return casadm_bin + command
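
Note: with the default `by_id_path=True`, both generators append the new switch; the rendered strings look like this (a sketch assuming `casadm_bin == "casadm"`):

    list_cmd()                  # "casadm --list-caches --by-id-path "
    list_cmd(by_id_path=False)  # "casadm --list-caches"
    list_cmd(shortcut=True)     # "casadm -L -b "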


@@ -26,7 +26,7 @@ SEQ_CUT_OFF_THRESHOLD_DEFAULT = Size(1, Unit.MebiByte)
 class Core(Device):
     def __init__(self, core_device: str, cache_id: int):
         self.core_device = Device(core_device)
-        self.system_path = None
+        self.path = None
         core_info = self.__get_core_info()
         if core_info["core_id"] != "-":
             self.core_id = int(core_info["core_id"])
@@ -38,14 +38,14 @@ class Core(Device):
 
     def __get_core_info(self):
         output = TestRun.executor.run(
-            list_cmd(OutputFormat.csv.name))
+            list_cmd(OutputFormat.csv.name, by_id_path=False))
         if output.exit_code != 0:
             raise Exception("Failed to execute list caches command.")
         output_lines = output.stdout.splitlines()
         for line in output_lines:
             split_line = line.split(',')
-            if split_line[0] == "core" and (split_line[2] == self.core_device.system_path
-                                            or split_line[5] == self.system_path):
+            if split_line[0] == "core" and (split_line[2] == self.core_device.short_path
+                                            or split_line[5] == self.path):
                 return {"core_id": split_line[1],
                         "core_device": split_line[2],
                         "status": split_line[3],
@@ -132,7 +132,7 @@ class Core(Device):
     def check_if_is_present_in_os(self, should_be_visible=True):
         device_in_system_message = "CAS device exists in OS."
         device_not_in_system_message = "CAS device does not exist in OS."
-        item = fs_utils.ls_item(f"{self.system_path}")
+        item = fs_utils.ls_item(f"{self.path}")
         if item is not None:
             if should_be_visible:
                 TestRun.LOGGER.info(device_in_system_message)
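
Note: `__get_core_info` follows the same pattern as `Cache.__get_cache_id`: list without by-id paths, then match column 2 against `core_device.short_path` or column 5 against the exported object's `path`. A sketch of the CSV row being parsed; the column layout is inferred from the indices above, and the values are hypothetical:

    line = "core,1,/dev/nvme0n1p1,Active,-,/dev/cas1-1"
    split_line = line.split(',')   # split_line[2] -> "/dev/nvme0n1p1"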


@@ -70,11 +70,8 @@ class CacheConfigLine:
         self.extra_flags = extra_flags
 
     def __str__(self):
-        cache_symlink = self.cache_device.get_device_link("/dev/disk/by-id")
-        cache_device_path = (
-            cache_symlink.full_path if cache_symlink is not None else self.cache_device.system_path
-        )
-        params = [str(self.cache_id), cache_device_path, self.cache_mode.name, self.extra_flags]
+        params = [str(self.cache_id), self.cache_device.path,
+                  self.cache_mode.name, self.extra_flags]
         return '\t'.join(params)
 
 
@@ -88,9 +85,6 @@ class CoreConfigLine:
         self.extra_flags = extra_flags
 
     def __str__(self):
-        core_symlink = self.core_device.get_device_link("/dev/disk/by-id")
-        core_device_path = (
-            core_symlink.full_path if core_symlink is not None else self.core_device.system_path
-        )
-        params = [str(self.cache_id), str(self.core_id), core_device_path, self.extra_flags]
+        params = [str(self.cache_id), str(self.core_id),
+                  self.core_device.path, self.extra_flags]
         return '\t'.join(params)
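
Note: both `__str__` methods now assume `Device.path` already holds the stable by-id link, so the manual `get_device_link("/dev/disk/by-id")` resolution is dropped. A hypothetical rendered cache line (tab-separated; values illustrative):

    # str(CacheConfigLine(...)) -> "1\t/dev/disk/by-id/nvme-INTEL_...\tWT\t-"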

@@ -1 +1 @@
-Subproject commit cd1c19c5636f47b832a44040448d4a9f3e690aec
+Subproject commit 13b7361c25de004fa426c12007d58731270d3cb3


@@ -73,7 +73,7 @@ def test_cleaning_policies_in_write_back(cleaning_policy):
     with TestRun.step("Run 'fio'"):
         fio = fio_prepare()
         for i in range(cores_count):
-            fio.add_job().target(core[i].system_path)
+            fio.add_job().target(core[i].path)
         fio.run()
         time.sleep(3)
         core_writes_before_wait_for_cleaning = (
@@ -138,7 +138,7 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
     with TestRun.step("Run 'fio'"):
         fio = fio_prepare()
         for i in range(cores_count):
-            fio.add_job().target(core[i].system_path)
+            fio.add_job().target(core[i].path)
         fio.run()
         time.sleep(3)


@@ -57,13 +57,13 @@ def test_concurrent_cores_flush(cache_mode):
         block_size = Size(4, Unit.MebiByte)
         count = int(cache_size.value / 2 / block_size.value)
 
-        dd_pid = Dd().output(core1.system_path) \
+        dd_pid = Dd().output(core1.path) \
             .input("/dev/urandom") \
             .block_size(block_size) \
             .count(count) \
             .run_in_background()
 
-        Dd().output(core2.system_path) \
+        Dd().output(core2.path) \
             .input("/dev/urandom") \
             .block_size(block_size) \
             .count(count) \
@@ -160,7 +160,7 @@ def test_concurrent_caches_flush(cache_mode):
         count = int(cache_size.value / block_size.value)
         total_saturation = block_size * count
         for core in cores:
-            Dd().output(core.system_path) \
+            Dd().output(core.path) \
                 .input("/dev/urandom") \
                 .block_size(block_size) \
                 .count(count) \


@@ -363,7 +363,7 @@ def fio_prepare(core, io_mode, io_size=io_size):
         .io_engine(IoEngine.libaio)
         .size(io_size)
         .read_write(io_mode)
-        .target(core.system_path)
+        .target(core.path)
         .direct(1)
     )
     return fio


@@ -84,14 +84,14 @@ def test_seq_cutoff_multi_core(thresholds_list, cache_mode, io_type, io_type_las
             fio_job = fio.add_job(job_name=f"core_{core.core_id}")
             fio_job.size(io_sizes[i])
             fio_job.read_write(io_type)
-            fio_job.target(core.system_path)
+            fio_job.target(core.path)
             writes_before.append(core.get_statistics().block_stats.cache.writes)
 
         # Run random IO against the last core
         fio_job = fio.add_job(job_name=f"core_{cores[-1].core_id}")
         fio_job.size(io_sizes[-1])
         fio_job.read_write(io_type_last)
-        fio_job.target(cores[-1].system_path)
+        fio_job.target(cores[-1].path)
         writes_before.append(cores[-1].get_statistics().block_stats.cache.writes)
 
     with TestRun.step("Running IO against all cores"):
@@ -150,7 +150,7 @@ def test_seq_cutoff_thresh(threshold_param, cls, io_dir, policy, verify_type):
             .io_engine(IoEngine.libaio)
             .size(io_size)
             .read_write(io_dir)
-            .target(f"{cores[0].system_path}")
+            .target(f"{cores[0].path}")
             .direct()
         ).run()
@@ -194,7 +194,7 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
             .io_engine(IoEngine.libaio)
             .size(cache.cache_device.size)
             .read_write(io_dir)
-            .target(f"{cores[0].system_path}")
+            .target(f"{cores[0].path}")
             .direct()
         ).run()
@@ -218,7 +218,7 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
             .io_engine(IoEngine.libaio)
             .size(io_size)
             .read_write(io_dir)
-            .target(f"{cores[0].system_path}")
+            .target(f"{cores[0].path}")
             .direct()
         ).run()


@@ -44,7 +44,7 @@ def test_purge(purge_target):
         dd = (
             Dd()
             .input("/dev/zero")
-            .output(core.system_path)
+            .output(core.path)
             .count(100)
             .block_size(Size(1, Unit.Blocks512))
             .oflag("direct")


@@ -40,10 +40,10 @@ def test_cli_start_stop_default_id(shortcut):
         if len(caches) != 1:
             TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}. "
                          f"Should be only 1.")
-        if cache.cache_device.system_path != cache_device.system_path:
+        if cache.cache_device.path != cache_device.path:
             TestRun.fail(f"The cache has started using a wrong device:"
-                         f" {cache.cache_device.system_path}."
-                         f"\nShould use {cache_device.system_path}.")
+                         f" {cache.cache_device.path}."
+                         f"\nShould use {cache_device.path}.")
 
     with TestRun.step("Stop the cache."):
         casadm.stop_cache(cache.cache_id, shortcut=shortcut)
@@ -83,10 +83,10 @@ def test_cli_start_stop_custom_id(shortcut):
         if len(caches) != 1:
             TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}. "
                          f"Should be only 1.")
-        if cache.cache_device.system_path != cache_device.system_path:
+        if cache.cache_device.path != cache_device.path:
             TestRun.fail(f"The cache has started using a wrong device:"
-                         f" {cache.cache_device.system_path}."
-                         f"\nShould use {cache_device.system_path}.")
+                         f" {cache.cache_device.path}."
+                         f"\nShould use {cache_device.path}.")
 
     with TestRun.step("Stop the cache."):
         casadm.stop_cache(cache.cache_id, shortcut=shortcut)
@@ -127,7 +127,7 @@ def test_cli_add_remove_default_id(shortcut):
         caches = casadm_parser.get_caches()
         if len(caches[0].get_core_devices()) != 1:
             TestRun.fail("One core should be present in the cache.")
-        if caches[0].get_core_devices()[0].system_path != core.system_path:
+        if caches[0].get_core_devices()[0].path != core.path:
             TestRun.fail("The core path should be equal to the path of the core added.")
 
     with TestRun.step("Remove the core from the cache."):
@@ -180,7 +180,7 @@ def test_cli_add_remove_custom_id(shortcut):
         caches = casadm_parser.get_caches()
         if len(caches[0].get_core_devices()) != 1:
             TestRun.fail("One core should be present in the cache.")
-        if caches[0].get_core_devices()[0].system_path != core.system_path:
+        if caches[0].get_core_devices()[0].path != core.path:
             TestRun.fail("The core path should be equal to the path of the core added.")
 
     with TestRun.step("Remove the core from the cache."):
@@ -227,7 +227,7 @@ def test_cli_load_and_force(shortcut):
 
     with TestRun.step("Try to load cache with 'force'."):
         output = TestRun.executor.run(
-            start_cmd(cache_dev=cache_device.system_path, force=True, load=True, shortcut=shortcut)
+            start_cmd(cache_dev=cache_device.path, force=True, load=True, shortcut=shortcut)
         )
         if output.exit_code == 0:
             TestRun.fail("Loading cache with 'force' option should fail.")


@@ -292,7 +292,7 @@ def check_seqcutoff_parameters(core, seqcutoff_params):
     if failed_params:
         TestRun.LOGGER.error(
             f"Sequential cut-off parameters are not correct "
-            f"for {core.system_path}:\n{failed_params}"
+            f"for {core.path}:\n{failed_params}"
         )


@@ -188,18 +188,18 @@ def base_prepare(item):
                 # stop only those RAIDs, which are comprised of test disks
                 if all(map(lambda device:
                            any(map(lambda disk_path:
-                                   disk_path in device.system_path,
-                                   [bd.system_path for bd in TestRun.dut.disks])),
+                                   disk_path in device.path,
+                                   [bd.path for bd in TestRun.dut.disks])),
                            raid.array_devices)):
                     raid.umount_all_partitions()
                     raid.remove_partitions()
                     raid.stop()
                     for device in raid.array_devices:
-                        Mdadm.zero_superblock(device.system_path)
+                        Mdadm.zero_superblock(device.path)
 
         for disk in TestRun.dut.disks:
             disk.umount_all_partitions()
-            Mdadm.zero_superblock(disk.system_path)
+            Mdadm.zero_superblock(disk.path)
             TestRun.executor.run_expect_success("udevadm settle")
             disk.remove_partitions()
             create_partition_table(disk, PartitionTable.gpt)


@@ -40,10 +40,10 @@ def test_data_integrity_12h(cache_mode):
         cache, core = prepare(cache_mode)
 
     with TestRun.step("Fill cache"):
-        fill_cache(core.system_path)
+        fill_cache(core.path)
 
     with TestRun.step("Run test workloads with verification"):
-        run_workload(core.system_path)
+        run_workload(core.path)
 
 
 def prepare(cache_mode):


@@ -71,12 +71,12 @@ def test_data_integrity_5d_dss(filesystems):
 
     with TestRun.step("Create filesystems and mount cores"):
         for i, core in enumerate(cores):
-            mount_point = core.system_path.replace('/dev/', '/mnt/')
+            mount_point = core.path.replace('/dev/', '/mnt/')
             if not fs_utils.check_if_directory_exists(mount_point):
                 fs_utils.create_directory(mount_point)
-            TestRun.LOGGER.info(f"Create filesystem {filesystems[i].name} on {core.system_path}")
+            TestRun.LOGGER.info(f"Create filesystem {filesystems[i].name} on {core.path}")
             core.create_filesystem(filesystems[i])
-            TestRun.LOGGER.info(f"Mount filesystem {filesystems[i].name} on {core.system_path} to "
+            TestRun.LOGGER.info(f"Mount filesystem {filesystems[i].name} on {core.path} to "
                                 f"{mount_point}")
             core.mount(mount_point)
             sync()


@@ -29,7 +29,7 @@ def test_another_cache_with_same_id():
         cache_dev_1.create_partitions([Size(2, Unit.GibiByte)])
         TestRun.executor.run_expect_success(
             cli.start_cmd(
-                cache_dev_1.partitions[0].system_path, cache_id="1", force=True
+                cache_dev_1.partitions[0].path, cache_id="1", force=True
             )
         )
 
@@ -38,7 +38,7 @@ def test_another_cache_with_same_id():
         cache_dev_2.create_partitions([Size(2, Unit.GibiByte)])
         TestRun.executor.run_expect_fail(
             cli.start_cmd(
-                cache_dev_2.partitions[0].system_path, cache_id="1", force=True
+                cache_dev_2.partitions[0].path, cache_id="1", force=True
             )
         )
 
@@ -69,7 +69,7 @@ def test_another_core_with_same_id():
         TestRun.executor.run_expect_success(
             cli.add_core_cmd(
                 cache_id=f"{cache.cache_id}",
-                core_dev=f"{core_dev_1.partitions[0].system_path}",
+                core_dev=f"{core_dev_1.partitions[0].path}",
                 core_id="1",
             )
         )
@@ -80,7 +80,7 @@ def test_another_core_with_same_id():
         TestRun.executor.run_expect_fail(
             cli.add_core_cmd(
                 cache_id=f"{cache.cache_id}",
-                core_dev=f"{core_dev_2.partitions[0].system_path}",
+                core_dev=f"{core_dev_2.partitions[0].path}",
                 core_id="1",
             )
         )


@@ -202,7 +202,7 @@ def test_one_core_fail(cache_mode):
 
     with TestRun.step("Check if core device is really out of cache."):
         output = str(casadm.list_caches().stdout.splitlines())
-        if core_part1.system_path in output:
+        if core_part1.path in output:
             TestRun.fail("The first core device should be unplugged!")
 
     with TestRun.step("Check if the remaining core is able to use cache."):
@@ -232,7 +232,7 @@ def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
           .block_size(block_size)
          .count(blocks))
     if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
-        dd.input(dev.system_path).output("/dev/null")
+        dd.input(dev.path).output("/dev/null")
     else:
-        dd.input("/dev/urandom").output(dev.system_path)
+        dd.input("/dev/urandom").output(dev.path)
     return dd


@@ -66,7 +66,7 @@ def test_stop_no_flush_load_cache(cache_mode, filesystem):
 
     with TestRun.step("Try to start cache without loading metadata."):
         output = TestRun.executor.run_expect_fail(cli.start_cmd(
-            cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
+            cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()),
             force=False, load=False))
         cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)
@@ -127,7 +127,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
     with TestRun.step("Fill exported object with data."):
         dd = (Dd()
               .input("/dev/zero")
-              .output(core.system_path)
+              .output(core.path)
               .block_size(Size(1, Unit.Blocks4096))
               .oflag("direct"))
         dd.run()
@@ -143,7 +143,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
 
     with TestRun.step("Try to start cache without loading metadata."):
         output = TestRun.executor.run_expect_fail(cli.start_cmd(
-            cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
+            cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()),
             force=False, load=False))
         cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)


@@ -156,13 +156,13 @@ def test_add_cached_core(cache_mode):
 
     with TestRun.step("Try adding the same core device to the second cache instance."):
         output = TestRun.executor.run_expect_fail(
-            cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.system_path),
+            cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.path),
                              core_id=str(core.core_id)))
         cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
 
     with TestRun.step("Try adding the same core device to the same cache for the second time."):
         output = TestRun.executor.run_expect_fail(
-            cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.system_path)))
+            cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.path)))
         cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
 
     with TestRun.step("Stop caches."):


@@ -37,7 +37,7 @@ def test_remove_core_during_io():
             .io_engine(IoEngine.libaio)
             .block_size(Size(4, Unit.KibiByte))
             .read_write(ReadWrite.randrw)
-            .target(f"{core.system_path}")
+            .target(f"{core.path}")
             .direct(1)
             .run_time(timedelta(minutes=4))
             .time_based()
@@ -78,7 +78,7 @@ def test_stop_cache_during_io():
             .io_engine(IoEngine.libaio)
             .block_size(Size(4, Unit.KibiByte))
             .read_write(ReadWrite.randrw)
-            .target(f"{core.system_path}")
+            .target(f"{core.path}")
             .direct(1)
             .run_time(timedelta(minutes=4))
             .time_based()


@@ -205,7 +205,7 @@ def fio_prepare(core):
         .create_command()
         .io_engine(IoEngine.libaio)
         .read_write(ReadWrite.randrw)
-        .target(core.system_path)
+        .target(core.path)
         .continue_on_error(ErrorFilter.io)
         .direct(1)
         .run_time(timedelta(seconds=30))


@@ -72,7 +72,7 @@ def test_core_inactive_stats():
         dd = (
             Dd()
             .input("/dev/zero")
-            .output(core.system_path)
+            .output(core.path)
             .count(1000)
             .block_size(Size(4, Unit.KibiByte))
         ).run()


@@ -126,7 +126,7 @@ def test_flush_inactive_devices():
         InitConfig.create_init_config_from_running_configuration()
 
     with TestRun.step("Run random writes to CAS device."):
-        run_fio([first_core.system_path, second_core.system_path])
+        run_fio([first_core.path, second_core.path])
 
     with TestRun.step("Stop cache without flushing dirty data."):
         cache.stop(no_data_flush=True)
@@ -273,7 +273,7 @@ def test_load_cache_with_inactive_core():
         plug_device.unplug()
 
     with TestRun.step("Load cache."):
-        output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
+        output = TestRun.executor.run(cli.load_cmd(cache_dev.path))
         cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)
 
     with TestRun.step("Plug missing device and stop cache."):
@@ -429,7 +429,7 @@ def test_print_statistics_inactive(cache_mode):
         InitConfig.create_init_config_from_running_configuration()
 
     with TestRun.step("Run IO."):
-        run_fio([first_core.system_path, second_core.system_path])
+        run_fio([first_core.path, second_core.path])
 
     with TestRun.step("Print statistics and check if there is no inactive usage section."):
         active_stats = cache.get_statistics()
@@ -460,7 +460,7 @@ def test_print_statistics_inactive(cache_mode):
             time.sleep(1)
             first_core_status = first_core.get_status()
             if first_core_status != CoreStatus.active:
-                TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. "
+                TestRun.fail(f"Core {first_core.path} should be in active state but it is not. "
                              f"Actual state: {first_core_status}.")
 
     with TestRun.step("Check cache statistics section of inactive devices."):
@@ -543,7 +543,7 @@ def test_remove_detached_cores():
         InitConfig.create_init_config_from_running_configuration()
 
     with TestRun.step("Run random writes to all CAS devices."):
-        run_fio([c.system_path for c in cores])
+        run_fio([c.path for c in cores])
 
     with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
                       "dirty data."):
@@ -577,7 +577,7 @@ def test_remove_detached_cores():
 
     with TestRun.step("Verify that cores are no longer listed."):
         output = casadm.list_caches().stdout
         for dev in core_devs:
-            if dev.system_path in output:
+            if dev.path in output:
                 TestRun.fail(f"CAS device is still listed in casadm list output:\n{output}")
@@ -612,7 +612,7 @@ def test_remove_inactive_devices():
         InitConfig.create_init_config_from_running_configuration()
 
     with TestRun.step("Run random writes to all CAS devices."):
-        run_fio([c.system_path for c in cores])
+        run_fio([c.path for c in cores])
 
     with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
                       "contain dirty data."):
@@ -657,7 +657,7 @@ def test_remove_inactive_devices():
                 "dirty CAS device as expected.")
             cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_core)
             output = casadm.list_caches().stdout
-            if core.system_path not in output:
+            if core.path not in output:
                 TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
                              f"\n{output}")
             core.remove_core(force=True)
@@ -695,7 +695,7 @@ def test_stop_cache_with_inactive_devices():
         InitConfig.create_init_config_from_running_configuration()
 
     with TestRun.step("Run random writes and verify that CAS device contains dirty data."):
-        run_fio([core.system_path])
+        run_fio([core.path])
         if core.get_dirty_blocks() == Size.zero():
             TestRun.fail("There is no dirty data on core device.")


@@ -203,10 +203,10 @@ def test_udev_cache_load(cache_mode):
         elif len(caches) > 1:
             caches_list = '\n'.join(caches)
             TestRun.fail(f"There is more than 1 cache loaded:\n{caches_list}")
-        elif caches[0].cache_device.system_path != cache_dev.system_path:
+        elif caches[0].cache_device.path != cache_dev.path:
             TestRun.fail(f"Cache loaded on wrong device. "
-                         f"Actual: {caches[0].cache_device.system_path}, "
-                         f"expected: {cache_dev.system_path}")
+                         f"Actual: {caches[0].cache_device.path}, "
+                         f"expected: {cache_dev.path}")
         elif caches[0].get_cache_mode() != cache_mode:
             TestRun.fail(f"Cache did load with different cache mode. "
                          f"Actual: {caches[0].get_cache_mode()}, expected: {cache_mode}")
@@ -268,7 +268,7 @@ def test_neg_udev_cache_load():
     if len(cas_devices["caches"]) != 1:
         TestRun.LOGGER.error(f"There is wrong number of caches. Expected: 1, actual: "
                              f"{len(cas_devices['caches'])}")
-    elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].system_path or \
+    elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].path or \
             CacheStatus[(cas_devices["caches"][1]["status"]).lower()] != CacheStatus.running:
         TestRun.LOGGER.error(f"Cache did not load properly: {cas_devices['caches'][1]}")
     if len(cas_devices["cores"]) != 2:
@@ -277,7 +277,7 @@ def test_neg_udev_cache_load():
 
     correct_core_devices = []
     for i in first_cache_core_numbers:
-        correct_core_devices.append(core_disk.partitions[i].system_path)
+        correct_core_devices.append(core_disk.partitions[i].path)
     for core in cas_devices["cores"].values():
         if core["device"] not in correct_core_devices or \
                 CoreStatus[core["status"].lower()] != CoreStatus.active or \
@@ -297,7 +297,7 @@ def test_neg_udev_cache_load():
     core_pool_expected_devices = []
     for i in range(0, cores_count):
         if i not in first_cache_core_numbers:
-            core_pool_expected_devices.append(core_disk.partitions[i].system_path)
+            core_pool_expected_devices.append(core_disk.partitions[i].path)
     for c in cas_devices["core_pool"]:
         if c["device"] not in core_pool_expected_devices:
             TestRun.LOGGER.error(f"Wrong core device added to core pool: {c}.")
@@ -305,11 +305,11 @@ def test_neg_udev_cache_load():
 
 def check_if_dev_in_core_pool(dev, should_be_in_core_pool=True):
     cas_devices_dict = casadm_parser.get_cas_devices_dict()
-    is_in_core_pool = any(dev.system_path == d["device"] for d in cas_devices_dict["core_pool"])
+    is_in_core_pool = any(dev.path == d["device"] for d in cas_devices_dict["core_pool"])
     if not (should_be_in_core_pool ^ is_in_core_pool):
-        TestRun.LOGGER.info(f"Core device {dev.system_path} is"
+        TestRun.LOGGER.info(f"Core device {dev.path} is"
                             f"{'' if should_be_in_core_pool else ' not'} listed in core pool "
                             f"as expected.")
     else:
-        TestRun.fail(f"Core device {dev.system_path} is{' not' if should_be_in_core_pool else ''} "
+        TestRun.fail(f"Core device {dev.path} is{' not' if should_be_in_core_pool else ''} "
                      f"listed in core pool.")


@@ -64,7 +64,7 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
         else:
             power_control = TestRun.plugin_manager.get_plugin('power_control')
             power_control.power_cycle()
-        cache_dev.system_path = cache_dev_link.get_target()
+        cache_dev.path = cache_dev_link.get_target()
 
     with TestRun.step("Load cache."):
         casadm.load_cache(cache_dev)


@@ -86,7 +86,7 @@ def test_load_x_to_one_without_params(cache_mode, cleaning_policy, cache_line_si
                .num_jobs(cores_amount)
                .cpus_allowed_policy(CpusAllowedPolicy.split))
         for core in cores:
-            fio.add_job(f"job_{core.core_id}").target(core.system_path)
+            fio.add_job(f"job_{core.core_id}").target(core.path)
         fio.run()
 
     with TestRun.step("Stop cache."):
@@ -134,7 +134,7 @@ def test_load_x_to_one_without_params(cache_mode, cleaning_policy, cache_line_si
                .num_jobs(cores_amount)
                .cpus_allowed_policy(CpusAllowedPolicy.split))
         for core in cores:
-            fio.add_job(f"job_{core.core_id}").target(core.system_path)
+            fio.add_job(f"job_{core.core_id}").target(core.path)
         fio.run()
 
     with TestRun.step("Check if there are no error statistics."):
@@ -213,7 +213,7 @@ def test_load_x_to_one_with_params(cache_mode, cleaning_policy, cache_line_size,
                .num_jobs(cores_amount)
                .cpus_allowed_policy(CpusAllowedPolicy.split))
         for core in cores:
-            fio.add_job(f"job_{core.core_id}").target(core.system_path)
+            fio.add_job(f"job_{core.core_id}").target(core.path)
         fio.run()
 
     with TestRun.step("Stop cache."):
@@ -261,7 +261,7 @@ def test_load_x_to_one_with_params(cache_mode, cleaning_policy, cache_line_size,
                .num_jobs(cores_amount)
                .cpus_allowed_policy(CpusAllowedPolicy.split))
         for core in cores:
-            fio.add_job(f"job_{core.core_id}").target(core.system_path)
+            fio.add_job(f"job_{core.core_id}").target(core.path)
         fio.run()
 
     with TestRun.step("Check if there are no error statistics."):
@@ -347,7 +347,7 @@ def test_load_x_to_one_diff_params(cache_mode, cleaning_policy, cache_line_size,
                .num_jobs(cores_amount)
                .cpus_allowed_policy(CpusAllowedPolicy.split))
         for core in cores:
-            fio.add_job(f"job_{core.core_id}").target(core.system_path)
+            fio.add_job(f"job_{core.core_id}").target(core.path)
         fio.run()
 
     with TestRun.step("Stop cache."):
@@ -403,7 +403,7 @@ def test_load_x_to_one_diff_params(cache_mode, cleaning_policy, cache_line_size,
                .num_jobs(cores_amount)
                .cpus_allowed_policy(CpusAllowedPolicy.split))
         for core in cores:
-            fio.add_job(f"job_{core.core_id}").target(core.system_path)
+            fio.add_job(f"job_{core.core_id}").target(core.path)
         fio.run()
 
     with TestRun.step("Check if there are no error statistics."):


@@ -51,7 +51,7 @@ def test_load_occupied_id():
         caches = casadm_parser.get_caches()
         if len(caches) != 1:
             TestRun.LOGGER.error("Inappropriate number of caches after load!")
-        if caches[0].cache_device.system_path != cache_device_2.system_path:
+        if caches[0].cache_device.path != cache_device_2.path:
             TestRun.LOGGER.error("Wrong cache device system path!")
         if caches[0].cache_id != 1:
             TestRun.LOGGER.error("Wrong cache id.")


@@ -42,7 +42,7 @@ def test_write_fetch_full_misses(cache_mode, cache_line_size):
         io_stats_before_io = cache_disk.get_io_stats()
         blocksize = cache_line_size.value / 2
         skip_size = cache_line_size.value / 2
-        run_fio(target=core.system_path,
+        run_fio(target=core.path,
                 operation_type=ReadWrite.write,
                 skip=skip_size,
                 blocksize=blocksize,
@@ -87,7 +87,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
     with TestRun.step("Fill core partition with pattern."):
         cache_mode_traits = CacheMode.get_traits(cache_mode)
         if CacheModeTrait.InsertRead in cache_mode_traits:
-            run_fio(target=core_part.system_path,
+            run_fio(target=core_part.path,
                     operation_type=ReadWrite.write,
                     blocksize=Size(4, Unit.KibiByte),
                     io_size=io_size,
@@ -103,7 +103,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
     with TestRun.step("Cache half of file."):
         operation_type = ReadWrite.read if CacheModeTrait.InsertRead in cache_mode_traits \
             else ReadWrite.write
-        run_fio(target=core.system_path,
+        run_fio(target=core.path,
                 operation_type=operation_type,
                 skip=cache_line_size.value,
                 blocksize=cache_line_size.value,
@@ -117,7 +117,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
         io_stats_before_io = cache_disk.get_io_stats()
         blocksize = cache_line_size.value / 2 * 3
        skip_size = cache_line_size.value / 2
-        run_fio(target=core.system_path,
+        run_fio(target=core.path,
                 operation_type=ReadWrite.write,
                 skip=skip_size,
                 blocksize=blocksize,


@@ -41,8 +41,8 @@ def test_trim_start_discard():
     non_cas_part = dev.partitions[1]
 
     with TestRun.step("Writing different pattern on partitions"):
-        cas_fio = write_pattern(cas_part.system_path)
-        non_cas_fio = write_pattern(non_cas_part.system_path)
+        cas_fio = write_pattern(cas_part.path)
+        non_cas_fio = write_pattern(non_cas_part.path)
         cas_fio.run()
         non_cas_fio.run()
@@ -206,15 +206,15 @@ def check_discards(discards_count, device, discards_expected):
     if discards_expected:
         if discards_count > 0:
             TestRun.LOGGER.info(
-                f"{discards_count} TRIM instructions generated for {device.system_path}")
+                f"{discards_count} TRIM instructions generated for {device.path}")
         else:
-            TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.system_path}")
+            TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.path}")
     else:
         if discards_count > 0:
             TestRun.LOGGER.error(
-                f"{discards_count} TRIM instructions generated for {device.system_path}")
+                f"{discards_count} TRIM instructions generated for {device.path}")
         else:
-            TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.system_path}")
+            TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.path}")
 
 
 def start_monitoring(core_dev, cache_dev, cas_dev):


@@ -81,7 +81,7 @@ def test_ioclass_core_id(filesystem):
     if filesystem:
         dd_dst_paths = [cached_mountpoint + "/test_file", not_cached_mountpoint + "/test_file"]
     else:
-        dd_dst_paths = [core_1.system_path, core_2.system_path]
+        dd_dst_paths = [core_1.path, core_2.path]
 
     for path in dd_dst_paths:
         dd = (


@@ -39,7 +39,7 @@ def test_ioclass_directory_depth(filesystem):
         cache, core = prepare()
         Udev.disable()
 
-    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
+    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
                       f"at {mountpoint}."):
         core.create_filesystem(filesystem)
         core.mount(mountpoint)
@@ -156,7 +156,7 @@ def test_ioclass_directory_file_operations(filesystem):
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
     with TestRun.step(f"Prepare {filesystem.name} filesystem "
-                      f"and mounting {core.system_path} at {mountpoint}."):
+                      f"and mounting {core.path} at {mountpoint}."):
         core.create_filesystem(fs_type=filesystem)
         core.mount(mount_point=mountpoint)
         sync()
@@ -289,7 +289,7 @@ def test_ioclass_directory_dir_operations(filesystem):
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
     with TestRun.step(f"Prepare {filesystem.name} filesystem "
-                      f"and mount {core.system_path} at {mountpoint}."):
+                      f"and mount {core.path} at {mountpoint}."):
         core.create_filesystem(fs_type=filesystem)
         core.mount(mount_point=mountpoint)
         sync()


@@ -53,7 +53,7 @@ def test_ioclass_file_extension():
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
+    with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
         core.create_filesystem(Filesystem.ext3)
         core.mount(mountpoint)
@@ -128,7 +128,7 @@ def test_ioclass_file_name_prefix():
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
+    with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
         previous_occupancy = cache.get_occupancy()
 
         core.create_filesystem(Filesystem.ext3)
@@ -285,7 +285,7 @@ def test_ioclass_file_offset():
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
+    with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
         core.create_filesystem(Filesystem.ext3)
         core.mount(mountpoint)
@@ -374,7 +374,7 @@ def test_ioclass_file_size(filesystem):
     with TestRun.step("Prepare and load IO class config."):
         load_file_size_io_classes(cache, base_size)
 
-    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
+    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
                       f"at {mountpoint}."):
         core.create_filesystem(filesystem)
         core.mount(mountpoint)


@@ -53,7 +53,7 @@ def test_ioclass_process_name():
         dd = (
             Dd()
             .input("/dev/zero")
-            .output(core.system_path)
+            .output(core.path)
             .count(dd_count)
             .block_size(dd_size)
             .seek(i)
@@ -91,7 +91,7 @@ def test_ioclass_pid():
     dd_command = str(
         Dd()
         .input("/dev/zero")
-        .output(core.system_path)
+        .output(core.path)
         .count(dd_count)
         .block_size(dd_size)
     )


@@ -39,7 +39,7 @@ def test_ioclass_usage_sum():
         Udev.disable()
 
     with TestRun.step(
-        f"Prepare filesystem and mount {core.system_path} at {mountpoint}"
+        f"Prepare filesystem and mount {core.path} at {mountpoint}"
     ):
         filesystem = Filesystem.xfs
         core.create_filesystem(filesystem)


@ -66,7 +66,7 @@ def test_ioclass_lba():
for lba in range(min_cached_lba, max_cached_lba, 8): for lba in range(min_cached_lba, max_cached_lba, 8):
dd = ( dd = (
Dd().input("/dev/zero") Dd().input("/dev/zero")
.output(f"{core.system_path}") .output(f"{core.path}")
.count(dd_count) .count(dd_count)
.block_size(dd_size) .block_size(dd_size)
.seek(lba) .seek(lba)
@@ -90,7 +90,7 @@ def test_ioclass_lba():
continue
dd = (
Dd().input("/dev/zero")
-.output(f"{core.system_path}")
+.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
@@ -140,7 +140,7 @@ def test_ioclass_request_size():
req_size = random.choice(cached_req_sizes)
dd = (
Dd().input("/dev/zero")
-.output(core.system_path)
+.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
@@ -163,7 +163,7 @@ def test_ioclass_request_size():
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd().input("/dev/zero")
-.output(core.system_path)
+.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
@@ -212,12 +212,12 @@ def test_ioclass_direct(filesystem):
.io_engine(IoEngine.libaio) \
.size(io_size).offset(io_size) \
.read_write(ReadWrite.write) \
-.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
+.target(f"{mountpoint}/tmp_file" if filesystem else core.path)
with TestRun.step("Prepare filesystem."):
if filesystem:
TestRun.LOGGER.info(
-f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
+f"Preparing {filesystem.name} filesystem and mounting {core.path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
@@ -305,7 +305,7 @@ def test_ioclass_metadata(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
+with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
@@ -444,7 +444,7 @@ def test_ioclass_id_as_condition(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
-f"and mount {core.system_path} at {mountpoint}."):
+f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
@@ -553,7 +553,7 @@ def test_ioclass_conditions_or(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
-f"and mount {core.system_path} at {mountpoint}."):
+f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
@@ -614,7 +614,7 @@ def test_ioclass_conditions_and(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-f"and mounting {core.system_path} at {mountpoint}")
+f"and mounting {core.path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
@@ -662,7 +662,7 @@ def test_ioclass_effective_ioclass(filesystem):
f"file_size:ge:{file_size_bytes // 2}"]
with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
-f"and mounting {core.system_path} at {mountpoint}"):
+f"and mounting {core.path} at {mountpoint}"):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(test_dir)


@@ -72,7 +72,7 @@ def test_acp_functional(cache_mode):
.direct()
.size(chunk_size)
.block_size(Size(1, Unit.Blocks4096))
-.target(f"{core.system_path}"))
+.target(f"{core.path}"))
for chunk in chunk_list:
fio.add_job().offset(chunk.offset).io_size(chunk.writes_size)
fio.run()


@@ -88,8 +88,8 @@ def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy, file
core.unmount()
TestRun.LOGGER.info(f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}")
power_cycle_dut()
-cache_device.system_path = cache_device_link.get_target()
-core_device.system_path = core_device_link.get_target()
+cache_device.path = cache_device_link.get_target()
+core_device.path = core_device_link.get_target()
with TestRun.step("Try to start cache without load and force option."):
try:


@@ -56,12 +56,12 @@ def test_recovery_flush_reset_raw(cache_mode):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
with TestRun.step("Copy file to CAS."):
-copy_file(source=source_file.full_path, target=core.system_path, size=test_file_size,
+copy_file(source=source_file.full_path, target=core.path, size=test_file_size,
direct="oflag")
with TestRun.step("Sync and flush buffers."):
os_utils.sync()
-output = TestRun.executor.run(f"hdparm -f {core.system_path}")
+output = TestRun.executor.run(f"hdparm -f {core.path}")
if output.exit_code != 0:
raise CmdException("Error during hdparm", output)
@@ -70,8 +70,8 @@ def test_recovery_flush_reset_raw(cache_mode):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(wait_for_flush_begin=True, core_device=core_device)
-cache_device.system_path = cache_device_link.get_target()
-core_device.system_path = core_device_link.get_target()
+cache_device.path = cache_device_link.get_target()
+core_device.path = core_device_link.get_target()
with TestRun.step("Copy file from core and check if current md5sum is different than "
"before restart."):
@@ -155,8 +155,8 @@ def test_recovery_flush_reset_fs(cache_mode, fs):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(True, core_device)
-cache_device.system_path = cache_device_link.get_target()
-core_device.system_path = core_device_link.get_target()
+cache_device.path = cache_device_link.get_target()
+core_device.path = core_device_link.get_target()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_device)


@@ -131,7 +131,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls):
core = cache.add_core(core_device)
with TestRun.step("Copy file to CAS."):
-copy_file(source=source_file.full_path, target=core.system_path,
+copy_file(source=source_file.full_path, target=core.path,
size=test_file_size, direct="oflag")
TestRun.LOGGER.info(str(core.get_statistics()))
@@ -156,7 +156,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls):
cache.stop()
with TestRun.step("Copy file from core device and check md5sum."):
-copy_file(source=core_device.system_path, target=target_file.full_path,
+copy_file(source=core_device.path, target=target_file.full_path,
size=test_file_size, direct="iflag")
compare_files(source_file, target_file)


@@ -170,4 +170,4 @@ def test_flush_over_640_gibibytes_raw_device(cache_mode):
def check_disk_size(device: Device):
if device.size < required_disk_size:
-pytest.skip(f"Not enough space on device {device.system_path}.")
+pytest.skip(f"Not enough space on device {device.path}.")


@@ -60,7 +60,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Write data to the exported object."):
test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
-dd = Dd().output(core.system_path) \
+dd = Dd().output(core.path) \
.input(test_file_main.full_path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
@@ -85,7 +85,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the exported object."):
test_file_1 = File.create_file("/tmp/test_file_1")
dd = Dd().output(test_file_1.full_path) \
-.input(core.system_path) \
+.input(core.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -100,7 +100,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_2 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_2.full_path) \
-.input(core_part.system_path) \
+.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -133,7 +133,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_3 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_3.full_path) \
-.input(core_part.system_path) \
+.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -277,7 +277,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Write data to exported object."):
test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
-dd = Dd().output(core.system_path) \
+dd = Dd().output(core.path) \
.input(test_file_main.full_path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
@@ -302,7 +302,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from the exported object."):
test_file_1 = File.create_file("/tmp/test_file_1")
dd = Dd().output(test_file_1.full_path) \
-.input(core.system_path) \
+.input(core.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -317,7 +317,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_2 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_2.full_path) \
-.input(core_part.system_path) \
+.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -350,7 +350,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from core device again."):
test_file_3 = File.create_file("/tmp/test_file_3")
dd = Dd().output(test_file_3.full_path) \
-.input(core_part.system_path) \
+.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")


@@ -76,7 +76,7 @@ def test_user_cli():
with TestRun.step("Try to start cache."):
try:
-output = run_as_other_user(cli.start_cmd(cache_dev.system_path), user_name)
+output = run_as_other_user(cli.start_cmd(cache_dev.path), user_name)
if output.exit_code == 0:
TestRun.LOGGER.error("Starting cache should fail!")
except CmdException:
@@ -105,7 +105,7 @@ def test_user_cli():
with TestRun.step("Try to add core to cache."):
try:
output = run_as_other_user(cli.add_core_cmd(str(cache.cache_id),
-core_part2.system_path), user_name)
+core_part2.path), user_name)
if output.exit_code == 0:
TestRun.LOGGER.error("Adding core to cache should fail!")
except CmdException:
@@ -244,7 +244,7 @@ def test_user_cli():
with TestRun.step("Try to start cache with 'sudo'."):
try:
-run_as_other_user(cli.start_cmd(cache_dev.system_path, force=True), user_name, True)
+run_as_other_user(cli.start_cmd(cache_dev.path, force=True), user_name, True)
except CmdException:
TestRun.LOGGER.error("Non-root sudoer user should be able to start cache.")
@@ -259,7 +259,7 @@ def test_user_cli():
with TestRun.step("Try to add core to cache with 'sudo'."):
try:
run_as_other_user(cli.add_core_cmd(str(cache.cache_id),
-core_part1.system_path), user_name, True)
+core_part1.path), user_name, True)
except CmdException:
TestRun.LOGGER.error("Non-root sudoer user should be able to add core to cache.")


@@ -100,7 +100,7 @@ def test_block_stats_write(cache_mode, zero_stats):
dd = (
Dd()
.input("/dev/zero")
-.output(f"{core.system_path}")
+.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.oflag("direct")
@@ -225,7 +225,7 @@ def test_block_stats_read(cache_mode, zero_stats):
dd = (
Dd()
.output("/dev/zero")
-.input(f"{core.system_path}")
+.input(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.iflag("direct")


@@ -271,8 +271,8 @@ def dd_builder(cache_mode, cache_line_size, count, device):
.count(count))
if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
-dd.input(device.system_path).output("/dev/null").iflag("direct")
+dd.input(device.path).output("/dev/null").iflag("direct")
else:
-dd.input("/dev/urandom").output(device.system_path).oflag("direct")
+dd.input("/dev/urandom").output(device.path).oflag("direct")
return dd


@@ -66,7 +66,7 @@ def test_cache_config_stats():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
-fio.add_job().target(cores[i][j].system_path)
+fio.add_job().target(cores[i][j].path)
fio_pid = fio.run_in_background()
with TestRun.step(f"Wait {time_to_wait} seconds"):
@@ -107,7 +107,7 @@ def test_core_config_stats():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
-fio.add_job().target(cores[i][j].system_path)
+fio.add_job().target(cores[i][j].path)
fio_pid = fio.run_in_background()
with TestRun.step(f"Wait {time_to_wait} seconds"):
@@ -255,11 +255,11 @@ def validate_cache_config_statistics(caches, after_io: bool = False):
failed_stats += (
f"For cache number {caches[i].cache_id} cache ID is "
f"{caches_stats[i].config_stats.cache_id}\n")
-if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.system_path:
+if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.path:
failed_stats += (
f"For cache number {caches[i].cache_id} cache device "
f"is {caches_stats[i].config_stats.cache_dev}, "
-f"should be {caches[i].cache_device.system_path}\n")
+f"should be {caches[i].cache_device.path}\n")
if caches_stats[i].config_stats.cache_size.value != caches[i].size.value:
failed_stats += (
f"For cache number {caches[i].cache_id} cache size is "
@@ -344,23 +344,23 @@ def validate_core_config_statistics(cores, caches=None):
for j in range(cores_per_cache)
]
for j in range(cores_per_cache):
-if cores_stats[j].config_stats.exp_obj != cores[i][j].system_path:
+if cores_stats[j].config_stats.exp_obj != cores[i][j].path:
failed_stats += (
-f"For exported object {cores[i][j].system_path} "
+f"For exported object {cores[i][j].path} "
f"value in stats is {cores_stats[j].config_stats.exp_obj}\n")
if cores_stats[j].config_stats.core_id != cores[i][j].core_id:
failed_stats += (
-f"For exported object {cores[i][j].system_path} "
+f"For exported object {cores[i][j].path} "
f"core ID is {cores_stats[j].config_stats.core_id}, "
f"should be {cores[i][j].core_id}\n")
-if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.system_path:
+if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.path:
failed_stats += (
-f"For exported object {cores[i][j].system_path} "
+f"For exported object {cores[i][j].path} "
f"core device is {cores_stats[j].config_stats.core_dev}, "
-f"should be {cores[i][j].core_device.system_path}\n")
+f"should be {cores[i][j].core_device.path}\n")
if cores_stats[j].config_stats.core_size.value != cores[i][j].size.value:
failed_stats += (
-f"For exported object {cores[i][j].system_path} "
+f"For exported object {cores[i][j].path} "
f"core size is {cores_stats[j].config_stats.core_size.value}, "
f"should be {cores[i][j].size.value}\n")
if (
@@ -368,16 +368,16 @@ def validate_core_config_statistics(cores, caches=None):
!= cores[i][j].get_status()
):
failed_stats += (
-f"For exported object {cores[i][j].system_path} core "
+f"For exported object {cores[i][j].path} core "
f"status is {cores_stats[j].config_stats.status}, should be "
f"{str(cores[i][j].get_status()).split('.')[1].capitalize()}\n")
if cores_stats[j].config_stats.seq_cutoff_policy is None:
failed_stats += (
-f"For exported object {cores[i][j].system_path} value of "
+f"For exported object {cores[i][j].path} value of "
f"Sequential cut-off policy should not be empty\n")
if cores_stats[j].config_stats.seq_cutoff_threshold.value <= 0:
failed_stats += (
-f"For exported object {cores[i][j].system_path} value of "
+f"For exported object {cores[i][j].path} value of "
f"Sequential cut-off threshold should be greater then 0\n")
if caches:
cache_mode = CacheMode[
@@ -386,21 +386,21 @@ def validate_core_config_statistics(cores, caches=None):
if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
if cores_stats[j].config_stats.dirty_for.total_seconds() <= 0:
failed_stats += (
-f"For exported object {cores[i][j].system_path} in "
+f"For exported object {cores[i][j].path} in "
f"{cache_mode} cache mode, value of 'Dirty for' "
f"after IO is {cores_stats[j].config_stats.dirty_for}, "
f"should be greater then 0\n")
else:
if cores_stats[j].config_stats.dirty_for.total_seconds() != 0:
failed_stats += (
-f"For exported object {cores[i][j].system_path} in "
+f"For exported object {cores[i][j].path} in "
f"{cache_mode} cache mode, value of 'Dirty for' "
f"after IO is {cores_stats[j].config_stats.dirty_for}, "
f"should equal 0\n")
else:
if cores_stats[j].config_stats.dirty_for.total_seconds() < 0:
failed_stats += (
-f"For exported object {cores[i][j].system_path} value of "
+f"For exported object {cores[i][j].path} value of "
f"'Dirty for' is {cores_stats[j].config_stats.dirty_for}, "
f"should be greater or equal 0\n")
@@ -412,7 +412,7 @@ def validate_core_config_statistics(cores, caches=None):
def validate_statistics_flat(device, stats, stat_filter, per_core: bool):
device_name = (
-f"core device {device.system_path}" if per_core else
+f"core device {device.path}" if per_core else
f"cache number {device.cache_id}")
failed_stats = ""
if stat_filter == StatsFilter.usage:


@@ -310,7 +310,7 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1):
Udev.disable()
caches, cores = [], []
for i, cache_device in enumerate(cache_devices):
-TestRun.LOGGER.info(f"Starting cache on {cache_device.system_path}")
+TestRun.LOGGER.info(f"Starting cache on {cache_device.path}")
cache = casadm.start_cache(cache_device,
force=True,
cache_mode=cache_modes[i],
@@ -320,7 +320,7 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1):
cache.set_cleaning_policy(CleaningPolicy.nop)
for core_device in core_devices[i * cores_per_cache:(i + 1) * cores_per_cache]:
TestRun.LOGGER.info(
-f"Adding core device {core_device.system_path} to cache {cache.cache_id}")
+f"Adding core device {core_device.path} to cache {cache.cache_id}")
core = cache.add_core(core_dev=core_device)
core.reset_counters()
cores.append(core)


@@ -78,7 +78,7 @@ def test_stat_max_cache():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
-fio.add_job().target(cores[i][j].system_path)
+fio.add_job().target(cores[i][j].path)
fio.run()
sleep(3)
@@ -128,7 +128,7 @@ def test_stat_max_core(cache_mode):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for j in range(cores_per_cache):
-fio.add_job().target(cores[j].system_path)
+fio.add_job().target(cores[j].path)
fio.run()
sleep(3)


@@ -61,7 +61,7 @@ def test_stats_values():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
-fio.add_job().target(cores[i][j].system_path)
+fio.add_job().target(cores[i][j].path)
fio.run()
sleep(3)
@@ -156,22 +156,22 @@ def check_stats_initial(caches, cores):
if stat_name.lower() == "free":
if stat_value != caches[i].size.value:
TestRun.LOGGER.error(
-f"For core device {cores[i][j].system_path} "
+f"For core device {cores[i][j].path} "
f"value for '{stat_name}' is {stat_value}, "
f"should equal cache size: {caches[i].size.value}\n")
elif stat_value != 0:
TestRun.LOGGER.error(
-f"For core device {cores[i][j].system_path} value for "
+f"For core device {cores[i][j].path} value for "
f"'{stat_name}' is {stat_value}, should equal 0\n")
for stat_name, stat_value in cores_stats_perc[j].items():
if stat_name.lower() == "free":
if stat_value != 100:
TestRun.LOGGER.error(
-f"For core device {cores[i][j].system_path} percentage value "
+f"For core device {cores[i][j].path} percentage value "
f"for '{stat_name}' is {stat_value}, should equal 100\n")
elif stat_value != 0:
TestRun.LOGGER.error(
-f"For core device {cores[i][j].system_path} percentage value "
+f"For core device {cores[i][j].path} percentage value "
f"for '{stat_name}' is {stat_value}, should equal 0\n")
@@ -191,7 +191,7 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
)
for j in range(cores_per_cache):
fail_message = (
-f"For core device {cores[i][j].system_path} in {cache_mode} cache mode ")
+f"For core device {cores[i][j].path} in {cache_mode} cache mode ")
if after_reload:
validate_usage_stats(
cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)


@@ -194,7 +194,7 @@ def test_kedr_basic_io_raw(module, unload_modules, install_kedr):
.run_time(timedelta(minutes=4))
.time_based()
.read_write(ReadWrite.randrw)
-.target(f"{core.system_path}")
+.target(f"{core.path}")
.direct()
).run()


@@ -79,7 +79,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
.num_jobs(cores_number)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
-fio.add_job(f"job_{core.core_id}").target(core.system_path)
+fio.add_job(f"job_{core.core_id}").target(core.path)
output = fio.run()[0]
TestRun.LOGGER.info(f"Total read I/O [KiB]: {str(output.read_io())}\n"
f"Total write I/O [KiB]: {str(output.write_io())}")
@@ -88,7 +88,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
md5sum_core = []
for core in cores:
md5sum_core.append(TestRun.executor.run(
-f"md5sum -b {core.system_path}").stdout.split(" ")[0])
+f"md5sum -b {core.path}").stdout.split(" ")[0])
with TestRun.step("Stop cache."):
cache.stop()
@@ -97,7 +97,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
md5sum_core_dev = []
for core_dev in core_dev.partitions:
md5sum_core_dev.append(TestRun.executor.run(
-f"md5sum -b {core_dev.system_path}").stdout.split(" ")[0])
+f"md5sum -b {core_dev.path}").stdout.split(" ")[0])
with TestRun.step("Compare md5 sum of exported objects and cores."):
if md5sum_core_dev != md5sum_core:


@@ -105,5 +105,5 @@ def run_io(exported_objects):
.io_depth(32) \
.run_time(timedelta(minutes=5)) \
.num_jobs(5) \
-.target(exported_objects[i].system_path)
+.target(exported_objects[i].path)
fio.run_in_background()


@@ -39,8 +39,8 @@ def test_trim_start_discard():
non_cas_part = dev.partitions[1]
with TestRun.step("Writing different pattern on partitions"):
-cas_fio = write_pattern(cas_part.system_path)
-non_cas_fio = write_pattern(non_cas_part.system_path)
+cas_fio = write_pattern(cas_part.path)
+non_cas_fio = write_pattern(non_cas_part.path)
cas_fio.run()
non_cas_fio.run()


@@ -44,7 +44,7 @@ def test_discard_on_huge_core():
# RCU-sched type stall sometimes appears in dmesg log after more
# than one execution of blkdiscard.
for _ in range(8):
-TestRun.executor.run_expect_success(f"blkdiscard {core.system_path}")
+TestRun.executor.run_expect_success(f"blkdiscard {core.path}")
with TestRun.step("Check dmesg for RCU-sched stall."):
check_for_rcu_sched_type_stall()


@@ -47,7 +47,8 @@ class casadm:
def list_caches(cls):
cmd = [cls.casadm_path,
'--list-caches',
-'--output-format', 'csv']
+'--output-format', 'csv',
+'--by-id-path']
return cls.run_cmd(cmd)
@classmethod