Merge pull request #152 from robertbaldyga/move-ocl-tests

Move OCL tests from test-framework repository
Authored by Daniel Madej on 2019-10-21 10:56:28 +02:00, committed by GitHub
commit 5a7d2ed5c9
35 changed files with 4102 additions and 0 deletions

.gitmodules (vendored): 3 additions
View File

@ -1,3 +1,6 @@
[submodule "ocf"]
path = ocf
url = https://github.com/Open-CAS/ocf.git
[submodule "test/functional/test-framework"]
path = test/functional/test-framework
url = https://github.com/Open-CAS/test-framework.git

View File

View File

View File

@ -0,0 +1,142 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from api.cas.cli import *
from api.cas.casadm_parser import *
from test_utils.os_utils import *
from api.cas.cache_config import *
from storage_devices.device import Device
from core.test_run import TestRun
from api.cas.casadm_params import *
class Cache:
def __init__(self, device_system_path):
self.cache_device = Device(device_system_path)
self.cache_id = int(self.__get_cache_id())
self.__cache_line_size = None
self.__metadata_mode = None
self.__metadata_size = None
def __get_cache_id(self):
cmd = f"{list_cmd()} | grep {self.cache_device.system_path}"
output = TestRun.executor.run(cmd)
if output.exit_code == 0 and output.stdout.strip():
return output.stdout.split()[1]
else:
raise Exception(f"There is no cache started on {self.cache_device.system_path}.")
def get_core_devices(self):
return get_cores(self.cache_id)
def get_cache_line_size(self):
if self.__cache_line_size is None:
stats = self.get_cache_statistics()
stats_line_size = stats["cache line size"]
self.__cache_line_size = CacheLineSize(stats_line_size.get_value(Unit.Byte))
return self.__cache_line_size
def get_cleaning_policy(self):
stats = self.get_cache_statistics()
cp = stats["cleaning policy"]
return CleaningPolicy[cp]
def get_eviction_policy(self):
stats = self.get_cache_statistics()
ep = stats["eviction policy"]
return EvictionPolicy[ep]
def get_metadata_mode(self):
if self.__metadata_mode is None:
stats = self.get_cache_statistics()
mm = stats["metadata mode"]
self.__metadata_mode = MetadataMode[mm]
return self.__metadata_mode
def get_metadata_size(self):
if self.__metadata_size is None:
stats = self.get_cache_statistics()
self.__metadata_size = stats["metadata memory footprint"]
return self.__metadata_size
def get_occupancy(self):
return self.get_cache_statistics()["occupancy"]
def get_status(self):
status = self.get_cache_statistics()["status"].replace(' ', '_')
return CacheStatus[status]
def get_cache_mode(self):
return CacheMode[self.get_cache_statistics()["write policy"].upper()]
def get_dirty_blocks(self):
return self.get_cache_statistics()["dirty"]
def get_dirty_for(self):
return self.get_cache_statistics()["dirty for"]
def get_clean_blocks(self):
return self.get_cache_statistics()["clean"]
def get_flush_parameters_alru(self):
return get_flush_parameters_alru(self.cache_id)
def get_flush_parameters_acp(self):
return get_flush_parameters_acp(self.cache_id)
# Casadm methods:
def get_cache_statistics(self,
io_class_id: int = None,
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False):
return get_statistics(self.cache_id, None, io_class_id,
stat_filter, percentage_val)
def flush_cache(self):
casadm.flush(cache_id=self.cache_id)
sync()
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
def stop(self, no_data_flush: bool = False):
return casadm.stop_cache(self.cache_id, no_data_flush)
def add_core(self, core_dev, core_id: int = None):
return casadm.add_core(self, core_dev, core_id)
def remove_core(self, core_id, force: bool = False):
return casadm.remove_core(self.cache_id, core_id, force)
def reset_counters(self):
return casadm.reset_counters(self.cache_id)
def set_cache_mode(self, cache_mode: CacheMode, flush: bool = True):
return casadm.set_cache_mode(cache_mode, self.cache_id, flush)
def load_io_class(self, file_path: str):
return casadm.load_io_classes(self.cache_id, file_path)
def list_io_classes(self, output_format: OutputFormat):
return casadm.list_io_classes(self.cache_id, output_format)
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
return casadm.set_param_cutoff(self.cache_id,
seq_cutoff_param.threshold,
seq_cutoff_param.policy)
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
return casadm.set_param_cleaning(self.cache_id, cleaning_policy)
def set_params_acp(self, acp_params: FlushParametersAcp):
return casadm.set_param_cleaning_acp(self.cache_id,
acp_params.wake_up_time.total_milliseconds(),
acp_params.flush_max_buffers)
def set_params_alru(self, alru_params: FlushParametersAlru):
return casadm.set_param_cleaning_alru(self.cache_id,
alru_params.wake_up_time.total_seconds(),
alru_params.staleness_time.total_seconds(),
alru_params.flush_max_buffers,
alru_params.activity_threshold.total_milliseconds())
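For orientation, a minimal usage sketch of the Cache wrapper above; it assumes a cache has already been started on the given device (the path is purely illustrative):

from api.cas.cache import Cache
from api.cas.cache_config import CleaningPolicy

cache = Cache("/dev/nvme0n1p1")          # illustrative path of a started cache device
print(cache.get_cache_mode())            # CacheMode parsed from the "write policy" statistic
print(cache.get_occupancy())             # Size object taken from cache statistics
cache.set_cleaning_policy(CleaningPolicy.nop)
cache.flush_cache()                      # flush and assert no dirty blocks remain
cache.stop()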

View File

@ -0,0 +1,114 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from enum import IntEnum, Enum
from test_utils.size import Size, Unit
from datetime import timedelta
class CacheLineSize(IntEnum):
LINE_4KiB = Size(4, Unit.KibiByte)
LINE_8KiB = Size(8, Unit.KibiByte)
LINE_16KiB = Size(16, Unit.KibiByte)
LINE_32KiB = Size(32, Unit.KibiByte)
LINE_64KiB = Size(64, Unit.KibiByte)
DEFAULT = LINE_4KiB
class CacheMode(Enum):
WT = 0
WB = 1
WA = 2
PT = 3
WO = 4
DEFAULT = WT
class SeqCutOffPolicy(Enum):
full = 0
always = 1
never = 2
DEFAULT = full
class EvictionPolicy(Enum):
lru = 0
lmp = 1
nop = 2
class MetadataMode(Enum):
normal = 0
atomic = 1
class CleaningPolicy(Enum):
alru = 0
nop = 1
acp = 2
DEFAULT = alru
class CacheStatus(Enum):
not_running = 0
running = 1
stopping = 2
initializing = 3
flushing = 4
incomplete = 5
class Time(timedelta):
def total_milliseconds(self):
return self.total_seconds() * 1000
class FlushParametersAlru:
def __init__(self):
self.activity_threshold = None
self.flush_max_buffers = None
self.staleness_time = None
self.wake_up_time = None
@staticmethod
def default_alru_params():
alru_params = FlushParametersAlru()
alru_params.activity_threshold = Time(milliseconds=10000)
alru_params.flush_max_buffers = 100
alru_params.staleness_time = Time(seconds=120)
alru_params.wake_up_time = Time(seconds=20)
return alru_params
class FlushParametersAcp:
def __init__(self):
self.flush_max_buffers = None
self.wake_up_time = None
@staticmethod
def default_acp_params():
acp_params = FlushParametersAcp()
acp_params.flush_max_buffers = 128
acp_params.wake_up_time = Time(milliseconds=10)
return acp_params
class SeqCutOffParameters:
def __init__(self):
self.policy = None
self.threshold = None
@staticmethod
def default_seq_cut_off_params():
seq_cut_off_params = SeqCutOffParameters()
seq_cut_off_params.policy = SeqCutOffPolicy.full
seq_cut_off_params.threshold = Size(1024, Unit.KibiByte)
# TODO: Use case for this will be to iterate over configurations (kernel params such as
# TODO: io scheduler, metadata layout) and prepare env before starting cache
class CacheConfig:
def __init__(self):
pass
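A short sketch of how the default-parameter helpers above are meant to be consumed (the values shown in comments are the defaults defined in this module):

from api.cas.cache_config import (FlushParametersAcp, FlushParametersAlru,
                                  SeqCutOffParameters)

alru = FlushParametersAlru.default_alru_params()
print(alru.wake_up_time.total_seconds())             # 20.0
print(alru.activity_threshold.total_milliseconds())  # 10000.0

acp = FlushParametersAcp.default_acp_params()
print(acp.flush_max_buffers)                         # 128

seq = SeqCutOffParameters.default_seq_cut_off_params()
print(seq.policy)                                    # SeqCutOffPolicy.full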

View File

@ -0,0 +1,18 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from aenum import Enum
from config.configuration import cas_kernel_module, disk_kernel_module
from test_utils import os_utils
from test_utils.os_utils import ModuleRemoveMethod
class CasModule(Enum):
cache = cas_kernel_module
disk = disk_kernel_module
def reload_all_cas_modules():
os_utils.unload_kernel_module(CasModule.cache, ModuleRemoveMethod.modprobe)
os_utils.load_kernel_module(CasModule.cache)

View File

@ -0,0 +1,305 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from .cli import *
from .casctl import stop as casctl_stop
from core.test_run import TestRun
from .casadm_params import *
from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy, CleaningPolicy
from test_utils.size import Size, Unit
from typing import List
from storage_devices.device import Device
from api.cas.core import Core
from api.cas.cache import Cache
def help(shortcut: bool = False):
return TestRun.executor.run(help_cmd(shortcut))
def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
cache_line_size: CacheLineSize = None, cache_id: int = None,
force: bool = False, load: bool = False, shortcut: bool = False):
_cache_line_size = None if cache_line_size is None else str(
cache_line_size.get_value(Unit.KibiByte))
_cache_id = None if cache_id is None else str(cache_id)
_cache_mode = None if cache_mode is None else cache_mode.name.lower()
output = TestRun.executor.run(start_cmd(
cache_dev=cache_dev.system_path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to start cache. stdout: {output.stdout} \n stderr :{output.stderr}")
return Cache(cache_dev.system_path)
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to stop cache. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run(
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.system_path,
core_id=_core_id, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to add core. stdout: {output.stdout} \n stderr :{output.stderr}")
return Core(core_dev.system_path, cache.cache_id)
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
remove_core_cmd(cache_id=str(cache_id), core_id=str(core_id),
force=force, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to remove core. stdout: {output.stdout} \n stderr :{output.stderr}")
def remove_detached(core_device: Device, shortcut: bool = False):
output = TestRun.executor.run(
remove_detached_cmd(core_device=core_device.system_path, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to remove detached core. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run(
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to reset counters. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
if core_id is None:
command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut)
else:
command = flush_core_cmd(cache_id=str(cache_id), core_id=str(core_id), shortcut=shortcut)
output = TestRun.executor.run(command)
if output.exit_code != 0:
raise Exception(
f"Flushing failed. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def load_cache(device: Device, shortcut: bool = False):
output = TestRun.executor.run(
load_cmd(cache_dev=device.system_path, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to load cache. stdout: {output.stdout} \n stderr :{output.stderr}")
return Cache(device.system_path)
def list_caches(output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
list_cmd(output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to list caches. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def print_version(output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
version_cmd(output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Failed to print version. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def format_nvme(cache_dev: Device, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
format_cmd(cache_dev=cache_dev.system_path, force=force, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Format command failed. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def stop_all_caches():
if "No caches running" in list_caches().stdout:
return
TestRun.LOGGER.info("Stop all caches")
casctl_stop()
output = list_caches()
if "No caches running" not in output.stdout:
raise Exception(
f"Error while stopping caches. stdout: {output.stdout} \n stderr :{output.stderr}")
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
io_class_id: int = None, filter: List[StatsFilter] = None,
output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
_core_id = None if core_id is None else str(core_id)
_io_class_id = None if io_class_id is None else str(io_class_id)
if filter is None:
_filter = filter
else:
names = (x.name for x in filter)
_filter = ",".join(names)
output = TestRun.executor.run(
print_statistics_cmd(
cache_id=str(cache_id), core_id=_core_id,
per_io_class=per_io_class, io_class_id=_io_class_id,
filter=_filter, output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Printing statistics failed. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def set_cache_mode(cache_mode: CacheMode, cache_id: int,
flush: bool = True, shortcut: bool = False):
flush_cache = None
if cache_mode in [CacheMode.WB, CacheMode.WO]:
flush_cache = "yes" if flush else "no"
output = TestRun.executor.run(
set_cache_mode_cmd(cache_mode=cache_mode.name.lower(), cache_id=str(cache_id),
flush_cache=flush_cache, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Set cache mode command failed. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
output = TestRun.executor.run(
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Load IO class command failed. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
list_io_classes_cmd(cache_id=str(cache_id),
output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"List IO class command failed. stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def get_param_cutoff(cache_id: int, core_id: int,
output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cutoff_cmd(cache_id=str(cache_id), core_id=str(core_id),
output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Getting sequential cutoff params failed."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cleaning_cmd(cache_id=str(cache_id), output_format=_output_format,
shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Getting cleaning policy params failed."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def get_param_cleaning_alru(cache_id: int, output_format: OutputFormat = None,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cleaning_alru_cmd(cache_id=str(cache_id), output_format=_output_format,
shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Getting alru cleaning policy params failed."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def get_param_cleaning_acp(cache_id: int, output_format: OutputFormat = None,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cleaning_acp_cmd(cache_id=str(cache_id), output_format=_output_format,
shortcut=shortcut))
if output.exit_code != 0:
raise Exception(
f"Getting acp cleaning policy params failed."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
policy: SeqCutOffPolicy = None):
_threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte)))
if core_id is None:
command = set_param_cutoff_cmd(
cache_id=str(cache_id), threshold=_threshold,
policy=policy.name)
else:
command = set_param_cutoff_cmd(
cache_id=str(cache_id), core_id=str(core_id),
threshold=_threshold, policy=policy.name)
output = TestRun.executor.run(command)
if output.exit_code != 0:
raise Exception(
f"Error while setting sequential cut-off params."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def set_param_cleaning(cache_id: int, policy: CleaningPolicy):
output = TestRun.executor.run(
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name))
if output.exit_code != 0:
raise Exception(
f"Error while setting cleaning policy."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def set_param_cleaning_alru(cache_id: int, wake_up: int = None, staleness_time: int = None,
flush_max_buffers: int = None, activity_threshold: int = None):
output = TestRun.executor.run(
set_param_cleaning_alru_cmd(
cache_id=str(cache_id), wake_up=str(wake_up) if wake_up is not None else None,
staleness_time=str(staleness_time) if staleness_time is not None else None,
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers is not None else None,
activity_threshold=str(activity_threshold) if activity_threshold is not None else None))
if output.exit_code != 0:
raise Exception(
f"Error while setting alru cleaning policy parameters."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
def set_param_cleaning_acp(cache_id: int, wake_up: int = None, flush_max_buffers: int = None):
output = TestRun.executor.run(
set_param_cleaning_acp_cmd(
cache_id=str(cache_id), wake_up=str(wake_up) if wake_up is not None else None,
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers is not None else None))
if output.exit_code != 0:
raise Exception(
f"Error while setting acp cleaning policy parameters."
f" stdout: {output.stdout} \n stderr :{output.stderr}")
return output
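A minimal end-to-end sketch tying the wrappers above together; the device paths are illustrative and a TestRun executor is assumed to be configured already:

from api.cas import casadm
from api.cas.cache_config import CacheMode
from storage_devices.device import Device

cache_dev = Device("/dev/nvme0n1p1")     # illustrative cache device
core_dev = Device("/dev/sdb1")           # illustrative core device

cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WT, force=True)
core = casadm.add_core(cache, core_dev)
casadm.flush(cache.cache_id)             # flush the whole cache
casadm.remove_core(cache.cache_id, core.core_id)
casadm.stop_cache(cache.cache_id)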

View File

@ -0,0 +1,20 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from enum import Enum
class OutputFormat(Enum):
table = 0
csv = 1
class StatsFilter(Enum):
all = 0
conf = 1
usage = 2
req = 3
blk = 4
err = 5

View File

@ -0,0 +1,209 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from api.cas import casadm
from test_utils.size import parse_unit
from api.cas.cache_config import *
from api.cas.casadm_params import *
from datetime import timedelta
from typing import List
from packaging import version
import re
def parse_stats_unit(unit: str):
if unit is None:
return ""
unit = re.search(r".*[^\]]", unit).group()
if unit == "s":
return "s"
elif unit == "%":
return "%"
elif unit == "Requests":
return "requests"
else:
return parse_unit(unit)
def get_filter(filter: List[casadm.StatsFilter]):
"""Prepare list of statistic sections which should be retrieved and parsed. """
if filter is None or StatsFilter.all in filter:
_filter = [
f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)
]
else:
_filter = [
f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)
]
return _filter
def get_statistics(
cache_id: int,
core_id: int = None,
io_class_id: int = None,
filter: List[casadm.StatsFilter] = None,
percentage_val: bool = False,
):
stats = {}
_filter = get_filter(filter)
per_io_class = True if io_class_id is not None else False
# No need to retrieve all stats if user specified only 'conf' flag
if filter != [StatsFilter.conf]:
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
per_io_class=per_io_class,
io_class_id=io_class_id,
filter=_filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
if filter is None or StatsFilter.conf in filter or StatsFilter.all in filter:
# Configuration statistics have different units or no unit at all, so for parsing
# convenience they are gathered separately. As these are configuration stats only,
# there is no risk of them diverging between the two casadm calls.
conf_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
per_io_class=per_io_class,
io_class_id=io_class_id,
filter=[StatsFilter.conf],
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys = conf_stats[0]
stat_values = conf_stats[1]
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
# Some configuration stats have no unit
try:
stat_name, stat_unit = name.split(" [")
except ValueError:
stat_name = name
stat_unit = None
stat_name = stat_name.lower()
# The 'dirty for' and 'cache size' stats occur twice
if stat_name in stats:
continue
stat_unit = parse_stats_unit(stat_unit)
if isinstance(stat_unit, Unit):
stats[stat_name] = Size(float(val), stat_unit)
elif stat_unit == "s":
stats[stat_name] = timedelta(seconds=int(val))
elif stat_unit == "":
# Stats without a unit can be numeric (e.g. IDs)
# or strings (e.g. a device path)
try:
stats[stat_name] = float(val)
except ValueError:
stats[stat_name] = val
# No need to parse all stats if user specified only 'conf' flag
if filter == [StatsFilter.conf]:
return stats
stat_keys = csv_stats[0]
stat_values = csv_stats[1]
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
if percentage_val and " [%]" in name:
stats[name.split(" [")[0].lower()] = float(val)
elif not percentage_val and "[%]" not in name:
stat_name, stat_unit = name.split(" [")
stat_unit = parse_stats_unit(stat_unit)
stat_name = stat_name.lower()
if isinstance(stat_unit, Unit):
stats[stat_name] = Size(float(val), stat_unit)
elif stat_unit == "requests":
stats[stat_name] = float(val)
else:
raise ValueError(f"Invalid unit {stat_unit}")
return stats
def get_caches(): # This method does not return inactive or detached CAS devices
from api.cas.cache import Cache
caches_list = []
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
for line in lines:
args = line.split(',')
if args[0] == "cache":
current_cache = Cache(args[2])
caches_list.append(current_cache)
return caches_list
def get_cores(cache_id: int):
from api.cas.core import Core, CoreStatus
cores_list = []
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
is_proper_core_line = False
for line in lines:
args = line.split(',')
if args[0] == "core" and is_proper_core_line:
core_status_str = args[3].lower()
is_valid_status = CoreStatus[core_status_str].value[0] <= 1
if is_valid_status:
cores_list.append(Core(args[2], cache_id))
if args[0] == "cache":
is_proper_core_line = True if int(args[1]) == cache_id else False
return cores_list
def get_flush_parameters_alru(cache_id: int):
casadm_output = casadm.get_param_cleaning_alru(cache_id,
casadm.OutputFormat.csv).stdout.splitlines()
flush_parameters = FlushParametersAlru()
for line in casadm_output:
if 'max buffers' in line:
flush_parameters.flush_max_buffers = int(line.split(',')[1])
if 'Activity threshold' in line:
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(',')[1]))
if 'Stale buffer time' in line:
flush_parameters.staleness_time = Time(seconds=int(line.split(',')[1]))
if 'Wake up time' in line:
flush_parameters.wake_up_time = Time(seconds=int(line.split(',')[1]))
return flush_parameters
def get_flush_parameters_acp(cache_id: int):
casadm_output = casadm.get_param_cleaning_acp(cache_id,
casadm.OutputFormat.csv).stdout.splitlines()
flush_parameters = FlushParametersAcp()
for line in casadm_output:
if 'max buffers' in line:
flush_parameters.flush_max_buffers = int(line.split(',')[1])
if 'Wake up time' in line:
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(',')[1]))
return flush_parameters
def get_seq_cut_off_parameters(cache_id: int, core_id: int):
casadm_output = casadm.get_param_cutoff(
cache_id, core_id, casadm.OutputFormat.csv).stdout.splitlines()
seq_cut_off_params = SeqCutOffParameters()
for line in casadm_output:
if 'threshold' in line:
seq_cut_off_params.threshold = line.split(',')[1]
if 'policy' in line:
seq_cut_off_params.policy = SeqCutOffPolicy(line.split(',')[1])
return seq_cut_off_params
def get_casadm_version():
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split('\n')
version_str = casadm_output[1].split(',')[-1]
return version.parse(version_str)
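A short sketch of the parsed-statistics interface above; keys are the lowercased CSV column names, and values come back as Size, timedelta or float depending on the parsed unit:

from api.cas import casadm_parser
from api.cas.casadm_params import StatsFilter
from test_utils.size import Unit

stats = casadm_parser.get_statistics(cache_id=1, filter=[StatsFilter.usage])
occupancy = stats["occupancy"]           # Size object
dirty = stats["dirty"]                   # Size object
print(dirty.get_value(Unit.Blocks4096))  # dirty data expressed as 4 KiB blocks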

View File

@ -0,0 +1,23 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from .cli import *
from core.test_run import TestRun
def help(shortcut: bool = False):
return TestRun.executor.run(ctl_help(shortcut))
def start():
return TestRun.executor.run(ctl_start())
def stop(flush: bool = False):
return TestRun.executor.run(ctl_stop(flush))
def init(force: bool = False):
return TestRun.executor.run(ctl_init(force))
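A tiny usage sketch for the casctl wrappers above (each call simply runs the corresponding casctl command through the executor):

from api.cas import casctl

casctl.init(force=True)      # runs "casctl init --force"
casctl.stop(flush=True)      # runs "casctl stop --flush"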

View File

@ -0,0 +1,254 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
LOGGER = logging.getLogger(__name__)
casadm_bin = "casadm"
casctl = "casctl"
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False):
command = f" -A -i {cache_id} -d {core_dev}" if shortcut \
else f" --add-core --cache-id {cache_id} --core-device {core_dev}"
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
command = f" -R -i {cache_id} -j {core_id}" if shortcut \
else f" --remove-core --cache-id {cache_id} --core-id {core_id}"
if force:
command += " -f" if shortcut else " --force"
return casadm_bin + command
def remove_detached_cmd(core_device: str, shortcut: bool = False):
command = " --remove-detached" + (" -d " if shortcut else " --device ") + core_device
return casadm_bin + command
def help_cmd(shortcut: bool = False):
return casadm_bin + (" -H" if shortcut else " --help")
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False):
command = (" -Z -i " if shortcut else " --reset-counters --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def flush_cache_cmd(cache_id: str, shortcut: bool = False):
command = (" -F -i " if shortcut else " --flush-cache --cache-id ") + cache_id
return casadm_bin + command
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False):
command = (f" -E -i {cache_id} -j {core_id}" if shortcut
else f" --flush-core --cache-id {cache_id} --core-id {core_id}")
return casadm_bin + command
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
cache_id: str = None, force: bool = False,
load: bool = False, shortcut: bool = False):
command = " -S" if shortcut else " --start-cache"
command += (" -d " if shortcut else " --cache-device ") + cache_dev
if cache_mode is not None:
command += (" -c " if shortcut else " --cache-mode ") + cache_mode
if cache_line_size is not None:
command += (" -x " if shortcut else " --cache-line-size ") + cache_line_size
if cache_id is not None:
command += (" -i " if shortcut else " --cache-id ") + cache_id
if force:
command += " -f" if shortcut else " --force"
if load:
command += " -l" if shortcut else " --load"
return casadm_bin + command
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
io_class_id: str = None, filter: str = None,
output_format: str = None, shortcut: bool = False):
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
if per_io_class:
command += " -d" if shortcut else " --io-class-id"
if io_class_id is not None:
command += " " + io_class_id
elif io_class_id is not None:
raise Exception("Per io class flag not set but ID given.")
if filter is not None:
command += (" -f " if shortcut else " --filter ") + filter
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def format_cmd(cache_dev: str, force: bool = False, shortcut: bool = False):
command = (" -N -F -d " if shortcut else " --nvme --format --device ") + cache_dev
if force:
command += " -f" if shortcut else " --force"
return casadm_bin + command
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False):
command = " -T " if shortcut else " --stop-cache"
command += (" -i " if shortcut else " --cache-id ") + cache_id
if no_data_flush:
command += " --no-data-flush"
return casadm_bin + command
def list_cmd(output_format: str = None, shortcut: bool = False):
command = " -L" if shortcut else " --list-caches"
if output_format == "table" or output_format == "csv":
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def load_cmd(cache_dev: str, shortcut: bool = False):
return start_cmd(cache_dev, load=True, shortcut=shortcut)
def version_cmd(output_format: str = None, shortcut: bool = False):
command = " -V" if shortcut else " --version"
if output_format == "table" or output_format == "csv":
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def set_cache_mode_cmd(cache_mode: str, cache_id: str,
flush_cache: str = None, shortcut: bool = False):
command = f" -Q -c {cache_mode} -i {cache_id}" if shortcut else \
f" --set-cache-mode --cache-mode {cache_mode} --cache-id {cache_id}"
if flush_cache:
command += (" -f " if shortcut else " --flush-cache ") + flush_cache
return casadm_bin + command
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False):
command = f" -C -C -i {cache_id} -f {file}" if shortcut else \
f" --io-class --load-config --cache-id {cache_id} --file {file}"
return casadm_bin + command
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False):
command = f" -C -L -i {cache_id} -o {output_format}" if shortcut else \
f" --io-class --list --cache-id {cache_id} --output-format {output_format}"
return casadm_bin + command
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
additional_params: str = None, shortcut: bool = False):
command = f" -G -n {namespace} -i {cache_id}" if shortcut else\
f" --get-param --name {namespace} --cache-id {cache_id}"
if additional_params is not None:
command += additional_params
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def get_param_cutoff_cmd(cache_id: str, core_id: str,
output_format: str = None, shortcut: bool = False):
add_param = (" -j " if shortcut else " --core-id ") + core_id
return _get_param_cmd(namespace="seq-cutoff", cache_id=cache_id, output_format=output_format,
additional_params=add_param, shortcut=shortcut)
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
shortcut: bool = False):
command = f" -X -n {namespace} -i {cache_id}" if shortcut else\
f" --set-param --name {namespace} --cache-id {cache_id}"
command += additional_params
return casadm_bin + command
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None,
policy: str = None, shortcut: bool = False):
add_params = ""
if core_id is not None:
add_params += (" -j " if shortcut else " --core-id ") + core_id
if threshold is not None:
add_params += (" -t " if shortcut else " --threshold ") + threshold
if policy is not None:
add_params += (" -p " if shortcut else " --policy ") + policy
return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False):
add_params = (" -p " if shortcut else " --policy ") + policy
return _set_param_cmd(namespace="cleaning", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_alru_cmd(cache_id: str, wake_up: str, staleness_time: str,
flush_max_buffers: str, activity_threshold: str,
shortcut: bool = False):
add_param = ""
if wake_up is not None:
add_param += (" -w " if shortcut else " --wake-up ") + wake_up
if staleness_time is not None:
add_param += (" -s " if shortcut else " --staleness-time ") + staleness_time
if flush_max_buffers is not None:
add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
if activity_threshold is not None:
add_param += (" -t " if shortcut else " --activity-threshold ") + activity_threshold
return _set_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
additional_params=add_param, shortcut=shortcut)
def set_param_cleaning_acp_cmd(cache_id: str, wake_up: str = None,
flush_max_buffers: str = None, shortcut: bool = False):
add_param = ""
if wake_up is not None:
add_param += (" -w " if shortcut else " --wake-up ") + wake_up
if flush_max_buffers is not None:
add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
return _set_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
additional_params=add_param, shortcut=shortcut)
def ctl_help(shortcut: bool = False):
return casctl + " --help" if shortcut else " -h"
def ctl_start():
return casctl + " start"
def ctl_stop(flush: bool = False):
command = casctl + " stop"
if flush:
command += " --flush"
return command
def ctl_init(force: bool = False):
command = casctl + " init"
if force:
command += " --force"
return command
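The helpers above only assemble command strings; a quick sketch of what a few of them produce (the expected strings are shown in comments, with an illustrative device path):

from api.cas.cli import flush_cache_cmd, start_cmd

print(start_cmd(cache_dev="/dev/nvme0n1p1", cache_mode="wt", force=True))
# casadm --start-cache --cache-device /dev/nvme0n1p1 --cache-mode wt --force

print(start_cmd(cache_dev="/dev/nvme0n1p1", cache_mode="wt", force=True, shortcut=True))
# casadm -S -d /dev/nvme0n1p1 -c wt -f

print(flush_cache_cmd(cache_id="1"))
# casadm --flush-cache --cache-id 1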

View File

@ -0,0 +1,81 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from typing import List
from api.cas.cli import *
from api.cas.casadm_parser import *
from api.cas.cache import Device
from test_utils.os_utils import *
class CoreStatus(Enum):
empty = 0,
active = 1,
inactive = 2,
detached = 3
class Core(Device):
def __init__(self, core_device: str, cache_id: int):
self.core_device = Device(core_device)
self.system_path = None
core_info = self.__get_core_info()
self.core_id = int(core_info["core_id"])
Device.__init__(self, core_info["exp_obj"])
self.cache_id = cache_id
def __get_core_info(self):
output = TestRun.executor.run(
list_cmd(OutputFormat.csv.name))
if output.exit_code != 0:
raise Exception("Failed to execute list caches command.")
output_lines = output.stdout.splitlines()
for line in output_lines:
split_line = line.split(',')
if split_line[0] == "core" and (split_line[2] == self.core_device.system_path
or split_line[5] == self.system_path):
return {"core_id": split_line[1],
"core_device": split_line[2],
"status": split_line[3],
"exp_obj": split_line[5]}
def get_core_statistics(self,
io_class_id: int = None,
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False):
return get_statistics(self.cache_id, self.core_id, io_class_id,
stat_filter, percentage_val)
def get_status(self):
return self.__get_core_info()["status"]
def get_seq_cut_off_parameters(self):
return get_seq_cut_off_parameters(self.cache_id, self.core_id)
def get_dirty_blocks(self):
return self.get_core_statistics()["dirty"]
def get_clean_blocks(self):
return self.get_core_statistics()["clean"]
def get_occupancy(self):
return self.get_core_statistics()["occupancy"]
# Casadm methods:
def remove_core(self, force: bool = False):
return casadm.remove_core(self.cache_id, self.core_id, force)
def reset_counters(self):
return casadm.reset_counters(self.cache_id, self.core_id)
def flush_core(self):
casadm.flush(self.cache_id, self.core_id)
sync()
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
casadm.set_param_cutoff(self.cache_id, self.core_id,
seq_cutoff_param.threshold, seq_cutoff_param.policy)
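A small sketch of the Core wrapper above; it assumes the core has already been added to cache id 1, so the casadm list output contains it (the device path is illustrative):

from api.cas.core import Core

core = Core("/dev/sdb1", cache_id=1)     # resolves core_id and exported object from list output
print(core.get_status())                 # raw status string from the list output
print(core.get_occupancy())              # Size parsed from per-core statistics
core.flush_core()                        # flush and assert no dirty blocks remain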

View File

@ -0,0 +1,85 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from api.cas import casadm_parser
from api.cas.cache_config import CacheMode
from storage_devices.device import Device
from test_tools import fs_utils
opencas_conf_path = "/etc/opencas/opencas.conf"
def create_init_config_from_running_configuration(load: bool = None, extra_flags=""):
cache_lines = []
core_lines = []
for cache in casadm_parser.get_caches():
cache_lines.append(CacheConfigLine(cache.cache_id,
cache.cache_device,
cache.get_cache_mode(),
load,
extra_flags))
for core in casadm_parser.get_cores(cache.cache_id):
core_lines.append(CoreConfigLine(cache.cache_id,
core.core_id,
core.core_device))
config_lines = []
create_default_init_config()
if len(cache_lines) > 0:
config_lines.append(CacheConfigLine.header)
for c in cache_lines:
config_lines.append(str(c))
if len(core_lines) > 0:
config_lines.append(CoreConfigLine.header)
for c in core_lines:
config_lines.append(str(c))
fs_utils.write_file(opencas_conf_path, '\n'.join(config_lines), False)
def create_default_init_config():
cas_version = casadm_parser.get_casadm_version()
fs_utils.write_file(opencas_conf_path,
f"version={'.'.join(str(x) for x in cas_version.release[0:3])}")
class CacheConfigLine:
header = "[caches]"
def __init__(self, cache_id, cache_device: Device,
cache_mode: CacheMode, load=None, extra_flags=""):
self.cache_id = cache_id
self.cache_device = cache_device
self.load = load
self.cache_mode = cache_mode
self.extra_flags = extra_flags
def __str__(self):
cache_symlink = self.cache_device.get_device_link("/dev/disk/by-id")
cache_device_path = cache_symlink.full_path if cache_symlink is not None \
else self.cache_device.system_path
params = [str(self.cache_id), cache_device_path]
if self.load is not None:
params.append("yes" if self.load else "no")
params.append(self.cache_mode.name)
params.append(self.extra_flags)
return '\t'.join(params)
class CoreConfigLine:
header = "[cores]"
def __init__(self, cache_id, core_id, core_device: Device):
self.cache_id = cache_id
self.core_id = core_id
self.core_device = core_device
def __str__(self):
core_symlink = self.core_device.get_device_link("/dev/disk/by-id")
core_device_path = core_symlink.full_path if core_symlink is not None \
else self.core_device.system_path
params = [str(self.cache_id), str(self.core_id), core_device_path]
return '\t'.join(params)
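For reference, a hedged sketch of driving the helpers above, assuming the module is importable as api.cas.init_config; the device links in the comment are illustrative placeholders for the layout produced by the __str__ methods:

from api.cas import init_config

# Write /etc/opencas/opencas.conf based on every running cache/core pair.
init_config.create_init_config_from_running_configuration()

# Each section is rendered as tab-separated fields, roughly:
# [caches]
# 1<TAB>/dev/disk/by-id/example-cache-link<TAB>WT
# [cores]
# 1<TAB>1<TAB>/dev/disk/by-id/example-core-link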

View File

@ -0,0 +1,93 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
from tests import conftest
from core.test_run import TestRun
LOGGER = logging.getLogger(__name__)
opencas_repo_name = "open-cas-linux"
def install_opencas():
LOGGER.info("Cloning Open CAS repository.")
TestRun.executor.run(f"if [ -d {opencas_repo_name} ]; "
f"then rm -rf {opencas_repo_name}; fi")
output = TestRun.executor.run(
"git clone --recursive https://github.com/Open-CAS/open-cas-linux.git")
if output.exit_code != 0:
raise Exception(f"Error while cloning repository: {output.stdout}\n{output.stderr}")
output = TestRun.executor.run(
f"cd {opencas_repo_name} && "
f"git fetch --all && "
f"git fetch --tags {conftest.get_remote()} +refs/pull/*:refs/remotes/origin/pr/*")
if output.exit_code != 0:
raise Exception(
f"Failed to fetch: "
f"{output.stdout}\n{output.stderr}")
output = TestRun.executor.run(f"cd {opencas_repo_name} && "
f"git checkout {conftest.get_branch()}")
if output.exit_code != 0:
raise Exception(
f"Failed to checkout to {conftest.get_branch()}: {output.stdout}\n{output.stderr}")
LOGGER.info("Open CAS make and make install.")
output = TestRun.executor.run(
f"cd {opencas_repo_name} && "
"git submodule update --init --recursive && "
"./configure && "
"make -j")
if output.exit_code != 0:
raise Exception(
f"Make command executed with nonzero status: {output.stdout}\n{output.stderr}")
output = TestRun.executor.run(f"cd {opencas_repo_name} && "
f"make install")
if output.exit_code != 0:
raise Exception(
f"Error while installing Open CAS: {output.stdout}\n{output.stderr}")
LOGGER.info("Check if casadm is properly installed.")
output = TestRun.executor.run("casadm -V")
if output.exit_code != 0:
raise Exception(
f"'casadm -V' command returned an error: {output.stdout}\n{output.stderr}")
else:
LOGGER.info(output.stdout)
def uninstall_opencas():
LOGGER.info("Uninstalling Open CAS.")
output = TestRun.executor.run("casadm -V")
if output.exit_code != 0:
raise Exception("Open CAS is not properly installed.")
else:
TestRun.executor.run(f"cd {opencas_repo_name} && "
f"make uninstall")
if output.exit_code != 0:
raise Exception(
f"There was an error during uninstall process: {output.stdout}\n{output.stderr}")
def reinstall_opencas():
if check_if_installed():
uninstall_opencas()
install_opencas()
def check_if_installed():
LOGGER.info("Check if Open-CAS-Linux is installed.")
output = TestRun.executor.run("which casadm")
if output.exit_code == 0:
LOGGER.info("CAS is installed")
return True
LOGGER.info("CAS not installed")
return False
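A hedged sketch of how the installer helpers above chain together; base_prepare() in conftest.py (further down) follows essentially the same flow:

from api.cas import installer

if installer.check_if_installed():
    installer.reinstall_opencas()    # uninstall first, then clone, build and install again
else:
    installer.install_opencas()      # clone, build and install from the configured branch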

View File

@ -0,0 +1,128 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from datetime import timedelta
from core.test_run import TestRun
default_config_file_path = "/tmp/opencas_ioclass.conf"
MAX_IO_CLASS_ID = 32
MAX_CLASSIFICATION_DELAY = timedelta(seconds=6)
def create_ioclass_config(
add_default_rule: bool = True, ioclass_config_path: str = default_config_file_path
):
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
output = TestRun.executor.run(
'echo "IO class id,IO class name,Eviction priority,Allocation" '
+ f"> {ioclass_config_path}"
)
if output.exit_code != 0:
raise Exception(
"Failed to create ioclass config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
if add_default_rule:
output = TestRun.executor.run(
f'echo "0,unclassified,22,1" >> {ioclass_config_path}'
)
if output.exit_code != 0:
raise Exception(
"Failed to create ioclass config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
def remove_ioclass_config(ioclass_config_path: str = default_config_file_path):
TestRun.LOGGER.info(f"Removing config file {ioclass_config_path}")
output = TestRun.executor.run(f"rm -f {ioclass_config_path}")
if output.exit_code != 0:
raise Exception(
"Failed to remove config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
def add_ioclass(
ioclass_id: int,
rule: str,
eviction_priority: int,
allocation: bool,
ioclass_config_path: str = default_config_file_path,
):
new_ioclass = f"{ioclass_id},{rule},{eviction_priority},{int(allocation)}"
TestRun.LOGGER.info(
f"Adding rule {new_ioclass} " + f"to config file {ioclass_config_path}"
)
output = TestRun.executor.run(
f'echo "{new_ioclass}" >> {ioclass_config_path}'
)
if output.exit_code != 0:
raise Exception(
"Failed to append ioclass to config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
def get_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_path):
TestRun.LOGGER.info(
f"Retrieving rule no.{ioclass_id} " + f"from config file {ioclass_config_path}"
)
output = TestRun.executor.run(f"cat {ioclass_config_path}")
if output.exit_code != 0:
raise Exception(
"Failed to read ioclass config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
ioclass_config = output.stdout.splitlines()
for ioclass in ioclass_config:
if int(ioclass.split(",")[0]) == ioclass_id:
return ioclass
def remove_ioclass(
ioclass_id: int, ioclass_config_path: str = default_config_file_path
):
TestRun.LOGGER.info(
f"Removing rule no.{ioclass_id} " + f"from config file {ioclass_config_path}"
)
output = TestRun.executor.run(f"cat {ioclass_config_path}")
if output.exit_code != 0:
raise Exception(
"Failed to read ioclass config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
old_ioclass_config = output.stdout.splitlines()
config_header = old_ioclass_config[0]
# First line in valid config file is always a header, not a rule - it is
# already extracted above
new_ioclass_config = [
x for x in old_ioclass_config[1:] if int(x.split(",")[0]) != ioclass_id
]
new_ioclass_config.insert(0, config_header)
if len(new_ioclass_config) == len(old_ioclass_config):
raise Exception(
f"Failed to remove ioclass {ioclass_id} from config file {ioclass_config_path}"
)
new_ioclass_config_str = "\n".join(new_ioclass_config)
output = TestRun.executor.run(
f'echo "{new_ioclass_config_str}" > {ioclass_config_path}'
)
if output.exit_code != 0:
raise Exception(
"Failed to save new ioclass config. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
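A compact sketch of the IO class config helpers above (the module path api.cas.ioclass_config and the rule string are assumptions made for illustration):

from api.cas import ioclass_config

ioclass_config.create_ioclass_config()               # header line plus default rule 0
ioclass_config.add_ioclass(ioclass_id=1, rule="metadata",
                           eviction_priority=1, allocation=True)
print(ioclass_config.get_ioclass(1))                 # 1,metadata,1,1
ioclass_config.remove_ioclass(1)
ioclass_config.remove_ioclass_config()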

View File

@ -0,0 +1,16 @@
ip: "x.x.x.x"
user: "example_user"
password: "example_password"
env: "HTTPS_PROXY=myproxy.example:12345"
working_dir: "/tmp/open-cas-linux/"
disks:
- path: '/dev/device_name1' # disk device path
serial: 'ABC' # disk serial number
type: 'nand' # disk type
blocksize: 512 # 512B
- path: '/dev/device_name2'
serial: 'DEF'
type: 'hdd'
blocksize: 512
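With a DUT description like the one above saved to a file, the tests are pointed at it through the pytest options registered in conftest.py below; an illustrative invocation (the config file name is a placeholder):

pytest --dut-config=example_dut_config.yml --remote=origin --repo-tag=master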

View File

View File

@ -0,0 +1,9 @@
[pytest]
log_cli = 1
log_cli_level = INFO
log_cli_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
log_cli_date_format=%Y-%m-%d %H:%M:%S
log_file = pytest.log
log_file_level = INFO
log_file_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
log_file_date_format=%Y-%m-%d %H:%M:%S

@ -0,0 +1 @@
Subproject commit 465f9670c0ae15f09aa2b8a775cb0b47a34a51fe

View File

View File

@ -0,0 +1,31 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
import pytest
from api.cas import casadm
from tests.conftest import base_prepare
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize('prepare_and_cleanup',
[{"core_count": 0, "cache_count": 0}],
indirect=True)
def test_cli_help(prepare_and_cleanup, shortcut):
prepare()
LOGGER.info("Test run")
output = casadm.help(shortcut)
LOGGER.info(output.stdout) # TODO:this is tmp, every ssh command shall be logged via executor
assert output.stdout[0:33] == "Cache Acceleration Software Linux"
# TODO: create yml config for every help command and match the output with it
# TODO: for now the assert above is purely for testing flow in the casadm api
def prepare():
base_prepare()

View File

@ -0,0 +1,76 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
import pytest
from api.cas import casadm, casadm_parser
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_utils.size import Unit, Size
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize('prepare_and_cleanup',
[{"core_count": 0, "cache_count": 1, "cache_type": "optane"}, ],
indirect=True)
def test_cli_start_stop_default_value(prepare_and_cleanup, shortcut):
prepare()
cache_device = next(
disk for disk in TestRun.dut.disks if disk.disk_type == DiskType.optane)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
casadm.start_cache(cache_device, shortcut=shortcut, force=True)
caches = casadm_parser.get_caches()
assert len(caches) == 1
assert caches[0].cache_device.system_path == cache_device.system_path
casadm.stop_cache(cache_id=caches[0].cache_id, shortcut=shortcut)
output = casadm.list_caches(shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches) == 0
assert output.stdout == "No caches running"
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize('prepare_and_cleanup',
[{"core_count": 1, "cache_count": 1, "cache_type": "optane"}],
indirect=True)
def test_cli_add_remove_default_value(prepare_and_cleanup, shortcut):
prepare()
cache_device = next(
disk for disk in TestRun.dut.disks if disk.disk_type == DiskType.optane)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
core_device = next(
disk for disk in TestRun.dut.disks if disk.disk_type != DiskType.optane)
casadm.add_core(cache, core_device, shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches[0].get_core_devices()) == 1
assert caches[0].get_core_devices()[0].core_device.system_path == core_device.system_path
casadm.remove_core(cache.cache_id, 1, shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches) == 1
assert len(caches[0].get_core_devices()) == 0
casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)
output = casadm.list_caches(shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches) == 0
assert output.stdout == "No caches running"
def prepare():
base_prepare()

View File

@ -0,0 +1,151 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
import os
import sys
import yaml
from IPy import IP
sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
from core.test_run_utils import TestRun
from api.cas import installer
from api.cas import casadm
from test_utils.os_utils import Udev
# TODO: Provide basic plugin subsystem
plugins_dir = os.path.join(os.path.dirname(__file__), "../plugins")
sys.path.append(plugins_dir)
try:
from test_wrapper import plugin as test_wrapper
except ImportError:
pass
pytest_options = {}
@pytest.fixture(scope="session", autouse=True)
def get_pytest_options(request):
pytest_options["remote"] = request.config.getoption("--remote")
pytest_options["branch"] = request.config.getoption("--repo-tag")
pytest_options["force_reinstall"] = request.config.getoption("--force-reinstall")
@pytest.fixture()
def prepare_and_cleanup(request):
"""
This fixture returns a dictionary containing the DUT IP, IPMI, spider and the list of disks.
It also provides the executor used to run commands.
"""
# A DUT config file should be added to the config package and pytest should be
# executed with the option '--dut-config=conf_name'.
#
# The 'ip' field should contain a valid IP string to use the remote ssh executor,
# or it should be commented out to execute tests on the local machine.
#
# The user can also provide their own test wrapper, which runs test prepare, cleanup, etc.
# In that case the path to it should be added in the config/configuration.py file:
# test_wrapper_dir = 'wrapper_path'
try:
with open(request.config.getoption('--dut-config')) as cfg:
dut_config = yaml.safe_load(cfg)
except Exception:
dut_config = {}
if 'test_wrapper' in sys.modules:
if 'ip' in dut_config:
try:
IP(dut_config['ip'])
except ValueError:
raise Exception("IP address from configuration file is in invalid format.")
dut_config = test_wrapper.prepare(request.param, dut_config)
TestRun.prepare(dut_config)
TestRun.plugins['opencas'] = {'already_updated': False}
TestRun.LOGGER.info(f"**********Test {request.node.name} started!**********")
yield
TestRun.LOGGER.info("Test cleanup")
Udev.enable()
unmount_cas_devices()
casadm.stop_all_caches()
if 'test_wrapper' in sys.modules:
test_wrapper.cleanup()
def pytest_addoption(parser):
parser.addoption("--dut-config", action="store", default="None")
parser.addoption("--remote", action="store", default="origin")
parser.addoption("--repo-tag", action="store", default="master")
parser.addoption("--force-reinstall", action="store", default="False")
# TODO: investigate whether it is possible to pass the last param as bool
def get_remote():
return pytest_options["remote"]
def get_branch():
return pytest_options["branch"]
def get_force_param():
return pytest_options["force_reinstall"]
def unmount_cas_devices():
output = TestRun.executor.run("cat /proc/mounts | grep cas")
# If the exit code is '1' and stdout is empty, there are no mounted cas devices
if output.exit_code == 1:
return
elif output.exit_code != 0:
raise Exception(
f"Failed to list mounted cas devices. \
stdout: {output.stdout} \n stderr :{output.stderr}"
)
for line in output.stdout.splitlines():
cas_device_path = line.split()[0]
TestRun.LOGGER.info(f"Unmounting {cas_device_path}")
output = TestRun.executor.run(f"umount {cas_device_path}")
if output.exit_code != 0:
raise Exception(
f"Failed to unmount {cas_device_path}. \
stdout: {output.stdout} \n stderr :{output.stderr}"
)
def kill_all_io():
TestRun.executor.run("pkill --signal SIGKILL dd")
TestRun.executor.run("kill -9 `ps aux | grep -i vdbench.* | awk '{ print $1 }'`")
TestRun.executor.run("pkill --signal SIGKILL fio*")
def base_prepare():
TestRun.LOGGER.info("Base test prepare")
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
Udev.enable()
kill_all_io()
if installer.check_if_installed():
try:
unmount_cas_devices()
casadm.stop_all_caches()
except Exception:
pass # TODO: Reboot DUT if test is executed remotely
if get_force_param() != "False" and not TestRun.plugins['opencas']['already_updated']:
installer.reinstall_opencas()
elif not installer.check_if_installed():
installer.install_opencas()
TestRun.plugins['opencas']['already_updated'] = True

View File

@ -0,0 +1,65 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from test_tools.disk_utils import Filesystem
from test_utils.size import Size, Unit
from core.test_run import TestRun
from tests.conftest import base_prepare
from test_utils.filesystem.file import File
from test_utils.filesystem.directory import Directory
from test_tools import fs_utils
def setup_module():
TestRun.LOGGER.warning("Entering setup method")
@pytest.mark.parametrize('prepare_and_cleanup',
[{"cache_type": "nand", "cache_count": 1}],
indirect=True)
def test_create_example_partitions(prepare_and_cleanup):
prepare()
TestRun.LOGGER.info("Test run")
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
test_disk = TestRun.dut.disks[0]
part_sizes = []
for i in range(1, 6):
part_sizes.append(Size(10 * i + 100, Unit.MebiByte))
test_disk.create_partitions(part_sizes)
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
test_disk.partitions[0].create_filesystem(Filesystem.ext3)
@pytest.mark.parametrize('prepare_and_cleanup',
[{"cache_type": "nand", "cache_count": 1}],
indirect=True)
def test_create_example_files(prepare_and_cleanup):
prepare()
TestRun.LOGGER.info("Test run")
file1 = File.create_file("example_file")
file1.write("Test file\ncontent line\ncontent")
content_before_change = file1.read()
TestRun.LOGGER.info(f"File content: {content_before_change}")
fs_utils.replace_in_lines(file1, 'content line', 'replaced line')
content_after_change = file1.read()
assert content_before_change != content_after_change
file2 = file1.copy('/tmp', force=True)
assert file1.md5sum() == file2.md5sum()
file2.chmod_numerical(123)
fs_utils.remove(file2.full_path, True)
dir1 = Directory("~")
dir_content = dir1.ls()
file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'], fs_utils.PermissionsUsers(7))
for item in dir_content:
TestRun.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
fs_utils.remove(file1.full_path, True)
def prepare():
base_prepare()

View File

@ -0,0 +1,169 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from core.test_run import TestRun
from tests.conftest import base_prepare
from storage_devices.disk import DiskType
from test_utils.size import Size, Unit
from api.cas.cache_config import CacheMode
from api.cas import casadm
from test_tools.dd import Dd
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_core_inactive(prepare_and_cleanup):
"""
1. Start cache with 3 cores.
2. Stop cache.
3. Remove one of core devices.
4. Load cache.
5. Check if cache has appropriate number of valid and inactive core devices.
"""
cache, core_device = prepare()
cache_device = cache.cache_device
stats = cache.get_cache_statistics()
assert stats["core devices"] == 3
assert stats["inactive core devices"] == 0
TestRun.LOGGER.info("Stopping cache")
cache.stop()
TestRun.LOGGER.info("Removing one of core devices")
core_device.remove_partitions()
core_device.create_partitions([Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)])
TestRun.LOGGER.info("Loading cache with missing core device")
cache = casadm.start_cache(cache_device, load=True)
stats = cache.get_cache_statistics()
assert stats["core devices"] == 3
assert stats["inactive core devices"] == 1
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_core_inactive_stats(prepare_and_cleanup):
"""
1. Start cache with 3 cores.
2. Switch cache into WB mode.
3. Issue IO to each core.
4. Stop cache without flush.
5. Remove two core devices.
6. Load cache.
7. Check if cache stats are equal to sum of valid and inactive cores stats.
8. Check if percentage values are calculated properly.
"""
cache, core_device = prepare()
cache_device = cache.cache_device
TestRun.LOGGER.info(cache_device)
TestRun.LOGGER.info("Switching cache mode to WB")
cache.set_cache_mode(cache_mode=CacheMode.WB)
cores = cache.get_core_devices()
TestRun.LOGGER.info("Issue IO to each core")
for core in cores:
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1000)
.block_size(Size(4, Unit.KibiByte))
).run()
TestRun.LOGGER.info("Stopping cache with dirty data")
cores[2].flush_core()
cache.stop(no_data_flush=True)
TestRun.LOGGER.info("Removing two of core devices")
core_device.remove_partitions()
core_device.create_partitions([Size(1, Unit.GibiByte)])
TestRun.LOGGER.info("Loading cache with missing core device")
cache = casadm.start_cache(cache_device, load=True)
# Accumulate valid cores stats
cores_occupancy = 0
cores_clean = 0
cores_dirty = 0
cores = cache.get_core_devices()
for core in cores:
core_stats = core.get_core_statistics()
cores_occupancy += core_stats["occupancy"].value
cores_clean += core_stats["clean"].value
cores_dirty += core_stats["dirty"].value
cache_stats = cache.get_cache_statistics()
# Add inactive core stats
cores_occupancy += cache_stats["inactive occupancy"].value
cores_clean += cache_stats["inactive clean"].value
cores_dirty += cache_stats["inactive dirty"].value
assert cache_stats["occupancy"].value == cores_occupancy
assert cache_stats["dirty"].value == cores_dirty
assert cache_stats["clean"].value == cores_clean
cache_stats_percentage = cache.get_cache_statistics(percentage_val=True)
# Calculate expected percentage value of inactive core stats
inactive_occupancy_perc = (
cache_stats["inactive occupancy"].value / cache_stats["cache size"].value
)
inactive_clean_perc = (
cache_stats["inactive clean"].value / cache_stats["occupancy"].value
)
inactive_dirty_perc = (
cache_stats["inactive dirty"].value / cache_stats["occupancy"].value
)
inactive_occupancy_perc = round(100 * inactive_occupancy_perc, 1)
inactive_clean_perc = round(100 * inactive_clean_perc, 1)
inactive_dirty_perc = round(100 * inactive_dirty_perc, 1)
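# Worked example with hypothetical numbers: 512 inactive-occupancy blocks out of a
# 4096-block cache size gives round(100 * 512 / 4096, 1) == 12.5, which is the value
# the percentage statistics below are expected to report.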
TestRun.LOGGER.info(cache_stats_percentage)
assert inactive_occupancy_perc == cache_stats_percentage["inactive occupancy"]
assert inactive_clean_perc == cache_stats_percentage["inactive clean"]
assert inactive_dirty_perc == cache_stats_percentage["inactive dirty"]
def prepare():
base_prepare()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (
disk.disk_type.value > cache_device.disk_type.value and disk != cache_device
)
)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions(
[Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
)
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
core_device_2 = core_device.partitions[1]
core_device_3 = core_device.partitions[2]
TestRun.LOGGER.info("Staring cache")
cache = casadm.start_cache(cache_device, force=True)
TestRun.LOGGER.info("Adding core device")
core_1 = cache.add_core(core_dev=core_device_1)
core_2 = cache.add_core(core_dev=core_device_2)
core_3 = cache.add_core(core_dev=core_device_3)
return cache, core_device

View File

@ -0,0 +1,60 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from api.cas import casadm
from api.cas import ioclass_config
from api.cas.cache_config import CacheMode, CleaningPolicy
from storage_devices.disk import DiskType
from tests.conftest import base_prepare
from core.test_run import TestRun
from test_utils.size import Size, Unit
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
def prepare():
base_prepare()
ioclass_config.remove_ioclass_config()
cache_device = next(filter(
lambda disk: disk.disk_type in [DiskType.optane, DiskType.nand],
TestRun.dut.disks
))
core_device = next(filter(
lambda disk: disk.disk_type.value > cache_device.disk_type.value,
TestRun.dut.disks
))
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_device = cache_device.partitions[0]
core_device = core_device.partitions[0]
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(cache_device, cache_mode=CacheMode.WB, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core device")
core = casadm.add_core(cache, core_dev=core_device)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
# To make the test more precise, all workloads except the tested IO class should be
# put in pass-through mode
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
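# For reference, the generated /tmp/opencas_ioclass.conf should now contain roughly
# (column layout assumed from the stock Open CAS ioclass CSV format):
#   IO class id,IO class name,Eviction priority,Allocation
#   0,unclassified,22,0
# with the per-test rules appended on top of it by the tests below.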
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, core

View File

@ -0,0 +1,392 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
from datetime import datetime
import pytest
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.directory import Directory
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev
from .io_class_common import *
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_directory_depth(prepare_and_cleanup, filesystem):
"""
Test if directory classification works properly for deeply nested directories for read and
write operations.
"""
cache, core = prepare()
Udev.disable()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_dir_path = f"{mountpoint}/base_dir"
TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
fs_utils.create_directory(base_dir_path)
nested_dir_path = base_dir_path
random_depth = random.randint(40, 80)
for i in range(random_depth):
nested_dir_path += f"/dir_{i}"
TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
fs_utils.create_directory(path=nested_dir_path, parents=True)
# Test classification in nested dir by reading a previously unclassified file
TestRun.LOGGER.info("Creating the first file in the nested directory")
test_file_1 = File(f"{nested_dir_path}/test_file_1")
dd = (
Dd()
.input("/dev/urandom")
.output(test_file_1.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_1.refresh_item()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
base_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
TestRun.LOGGER.info("Reading the file in the nested directory")
dd = (
Dd()
.input(test_file_1.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + test_file_1.size, \
"Wrong occupancy after reading file!\n" \
f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
# Test classification in nested dir by creating a file
base_occupancy = new_occupancy
TestRun.LOGGER.info("Creating the second file in the nested directory")
test_file_2 = File(f"{nested_dir_path}/test_file_2")
dd = (
Dd()
.input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_2.refresh_item()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + test_file_2.size, \
"Wrong occupancy after creating file!\n" \
f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_directory_dir_operations(prepare_and_cleanup, filesystem):
"""
Test if directory classification works properly after directory operations like move or rename.
The operations themselves should not cause reclassification but IO after those operations
should be reclassified to proper IO class.
Directory classification may work with a delay after loading IO class configuration or
move/rename operations. The test checks that the maximum delay is not exceeded.
"""
def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int):
start_time = datetime.now()
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
dd_blocks = 10
dd_size = Size(dd_blocks, Unit.Blocks4096)
file_counter = 0
unclassified_files = []
time_from_start = datetime.now() - start_time
while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
occupancy_before = occupancy_after
file_path = f"{directory.full_path}/test_file_{file_counter}"
file_counter += 1
time_from_start = datetime.now() - start_time
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
if occupancy_after - occupancy_before < dd_size:
unclassified_files.append(file_path)
if len(unclassified_files) == file_counter:
pytest.xfail("No files were properly classified within max delay time!")
if len(unclassified_files):
TestRun.LOGGER.info("Rewriting unclassified test files...")
for file_path in unclassified_files:
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
def read_files_with_reclassification_check(
target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
start_time = datetime.now()
target_occupancy_after = cache.get_cache_statistics(
io_class_id=target_ioclass_id)["occupancy"]
source_occupancy_after = cache.get_cache_statistics(
io_class_id=source_ioclass_id)["occupancy"]
unclassified_files = []
for file in [item for item in directory.ls() if isinstance(item, File)]:
target_occupancy_before = target_occupancy_after
source_occupancy_before = source_occupancy_after
time_from_start = datetime.now() - start_time
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
target_occupancy_after = cache.get_cache_statistics(
io_class_id=target_ioclass_id)["occupancy"]
source_occupancy_after = cache.get_cache_statistics(
io_class_id=source_ioclass_id)["occupancy"]
if target_occupancy_after < target_occupancy_before:
pytest.xfail("Target IO class occupancy lowered!")
elif target_occupancy_after - target_occupancy_before < file.size:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Target IO class occupancy not changed properly!")
if source_occupancy_after >= source_occupancy_before:
if file not in unclassified_files:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Source IO class occupancy not changed properly!")
if len(unclassified_files):
TestRun.LOGGER.info("Rereading unclassified test files...")
sync()
drop_caches(DropCachesMode.ALL)
for file in unclassified_files:
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
cache, core = prepare()
Udev.disable()
proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
ioclass_id_1 = proper_ids[0]
classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
ioclass_id_2 = proper_ids[1]
classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
# directory IO classes
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
non_classified_dir_path = f"{mountpoint}/non_classified"
TestRun.LOGGER.info(
f"Creating a non-classified directory: {non_classified_dir_path}")
dir_1 = Directory.create_directory(path=non_classified_dir_path)
TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
dir_1.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1)
TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
dir_2.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
dir_2.move(destination=mountpoint)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
dir_1.move(destination=classified_dir_path_2)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
dir_1.move(destination=non_classified_dir_path)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_directory_file_operations(prepare_and_cleanup, filesystem):
"""
Test if directory classification works properly after file operations like move or rename.
The operations themselves should not cause reclassification but IO after those operations
should be reclassified to proper IO class.
"""
def check_occupancy(expected: Size, actual: Size):
if expected != actual:
pytest.xfail("Occupancy check failed!\n"
f"Expected: {expected}, actual: {actual}")
cache, core = prepare()
Udev.disable()
test_dir_path = f"{mountpoint}/test_dir"
nested_dir_path = f"{test_dir_path}/nested_dir"
dd_blocks = random.randint(5, 50)
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
Directory.create_directory(path=nested_dir_path, parents=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Creating test file")
classified_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
sync()
drop_caches(DropCachesMode.ALL)
test_file = File(file_path).refresh_item()
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Moving test file out of classified directory")
classified_before = classified_after
non_classified_before = cache.get_cache_statistics(io_class_id=0)["occupancy"]
test_file.move(destination=mountpoint)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before + test_file.size, non_classified_after)
TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
classified_before = classified_after
non_classified_before = non_classified_after
test_file.move(destination=nested_dir_path)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before - test_file.size, non_classified_after)

View File

@ -0,0 +1,374 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
import pytest
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches
from .io_class_common import *
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_extension(prepare_and_cleanup):
cache, core = prepare()
iterations = 50
ioclass_id = 1
tested_extension = "tmp"
wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Check if file with proper extension is cached
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for i in range(iterations):
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == (i + 1) * dd_count
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing to file with no cached extension.")
for ext in wrong_extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == 0
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_extension_preexisting_filesystem(prepare_and_cleanup):
"""Create files on filesystem, add device with filesystem as a core,
write data to files and check if they are cached properly"""
cache, core = prepare()
ioclass_id = 1
extensions = ["tmp", "tm", "out", "txt", "log", "123"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
TestRun.LOGGER.info(f"Preparing files on raw block device")
casadm.remove_core(cache.cache_id, core_id=core.core_id)
core.core_device.create_filesystem(Filesystem.ext3)
core.core_device.mount(mountpoint)
# Prepare files
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
core.core_device.unmount()
# Prepare ioclass config
rule = "|".join([f"extension:{ext}" for ext in extensions])
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
core = casadm.add_core(cache, core_dev=core.core_device)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
core.mount(mountpoint)
cache.flush_cache()
# Check if files with proper extensions are cached
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096)
== (extensions.index(ext) + 1) * dd_count
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_offset(prepare_and_cleanup):
cache, core = prepare()
ioclass_id = 1
iterations = 100
dd_size = Size(4, Unit.KibiByte)
dd_count = 1
min_cached_offset = 16384
max_cached_offset = 65536
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Since the ioclass rule consists of strict inequalities, 'seek' can't be set to the first
# or last block of the cached range
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int(
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
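# With the values above: min_seek = (16384 + 4096) / 4096 = 5 and
# max_seek = (65536 - 16384 - 4096) / 4096 = 11, so the written 4 KiB blocks start at
# byte offsets 20480..40960 and stay strictly inside the (16384, 65536) range.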
TestRun.LOGGER.info(f"Writing to file within cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == 1
), f"Offset not cached: {file_offset}"
cache.flush_cache()
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == 0
), f"Inappropriately cached offset: {file_offset}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_size(prepare_and_cleanup, filesystem):
"""
File size IO class rules are configured in a way that each tested file size is unambiguously
classified.
First, write operations (file creation) are tested, then read operations.
"""
def load_file_size_io_classes():
# IO class order intentional, do not change
base_size_bytes = int(base_size.get_value(Unit.Byte))
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:lt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule=f"file_size:le:{int(base_size_bytes / 2)}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"file_size:ge:{2 * base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
def create_files_and_check_classification():
TestRun.LOGGER.info("Creating files belonging to different IO classes "
"(classification by writes).")
for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
file_path = f"{mountpoint}/test_file_{size.get_value()}"
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
if occupancy_after != occupancy_before + size:
pytest.xfail("File not cached properly!\n"
f"Expected {occupancy_before + size}\n"
f"Actual {occupancy_after}")
test_files.append(File(file_path).refresh_item())
sync()
drop_caches(DropCachesMode.ALL)
def reclassify_files():
TestRun.LOGGER.info("Reading files belonging to different IO classes "
"(classification by reads).")
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
if occupancy_after != occupancy_before + file.size:
pytest.xfail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
def remove_files_classification():
TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
occupancy_before = cache.get_cache_statistics(io_class_id=0)["occupancy"]
for file in test_files:
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
if occupancy_after != occupancy_before + file.size:
pytest.xfail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
occupancy_before = occupancy_after
sync()
drop_caches(DropCachesMode.ALL)
def restore_classification_config():
TestRun.LOGGER.info("Restoring IO class configuration")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
load_file_size_io_classes()
cache, core = prepare()
Udev.disable()
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
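# E.g. for a hypothetical base_size of 200 blocks: 200 -> class 1 (eq), 199 -> class 2 (lt),
# 201 -> class 3 (gt), 100 and 99 -> class 4 (le half), 101 -> class 2,
# 400 and 401 -> class 5 (ge double), 399 -> class 3.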
load_file_size_io_classes()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
test_files = []
create_files_and_check_classification()
remove_files_classification()
restore_classification_config()
reclassify_files()

View File

@ -0,0 +1,116 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import time
import pytest
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_process_name(prepare_and_cleanup):
"""Check if data generated by process with particular name is cached"""
cache, core = prepare()
ioclass_id = 1
dd_size = Size(4, Unit.KibiByte)
dd_count = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"process_name:dd&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache()
Udev.disable()
TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.")
for i in range(iterations):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
)
dd.run()
sync()
time.sleep(0.1)
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == (i + 1) * dd_count
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_pid(prepare_and_cleanup):
cache, core = prepare()
ioclass_id = 1
iterations = 20
dd_count = 100
dd_size = Size(4, Unit.KibiByte)
Udev.disable()
# Since 'dd' has to be executed right after writing the pid to 'ns_last_pid',
# the 'dd' command is built up front and appended to the 'echo' command instead of being run directly
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
)
for i in range(iterations):
cache.flush_cache()
output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
if output.exit_code != 0:
raise Exception(
f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
)
# A few pids might be used up by the system during test preparation
pid = int(output.stdout) + 50
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Running dd with pid {pid}")
# The pid written to 'ns_last_pid' has to be smaller by one than the target dd pid
dd_and_pid_command = (
f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
)
output = TestRun.executor.run(dd_and_pid_command)
if output.exit_code != 0:
raise Exception(
f"Failed to run dd with target pid. "
f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == dd_count
ioclass_config.remove_ioclass(ioclass_id)
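For context, a minimal standalone sketch of the ns_last_pid mechanism this test relies on (illustrative only: target_pid is hypothetical, the write requires root privileges, and it assumes no other process forks in between):
import subprocess
target_pid = 123456
# Writing target_pid - 1 to ns_last_pid makes the kernel hand out target_pid to the
# next process forked in the namespace, which here is the dd started in the same shell.
subprocess.run(
f"echo {target_pid - 1} > /proc/sys/kernel/ns_last_pid && "
f"dd if=/dev/zero of=/dev/null bs=4096 count=1",
shell=True, check=True,
)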

View File

@ -0,0 +1,577 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
import pytest
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_lba(prepare_and_cleanup):
"""Write data to random lba and check if it is cached according to range
defined in ioclass rule"""
cache, core = prepare()
ioclass_id = 1
min_cached_lba = 56
max_cached_lba = 200
iterations = 100
dd_size = Size(1, Unit.Blocks512)
dd_count = 1
# Prepare ioclass config
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache()
# Check if lbas from defined range are cached
dirty_count = 0
# A step of 8 sectors is used to prevent writing to the same cache line more than once
TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
)
dd.run()
sync()
dirty_count += 1
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == dirty_count
), f"LBA {lba} not cached"
cache.flush_cache()
# Check if lba outside of defined range are not cached
TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.")
for i in range(iterations):
rand_lba = random.randrange(2000)
if min_cached_lba <= rand_lba <= max_cached_lba:
continue
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == 0
), f"Inappropriately cached lba: {rand_lba}"
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_request_size(prepare_and_cleanup):
cache, core = prepare()
ioclass_id = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"request_size:ge:8192&request_size:le:16384&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
Udev.disable()
# Check if requests with appropriate size are cached
TestRun.LOGGER.info(
f"Check if requests with size within defined range are cached"
)
cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
for i in range(iterations):
cache.flush_cache()
req_size = random.choice(cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096)
== req_size.value / Unit.Blocks4096.value
)
cache.flush_cache()
# Check if requests with inappropriate size are not cached
TestRun.LOGGER.info(
f"Check if requests with size outside defined range are not cached"
)
not_cached_req_sizes = [
Size(1, Unit.Blocks4096),
Size(8, Unit.Blocks4096),
Size(16, Unit.Blocks4096),
]
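# 2 and 4 blocks (8 KiB and 16 KiB) sit exactly on the ge/le bounds of the rule above,
# while 1, 8 and 16 blocks (4 KiB, 32 KiB and 64 KiB) fall outside the cached range.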
for i in range(iterations):
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == 0
@pytest.mark.parametrize("filesystem", list(Filesystem) + [False])
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_direct(prepare_and_cleanup, filesystem):
"""
Perform buffered/direct IO to/from files or raw block device.
Data from direct IO should be cached in the direct IO class.
Data from buffered IO should not be classified to that class and, if performed to/from
already cached data, should cause reclassification to the unclassified IO class.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = 1
io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)
# direct IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
fio = (
Fio().create_command()
.io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
)
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
else:
TestRun.LOGGER.info("Testing on raw exported object")
base_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}")
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy, \
"Buffered writes were cached!\n" \
f"Expected: {base_occupancy}, actual: {new_occupancy}"
TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + io_size, \
"Wrong number of direct writes was cached!\n" \
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
fio.remove_param("readwrite").remove_param("direct")
fio.read_write(ReadWrite.read)
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy, \
"Buffered reads did not cause reclassification!" \
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}"
TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + io_size, \
"Wrong number of direct reads was cached!\n" \
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_metadata(prepare_and_cleanup, filesystem):
"""
Perform operations on files that cause metadata update.
Determine if every such operation results in increased writes to cached metadata.
Exact values are not checked, as each filesystem has a different metadata structure.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# metadata IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="metadata&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
requests_to_metadata_before = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
TestRun.LOGGER.info("Creating 20 test files")
files = []
for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}"
dd = (
Dd()
.input("/dev/urandom")
.output(file_path)
.count(random.randint(5, 50))
.block_size(Size(1, Unit.MebiByte))
.oflag("sync")
)
dd.run()
files.append(File(file_path))
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while creating files!")
requests_to_metadata_before = requests_to_metadata_after
TestRun.LOGGER.info("Renaming all test files")
for file in files:
file.move(f"{file.full_path}_renamed")
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while renaming files!")
requests_to_metadata_before = requests_to_metadata_after
test_dir_path = f"{mountpoint}/test_dir"
TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
fs_utils.create_directory(path=test_dir_path)
TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
for file in files:
file.move(test_dir_path)
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while moving files!")
TestRun.LOGGER.info(f"Removing {test_dir_path}")
fs_utils.remove(path=test_dir_path, force=True, recursive=True)
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while deleting directory with files!")
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_id_as_condition(prepare_and_cleanup, filesystem):
"""
Load config in which IO class ids are used as conditions in other IO class definitions.
Check if performed IO is properly classified.
"""
cache, core = prepare()
Udev.disable()
base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
# directory condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
# file size condition
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# direct condition
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
# IO class 1 OR 2 condition
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule="io_class:1|io_class:2",
ioclass_config_path=ioclass_config_path,
)
# IO class 4 AND file size condition (same as IO class 2)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# IO class 3 condition
ioclass_config.add_ioclass(
ioclass_id=6,
eviction_priority=1,
allocation=True,
rule="io_class:3",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
sync()
# IO fulfilling IO class 1 condition (and not IO class 2)
# Should be classified as IO class 4
base_occupancy = cache.get_cache_statistics(io_class_id=4)["occupancy"]
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(non_ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_1")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=4)["occupancy"]
assert new_occupancy == base_occupancy + non_ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}"
# IO fulfilling IO class 2 condition (and not IO class 1)
# Should be classified as IO class 5
base_occupancy = cache.get_cache_statistics(io_class_id=5)["occupancy"]
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file_2")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=5)["occupancy"]
assert new_occupancy == base_occupancy + ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
# IO fulfilling IO class 1 and 2 conditions
# Should be classified as IO class 5
base_occupancy = new_occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=5)["occupancy"]
assert new_occupancy == base_occupancy + ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
# Same IO but direct
# Should be classified as IO class 6
base_occupancy = cache.get_cache_statistics(io_class_id=6)["occupancy"]
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.direct()
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=6)["occupancy"]
assert new_occupancy == base_occupancy + ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_conditions_or(prepare_and_cleanup, filesystem):
"""
Load config with an IO class combining 5 mutually exclusive conditions connected by the OR operator.
Check if every IO fulfilling one condition is classified properly.
"""
cache, core = prepare()
Udev.disable()
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
fs_utils.create_directory(f"{mountpoint}/dir{i}")
sync()
# Perform IO fulfilling each condition and check if occupancy raises
for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/dir{i}/test_file")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
assert new_occupancy == base_occupancy + file_size, \
"Occupancy has not increased correctly!\n" \
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_conditions_and(prepare_and_cleanup, filesystem):
"""
Load config with an IO class combining 5 conditions, each contradicting at least one of the
others, connected by the AND operator.
Check that IO fulfilling only some of the conditions is never classified to that class.
"""
cache, core = prepare()
Udev.disable()
file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte))
# contradicting file size conditions connected with AND
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
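# Note: file_size:gt:X and file_size:lt:X can never both hold for the same file size,
# so this class is intentionally unsatisfiable and no IO should ever be classified to it.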
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
# Perform IO
for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]:
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
assert new_occupancy == base_occupancy, \
"Unexpected occupancy increase!\n" \
f"Expected: {base_occupancy}, actual: {new_occupancy}"

View File

@ -0,0 +1,76 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas import casadm, casadm_parser
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_utils.size import Size, Unit
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_load_occupied_id(prepare_and_cleanup):
"""
1. Start new cache instance (don't specify cache id)
2. Add core to the newly created cache.
3. Stop cache instance.
4. Start new cache instance on another device (don't specify cache id).
5. Try to load metadata from first device.
* Load should fail.
"""
prepare()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (
disk.disk_type.value > cache_device.disk_type.value and disk != cache_device
)
)
TestRun.LOGGER.info("Creating partitons for test")
cache_device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_device_1 = cache_device.partitions[0]
cache_device_2 = cache_device.partitions[1]
core_device = core_device.partitions[0]
TestRun.LOGGER.info("Starting cache with default id and one core")
cache1 = casadm.start_cache(cache_device_1, force=True)
cache1.add_core(core_device)
TestRun.LOGGER.info("Stopping cache")
cache1.stop()
TestRun.LOGGER.info("Starting cache with default id on different device")
cache2 = casadm.start_cache(cache_device_2, force=True)
TestRun.LOGGER.info("Attempt to load metadata from first cache device")
try:
casadm.load_cache(cache_device_1)
except Exception:
pass
caches = casadm_parser.get_caches()
assert len(caches) == 1, "Inappropirate number of caches after load!"
assert caches[0].cache_device.system_path == cache_device_2.system_path
assert caches[0].cache_id == 1
cores = caches[0].get_core_devices()
assert len(cores) == 0
def prepare():
base_prepare()

View File

@ -0,0 +1,333 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas.casadm import StatsFilter
from api.cas import casadm
from api.cas import ioclass_config
from test_tools.dd import Dd
from api.cas.cache_config import CacheMode, CleaningPolicy
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_utils.size import Size, Unit
from test_utils.os_utils import Udev
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
exported_obj_path_prefix = "/dev/cas1-"
cache_id = 1
# lists of cache and core block stats that should have zero value for particular cache modes
write_wb_zero_stats = [
"reads from core(s)",
"writes to core(s)",
"total to/from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"writes to core",
"total to/from core",
"reads from cache",
"reads from exported object",
]
write_wt_zero_stats = [
"reads from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_pt_zero_stats = [
"reads from core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_wa_zero_stats = [
"reads from core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_wo_zero_stats = [
"reads from core(s)",
"writes to core(s)",
"total to/from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"writes to core",
"total to/from core",
"reads from exported object",
]
@pytest.mark.parametrize(
"cache_mode,zero_stats",
[
(CacheMode.WB, write_wb_zero_stats),
(CacheMode.WT, write_wt_zero_stats),
(CacheMode.PT, write_pt_zero_stats),
(CacheMode.WA, write_wa_zero_stats),
(CacheMode.WO, write_wo_zero_stats),
],
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_block_stats_write(prepare_and_cleanup, cache_mode, zero_stats):
"""Perform read and write operations to cache instance in different cache modes
and check if block stats values are correct"""
cache, cores = prepare(cache_mode)
iterations = 10
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
flush(cache)
# Check stats for cache after performing write operation
for core in cores:
dd_seek = 0
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.oflag("direct")
)
# Since every IO has the same size, every stat should be increased by the same step,
# so there is no need to keep the value of every stat in a separate variable
cache_stat = (
(dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
)
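# E.g. for a hypothetical second core (core_id == 2): 1 block per write * dd_count (10)
# * (core_id - 1) * iterations (10) == 100 blocks already accounted to the first core;
# assumed_value below then grows by another 10 blocks per inner iteration for this core.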
for i in range(iterations):
dd.seek(dd_seek)
dd.run()
cache_stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
core_stats = core.get_core_statistics(stat_filter=[StatsFilter.blk])
# Check cache stats
assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
for key, value in cache_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
                    # For each subsequent core, cache stats have to include
                    # the sum of all previously tested cores
assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
# Check single core stats
for key, value in core_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value of \n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
assert assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
)
dd_seek += dd_count
# Lists of cache and core block stats that should have zero value for particular cache modes
read_wb_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wt_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_pt_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wa_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wo_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
@pytest.mark.parametrize(
"cache_mode,zero_stats",
[
(CacheMode.WB, read_wb_zero_stats),
(CacheMode.WT, read_wt_zero_stats),
(CacheMode.PT, read_pt_zero_stats),
(CacheMode.WA, read_wa_zero_stats),
(CacheMode.WO, read_wo_zero_stats),
],
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_block_stats_read(prepare_and_cleanup, cache_mode, zero_stats):
"""Perform read and write operations to cache instance in different cache modes
and check if block stats values are correct"""
cache, cores = prepare(cache_mode)
iterations = 10
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
flush(cache)
# Check stats for cache after performing read operation
for core in cores:
dd_skip = 0
dd = (
Dd()
.output("/dev/zero")
.input(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.iflag("direct")
)
        # Since every IO has the same size, every stat should increase by the same step,
        # so there is no need to keep each stat's value in a separate variable.
cache_stat = (
(dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
)
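        # As in the write test: each dd run reads dd_count blocks from a fresh offset
        # (dd_skip advances by dd_count), so every compared stat grows by the same
        # 10-block step per iteration.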
for i in range(iterations):
dd.skip(dd_skip)
dd.run()
cache_stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
core_stats = core.get_core_statistics(stat_filter=[StatsFilter.blk])
# Check cache stats
assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
for key, value in cache_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
                    # For each subsequent core, cache stats have to include
                    # the sum of all previously tested cores
assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}. Cache mode: {cache_mode}"
)
# Check single core stats
for key, value in core_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}. Cache mode: {cache_mode}"
)
else:
assert assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count} dd skip {dd_skip}. Cache mode: {cache_mode}"
)
dd_skip += dd_count
def flush(cache):
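    """Flush the cache, reset counters and verify that all block stats are back to zero."""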
cache.flush_cache()
cache.reset_counters()
stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
for key, value in stats.items():
assert value.get_value(Unit.Blocks4096) == 0
def prepare(cache_mode: CacheMode):
base_prepare()
ioclass_config.remove_ioclass_config()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
)
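    # The core disk is chosen as one with a higher DiskType value than the cache disk,
    # i.e. presumably a slower medium, so the cache device is the faster of the two.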
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions(
[Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
)
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
core_device_2 = core_device.partitions[1]
core_device_3 = core_device.partitions[2]
Udev.disable()
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core devices")
core_1 = cache.add_core(core_dev=core_device_1)
core_2 = cache.add_core(core_dev=core_device_2)
core_3 = cache.add_core(core_dev=core_device_3)
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, [core_1, core_2, core_3]

View File

@ -0,0 +1,181 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas.casadm import StatsFilter
from api.cas import casadm
from api.cas import ioclass_config
from api.cas import casadm_parser
from api.cas.cache_config import CleaningPolicy
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_tools.disk_utils import Filesystem
from test_utils.size import Size, Unit
from test_utils.os_utils import sync, Udev
from test_utils.filesystem.file import File
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
cache_id = 1
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_stats_set(prepare_and_cleanup):
"""Try to retrieve stats for all set ioclasses"""
prepare()
min_ioclass_id = 1
max_ioclass_id = 11
ioclass_config.create_ioclass_config(
add_default_rule=True, ioclass_config_path=ioclass_config_path
)
TestRun.LOGGER.info("Preparing ioclass config file")
for i in range(min_ioclass_id, max_ioclass_id):
ioclass_config.add_ioclass(
ioclass_id=(i + 10),
eviction_priority=22,
allocation=True,
rule=f"file_size:le:{4096*i}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id, file=ioclass_config_path)
TestRun.LOGGER.info("Preparing ioclass config file")
for i in range(32):
if i != 0 or i not in range(min_ioclass_id, max_ioclass_id):
with pytest.raises(Exception):
assert casadm_parser.get_statistics(
cache_id=cache_id, io_class_id=True, filter=[StatsFilter.conf]
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_stats_sum(prepare_and_cleanup):
"""Check if stats for all set ioclasses sum up to cache stats"""
cache, core = prepare()
min_ioclass_id = 1
max_ioclass_id = 11
file_size_base = Unit.KibiByte.value * 4
TestRun.LOGGER.info("Preparing ioclass config file")
ioclass_config.create_ioclass_config(
add_default_rule=True, ioclass_config_path=ioclass_config_path
)
for i in range(min_ioclass_id, max_ioclass_id):
ioclass_config.add_ioclass(
ioclass_id=i,
eviction_priority=22,
allocation=True,
rule=f"file_size:le:{file_size_base*i}&done",
ioclass_config_path=ioclass_config_path,
)
cache.load_io_class(ioclass_config_path)
TestRun.LOGGER.info("Generating files with particular sizes")
files_list = []
for i in range(min_ioclass_id, max_ioclass_id):
path = f"/tmp/test_file_{file_size_base*i}"
File.create_file(path)
f = File(path)
f.padding(Size(file_size_base * i, Unit.Byte))
files_list.append(f)
core.create_filesystem(Filesystem.ext4)
cache.reset_counters()
    # Names of stats which should not be compared
not_compare_stats = ["clean", "occupancy"]
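    # "clean" and "occupancy" describe cache state rather than accumulated traffic,
    # so they are presumably not expected to decompose exactly per io class.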
ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
# Append default ioclass id
ioclass_id_list.append(0)
TestRun.LOGGER.info("Copying files to mounted core and stats check")
for f in files_list:
        # To prevent stats pollution by filesystem requests, unmount the core device
        # after each file is copied
core.mount(mountpoint)
f.copy(mountpoint)
sync()
core.unmount()
sync()
cache_stats = cache.get_cache_statistics(
stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
)
for ioclass_id in ioclass_id_list:
ioclass_stats = cache.get_cache_statistics(
stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk],
io_class_id=ioclass_id,
)
for stat_name in cache_stats:
if stat_name in not_compare_stats:
continue
cache_stats[stat_name] -= ioclass_stats[stat_name]
for stat_name in cache_stats:
if stat_name in not_compare_stats:
continue
stat_val = (
cache_stats[stat_name].get_value()
if isinstance(cache_stats[stat_name], Size)
else cache_stats[stat_name]
)
assert stat_val == 0, f"{stat_name} diverged!\n"
# Test cleanup
for f in files_list:
f.remove()
def flush_cache(cache_id):
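    """Flush the given cache, reset its counters and verify that block stats are zeroed."""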
casadm.flush(cache_id=cache_id)
sync()
casadm.reset_counters(cache_id=cache_id)
stats = casadm_parser.get_statistics(cache_id=cache_id, filter=[StatsFilter.blk])
for key, value in stats.items():
assert value.get_value(Unit.Blocks4096) == 0
def prepare():
base_prepare()
ioclass_config.remove_ioclass_config()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(2, Unit.GibiByte)])
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
Udev.disable()
TestRun.LOGGER.info(f"Staring cache")
cache = casadm.start_cache(cache_device, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
cache.set_cleaning_policy(CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core devices")
core = cache.add_core(core_dev=core_device_1)
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, core