test-api: update cas api

Signed-off-by: Kamil Gierszewski <kamil.gierszewski@huawei.com>
This commit is contained in:
Kamil Gierszewski 2024-08-08 03:11:21 +02:00
parent 5dccbc3978
commit d48e9fc80d
No known key found for this signature in database
14 changed files with 1180 additions and 1110 deletions

View File

@ -1,28 +1,36 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from api.cas.casadm_parser import *
from api.cas.cli import *
from api.cas.statistics import CacheStats, CacheIoClassStats
from api.cas.dmesg import get_metadata_size_on_device
from api.cas.statistics import CacheStats, IoClassStats
from test_utils.os_utils import *
from test_utils.output import Output
class Cache:
def __init__(self, device: Device):
def __init__(self, device: Device, cache_id: int = None):
self.cache_device = device
self.cache_id = int(self.__get_cache_id())
self.cache_id = cache_id if cache_id else self.__get_cache_id()
self.__cache_line_size = None
self.__metadata_size = None
self.metadata_size_on_disk = self.get_metadata_size_on_disk()
def __get_cache_id(self):
cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.get_device_id()}"
output = TestRun.executor.run(cmd)
if output.exit_code == 0 and output.stdout.strip():
return output.stdout.split()[1]
else:
raise Exception(f"There is no cache started on {self.cache_device.get_device_id()}.")
def __get_cache_id(self) -> int:
    """Resolve this cache's numeric id by matching its device path in casadm output.

    Raises:
        Exception: when no running cache is found on the cache device.
    """
    device_path = self.__get_cache_device_path()
    running_caches = get_cas_devices_dict()["caches"]
    matching = [
        int(cache["id"])
        for cache in running_caches.values()
        if cache["device_path"] == device_path
    ]
    if matching:
        return matching[0]
    raise Exception(f"There is no cache started on {device_path}")
def __get_cache_device_path(self) -> str:
    """Return the backing device path, or "-" when no cache device is set."""
    if self.cache_device is None:
        return "-"
    return self.cache_device.path
def get_core_devices(self):
    """Return the cores currently attached to this cache."""
    cores = get_cores(self.cache_id)
    return cores
@ -39,11 +47,13 @@ class Cache:
cp = stats.config_stats.cleaning_policy
return CleaningPolicy[cp]
def get_metadata_size(self):
if self.__metadata_size is None:
def get_metadata_size_in_ram(self) -> Size:
stats = self.get_statistics()
self.__metadata_size = stats.config_stats.metadata_memory_footprint
return self.__metadata_size
return stats.config_stats.metadata_memory_footprint
def get_metadata_size_on_disk(self) -> Size:
    """Return this cache's on-device metadata size, parsed from dmesg."""
    return get_metadata_size_on_device(cache_name=f"cache{self.cache_id}")
def get_occupancy(self):
    """Return the occupancy value from this cache's usage statistics."""
    stats = self.get_statistics()
    return stats.usage_stats.occupancy
@ -80,32 +90,34 @@ class Cache:
# Casadm methods:
def get_io_class_statistics(self,
def get_statistics(
    self,
    stat_filter: List[StatsFilter] = None,
    percentage_val: bool = False,
) -> CacheStats:
    """Collect cache-level statistics.

    Args:
        stat_filter: optional list of stat sections to include; None means all.
        percentage_val: when True, values are reported as percentages.
    """
    return CacheStats(
        cache_id=self.cache_id, filter=stat_filter, percentage_val=percentage_val
    )
def get_io_class_statistics(
    self,
    io_class_id: int,
    stat_filter: List[StatsFilter] = None,
    percentage_val: bool = False,
):
    """Collect per-IO-class statistics for this cache.

    Args:
        io_class_id: id of the IO class to query.
        stat_filter: optional list of stat sections to include; None means all.
        percentage_val: when True, values are reported as percentages.
    """
    stats = IoClassStats(
        cache_id=self.cache_id,
        filter=stat_filter,
        io_class_id=io_class_id,
        percentage_val=percentage_val,
    )
    return stats
def get_statistics(self,
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False):
stats = get_statistics(self.cache_id, None, None,
stat_filter, percentage_val)
return CacheStats(stats)
def get_statistics_flat(self,
io_class_id: int = None,
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False):
return get_statistics(self.cache_id, None, io_class_id,
stat_filter, percentage_val)
def flush_cache(self):
casadm.flush(cache_id=self.cache_id)
def flush_cache(self) -> Output:
    """Flush this cache via casadm, sync, and verify no dirty blocks remain.

    Returns:
        Output: the casadm flush command output.
    """
    cmd_output = casadm.flush_cache(cache_id=self.cache_id)
    sync()
    # Sanity check that the flush left zero dirty 4KiB blocks.
    # NOTE(review): `assert` is stripped under `python -O`; an explicit check
    # with a raise would survive optimized runs — confirm intended semantics.
    assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
    return cmd_output
def purge_cache(self):
casadm.purge_cache(cache_id=self.cache_id)
@ -136,47 +148,60 @@ class Cache:
return get_io_class_list(self.cache_id)
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
return casadm.set_param_cutoff(self.cache_id,
return casadm.set_param_cutoff(
self.cache_id,
threshold=seq_cutoff_param.threshold,
policy=seq_cutoff_param.policy,
promotion_count=seq_cutoff_param.promotion_count)
promotion_count=seq_cutoff_param.promotion_count,
)
def set_seq_cutoff_threshold(self, threshold: Size):
return casadm.set_param_cutoff(self.cache_id,
threshold=threshold,
policy=None)
return casadm.set_param_cutoff(self.cache_id, threshold=threshold, policy=None)
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
return casadm.set_param_cutoff(self.cache_id,
threshold=None,
policy=policy)
return casadm.set_param_cutoff(self.cache_id, threshold=None, policy=policy)
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
return casadm.set_param_cleaning(self.cache_id, cleaning_policy)
def set_params_acp(self, acp_params: FlushParametersAcp):
return casadm.set_param_cleaning_acp(self.cache_id,
return casadm.set_param_cleaning_acp(
self.cache_id,
(
int(acp_params.wake_up_time.total_milliseconds())
if acp_params.wake_up_time else None,
int(acp_params.flush_max_buffers)
if acp_params.flush_max_buffers else None)
if acp_params.wake_up_time
else None
),
int(acp_params.flush_max_buffers) if acp_params.flush_max_buffers else None,
)
def set_params_alru(self, alru_params: FlushParametersAlru):
return casadm.set_param_cleaning_alru(
self.cache_id,
(
int(alru_params.wake_up_time.total_seconds())
if alru_params.wake_up_time is not None else None,
if alru_params.wake_up_time is not None
else None
),
(
int(alru_params.staleness_time.total_seconds())
if alru_params.staleness_time is not None else None,
alru_params.flush_max_buffers
if alru_params.flush_max_buffers is not None else None,
if alru_params.staleness_time is not None
else None
),
(alru_params.flush_max_buffers if alru_params.flush_max_buffers is not None else None),
(
int(alru_params.activity_threshold.total_milliseconds())
if alru_params.activity_threshold is not None else None)
if alru_params.activity_threshold is not None
else None
),
)
def get_cache_config(self):
return CacheConfig(self.get_cache_line_size(),
return CacheConfig(
self.get_cache_line_size(),
self.get_cache_mode(),
self.get_cleaning_policy())
self.get_cleaning_policy(),
)
def standby_detach(self, shortcut: bool = False):
return casadm.standby_detach_cache(cache_id=self.cache_id, shortcut=shortcut)
@ -185,3 +210,6 @@ class Cache:
return casadm.standby_activate_cache(
cache_id=self.cache_id, cache_dev=device, shortcut=shortcut
)
def has_volatile_metadata(self) -> bool:
    """Return True when this cache keeps no metadata on disk."""
    on_disk = self.get_metadata_size_on_disk()
    return on_disk == Size.zero()

View File

@ -1,9 +1,10 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from aenum import Enum, IntFlag
from enum import Enum, IntFlag
from test_utils.os_utils import get_kernel_module_parameter
from test_utils.size import Size, Unit
@ -56,21 +57,15 @@ class CacheMode(Enum):
@staticmethod
def with_traits(flags: CacheModeTrait):
return [
m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))
]
return [m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))]
@staticmethod
def without_traits(flags: CacheModeTrait):
return [
m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))
]
return [m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))]
@staticmethod
def with_any_trait(flags: CacheModeTrait):
return [
m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))
]
return [m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))]
class SeqCutOffPolicy(Enum):
@ -90,7 +85,6 @@ class SeqCutOffPolicy(Enum):
class MetadataMode(Enum):
normal = "normal"
atomic = "atomic"
DEFAULT = normal
def __str__(self):
@ -152,18 +146,16 @@ class FlushParametersAlru:
)
def __str__(self):
ret = ["activity threshold: "
+ (f"{self.activity_threshold}" if self.activity_threshold is not None
else "default"),
ret = [
"activity threshold: "
+ (f"{self.activity_threshold}" if self.activity_threshold is not None else "default"),
"flush max buffers: "
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None
else "default"),
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
"staleness time: "
+ (f"{self.staleness_time}" if self.staleness_time is not None
else "default"),
+ (f"{self.staleness_time}" if self.staleness_time is not None else "default"),
"wake up time: "
+ (f"{self.wake_up_time}" if self.wake_up_time is not None
else "default")]
+ (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
]
return " | ".join(ret)
@staticmethod
@ -197,12 +189,12 @@ class FlushParametersAcp:
)
def __str__(self):
ret = ["flush max buffers: "
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None
else "default"),
ret = [
"flush max buffers: "
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
"wake up time: "
+ (f"{self.wake_up_time}" if self.wake_up_time is not None
else "default")]
+ (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
]
return " | ".join(ret)
@staticmethod
@ -238,7 +230,7 @@ class SeqCutOffParameters:
return SeqCutOffParameters(
threshold=Size(1024, Unit.KibiByte),
policy=SeqCutOffPolicy.full,
promotion_count=8
promotion_count=8,
)
@ -248,10 +240,7 @@ class PromotionParametersNhit:
self.trigger = trigger
def __eq__(self, other):
    """Parameter sets are equal when both threshold and trigger match."""
    same_threshold = self.threshold == other.threshold
    same_trigger = self.trigger == other.trigger
    return same_threshold and same_trigger
@staticmethod
def nhit_params_range():
@ -293,7 +282,7 @@ class KernelParameters:
use_io_scheduler: UseIoScheduler = None,
seq_cut_off_mb: int = None,
max_writeback_queue_size: int = None,
writeback_queue_unblock_size: int = None
writeback_queue_unblock_size: int = None,
):
self.unaligned_io = unaligned_io
self.use_io_scheduler = use_io_scheduler
@ -312,16 +301,17 @@ class KernelParameters:
self.use_io_scheduler, other.use_io_scheduler, UseIoScheduler.DEFAULT
)
and equal_or_default(
self.seq_cut_off_mb, other.seq_cut_off_mb,
self.seq_cut_off_mb_DEFAULT
self.seq_cut_off_mb, other.seq_cut_off_mb, self.seq_cut_off_mb_DEFAULT
)
and equal_or_default(
self.max_writeback_queue_size, other.max_writeback_queue_size,
self.max_writeback_queue_size_DEFAULT
self.max_writeback_queue_size,
other.max_writeback_queue_size,
self.max_writeback_queue_size_DEFAULT,
)
and equal_or_default(
self.writeback_queue_unblock_size, other.writeback_queue_unblock_size,
self.writeback_queue_unblock_size_DEFAULT
self.writeback_queue_unblock_size,
other.writeback_queue_unblock_size,
self.writeback_queue_unblock_size_DEFAULT,
)
)
@ -332,7 +322,7 @@ class KernelParameters:
UseIoScheduler.DEFAULT,
cls.seq_cut_off_mb_DEFAULT,
cls.max_writeback_queue_size_DEFAULT,
cls.writeback_queue_unblock_size_DEFAULT
cls.writeback_queue_unblock_size_DEFAULT,
)
@staticmethod
@ -343,7 +333,7 @@ class KernelParameters:
UseIoScheduler(int(get_kernel_module_parameter(module, "use_io_scheduler"))),
int(get_kernel_module_parameter(module, "seq_cut_off_mb")),
int(get_kernel_module_parameter(module, "max_writeback_queue_size")),
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size"))
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size")),
)
def get_parameter_dictionary(self):
@ -354,10 +344,15 @@ class KernelParameters:
params["use_io_scheduler"] = str(self.use_io_scheduler.value)
if self.seq_cut_off_mb not in [None, self.seq_cut_off_mb_DEFAULT]:
params["seq_cut_off_mb"] = str(self.seq_cut_off_mb)
if self.max_writeback_queue_size not in [None, self.max_writeback_queue_size_DEFAULT]:
if self.max_writeback_queue_size not in [
None,
self.max_writeback_queue_size_DEFAULT,
]:
params["max_writeback_queue_size"] = str(self.max_writeback_queue_size)
if (self.writeback_queue_unblock_size not in
[None, self.writeback_queue_unblock_size_DEFAULT]):
if self.writeback_queue_unblock_size not in [
None,
self.writeback_queue_unblock_size_DEFAULT,
]:
params["writeback_queue_unblock_size"] = str(self.writeback_queue_unblock_size)
return params
@ -370,7 +365,7 @@ class CacheConfig:
cache_line_size=CacheLineSize.DEFAULT,
cache_mode=CacheMode.DEFAULT,
cleaning_policy=CleaningPolicy.DEFAULT,
kernel_parameters=None
kernel_parameters=None,
):
self.cache_line_size = cache_line_size
self.cache_mode = cache_mode
@ -383,7 +378,9 @@ class CacheConfig:
and self.cache_mode == other.cache_mode
and self.cleaning_policy == other.cleaning_policy
and equal_or_default(
self.kernel_parameters, other.kernel_parameters, KernelParameters.DEFAULT
self.kernel_parameters,
other.kernel_parameters,
KernelParameters.DEFAULT,
)
)

View File

@ -1,9 +1,10 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from aenum import Enum
from enum import Enum
from core.test_run import TestRun
from test_utils import os_utils
from test_utils.os_utils import ModuleRemoveMethod
@ -19,8 +20,7 @@ def reload_all_cas_modules():
def unload_all_cas_modules():
    """Remove the CAS cache kernel module using rmmod."""
    os_utils.unload_kernel_module(
        CasModule.cache.value, os_utils.ModuleRemoveMethod.rmmod
    )
def is_cas_management_dev_present():

View File

@ -36,7 +36,8 @@ class Packages:
class _Rpm(RpmSet):
def __init__(self, packages_dir: str = ""):
def __init__(self, packages_paths: list, packages_dir: str = ""):
super().__init__(packages_paths)
self.packages_dir = packages_dir
self.packages = get_packages_list("rpm", self.packages_dir)
@ -65,7 +66,8 @@ class _Rpm(RpmSet):
class _Deb(DebSet):
def __init__(self, packages_dir: str = ""):
def __init__(self, packages_paths: list, packages_dir: str = ""):
super().__init__(packages_paths)
self.packages_dir = packages_dir
self.packages = get_packages_list("deb", self.packages_dir)
@ -98,7 +100,8 @@ def get_packages_list(package_type: str, packages_dir: str):
return []
return [
package for package in find_all_files(packages_dir, recursive=False)
package
for package in find_all_files(packages_dir, recursive=False)
# include only binary packages (ready to be processed by package manager)
if package.endswith(package_type.lower())
and not package.endswith("src." + package_type.lower())

View File

@ -1,55 +1,88 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from typing import List
from api.cas.cache import Cache
from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy, CleaningPolicy, \
KernelParameters
from api.cas.cache_config import (
CacheLineSize,
CacheMode,
SeqCutOffPolicy,
CleaningPolicy,
KernelParameters,
)
from api.cas.casadm_params import OutputFormat, StatsFilter
from api.cas.cli import *
from api.cas.core import Core
from core.test_run import TestRun
from storage_devices.device import Device
from test_utils.os_utils import reload_kernel_module
from test_utils.output import CmdException
from test_utils.output import CmdException, Output
from test_utils.size import Size, Unit
from .casadm_params import *
from .casctl import stop as casctl_stop
from .cli import *
# casadm commands
def help(shortcut: bool = False):
    """Run the casadm help command and return its output.

    Args:
        shortcut: use the short command-line form when True.
    """
    command = help_cmd(shortcut)
    return TestRun.executor.run(command)
def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
cache_line_size: CacheLineSize = None, cache_id: int = None,
force: bool = False, load: bool = False, shortcut: bool = False,
kernel_params: KernelParameters = KernelParameters()):
def start_cache(
cache_dev: Device,
cache_mode: CacheMode = None,
cache_line_size: CacheLineSize = None,
cache_id: int = None,
force: bool = False,
load: bool = False,
shortcut: bool = False,
kernel_params: KernelParameters = KernelParameters(),
):
if kernel_params != KernelParameters.read_current_settings():
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
_cache_line_size = None if cache_line_size is None else str(
int(cache_line_size.value.get_value(Unit.KibiByte)))
_cache_line_size = (
None
if cache_line_size is None
else str(int(cache_line_size.value.get_value(Unit.KibiByte)))
)
_cache_id = None if cache_id is None else str(cache_id)
_cache_mode = None if cache_mode is None else cache_mode.name.lower()
output = TestRun.executor.run(start_cmd(
cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
output = TestRun.executor.run(
start_cmd(
cache_dev=cache_dev.path,
cache_mode=_cache_mode,
cache_line_size=_cache_line_size,
cache_id=_cache_id,
force=force,
load=load,
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Failed to start cache.", output)
return Cache(cache_dev)
def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSize,
force: bool = False, shortcut: bool = False,
kernel_params: KernelParameters = KernelParameters()):
def standby_init(
cache_dev: Device,
cache_id: int,
cache_line_size: CacheLineSize,
force: bool = False,
shortcut: bool = False,
kernel_params: KernelParameters = KernelParameters(),
):
if kernel_params != KernelParameters.read_current_settings():
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
_cache_line_size = None if cache_line_size is None else str(
int(cache_line_size.value.get_value(Unit.KibiByte)))
_cache_line_size = (
None
if cache_line_size is None
else str(int(cache_line_size.value.get_value(Unit.KibiByte)))
)
output = TestRun.executor.run(
standby_init_cmd(
@ -66,18 +99,14 @@ def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSiz
def standby_load(cache_dev: Device, shortcut: bool = False):
output = TestRun.executor.run(
standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut)
)
output = TestRun.executor.run(standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to load standby cache.", output)
return Cache(cache_dev)
def standby_detach_cache(cache_id: int, shortcut: bool = False):
output = TestRun.executor.run(
standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut)
)
output = TestRun.executor.run(standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to detach standby cache.", output)
return output
@ -85,9 +114,7 @@ def standby_detach_cache(cache_id: int, shortcut: bool = False):
def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False):
output = TestRun.executor.run(
standby_activate_cmd(
cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut
)
standby_activate_cmd(cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Failed to activate standby cache.", output)
@ -96,7 +123,8 @@ def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = Fa
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut))
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Failed to stop cache.", output)
return output
@ -105,8 +133,13 @@ def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = Fals
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run(
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path,
core_id=_core_id, shortcut=shortcut))
add_core_cmd(
cache_id=str(cache.cache_id),
core_dev=core_dev.path,
core_id=_core_id,
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Failed to add core.", output)
core = Core(core_dev.path, cache.cache_id)
@ -115,8 +148,10 @@ def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
remove_core_cmd(cache_id=str(cache_id), core_id=str(core_id),
force=force, shortcut=shortcut))
remove_core_cmd(
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
)
)
if output.exit_code != 0:
raise CmdException("Failed to remove core.", output)
@ -124,22 +159,24 @@ def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool
def remove_inactive(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
remove_inactive_cmd(
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut))
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
)
)
if output.exit_code != 0:
raise CmdException("Failed to remove inactive core.", output)
def remove_detached(core_device: Device, shortcut: bool = False):
output = TestRun.executor.run(
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut))
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Failed to remove detached core.", output)
return output
def try_add(core_device: Device, cache_id: int, core_id: int):
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path,
str(core_id)))
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path, str(core_id)))
if output.exit_code != 0:
raise CmdException("Failed to execute try add script command.", output)
return Core(core_device.path, cache_id)
@ -176,36 +213,49 @@ def remove_core_with_script_command(cache_id: int, core_id: int, no_flush: bool
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run(
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut))
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Failed to reset counters.", output)
return output
def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
if core_id is None:
def flush_cache(cache_id: int, shortcut: bool = False) -> Output:
    """Flush all dirty data of the given cache via casadm.

    Raises:
        CmdException: when the flush command exits non-zero.
    """
    command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut)
    output = TestRun.executor.run(command)
    if output.exit_code != 0:
        raise CmdException("Flushing cache failed.", output)
    return output
def flush_core(cache_id: int, core_id: int, shortcut: bool = False) -> Output:
    """Flush dirty data of a single core via casadm.

    Raises:
        CmdException: when the flush command exits non-zero.
    """
    output = TestRun.executor.run(
        flush_core_cmd(
            cache_id=str(cache_id),
            core_id=str(core_id),
            shortcut=shortcut,
        )
    )
    if output.exit_code != 0:
        raise CmdException("Flushing core failed.", output)
    return output
def load_cache(device: Device, shortcut: bool = False):
    """Load an existing cache from the given device and return a Cache object.

    Raises:
        CmdException: when the load command exits non-zero.
    """
    command = load_cmd(cache_dev=device.path, shortcut=shortcut)
    output = TestRun.executor.run(command)
    if output.exit_code != 0:
        raise CmdException("Failed to load cache.", output)
    return Cache(device)
def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
shortcut: bool = False):
def list_caches(
output_format: OutputFormat = None, by_id_path: bool = True, shortcut: bool = False
):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut))
list_caches_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Failed to list caches.", output)
return output
@ -213,8 +263,7 @@ def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
def print_version(output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
version_cmd(output_format=_output_format, shortcut=shortcut))
output = TestRun.executor.run(version_cmd(output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to print version.", output)
return output
@ -222,37 +271,43 @@ def print_version(output_format: OutputFormat = None, shortcut: bool = False):
def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run(
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut))
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Failed to wipe metadata.", output)
return output
def stop_all_caches():
    """Stop every running cache, one by one."""
    from .casadm_parser import get_caches

    caches = get_caches()
    if not caches:
        return
    for cache in caches:
        stop_cache(cache_id=cache.cache_id)
def remove_all_detached_cores():
    """Remove every core currently sitting in the detached core pool."""
    from api.cas import casadm_parser

    devices = casadm_parser.get_cas_devices_dict()
    for core in devices["core_pool"]:
        TestRun.executor.run(remove_detached_cmd(core["device"]))
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
io_class_id: int = None, filter: List[StatsFilter] = None,
output_format: OutputFormat = None, by_id_path: bool = True,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
_core_id = None if core_id is None else str(core_id)
_io_class_id = None if io_class_id is None else str(io_class_id)
def print_statistics(
cache_id: int,
core_id: int = None,
io_class_id: int = None,
filter: List[StatsFilter] = None,
output_format: OutputFormat = None,
by_id_path: bool = True,
shortcut: bool = False,
):
_output_format = output_format.name if output_format else None
_core_id = str(core_id) if core_id else None
_io_class_id = str(io_class_id) if io_class_id else None
if filter is None:
_filter = filter
else:
@ -260,17 +315,21 @@ def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = Fa
_filter = ",".join(names)
output = TestRun.executor.run(
print_statistics_cmd(
cache_id=str(cache_id), core_id=_core_id,
per_io_class=per_io_class, io_class_id=_io_class_id,
filter=_filter, output_format=_output_format,
by_id_path=by_id_path, shortcut=shortcut))
cache_id=str(cache_id),
core_id=_core_id,
io_class_id=_io_class_id,
filter=_filter,
output_format=_output_format,
by_id_path=by_id_path,
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Printing statistics failed.", output)
return output
def set_cache_mode(cache_mode: CacheMode, cache_id: int,
flush=None, shortcut: bool = False):
def set_cache_mode(cache_mode: CacheMode, cache_id: int, flush=None, shortcut: bool = False):
flush_cache = None
if flush is True:
flush_cache = "yes"
@ -278,8 +337,13 @@ def set_cache_mode(cache_mode: CacheMode, cache_id: int,
flush_cache = "no"
output = TestRun.executor.run(
set_cache_mode_cmd(cache_mode=cache_mode.name.lower(), cache_id=str(cache_id),
flush_cache=flush_cache, shortcut=shortcut))
set_cache_mode_cmd(
cache_mode=cache_mode.name.lower(),
cache_id=str(cache_id),
flush_cache=flush_cache,
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Set cache mode command failed.", output)
return output
@ -287,7 +351,8 @@ def set_cache_mode(cache_mode: CacheMode, cache_id: int,
def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
output = TestRun.executor.run(
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut))
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Load IO class command failed.", output)
return output
@ -296,19 +361,28 @@ def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
list_io_classes_cmd(cache_id=str(cache_id),
output_format=_output_format, shortcut=shortcut))
list_io_classes_cmd(cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("List IO class command failed.", output)
return output
def get_param_cutoff(cache_id: int, core_id: int,
output_format: OutputFormat = None, shortcut: bool = False):
def get_param_cutoff(
cache_id: int,
core_id: int,
output_format: OutputFormat = None,
shortcut: bool = False,
):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cutoff_cmd(cache_id=str(cache_id), core_id=str(core_id),
output_format=_output_format, shortcut=shortcut))
get_param_cutoff_cmd(
cache_id=str(cache_id),
core_id=str(core_id),
output_format=_output_format,
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Getting sequential cutoff params failed.", output)
return output
@ -317,37 +391,51 @@ def get_param_cutoff(cache_id: int, core_id: int,
def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cleaning_cmd(cache_id=str(cache_id), output_format=_output_format,
shortcut=shortcut))
get_param_cleaning_cmd(
cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
)
)
if output.exit_code != 0:
raise CmdException("Getting cleaning policy params failed.", output)
return output
def get_param_cleaning_alru(cache_id: int, output_format: OutputFormat = None,
shortcut: bool = False):
def get_param_cleaning_alru(
cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cleaning_alru_cmd(cache_id=str(cache_id), output_format=_output_format,
shortcut=shortcut))
get_param_cleaning_alru_cmd(
cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
)
)
if output.exit_code != 0:
raise CmdException("Getting alru cleaning policy params failed.", output)
return output
def get_param_cleaning_acp(cache_id: int, output_format: OutputFormat = None,
shortcut: bool = False):
def get_param_cleaning_acp(
cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
get_param_cleaning_acp_cmd(cache_id=str(cache_id), output_format=_output_format,
shortcut=shortcut))
get_param_cleaning_acp_cmd(
cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
)
)
if output.exit_code != 0:
raise CmdException("Getting acp cleaning policy params failed.", output)
return output
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
policy: SeqCutOffPolicy = None, promotion_count: int = None):
def set_param_cutoff(
cache_id: int,
core_id: int = None,
threshold: Size = None,
policy: SeqCutOffPolicy = None,
promotion_count: int = None,
shortcut: bool = False,
):
_core_id = None if core_id is None else str(core_id)
_threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte)))
_policy = None if policy is None else policy.name
@ -357,7 +445,8 @@ def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
core_id=_core_id,
threshold=_threshold,
policy=_policy,
promotion_count=_promotion_count
promotion_count=_promotion_count,
shortcut=shortcut,
)
output = TestRun.executor.run(command)
if output.exit_code != 0:
@ -365,34 +454,52 @@ def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
return output
def set_param_cleaning(cache_id: int, policy: CleaningPolicy):
def set_param_cleaning(cache_id: int, policy: CleaningPolicy, shortcut: bool = False):
output = TestRun.executor.run(
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name))
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name, shortcut=shortcut)
)
if output.exit_code != 0:
raise CmdException("Error while setting cleaning policy.", output)
return output
def set_param_cleaning_alru(cache_id: int, wake_up: int = None, staleness_time: int = None,
flush_max_buffers: int = None, activity_threshold: int = None):
def set_param_cleaning_alru(
cache_id: int,
wake_up: int = None,
staleness_time: int = None,
flush_max_buffers: int = None,
activity_threshold: int = None,
shortcut: bool = False,
):
output = TestRun.executor.run(
set_param_cleaning_alru_cmd(
cache_id=cache_id,
wake_up=wake_up,
staleness_time=staleness_time,
flush_max_buffers=flush_max_buffers,
activity_threshold=activity_threshold))
cache_id=str(cache_id),
wake_up=str(wake_up),
staleness_time=str(staleness_time),
flush_max_buffers=str(flush_max_buffers),
activity_threshold=str(activity_threshold),
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Error while setting alru cleaning policy parameters.", output)
return output
def set_param_cleaning_acp(cache_id: int, wake_up: int = None, flush_max_buffers: int = None):
def set_param_cleaning_acp(
cache_id: int,
wake_up: int = None,
flush_max_buffers: int = None,
shortcut: bool = False,
):
output = TestRun.executor.run(
set_param_cleaning_acp_cmd(
cache_id=str(cache_id),
wake_up=str(wake_up) if wake_up is not None else None,
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers else None))
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers else None,
shortcut=shortcut,
)
)
if output.exit_code != 0:
raise CmdException("Error while setting acp cleaning policy parameters.", output)
return output

View File

@ -1,9 +1,10 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from aenum import Enum
from enum import Enum
class OutputFormat(Enum):

View File

@ -1,12 +1,13 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import csv
import io
import json
import re
from datetime import timedelta, datetime
from typing import List
@ -18,7 +19,6 @@ from api.cas.version import CasVersion
from core.test_run_utils import TestRun
from storage_devices.device import Device
from test_utils.output import CmdException
from test_utils.size import parse_unit
class Stats(dict):
@ -26,188 +26,78 @@ class Stats(dict):
return json.dumps(self, default=lambda o: str(o), indent=2)
def parse_stats_unit(unit: str):
    """Normalize a unit token scraped from casadm statistics headers.

    Headers look like "name [unit]"; after splitting on " [" the unit keeps a
    trailing ']'. Strip it, map the known non-size units to canonical strings
    and delegate anything else to parse_unit() for size-unit resolution.
    """
    if unit is None:
        return ""

    # Drop the trailing ']' left over from splitting "name [unit]".
    stripped = re.search(r".*[^\]]", unit).group()

    special = {"s": "s", "%": "%", "Requests": "requests"}
    if stripped in special:
        return special[stripped]
    return parse_unit(stripped)
def get_filter(filter: List[StatsFilter]):
"""Prepare list of statistic sections which should be retrieved and parsed."""
if filter is None or StatsFilter.all in filter:
_filter = [
f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)
]
_filter = [f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)]
else:
_filter = [
f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)
]
_filter = [f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)]
return _filter
def get_statistics(
    cache_id: int,
    core_id: int = None,
    io_class_id: int = None,
    filter: List[StatsFilter] = None,
    percentage_val: bool = False,
):
    """Fetch casadm statistics (CSV) and parse them into a Stats mapping.

    :param cache_id: id of the cache instance to query
    :param core_id: optional core id; when given, per-core stats are fetched
    :param io_class_id: optional io-class id; enables per-io-class stats
    :param filter: list of StatsFilter sections to retrieve; None means all
    :param percentage_val: when True keep only the "[%]" columns as floats,
        otherwise parse absolute-value columns (sizes, times, request counts)
    :return: Stats dict keyed by lower-cased statistic name
    """
    stats = Stats()
    _filter = get_filter(filter)
    # io_class_id given implies per-io-class statistics mode.
    per_io_class = True if io_class_id is not None else False
    # No need to retrieve all stats if user specified only 'conf' flag
    if filter != [StatsFilter.conf]:
        csv_stats = casadm.print_statistics(
            cache_id=cache_id,
            core_id=core_id,
            per_io_class=per_io_class,
            io_class_id=io_class_id,
            filter=_filter,
            output_format=casadm.OutputFormat.csv,
        ).stdout.splitlines()
    if filter is None or StatsFilter.conf in filter or StatsFilter.all in filter:
        # Conf statistics have different unit or may have no unit at all. For parsing
        # convenience they are gathered separately. As this is only configuration stats
        # there is no risk they are divergent.
        conf_stats = casadm.print_statistics(
            cache_id=cache_id,
            core_id=core_id,
            per_io_class=per_io_class,
            io_class_id=io_class_id,
            filter=[StatsFilter.conf],
            output_format=casadm.OutputFormat.csv,
        ).stdout.splitlines()
        # CSV layout: first line is the header row, second line the values.
        stat_keys = conf_stats[0]
        stat_values = conf_stats[1]
        for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
            # Some of configuration stats have no unit
            try:
                stat_name, stat_unit = name.split(" [")
            except ValueError:
                stat_name = name
                stat_unit = None
            stat_name = stat_name.lower()
            # 'dirty for' and 'cache size' stats occurs twice
            if stat_name in stats:
                continue
            stat_unit = parse_stats_unit(stat_unit)
            if isinstance(stat_unit, Unit):
                stats[stat_name] = Size(float(val), stat_unit)
            elif stat_unit == "s":
                stats[stat_name] = timedelta(seconds=int(val))
            elif stat_unit == "":
                # Some of stats without unit can be a number like IDs,
                # some of them can be string like device path
                try:
                    stats[stat_name] = float(val)
                except ValueError:
                    stats[stat_name] = val
    # No need to parse all stats if user specified only 'conf' flag
    if filter == [StatsFilter.conf]:
        return stats
    # Remaining (non-conf) statistics: header row + value row.
    stat_keys = csv_stats[0]
    stat_values = csv_stats[1]
    for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
        # Each absolute-value column has a percentage twin marked "[%]";
        # percentage_val selects which of the two representations is kept.
        if percentage_val and " [%]" in name:
            stats[name.split(" [")[0].lower()] = float(val)
        elif not percentage_val and "[%]" not in name:
            stat_name, stat_unit = name.split(" [")
            stat_unit = parse_stats_unit(stat_unit)
            stat_name = stat_name.lower()
            if isinstance(stat_unit, Unit):
                stats[stat_name] = Size(float(val), stat_unit)
            elif stat_unit == "requests":
                stats[stat_name] = float(val)
            else:
                raise ValueError(f"Invalid unit {stat_unit}")
    return stats
def get_caches(): # This method does not return inactive or detached CAS devices
def get_caches() -> list:
from api.cas.cache import Cache
caches_dict = get_cas_devices_dict()["caches"]
caches_list = []
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
for line in lines:
args = line.split(',')
if args[0] == "cache":
current_cache = Cache(Device(args[2]))
caches_list.append(current_cache)
for cache in caches_dict.values():
caches_list.append(
Cache(
device=(Device(cache["device_path"]) if cache["device_path"] != "-" else None),
cache_id=cache["id"],
)
)
return caches_list
def get_cores(cache_id: int):
def get_cores(cache_id: int) -> list:
from api.cas.core import Core, CoreStatus
cores_list = []
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
is_proper_core_line = False
for line in lines:
args = line.split(',')
if args[0] == "core" and is_proper_core_line:
core_status_str = args[3].lower()
is_valid_status = CoreStatus[core_status_str].value[0] <= 1
if is_valid_status:
cores_list.append(Core(args[2], cache_id))
if args[0] == "cache":
is_proper_core_line = True if int(args[1]) == cache_id else False
return cores_list
cores_dict = get_cas_devices_dict()["cores"].values()
def is_active(core):
return CoreStatus[core["status"].lower()] == CoreStatus.active
return [
Core(core["device_path"], core["cache_id"])
for core in cores_dict
if is_active(core) and core["cache_id"] == cache_id
]
def get_cas_devices_dict():
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split('\n')))
devices = {"core_pool": [], "caches": {}, "cores": {}}
def get_cas_devices_dict() -> dict:
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split("\n")))
devices = {"caches": {}, "cores": {}, "core_pool": {}}
cache_id = -1
core_pool = False
prev_cache_id = -1
for device in device_list:
if device["type"] == "core pool":
core_pool = True
continue
if device["type"] == "cache":
core_pool = False
prev_cache_id = int(device["id"])
devices["caches"].update(
{
int(device["id"]): {
"device": device["disk"],
"status": device["status"],
}
}
)
params = [
("id", int(device["id"])),
("device_path", device["disk"]),
("status", device["status"]),
]
devices["caches"][int(device["id"])] = dict([(key, value) for key, value in params])
cache_id = int(device["id"])
elif device["type"] == "core":
core = {"device": device["disk"], "status": device["status"]}
params = [
("cache_id", cache_id),
("device_path", device["disk"]),
("status", device["status"]),
]
if core_pool:
devices["core_pool"].append(core)
else:
core.update({"cache_id": prev_cache_id})
devices["cores"].update(
{(prev_cache_id, int(device["id"])): core}
params.append(("core_pool", device))
devices["core_pool"][(cache_id, int(device["id"]))] = dict(
[(key, value) for key, value in params]
)
else:
devices["cores"][(cache_id, int(device["id"]))] = dict(
[(key, value) for key, value in params]
)
return devices
@ -215,20 +105,26 @@ def get_flushing_progress(cache_id: int, core_id: int = None):
casadm_output = casadm.list_caches(OutputFormat.csv)
lines = casadm_output.stdout.splitlines()
for line in lines:
line_elements = line.split(',')
if core_id is not None and line_elements[0] == "core" \
and int(line_elements[1]) == core_id \
or core_id is None and line_elements[0] == "cache" \
and int(line_elements[1]) == cache_id:
line_elements = line.split(",")
if (
core_id is not None
and line_elements[0] == "core"
and int(line_elements[1]) == core_id
or core_id is None
and line_elements[0] == "cache"
and int(line_elements[1]) == cache_id
):
try:
flush_line_elements = line_elements[3].split()
flush_percent = flush_line_elements[1][1:]
return float(flush_percent)
except Exception:
break
raise CmdException(f"There is no flushing progress in casadm list output. (cache {cache_id}"
raise CmdException(
f"There is no flushing progress in casadm list output. (cache {cache_id}"
f"{' core ' + str(core_id) if core_id is not None else ''})",
casadm_output)
casadm_output,
)
def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
@ -243,50 +139,53 @@ def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
def get_flush_parameters_alru(cache_id: int):
casadm_output = casadm.get_param_cleaning_alru(cache_id,
casadm.OutputFormat.csv).stdout.splitlines()
casadm_output = casadm.get_param_cleaning_alru(
cache_id, casadm.OutputFormat.csv
).stdout.splitlines()
flush_parameters = FlushParametersAlru()
for line in casadm_output:
if 'max buffers' in line:
flush_parameters.flush_max_buffers = int(line.split(',')[1])
if 'Activity threshold' in line:
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(',')[1]))
if 'Stale buffer time' in line:
flush_parameters.staleness_time = Time(seconds=int(line.split(',')[1]))
if 'Wake up time' in line:
flush_parameters.wake_up_time = Time(seconds=int(line.split(',')[1]))
if "max buffers" in line:
flush_parameters.flush_max_buffers = int(line.split(",")[1])
if "Activity threshold" in line:
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(",")[1]))
if "Stale buffer time" in line:
flush_parameters.staleness_time = Time(seconds=int(line.split(",")[1]))
if "Wake up time" in line:
flush_parameters.wake_up_time = Time(seconds=int(line.split(",")[1]))
return flush_parameters
def get_flush_parameters_acp(cache_id: int):
casadm_output = casadm.get_param_cleaning_acp(cache_id,
casadm.OutputFormat.csv).stdout.splitlines()
casadm_output = casadm.get_param_cleaning_acp(
cache_id, casadm.OutputFormat.csv
).stdout.splitlines()
flush_parameters = FlushParametersAcp()
for line in casadm_output:
if 'max buffers' in line:
flush_parameters.flush_max_buffers = int(line.split(',')[1])
if 'Wake up time' in line:
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(',')[1]))
if "max buffers" in line:
flush_parameters.flush_max_buffers = int(line.split(",")[1])
if "Wake up time" in line:
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(",")[1]))
return flush_parameters
def get_seq_cut_off_parameters(cache_id: int, core_id: int):
casadm_output = casadm.get_param_cutoff(
cache_id, core_id, casadm.OutputFormat.csv).stdout.splitlines()
cache_id, core_id, casadm.OutputFormat.csv
).stdout.splitlines()
seq_cut_off_params = SeqCutOffParameters()
for line in casadm_output:
if 'Sequential cutoff threshold' in line:
seq_cut_off_params.threshold = Size(int(line.split(',')[1]), Unit.KibiByte)
if 'Sequential cutoff policy' in line:
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(',')[1])
if 'Sequential cutoff promotion request count threshold' in line:
seq_cut_off_params.promotion_count = int(line.split(',')[1])
if "Sequential cutoff threshold" in line:
seq_cut_off_params.threshold = Size(int(line.split(",")[1]), Unit.KibiByte)
if "Sequential cutoff policy" in line:
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(",")[1])
if "Sequential cutoff promotion request count threshold" in line:
seq_cut_off_params.promotion_count = int(line.split(",")[1])
return seq_cut_off_params
def get_casadm_version():
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split('\n')
version_str = casadm_output[1].split(',')[-1]
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split("\n")
version_str = casadm_output[1].split(",")[-1]
return CasVersion.from_version_string(version_str)
@ -305,10 +204,12 @@ def get_core_info_by_path(core_disk_path):
output = casadm.list_caches(OutputFormat.csv, by_id_path=True)
reader = csv.DictReader(io.StringIO(output.stdout))
for row in reader:
if row['type'] == "core" and row['disk'] == core_disk_path:
return {"core_id": row['id'],
"core_device": row['disk'],
"status": row['status'],
"exp_obj": row['device']}
if row["type"] == "core" and row["disk"] == core_disk_path:
return {
"core_id": row["id"],
"core_device": row["disk"],
"status": row["status"],
"exp_obj": row["device"],
}
return None

View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -11,88 +12,115 @@ casadm_bin = "casadm"
casctl = "casctl"
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False):
command = f" -A -i {cache_id} -d {core_dev}" if shortcut \
else f" --add-core --cache-id {cache_id} --core-device {core_dev}"
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str = None):
command = f"{casadm_bin} --script --add-core --try-add --cache-id {cache_id} " \
f"--core-device {core_dev}"
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False) -> str:
command = " -A " if shortcut else " --add-core"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -d " if shortcut else " --core-device ") + core_dev
if core_id:
command += f" --core-id {core_id}"
return command
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def script_purge_cache_cmd(cache_id: str):
return f"{casadm_bin} --script --purge-cache --cache-id {cache_id}"
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str = None) -> str:
    """Return the casadm script-mode command adding a core in try-add mode.

    Fix: the core device was appended only when core_id was given (and with
    the wrong switch semantics), while core_id itself was never emitted.
    The core device is mandatory for try-add, so it is added unconditionally;
    the optional core id is emitted with --core-id.
    """
    command = " --script --add-core --try-add"
    command += " --cache-id " + cache_id
    command += " --core-device " + core_dev
    if core_id:
        command += " --core-id " + core_id
    return casadm_bin + command
def script_purge_core_cmd(cache_id: str, core_id: str):
return f"{casadm_bin} --script --purge-core --cache-id {cache_id} --core-id {core_id}"
def script_purge_cache_cmd(cache_id: str) -> str:
    """Return the casadm script-mode command purging a cache instance.

    Fix: the option string must start with a space, otherwise it fuses with
    the binary name ("casadm--script --purge-cache ...").
    """
    command = " --script --purge-cache"
    command += " --cache-id " + cache_id
    return casadm_bin + command
def script_detach_core_cmd(cache_id: str, core_id: str):
return f"{casadm_bin} --script --remove-core --detach --cache-id {cache_id} " \
f"--core-id {core_id}"
def script_purge_core_cmd(cache_id: str, core_id: str) -> str:
    """Return the casadm script-mode command purging a single core.

    Fix: the option string must start with a space, otherwise it fuses with
    the binary name ("casadm--script --purge-core ...").
    """
    command = " --script --purge-core"
    command += " --cache-id " + cache_id
    command += " --core-id " + core_id
    return casadm_bin + command
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False):
command = f"{casadm_bin} --script --remove-core --cache-id {cache_id} --core-id {core_id}"
def script_detach_core_cmd(cache_id: str, core_id: str) -> str:
    """Return the casadm script-mode command detaching a core from its cache.

    Fix: the option string must start with a space, otherwise it fuses with
    the binary name ("casadm--script --remove-core ...").
    """
    command = " --script --remove-core --detach"
    command += " --cache-id " + cache_id
    command += " --core-id " + core_id
    return casadm_bin + command
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False) -> str:
command = "--script --remove-core"
command += " --cache-id " + cache_id
command += " --core-id " + core_id
if no_flush:
command += ' --no-flush'
return command
command += " --no-flush"
return casadm_bin + command
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
command = f" -R -i {cache_id} -j {core_id}" if shortcut \
else f" --remove-core --cache-id {cache_id} --core-id {core_id}"
def remove_core_cmd(
cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
) -> str:
command = " -R " if shortcut else " --remove-core"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -j " if shortcut else " --core-id ") + core_id
if force:
command += " -f" if shortcut else " --force"
return casadm_bin + command
def remove_inactive_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
command = f" --remove-inactive {'-i' if shortcut else '--cache-id'} {cache_id} " \
f"{'-j' if shortcut else '--core-id'} {core_id}"
def remove_inactive_cmd(
cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
) -> str:
command = " --remove-inactive"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -j " if shortcut else " --core-id ") + core_id
if force:
command += " -f" if shortcut else " --force"
return casadm_bin + command
def remove_detached_cmd(core_device: str, shortcut: bool = False):
command = " --remove-detached" + (" -d " if shortcut else " --device ") + core_device
def remove_detached_cmd(core_device: str, shortcut: bool = False) -> str:
command = " --remove-detached"
command += (" -d " if shortcut else " --device ") + core_device
return casadm_bin + command
def help_cmd(shortcut: bool = False):
return casadm_bin + (" -H" if shortcut else " --help")
def help_cmd(shortcut: bool = False) -> str:
command = " -H" if shortcut else " --help"
return casadm_bin + command
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False):
command = (" -Z -i " if shortcut else " --reset-counters --cache-id ") + cache_id
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False) -> str:
command = " -Z" if shortcut else " --reset-counters"
command += (" -i " if shortcut else " --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def flush_cache_cmd(cache_id: str, shortcut: bool = False):
command = (" -F -i " if shortcut else " --flush-cache --cache-id ") + cache_id
def flush_cache_cmd(cache_id: str, shortcut: bool = False) -> str:
command = " -F" if shortcut else " --flush-cache"
command += (" -i " if shortcut else " --cache-id ") + cache_id
return casadm_bin + command
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False):
command = (f" -F -i {cache_id} -j {core_id}" if shortcut
else f" --flush-cache --cache-id {cache_id} --core-id {core_id}")
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False) -> str:
command = " -F" if shortcut else " --flush-cache"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
cache_id: str = None, force: bool = False,
load: bool = False, shortcut: bool = False):
def start_cmd(
cache_dev: str,
cache_mode: str = None,
cache_line_size: str = None,
cache_id: str = None,
force: bool = False,
load: bool = False,
shortcut: bool = False,
) -> str:
command = " -S" if shortcut else " --start-cache"
command += (" -d " if shortcut else " --cache-device ") + cache_dev
if cache_mode is not None:
@ -108,8 +136,13 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = Non
return casadm_bin + command
def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
force: bool = False, shortcut: bool = False):
def standby_init_cmd(
cache_dev: str,
cache_id: str,
cache_line_size: str,
force: bool = False,
shortcut: bool = False,
) -> str:
command = " --standby --init"
command += (" -d " if shortcut else " --cache-device ") + cache_dev
command += (" -i " if shortcut else " --cache-id ") + cache_id
@ -119,56 +152,58 @@ def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
return casadm_bin + command
def standby_load_cmd(cache_dev: str, shortcut: bool = False):
def standby_load_cmd(cache_dev: str, shortcut: bool = False) -> str:
    """Return the casadm command loading a standby cache from cache_dev."""
    device_switch = " -d " if shortcut else " --cache-device "
    return casadm_bin + " --standby --load" + device_switch + cache_dev
def standby_detach_cmd(cache_id: str, shortcut: bool = False):
def standby_detach_cmd(cache_id: str, shortcut: bool = False) -> str:
    """Return the casadm command detaching the drive from a standby cache."""
    id_switch = " -i " if shortcut else " --cache-id "
    return casadm_bin + " --standby --detach" + id_switch + cache_id
def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False):
def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False) -> str:
    """Return the casadm command activating a standby cache on cache_dev."""
    parts = [
        " --standby --activate",
        (" -d " if shortcut else " --cache-device ") + cache_dev,
        (" -i " if shortcut else " --cache-id ") + cache_id,
    ]
    return casadm_bin + "".join(parts)
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
io_class_id: str = None, filter: str = None,
output_format: str = None, by_id_path: bool = True,
shortcut: bool = False):
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
if core_id is not None:
def print_statistics_cmd(
cache_id: str,
core_id: str = None,
io_class_id: str = None,
filter: str = None,
output_format: str = None,
by_id_path: bool = True,
shortcut: bool = False,
) -> str:
command = " -P" if shortcut else " --stats"
command += (" -i " if shortcut else " --cache-id ") + cache_id
if core_id:
command += (" -j " if shortcut else " --core-id ") + core_id
if per_io_class:
command += " -d" if shortcut else " --io-class-id"
if io_class_id is not None:
command += " " + io_class_id
elif io_class_id is not None:
raise Exception("Per io class flag not set but ID given.")
if filter is not None:
if io_class_id:
command += (" -d " if shortcut else " --io-class-id ") + io_class_id
if filter:
command += (" -f " if shortcut else " --filter ") + filter
if output_format is not None:
if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path:
command += (" -b " if shortcut else " --by-id-path ")
command += " -b " if shortcut else " --by-id-path "
return casadm_bin + command
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False):
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False) -> str:
command = " --zero-metadata"
command += (" -d " if shortcut else " --device ") + cache_dev
if force:
command += (" -f" if shortcut else " --force")
command += " -f" if shortcut else " --force"
return casadm_bin + command
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False):
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False) -> str:
command = " -T" if shortcut else " --stop-cache"
command += (" -i " if shortcut else " --cache-id ") + cache_id
if no_data_flush:
@ -176,172 +211,216 @@ def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False)
return casadm_bin + command
def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False):
def list_caches_cmd(
output_format: str = None, by_id_path: bool = True, shortcut: bool = False
) -> str:
command = " -L" if shortcut else " --list-caches"
if output_format == "table" or output_format == "csv":
if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path:
command += (" -b " if shortcut else " --by-id-path ")
command += " -b" if shortcut else " --by-id-path"
return casadm_bin + command
def load_cmd(cache_dev: str, shortcut: bool = False):
return start_cmd(cache_dev, load=True, shortcut=shortcut)
def load_cmd(cache_dev: str, shortcut: bool = False) -> str:
    """Return the casadm command loading an existing cache from cache_dev
    (thin wrapper over start_cmd with load=True)."""
    return start_cmd(cache_dev=cache_dev, load=True, shortcut=shortcut)
def version_cmd(output_format: str = None, shortcut: bool = False):
def version_cmd(output_format: str = None, shortcut: bool = False) -> str:
command = " -V" if shortcut else " --version"
if output_format == "table" or output_format == "csv":
if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def set_cache_mode_cmd(cache_mode: str, cache_id: str,
flush_cache: str = None, shortcut: bool = False):
command = f" -Q -c {cache_mode} -i {cache_id}" if shortcut else \
f" --set-cache-mode --cache-mode {cache_mode} --cache-id {cache_id}"
def set_cache_mode_cmd(
cache_mode: str, cache_id: str, flush_cache: str = None, shortcut: bool = False
) -> str:
command = (" -Q -c" if shortcut else " --set-cache-mode --cache-mode ") + cache_mode
command += (" -i " if shortcut else " --cache-id ") + cache_id
if flush_cache:
command += (" -f " if shortcut else " --flush-cache ") + flush_cache
return casadm_bin + command
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False):
command = f" -C -C -i {cache_id} -f {file}" if shortcut else \
f" --io-class --load-config --cache-id {cache_id} --file {file}"
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False) -> str:
command = " -C -C" if shortcut else " --io-class --load-config"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -f " if shortcut else " --file ") + file
return casadm_bin + command
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False):
command = f" -C -L -i {cache_id} -o {output_format}" if shortcut else \
f" --io-class --list --cache-id {cache_id} --output-format {output_format}"
return casadm_bin + command
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
additional_params: str = None, shortcut: bool = False):
command = f" -G -n {namespace} -i {cache_id}" if shortcut else\
f" --get-param --name {namespace} --cache-id {cache_id}"
if additional_params is not None:
command += additional_params
if output_format is not None:
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False) -> str:
command = " -C -L" if shortcut else " --io-class --list"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def get_param_cutoff_cmd(cache_id: str, core_id: str,
output_format: str = None, shortcut: bool = False):
add_param = (" -j " if shortcut else " --core-id ") + core_id
return _get_param_cmd(namespace="seq-cutoff", cache_id=cache_id, output_format=output_format,
additional_params=add_param, shortcut=shortcut)
def _get_param_cmd(
name: str,
cache_id: str,
output_format: str = None,
shortcut: bool = False,
) -> str:
command = (" -G -n" if shortcut else " --get-param --name ") + name
command += (" -i " if shortcut else " --cache-id ") + cache_id
if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format
return command
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
shortcut: bool = False):
command = f" -X -n {namespace} -i {cache_id}" if shortcut else\
f" --set-param --name {namespace} --cache-id {cache_id}"
command += additional_params
def get_param_cutoff_cmd(
    cache_id: str, core_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return the casadm command reading sequential cut-off parameters.

    Fix: _get_param_cmd() accepts no core_id argument, so forwarding it
    raised TypeError (and would have duplicated the switch with the append
    below). The core id is appended here exactly once.
    """
    command = _get_param_cmd(
        name="seq-cutoff",
        cache_id=cache_id,
        output_format=output_format,
        shortcut=shortcut,
    )
    command += (" -j " if shortcut else " --core-id ") + core_id
    return casadm_bin + command
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None,
policy: str = None, promotion_count: str = None, shortcut: bool = False):
add_params = ""
if core_id is not None:
add_params += (" -j " if shortcut else " --core-id ") + str(core_id)
if threshold is not None:
add_params += (" -t " if shortcut else " --threshold ") + str(threshold)
if policy is not None:
add_params += (" -p " if shortcut else " --policy ") + policy
if promotion_count is not None:
add_params += " --promotion-count " + str(promotion_count)
return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False) -> str:
    """Return the casadm command reading the active cleaning policy."""
    return casadm_bin + _get_param_cmd(
        name="cleaning", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False):
add_params = (" -p " if shortcut else " --policy ") + policy
return _set_param_cmd(namespace="promotion", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def get_param_cleaning_alru_cmd(
    cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return the casadm command reading ALRU cleaning-policy parameters."""
    return casadm_bin + _get_param_cmd(
        name="cleaning-alru", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
def get_param_cleaning_acp_cmd(
    cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return the casadm command reading ACP cleaning-policy parameters."""
    return casadm_bin + _get_param_cmd(
        name="cleaning-acp", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
def _set_param_cmd(name: str, cache_id: str, shortcut: bool = False) -> str:
command = (" X -n" if shortcut else " --set-param --name ") + name
command += (" -i " if shortcut else " --cache-id ") + cache_id
return command
def set_param_cutoff_cmd(
    cache_id: str,
    core_id: str = None,
    threshold: str = None,
    policy: str = None,
    promotion_count: str = None,
    shortcut: bool = False,
) -> str:
    """Return the casadm command setting sequential cut-off parameters.

    All option values are pre-stringified by the caller; each optional
    switch is appended only when its value is truthy. Note: --promotion-count
    has no short form, so it is emitted long even with shortcut=True.
    """
    name = "seq-cutoff"
    command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
    if core_id:
        command += (" -j " if shortcut else " --core-id ") + core_id
    if threshold:
        command += (" -t " if shortcut else " --threshold ") + threshold
    if policy:
        command += (" -p " if shortcut else " --policy ") + policy
    if promotion_count:
        command += " --promotion-count " + promotion_count
    return casadm_bin + command
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
    """Build the casadm command line setting the promotion policy of a cache."""
    policy_flag = " -p " if shortcut else " --policy "
    base = _set_param_cmd(name="promotion", cache_id=cache_id, shortcut=shortcut)
    return casadm_bin + base + policy_flag + policy
def set_param_promotion_nhit_cmd(
    cache_id: str, threshold: str = None, trigger: str = None, shortcut: bool = False
) -> str:
    """Build the casadm command line tuning the nhit promotion policy.

    Only the options whose values are provided (truthy) are appended.
    (Resolves interleaved old/new implementations left over from a refactor.)
    """
    name = "promotion-nhit"
    command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
    if threshold:
        command += (" -t " if shortcut else " --threshold ") + threshold
    if trigger:
        command += (" -o " if shortcut else " --trigger ") + trigger
    return casadm_bin + command
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
    """Build the casadm command line setting the cleaning policy of a cache.

    (Resolves duplicate old/new definitions left over from a refactor; the old
    variant called the removed ``namespace=`` helper signature.)
    """
    name = "cleaning"
    command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
    command += (" -p " if shortcut else " --policy ") + policy
    return casadm_bin + command
def set_param_cleaning_alru_cmd(
    cache_id: str,
    wake_up: str = None,
    staleness_time: str = None,
    flush_max_buffers: str = None,
    activity_threshold: str = None,
    shortcut: bool = False,
) -> str:
    """Build the casadm command line tuning the ALRU cleaning policy.

    Only the options whose values are provided (truthy) are appended.
    (Resolves interleaved old/new implementations left over from a refactor.)
    """
    name = "cleaning-alru"
    command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
    if wake_up:
        command += (" -w " if shortcut else " --wake-up ") + wake_up
    if staleness_time:
        command += (" -s " if shortcut else " --staleness-time ") + staleness_time
    if flush_max_buffers:
        command += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
    if activity_threshold:
        command += (" -t " if shortcut else " --activity-threshold ") + activity_threshold
    return casadm_bin + command


def set_param_cleaning_acp_cmd(
    cache_id: str,
    wake_up: str = None,
    flush_max_buffers: str = None,
    shortcut: bool = False,
) -> str:
    """Build the casadm command line tuning the ACP cleaning policy.

    Only the options whose values are provided (truthy) are appended.
    (Resolves interleaved old/new implementations left over from a refactor.)
    """
    name = "cleaning-acp"
    command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
    if wake_up:
        command += (" -w " if shortcut else " --wake-up ") + wake_up
    if flush_max_buffers:
        command += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
    return casadm_bin + command
def ctl_help(shortcut: bool = False) -> str:
    """Build the casctl command printing usage information.

    :param shortcut: use the short option form when True.
    """
    # Short form is "-h", long form is "--help"; the previous code had them
    # swapped (and its older variant also mis-parenthesized the ternary so the
    # non-shortcut call returned bare " -h" without the casctl prefix).
    command = " -h" if shortcut else " --help"
    return casctl + command
def ctl_start() -> str:
    """Build the casctl start command.

    (Resolves duplicate old/new definitions left over from a refactor.)
    """
    command = " start"
    return casctl + command
def ctl_stop(flush: bool = False) -> str:
    """Build the casctl stop command.

    :param flush: when True, add --flush to write dirty data back before stopping.
    """
    command = " stop"
    if flush:
        command += " --flush"
    return casctl + command
def ctl_init(force: bool = False) -> str:
    """Build the casctl init command.

    :param force: when True, add --force to ignore existing state.
    """
    command = " init"
    if force:
        command += " --force"
    return casctl + command

View File

@ -9,6 +9,8 @@ casadm_help = [
r"Usage: casadm \<command\> \[option\.\.\.\]",
r"Available commands:",
r"-S --start-cache Start new cache instance or load using metadata",
r"--attach-cache Attach cache device",
r"--detach-cache Detach cache device",
r"-T --stop-cache Stop cache instance",
r"-X --set-param Set various runtime parameters",
r"-G --get-param Get various runtime parameters",
@ -29,21 +31,19 @@ casadm_help = [
r"e\.g\.",
r"casadm --start-cache --help",
r"For more information, please refer to manual, Admin Guide \(man casadm\)",
r"or go to support page \<https://open-cas\.github\.io\>\."
r"or go to support page \<https://open-cas\.github\.io\>\.",
]
help_help = [
r"Usage: casadm --help",
r"Print help"
]
help_help = [r"Usage: casadm --help", r"Print help"]
version_help = [
r"Usage: casadm --version \[option\.\.\.\]",
r"Print CAS version",
r"Options that are valid with --version \(-V\) are:"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
ioclass_help = [
r"Usage: casadm --io-class \{--load-config|--list\}",
r"Manage IO classes",
@ -56,7 +56,7 @@ ioclass_help = [
r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]",
r"Options that are valid with --list \(-L\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
flush_cache_help = [
@ -64,7 +64,8 @@ flush_cache_help = [
r"Flush all dirty data from the caching device to core devices",
r"Options that are valid with --flush-cache \(-F\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache instance"
r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache "
r"instance",
]
reset_counters_help = [
@ -73,7 +74,7 @@ reset_counters_help = [
r"Options that are valid with --reset-counters \(-Z\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance. If not specified, statistics are reset for all cores in cache instance\."
r"instance\. If not specified, statistics are reset for all cores in cache instance\.",
]
stats_help = [
@ -82,26 +83,26 @@ stats_help = [
r"Options that are valid with --stats \(-P\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones "
r"pertaining to a specific core. If this option is not given, casadm will display statistics "
r"pertaining to a specific core\. If this option is not given, casadm will display statistics "
r"pertaining to all cores assigned to given cache instance\.",
r"-d --io-class-id \[\<ID\>\] Display per IO class statistics",
r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: "
r"\{all, conf, usage, req, blk, err\}",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
list_help = [
list_caches_help = [
r"Usage: casadm --list-caches \[option\.\.\.\]",
r"List all cache instances and core devices",
r"Options that are valid with --list-caches \(-L\) are:",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
remove_detached_help = [
r"Usage: casadm --remove-detached --device \<DEVICE\>",
r"Remove core device from core pool",
r"Options that are valid with --remove-detached are:",
r"-d --device \<DEVICE\> Path to core device"
r"-d --device \<DEVICE\> Path to core device",
]
remove_core_help = [
@ -111,7 +112,7 @@ remove_core_help = [
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance",
r"-f --force Force active core removal without data flush"
r"-f --force Force active core removal without data flush",
]
add_core_help = [
@ -121,18 +122,17 @@ add_core_help = [
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance",
r"-d --core-device \<DEVICE\> Path to core device"
r"-d --core-device \<DEVICE\> Path to core device",
]
set_cache_mode_help = [
r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]",
r"Set cache mode",
r"Options that are valid with --set-cache-mode \(-Q\) are:",
r"-c --cache-mode \<NAME\> Cache mode. Available cache modes: \{wt|wb|wa|pt|wo\}",
r"-c --cache-mode \<NAME\> Cache mode\. Available cache modes: \{wt|wb|wa|pt|wo\}",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching "
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode"
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode",
]
get_params_help = [
@ -164,7 +164,7 @@ get_params_help = [
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
r"Options that are valid with --get-param \(-G\) --name \(-n\) promotion-nhit are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
set_params_help = [
@ -182,15 +182,15 @@ set_params_help = [
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance",
r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]",
r"-p --policy \<POLICY\> Sequential cutoff policy. Available policies: "
r"-p --policy \<POLICY\> Sequential cutoff policy\. Available policies: "
r"\{always|full|never\}",
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-p --policy \<POLICY\> Cleaning policy type. Available policy types: "
r"-p --policy \<POLICY\> Cleaning policy type\. Available policy types: "
r"\{nop|alru|acp\}",
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-p --policy \<POLICY\> Promotion policy type. Available policy types: "
r"-p --policy \<POLICY\> Promotion policy type\. Available policy types: "
r"\{always|nhit\}",
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
@ -213,15 +213,16 @@ set_params_help = [
r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations "
r"\<0-10000\>\[ms\] \(default: 10 ms\)",
r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning "
r"thread iteration \<1-10000\> \(default: 128\)"
r"thread iteration \<1-10000\> \(default: 128\)",
]
stop_cache_help = [
r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]",
r"Stop cache instance",
r"Options that are valid with --stop-cache \(-T\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)"
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)",
]
start_cache_help = [
@ -238,7 +239,7 @@ start_cache_help = [
r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; "
r"without this parameter Write-Through will be set by default",
r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: "
r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)"
r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)",
]
standby_help = [
@ -246,17 +247,16 @@ standby_help = [
]
zero_metadata_help = [
r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option...\]",
r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option\.\.\.\]]",
r"Clear metadata from caching device",
r"Options that are valid with --zero-metadata are:",
r"-d --device \<DEVICE\> Path to device on which metadata would be cleared",
r"-f --force Ignore potential dirty data on cache device"
r"-f --force Ignore potential dirty data on cache device",
]
unrecognized_stderr = [
r"Unrecognized command -\S+",
]
unrecognized_stdout = [
r"Try \`casadm --help | -H\' for more information\."
]
unrecognized_stdout = [r"Try \`casadm --help | -H\' for more information\."]

View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -18,32 +19,30 @@ start_cache_with_existing_metadata = [
r"Error inserting cache \d+",
r"Old metadata found on device\.",
r"Please load cache metadata using --load option or use --force to",
r" discard on-disk metadata and start fresh cache instance\."
r" discard on-disk metadata and start fresh cache instance\.",
]
start_cache_on_already_used_dev = [
r"Error inserting cache \d+",
r"Cache device \'\/dev\/\S+\' is already used as cache\."
r"Cache device \'\/dev\/\S+\' is already used as cache\.",
]
start_cache_with_existing_id = [
r"Error inserting cache \d+",
r"Cache ID already exists"
r"Cache ID already exists",
]
standby_init_with_existing_filesystem = [
r"A filesystem exists on \S+. Specify the --force option if you wish to add the cache anyway.",
r"Note: this may result in loss of data"
r"Note: this may result in loss of data",
]
error_inserting_cache = [
r"Error inserting cache \d+"
]
error_inserting_cache = [r"Error inserting cache \d+"]
reinitialize_with_force_or_recovery = [
r"Old metadata found on device\.",
r"Please load cache metadata using --load option or use --force to",
r" discard on-disk metadata and start fresh cache instance\."
r" discard on-disk metadata and start fresh cache instance\.",
]
remove_inactive_core_with_remove_command = [
@ -52,40 +51,36 @@ remove_inactive_core_with_remove_command = [
remove_inactive_dirty_core = [
r"The cache contains dirty data assigned to the core\. If you want to ",
r"continue, please use --force option\.\nWarning: the data will be lost"
r"continue, please use --force option\.\nWarning: the data will be lost",
]
stop_cache_incomplete = [
r"Error while removing cache \d+",
r"Cache is in incomplete state - at least one core is inactive"
r"Cache is in incomplete state - at least one core is inactive",
]
stop_cache_errors = [
r"Removed cache \d+ with errors",
r"Error while writing to cache device"
r"Error while writing to cache device",
]
get_stats_ioclass_id_not_configured = [
r"IO class \d+ is not configured\."
]
get_stats_ioclass_id_not_configured = [r"IO class \d+ is not configured\."]
get_stats_ioclass_id_out_of_range = [
r"Invalid IO class id, must be in the range 0-32\."
]
get_stats_ioclass_id_out_of_range = [r"Invalid IO class id, must be in the range 0-32\."]
remove_multilevel_core = [
r"Error while removing core device \d+ from cache instance \d+",
r"Device opens or mount are pending to this cache"
r"Device opens or mount are pending to this cache",
]
add_cached_core = [
r"Error while adding core device to cache instance \d+",
r"Core device \'/dev/\S+\' is already cached\."
r"Core device \'/dev/\S+\' is already cached\.",
]
already_cached_core = [
r"Error while adding core device to cache instance \d+",
r"Device already added as a core"
r"Device already added as a core",
]
remove_mounted_core = [
@ -94,37 +89,31 @@ remove_mounted_core = [
stop_cache_mounted_core = [
r"Error while removing cache \d+",
r"Device opens or mount are pending to this cache"
r"Device opens or mount are pending to this cache",
]
load_and_force = [
r"Use of \'load\' with \'force\', \'cache-id\', \'cache-mode\' or \'cache-line-size\'",
r" simultaneously is forbidden."
r" simultaneously is forbidden.",
]
try_add_core_sector_size_mismatch = [
r"Error while adding core device to cache instance \d+",
r"Cache device logical sector size is greater than core device logical sector size\.",
r"Consider changing logical sector size on current cache device",
r"or try other device with the same logical sector size as core device\."
r"or try other device with the same logical sector size as core device\.",
]
no_caches_running = [
r"No caches running"
]
no_caches_running = [r"No caches running"]
unavailable_device = [
r"Error while opening \'\S+\'exclusively\. This can be due to\n"
r"cache instance running on this device\. In such case please stop the cache and try again\."
]
error_handling = [
r"Error during options handling"
]
error_handling = [r"Error during options handling"]
no_cas_metadata = [
r"Device \'\S+\' does not contain OpenCAS's metadata\."
]
no_cas_metadata = [r"Device \'\S+\' does not contain OpenCAS's metadata\."]
cache_dirty_data = [
r"Cache instance contains dirty data\. Clearing metadata will result in loss of dirty data\.\n"
@ -140,21 +129,16 @@ cache_dirty_shutdown = [
r"Alternatively, if you wish to clear metadata anyway, please use \'--force\' option\."
]
missing_param = [
r"Option \'.+\' is missing"
]
missing_param = [r"Option \'.+\' is missing"]
disallowed_param = [
r"Unrecognized option \S+"
]
disallowed_param = [r"Unrecognized option \S+"]
operation_forbiden_in_standby = [
r"The operation is not permited while the cache is in the standby mode"
]
mutually_exclusive_params_init = [
r"Can\'t use \'load\' and \'init\' options simultaneously\n"
r"Error during options handling"
r"Can\'t use \'load\' and \'init\' options simultaneously\n" r"Error during options handling"
]
mutually_exclusive_params_load = [
@ -166,30 +150,22 @@ activate_with_different_cache_id = [
r"Cache id specified by user and loaded from metadata are different"
]
cache_activated_successfully = [
r"Successfully activated cache instance \d+"
]
cache_activated_successfully = [r"Successfully activated cache instance \d+"]
invalid_core_volume_size = [
r"Core volume size does not match the size stored in cache metadata"
]
invalid_core_volume_size = [r"Core volume size does not match the size stored in cache metadata"]
error_activating_cache = [
r"Error activating cache \d+"
]
error_activating_cache = [r"Error activating cache \d+"]
activate_without_detach = [
r"Cannot open the device exclusively. Make sure to detach cache before activation."
]
cache_line_size_mismatch = [
r"Cache line size mismatch"
]
cache_line_size_mismatch = [r"Cache line size mismatch"]
headerless_io_class_config = [
r'Cannot parse configuration file - unknown column "1"\.\n'
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n'
r'Please consult Admin Guide to check how columns in configuration file should be named\.'
r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
r"Please consult Admin Guide to check how columns in configuration file should be named\."
]
illegal_io_class_config_L2C1 = [
@ -205,9 +181,7 @@ illegal_io_class_config_L2C4 = [
r"Cannot parse configuration file - error in line 2 in column 4 \(Allocation\)\."
]
illegal_io_class_config_L2 = [
r"Cannot parse configuration file - error in line 2\."
]
illegal_io_class_config_L2 = [r"Cannot parse configuration file - error in line 2\."]
double_io_class_config = [
r"Double configuration for IO class id \d+\n"
@ -243,14 +217,13 @@ illegal_io_class_invalid_allocation_number = [
]
malformed_io_class_header = [
r'Cannot parse configuration file - unknown column \"value_template\"\.\n'
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n'
r'Please consult Admin Guide to check how columns in configuration file should be named\.'
r"Cannot parse configuration file - unknown column \"value_template\"\.\n"
r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
r"Please consult Admin Guide to check how columns in configuration file should be named\."
]
unexpected_cls_option = [
r"Option '--cache-line-size \(-x\)' is not allowed"
]
unexpected_cls_option = [r"Option '--cache-line-size \(-x\)' is not allowed"]
def check_stderr_msg(output: Output, expected_messages, negate=False):
    """Check the command's stderr against the expected regex messages.

    With negate=True, verify the messages do NOT appear.
    """
    stderr_text = output.stderr
    return __check_string_msg(stderr_text, expected_messages, negate)
@ -268,7 +241,8 @@ def __check_string_msg(text: str, expected_messages, negate=False):
TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.")
msg_ok = False
elif matches and negate:
TestRun.LOGGER.error(f"Message is incorrect, expected to not find: {msg}\n "
f"actual: {text}.")
TestRun.LOGGER.error(
f"Message is incorrect, expected to not find: {msg}\n " f"actual: {text}."
)
msg_ok = False
return msg_ok

View File

@ -1,17 +1,18 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from datetime import timedelta
from typing import List
from aenum import Enum
from enum import Enum
from api.cas import casadm
from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy
from api.cas.casadm_params import OutputFormat, StatsFilter
from api.cas.casadm_parser import get_statistics, get_seq_cut_off_parameters, get_core_info_by_path
from api.cas.statistics import CoreStats, CoreIoClassStats
from api.cas.casadm_params import StatsFilter
from api.cas.casadm_parser import get_seq_cut_off_parameters, get_core_info_by_path
from api.cas.statistics import CoreStats, IoClassStats
from core.test_run_utils import TestRun
from storage_devices.device import Device
from test_tools import fs_utils, disk_utils
@ -20,9 +21,9 @@ from test_utils.size import Unit, Size
class CoreStatus(Enum):
    """Possible states of a core device as reported by casadm.

    (Resolves diff residue: the stale variant had trailing commas, which made
    the member values one-element tuples instead of ints.)
    """

    empty = 0
    active = 1
    inactive = 2
    detached = 3
@ -51,27 +52,28 @@ class Core(Device):
super().create_filesystem(fs_type, force, blocksize)
self.core_device.filesystem = self.filesystem
def get_io_class_statistics(
    self,
    io_class_id: int,
    stat_filter: List[StatsFilter] = None,
    percentage_val: bool = False,
):
    """Return IO-class statistics for this core's cache.

    :param io_class_id: IO class to query.
    :param stat_filter: optional list of stat sections to fetch (all when None).
    :param percentage_val: report values as percentages when True.
    """
    # NOTE(review): core_id is not forwarded, so this looks like cache-wide
    # per-IO-class stats rather than per-core ones - confirm IoClassStats API.
    return IoClassStats(
        cache_id=self.cache_id,
        filter=stat_filter,
        io_class_id=io_class_id,
        percentage_val=percentage_val,
    )
def get_statistics(
    self, stat_filter: List[StatsFilter] = None, percentage_val: bool = False
) -> CoreStats:
    """Return statistics for this core.

    :param stat_filter: optional list of stat sections to fetch (all when None).
    :param percentage_val: report values as percentages when True.

    (Resolves diff residue: drops the stale dict-based implementation and the
    removed ``get_statistics_flat`` helper, which called the no-longer-imported
    ``casadm_parser.get_statistics`` and would raise NameError.)
    """
    return CoreStats(
        cache_id=self.cache_id,
        core_id=self.core_id,
        filter=stat_filter,
        percentage_val=percentage_val,
    )
def get_status(self):
    """Return this core's CoreStatus, parsed from casadm core info output."""
    status_name = self.__get_core_info()["status"].lower()
    return CoreStatus[status_name]
@ -106,31 +108,30 @@ class Core(Device):
return casadm.reset_counters(self.cache_id, self.core_id)
def flush_core(self):
    """Flush this core's dirty data to its backing device and verify none remains.

    (Resolves diff residue: keeps the renamed ``casadm.flush_core`` call over the
    removed ``casadm.flush``.)
    """
    casadm.flush_core(self.cache_id, self.core_id)
    sync()
    # NOTE: assert is stripped under `python -O`; acceptable in test tooling.
    assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
def purge_core(self):
    """Purge this core via casadm, then sync.

    NOTE(review): purge presumably discards cached (including dirty) data
    rather than flushing it - confirm against casadm documentation.
    """
    casadm.purge_core(self.cache_id, self.core_id)
    # Settle outstanding I/O before the caller proceeds.
    sync()
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
    """Apply all sequential cutoff parameters (threshold, policy, promotion count)."""
    return casadm.set_param_cutoff(
        self.cache_id,
        self.core_id,
        seq_cutoff_param.threshold,
        seq_cutoff_param.policy,
        seq_cutoff_param.promotion_count,
    )
def set_seq_cutoff_threshold(self, threshold: Size):
    """Set only the sequential cutoff activation threshold for this core."""
    return casadm.set_param_cutoff(self.cache_id, self.core_id, threshold=threshold)
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
    """Set only the sequential cutoff policy for this core."""
    return casadm.set_param_cutoff(self.cache_id, self.core_id, policy=policy)
def set_seq_cutoff_promotion_count(self, promotion_count: int):
    """Set only the sequential cutoff promotion request count for this core."""
    return casadm.set_param_cutoff(self.cache_id, self.core_id, promotion_count=promotion_count)
def check_if_is_present_in_os(self, should_be_visible=True):
device_in_system_message = "CAS device exists in OS."

View File

@ -5,35 +5,36 @@
import re
from test_utils.dmesg import get_dmesg
from test_utils.size import Size, Unit
def get_metadata_size_on_device(cache_name: str) -> Size:
    """Return the metadata size reported in dmesg for the given cache.

    Scans dmesg newest-first so the most recent start of the named cache wins
    when it appears multiple times.

    :raises ValueError: when no metadata-size entry for the cache is found.
    """
    dmesg_reversed = list(reversed(get_dmesg().split("\n")))
    cache_dmesg = "\n".join(line for line in dmesg_reversed if cache_name in line)
    try:
        return _get_metadata_info(dmesg=cache_dmesg, section_name="Metadata size on device")
    except ValueError as err:
        # Chain the cause so the original parse failure stays visible.
        raise ValueError("Can't find the metadata size in dmesg output") from err
def _get_metadata_info(dmesg, section_name) -> Size:
    """Parse '<number> B|kiB' from the first dmesg line containing section_name.

    :raises ValueError: when no line mentions the section.
    """
    for line in dmesg.split("\n"):
        if section_name in line:
            size, unit = re.search(r"\d+ (B|kiB)", line).group().split()
            # `size` is already a pure digit string - the previous second
            # re.search over it was redundant.
            return Size(int(size), Unit.KibiByte if unit == "kiB" else Unit.Byte)
    raise ValueError(f'"{section_name}" entry doesn\'t exist in the given dmesg output')
def get_md_section_size(section_name, dmesg) -> Size:
    """Return the size of the given metadata section parsed from dmesg output.

    (Resolves duplicate def-line residue from a refactor.)
    """
    section_name = section_name.strip()
    section_name += " size"
    return _get_metadata_info(dmesg, section_name)
def get_md_section_offset(section_name, dmesg) -> Size:
    """Return the offset of the given metadata section parsed from dmesg output.

    (Resolves duplicate def-line residue from a refactor.)
    """
    section_name = section_name.strip()
    section_name += " offset"
    return _get_metadata_info(dmesg, section_name)

View File

@ -1,5 +1,6 @@
#
# Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
@ -62,25 +63,21 @@ def get_current_commit_hash(from_dut: bool = False):
executor = TestRun.executor if from_dut else LocalExecutor()
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
return executor.run(
f"cd {repo_path} &&"
f'git show HEAD -s --pretty=format:"%H"').stdout
return executor.run(f"cd {repo_path} &&" f'git show HEAD -s --pretty=format:"%H"').stdout
def get_current_commit_message():
    """Return the full commit message (%B) of HEAD in the local test repository."""
    local_executor = LocalExecutor()
    return local_executor.run(
        f"cd {TestRun.usr.repo_dir} &&" f'git show HEAD -s --pretty=format:"%B"'
    ).stdout
def get_commit_hash(cas_version, from_dut: bool = False):
executor = TestRun.executor if from_dut else LocalExecutor()
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
output = executor.run(
f"cd {repo_path} && "
f"git rev-parse {cas_version}")
output = executor.run(f"cd {repo_path} && " f"git rev-parse {cas_version}")
if output.exit_code != 0:
raise CmdException(f"Failed to resolve '{cas_version}' to commit hash", output)
@ -104,13 +101,13 @@ def checkout_cas_version(cas_version):
TestRun.LOGGER.info(f"Checkout CAS to {commit_hash}")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
f"git checkout --force {commit_hash}")
f"cd {TestRun.usr.working_dir} && " f"git checkout --force {commit_hash}"
)
if output.exit_code != 0:
raise CmdException(f"Failed to checkout to {commit_hash}", output)
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
f"git submodule update --force")
f"cd {TestRun.usr.working_dir} && " f"git submodule update --force"
)
if output.exit_code != 0:
raise CmdException(f"Failed to update submodules", output)

View File

@ -1,256 +1,218 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
# Order in arrays is important!
config_stats_cache = [
"cache id", "cache size", "cache device", "exported object", "core devices",
"inactive core devices", "write policy", "cleaning policy", "promotion policy",
"cache line size", "metadata memory footprint", "dirty for", "status"
]
config_stats_core = [
"core id", "core device", "exported object", "core size", "dirty for", "status",
"seq cutoff threshold", "seq cutoff policy"
]
config_stats_ioclass = ["io class id", "io class name", "eviction priority", "max size"]
usage_stats = ["occupancy", "free", "clean", "dirty"]
usage_stats_ioclass = ["occupancy", "clean", "dirty"]
inactive_usage_stats = ["inactive occupancy", "inactive clean", "inactive dirty"]
request_stats = [
"read hits", "read partial misses", "read full misses", "read total",
"write hits", "write partial misses", "write full misses", "write total",
"pass-through reads", "pass-through writes",
"serviced requests", "total requests"
]
block_stats_cache = [
"reads from core(s)", "writes to core(s)", "total to/from core(s)",
"reads from cache", "writes to cache", "total to/from cache",
"reads from exported object(s)", "writes to exported object(s)",
"total to/from exported object(s)"
]
block_stats_core = [stat.replace("(s)", "") for stat in block_stats_cache]
error_stats = [
"cache read errors", "cache write errors", "cache total errors",
"core read errors", "core write errors", "core total errors",
"total errors"
]
import csv
from datetime import timedelta
from typing import List
from api.cas import casadm
from api.cas.casadm_params import StatsFilter
from test_utils.size import Size, Unit
class CacheStats:
stats_list = [
"config_stats",
"usage_stats",
"inactive_usage_stats",
"request_stats",
"block_stats",
"error_stats",
]
def __init__(
self,
cache_id: int,
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
def __init__(self, stats):
try:
self.config_stats = CacheConfigStats(
*[stats[stat] for stat in config_stats_cache]
)
except KeyError:
pass
try:
self.usage_stats = UsageStats(
*[stats[stat] for stat in usage_stats]
)
except KeyError:
pass
try:
self.inactive_usage_stats = InactiveUsageStats(
*[stats[stat] for stat in inactive_usage_stats]
)
except KeyError:
pass
try:
self.request_stats = RequestStats(
*[stats[stat] for stat in request_stats]
)
except KeyError:
pass
try:
self.block_stats = BlockStats(
*[stats[stat] for stat in block_stats_cache]
)
except KeyError:
pass
try:
self.error_stats = ErrorStats(
*[stats[stat] for stat in error_stats]
)
except KeyError:
pass
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
StatsFilter.err,
]
else:
filters = filter
csv_stats = casadm.print_statistics(
cache_id=cache_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache:
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
case StatsFilter.conf:
self.config_stats = CacheConfigStats(stats_dict)
case StatsFilter.usage:
self.usage_stats = UsageStats(stats_dict, percentage_val)
case StatsFilter.req:
self.request_stats = RequestStats(stats_dict, percentage_val)
case StatsFilter.blk:
self.block_stats_cache = BlockStats(stats_dict, percentage_val)
case StatsFilter.err:
self.error_stats = ErrorStats(stats_dict, percentage_val)
def __str__(self):
status = ""
for stats_item in self.stats_list:
current_stat = getattr(self, stats_item, None)
if current_stat:
status += f"--- Cache {current_stat}"
return status
# stats_list contains all Class.__str__ methods initialized in CacheStats
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
return "\n".join(stats_list)
def __eq__(self, other):
if not other:
return False
for stats_item in self.stats_list:
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
return False
return True
# check if all initialized variable in self(CacheStats) match other(CacheStats)
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
getattr(other, stats_item) for stats_item in other.__dict__
]
class CoreStats:
stats_list = [
"config_stats",
"usage_stats",
"request_stats",
"block_stats",
"error_stats",
]
def __init__(
self,
cache_id: int,
core_id: int,
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
def __init__(self, stats):
try:
self.config_stats = CoreConfigStats(
*[stats[stat] for stat in config_stats_core]
)
except KeyError:
pass
try:
self.usage_stats = UsageStats(
*[stats[stat] for stat in usage_stats]
)
except KeyError:
pass
try:
self.request_stats = RequestStats(
*[stats[stat] for stat in request_stats]
)
except KeyError:
pass
try:
self.block_stats = BlockStats(
*[stats[stat] for stat in block_stats_core]
)
except KeyError:
pass
try:
self.error_stats = ErrorStats(
*[stats[stat] for stat in error_stats]
)
except KeyError:
pass
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
StatsFilter.err,
]
else:
filters = filter
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache:
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
case StatsFilter.conf:
self.config_stats = CoreConfigStats(stats_dict)
case StatsFilter.usage:
self.usage_stats = UsageStats(stats_dict, percentage_val)
case StatsFilter.req:
self.request_stats = RequestStats(stats_dict, percentage_val)
case StatsFilter.blk:
self.block_stats_cache = BlockStats(stats_dict, percentage_val)
case StatsFilter.err:
self.error_stats = ErrorStats(stats_dict, percentage_val)
def __str__(self):
status = ""
for stats_item in self.stats_list:
current_stat = getattr(self, stats_item, None)
if current_stat:
status += f"--- Core {current_stat}"
return status
# stats_list contains all Class.__str__ methods initialized in CacheStats
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
return "\n".join(stats_list)
def __eq__(self, other):
if not other:
return False
for stats_item in self.stats_list:
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
return False
return True
# check if all initialized variable in self(CacheStats) match other(CacheStats)
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
getattr(other, stats_item) for stats_item in other.__dict__
]
class IoClassStats:
stats_list = [
"config_stats",
"usage_stats",
"request_stats",
"block_stats",
]
def __init__(self, stats, block_stats_list):
try:
self.config_stats = IoClassConfigStats(
*[stats[stat] for stat in config_stats_ioclass]
)
except KeyError:
pass
try:
self.usage_stats = IoClassUsageStats(
*[stats[stat] for stat in usage_stats_ioclass]
)
except KeyError:
pass
try:
self.request_stats = RequestStats(
*[stats[stat] for stat in request_stats]
)
except KeyError:
pass
try:
self.block_stats = BlockStats(
*[stats[stat] for stat in block_stats_list]
)
except KeyError:
pass
def __str__(self):
status = ""
for stats_item in self.stats_list:
current_stat = getattr(self, stats_item, None)
if current_stat:
status += f"--- IO class {current_stat}"
return status
# stats_list contains all Class.__str__ methods initialized in CacheStats
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
return "\n".join(stats_list)
def __init__(
self,
cache_id: int,
io_class_id: int,
core_id: int = None,
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
]
else:
filters = filter
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
io_class_id=io_class_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache:
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
case StatsFilter.conf:
self.config_stats = IoClassConfigStats(stats_dict, percentage_val)
case StatsFilter.usage:
self.usage_stats = IoClassUsageStats(stats_dict, percentage_val)
case StatsFilter.req:
self.request_stats = RequestStats(stats_dict, percentage_val)
case StatsFilter.blk:
self.block_stats_cache = BlockStats(stats_dict, percentage_val)
def __eq__(self, other):
if not other:
return False
for stats_item in self.stats_list:
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
return False
return True
class CacheIoClassStats(IoClassStats):
def __init__(self, stats):
super().__init__(stats, block_stats_cache)
class CoreIoClassStats(IoClassStats):
def __init__(self, stats):
super().__init__(stats, block_stats_core)
# check if all initialized variable in self(CacheStats) match other(CacheStats)
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
getattr(other, stats_item) for stats_item in other.__dict__
]
class CacheConfigStats:
def __init__(
self,
cache_id,
cache_size,
cache_dev,
exp_obj,
core_dev,
inactive_core_dev,
write_policy,
cleaning_policy,
promotion_policy,
cache_line_size,
metadata_memory_footprint,
dirty_for,
status,
):
self.cache_id = cache_id
self.cache_size = cache_size
self.cache_dev = cache_dev
self.exp_obj = exp_obj
self.core_dev = core_dev
self.inactive_core_dev = inactive_core_dev
self.write_policy = write_policy
self.cleaning_policy = cleaning_policy
self.promotion_policy = promotion_policy
self.cache_line_size = cache_line_size
self.metadata_memory_footprint = metadata_memory_footprint
self.dirty_for = dirty_for
self.status = status
def __init__(self, stats_dict):
self.cache_id = stats_dict["Cache Id"]
self.cache_size = parse_value(
value=stats_dict["Cache Size [4KiB Blocks]"], unit_type="[4KiB Blocks]"
)
self.cache_dev = stats_dict["Cache Device"]
self.exp_obj = stats_dict["Exported Object"]
self.core_dev = stats_dict["Core Devices"]
self.inactive_core_devices = stats_dict["Inactive Core Devices"]
self.write_policy = stats_dict["Write Policy"]
self.cleaning_policy = stats_dict["Cleaning Policy"]
self.promotion_policy = stats_dict["Promotion Policy"]
self.cache_line_size = parse_value(
value=stats_dict["Cache line size [KiB]"], unit_type="[KiB]"
)
self.metadata_memory_footprint = parse_value(
value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type="[MiB]"
)
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
self.status = stats_dict["Status"]
def __str__(self):
return (
@ -260,10 +222,10 @@ class CacheConfigStats:
f"Cache device: {self.cache_dev}\n"
f"Exported object: {self.exp_obj}\n"
f"Core devices: {self.core_dev}\n"
f"Inactive core devices: {self.inactive_core_dev}\n"
f"Write policy: {self.write_policy}\n"
f"Cleaning policy: {self.cleaning_policy}\n"
f"Promotion policy: {self.promotion_policy}\n"
f"Inactive Core Devices: {self.inactive_core_devices}\n"
f"Write Policy: {self.write_policy}\n"
f"Cleaning Policy: {self.cleaning_policy}\n"
f"Promotion Policy: {self.promotion_policy}\n"
f"Cache line size: {self.cache_line_size}\n"
f"Metadata memory footprint: {self.metadata_memory_footprint}\n"
f"Dirty for: {self.dirty_for}\n"
@ -279,7 +241,7 @@ class CacheConfigStats:
and self.cache_dev == other.cache_dev
and self.exp_obj == other.exp_obj
and self.core_dev == other.core_dev
and self.inactive_core_dev == other.inactive_core_dev
and self.inactive_core_devices == other.inactive_core_devices
and self.write_policy == other.write_policy
and self.cleaning_policy == other.cleaning_policy
and self.promotion_policy == other.promotion_policy
@ -291,25 +253,19 @@ class CacheConfigStats:
class CoreConfigStats:
def __init__(
self,
core_id,
core_dev,
exp_obj,
core_size,
dirty_for,
status,
seq_cutoff_threshold,
seq_cutoff_policy,
):
self.core_id = core_id
self.core_dev = core_dev
self.exp_obj = exp_obj
self.core_size = core_size
self.dirty_for = dirty_for
self.status = status
self.seq_cutoff_threshold = seq_cutoff_threshold
self.seq_cutoff_policy = seq_cutoff_policy
def __init__(self, stats_dict):
self.core_id = stats_dict["Core Id"]
self.core_dev = stats_dict["Core Device"]
self.exp_obj = stats_dict["Exported Object"]
self.core_size = parse_value(
value=stats_dict["Core Size [4KiB Blocks]"], unit_type=" [4KiB Blocks]"
)
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
self.status = stats_dict["Status"]
self.seq_cutoff_threshold = parse_value(
value=stats_dict["Seq cutoff threshold [KiB]"], unit_type="[KiB]"
)
self.seq_cutoff_policy = stats_dict["Seq cutoff policy"]
def __str__(self):
return (
@ -340,13 +296,11 @@ class CoreConfigStats:
class IoClassConfigStats:
def __init__(
self, io_class_id, io_class_name, eviction_priority, selective_allocation
):
self.io_class_id = io_class_id
self.io_class_name = io_class_name
self.eviction_priority = eviction_priority
self.selective_allocation = selective_allocation
def __init__(self, stats_dict):
self.io_class_id = stats_dict["IO class ID"]
self.io_class_name = stats_dict["IO class name"]
self.eviction_priority = stats_dict["Eviction priority"]
self.max_size = stats_dict["Max size"]
def __str__(self):
return (
@ -354,7 +308,7 @@ class IoClassConfigStats:
f"IO class ID: {self.io_class_id}\n"
f"IO class name: {self.io_class_name}\n"
f"Eviction priority: {self.eviction_priority}\n"
f"Selective allocation: {self.selective_allocation}\n"
f"Max size: {self.max_size}\n"
)
def __eq__(self, other):
@ -364,16 +318,17 @@ class IoClassConfigStats:
self.io_class_id == other.io_class_id
and self.io_class_name == other.io_class_name
and self.eviction_priority == other.eviction_priority
and self.selective_allocation == other.selective_allocation
and self.max_size == other.max_size
)
class UsageStats:
def __init__(self, occupancy, free, clean, dirty):
self.occupancy = occupancy
self.free = free
self.clean = clean
self.dirty = dirty
def __init__(self, stats_dict, percentage_val):
unit = "[%]" if percentage_val else "[4KiB Blocks]"
self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
self.free = parse_value(value=stats_dict[f"Free {unit}"], unit_type=unit)
self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
def __str__(self):
return (
@ -405,7 +360,7 @@ class UsageStats:
self.occupancy + other.occupancy,
self.free + other.free,
self.clean + other.clean,
self.dirty + other.dirty
self.dirty + other.dirty,
)
def __iadd__(self, other):
@ -417,10 +372,11 @@ class UsageStats:
class IoClassUsageStats:
def __init__(self, occupancy, clean, dirty):
self.occupancy = occupancy
self.clean = clean
self.dirty = dirty
def __init__(self, stats_dict, percentage_val):
unit = "[%]" if percentage_val else "[4KiB Blocks]"
self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
def __str__(self):
return (
@ -449,7 +405,7 @@ class IoClassUsageStats:
return UsageStats(
self.occupancy + other.occupancy,
self.clean + other.clean,
self.dirty + other.dirty
self.dirty + other.dirty,
)
def __iadd__(self, other):
@ -484,31 +440,26 @@ class InactiveUsageStats:
class RequestStats:
def __init__(
self,
read_hits,
read_part_misses,
read_full_misses,
read_total,
write_hits,
write_part_misses,
write_full_misses,
write_total,
pass_through_reads,
pass_through_writes,
requests_serviced,
requests_total,
):
def __init__(self, stats_dict, percentage_val):
unit = "[%]" if percentage_val else "[Requests]"
self.read = RequestStatsChunk(
read_hits, read_part_misses, read_full_misses, read_total
stats_dict=stats_dict, percentage_val=percentage_val, operation="Read"
)
self.write = RequestStatsChunk(
write_hits, write_part_misses, write_full_misses, write_total
stats_dict=stats_dict, percentage_val=percentage_val, operation="Write"
)
self.pass_through_reads = parse_value(
value=stats_dict[f"Pass-Through reads {unit}"], unit_type=unit
)
self.pass_through_writes = parse_value(
value=stats_dict[f"Pass-Through writes {unit}"], unit_type=unit
)
self.requests_serviced = parse_value(
value=stats_dict[f"Serviced requests {unit}"], unit_type=unit
)
self.requests_total = parse_value(
value=stats_dict[f"Total requests {unit}"], unit_type=unit
)
self.pass_through_reads = pass_through_reads
self.pass_through_writes = pass_through_writes
self.requests_serviced = requests_serviced
self.requests_total = requests_total
def __str__(self):
return (
@ -535,11 +486,16 @@ class RequestStats:
class RequestStatsChunk:
def __init__(self, hits, part_misses, full_misses, total):
self.hits = hits
self.part_misses = part_misses
self.full_misses = full_misses
self.total = total
def __init__(self, stats_dict, percentage_val, operation: str):
unit = "[%]" if percentage_val else "[Requests]"
self.hits = parse_value(value=stats_dict[f"{operation} hits {unit}"], unit_type=unit)
self.part_misses = parse_value(
value=stats_dict[f"{operation} partial misses {unit}"], unit_type=unit
)
self.full_misses = parse_value(
value=stats_dict[f"{operation} full misses {unit}"], unit_type=unit
)
self.total = parse_value(value=stats_dict[f"{operation} total {unit}"], unit_type=unit)
def __str__(self):
return (
@ -561,21 +517,18 @@ class RequestStatsChunk:
class BlockStats:
def __init__(
self,
core_reads,
core_writes,
core_total,
cache_reads,
cache_writes,
cache_total,
exp_obj_reads,
exp_obj_writes,
exp_obj_total,
):
self.core = BasicStatsChunk(core_reads, core_writes, core_total)
self.cache = BasicStatsChunk(cache_reads, cache_writes, cache_total)
self.exp_obj = BasicStatsChunk(exp_obj_reads, exp_obj_writes, exp_obj_total)
def __init__(self, stats_dict, percentage_val):
self.core = BasicStatsChunk(
stats_dict=stats_dict, percentage_val=percentage_val, device="core"
)
self.cache = BasicStatsChunk(
stats_dict=stats_dict, percentage_val=percentage_val, device="cache"
)
self.exp_obj = BasicStatsChunk(
stats_dict=stats_dict,
percentage_val=percentage_val,
device="exported object",
)
def __str__(self):
return (
@ -589,30 +542,20 @@ class BlockStats:
if not other:
return False
return (
self.core == other.core
and self.cache == other.cache
and self.exp_obj == other.exp_obj
self.core == other.core and self.cache == other.cache and self.exp_obj == other.exp_obj
)
class ErrorStats:
def __init__(
self,
cache_read_errors,
cache_write_errors,
cache_total_errors,
core_read_errors,
core_write_errors,
core_total_errors,
total_errors,
):
self.cache = BasicStatsChunk(
cache_read_errors, cache_write_errors, cache_total_errors
def __init__(self, stats_dict, percentage_val):
unit = "[%]" if percentage_val else "[Requests]"
self.cache = BasicStatsChunkError(
stats_dict=stats_dict, percentage_val=percentage_val, device="Cache"
)
self.core = BasicStatsChunk(
core_read_errors, core_write_errors, core_total_errors
self.core = BasicStatsChunkError(
stats_dict=stats_dict, percentage_val=percentage_val, device="Core"
)
self.total_errors = total_errors
self.total_errors = parse_value(value=stats_dict[f"Total errors {unit}"], unit_type=unit)
def __str__(self):
return (
@ -633,10 +576,11 @@ class ErrorStats:
class BasicStatsChunk:
    """Reads/writes/total triple for one device in the block statistics section."""

    def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
        """Parse block counters for one device ("core", "cache", "exported object").

        :param stats_dict: mapping of CSV header names to raw string values.
        :param percentage_val: when True values are percentages, otherwise
            counts of 4KiB blocks.
        :param device: device name as it appears in the CSV column headers.
        """
        unit = "[%]" if percentage_val else "[4KiB Blocks]"
        self.reads = parse_value(value=stats_dict[f"Reads from {device} {unit}"], unit_type=unit)
        self.writes = parse_value(value=stats_dict[f"Writes to {device} {unit}"], unit_type=unit)
        self.total = parse_value(value=stats_dict[f"Total to/from {device} {unit}"], unit_type=unit)

    def __str__(self):
        return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"

    def __eq__(self, other):
        if not other:
            return False
        return (
            self.reads == other.reads and self.writes == other.writes and self.total == other.total
        )
class BasicStatsChunkError:
    """Read/write/total error counters for a single device (cache or core)."""

    def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
        # Error counters are reported either as percentages or as request counts.
        unit = "[%]" if percentage_val else "[Requests]"
        for attr, column in (
            ("reads", f"{device} read errors {unit}"),
            ("writes", f"{device} write errors {unit}"),
            ("total", f"{device} total errors {unit}"),
        ):
            setattr(self, attr, parse_value(value=stats_dict[column], unit_type=unit))

    def __str__(self):
        return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"

    def __eq__(self, other):
        # A falsy `other` (e.g. None) never compares equal.
        if not other:
            return False
        return (self.reads, self.writes, self.total) == (other.reads, other.writes, other.total)
def parse_value(value: str, unit_type: str) -> int | float | Size | timedelta | str:
match unit_type:
case "[Requests]":
stat_unit = int(value)
case "[%]":
stat_unit = float(value)
case "[4KiB Blocks]":
stat_unit = Size(float(value), Unit.Blocks4096)
case "[MiB]":
stat_unit = Size(float(value), Unit.MebiByte)
case "[KiB]":
stat_unit = Size(float(value), Unit.KibiByte)
case "[GiB]":
stat_unit = Size(float(value), Unit.GibiByte)
case "[s]":
stat_unit = timedelta(seconds=float(value))
case _:
stat_unit = value
return stat_unit