test-api: update cas api

Signed-off-by: Kamil Gierszewski <kamil.gierszewski@huawei.com>
Kamil Gierszewski 2024-08-08 03:11:21 +02:00
parent 5dccbc3978
commit d48e9fc80d
14 changed files with 1180 additions and 1110 deletions


@ -1,28 +1,36 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
from api.cas.casadm_parser import * from api.cas.casadm_parser import *
from api.cas.cli import * from api.cas.dmesg import get_metadata_size_on_device
from api.cas.statistics import CacheStats, CacheIoClassStats from api.cas.statistics import CacheStats, IoClassStats
from test_utils.os_utils import * from test_utils.os_utils import *
from test_utils.output import Output
class Cache: class Cache:
def __init__(self, device: Device): def __init__(self, device: Device, cache_id: int = None):
self.cache_device = device self.cache_device = device
self.cache_id = int(self.__get_cache_id()) self.cache_id = cache_id if cache_id else self.__get_cache_id()
self.__cache_line_size = None self.__cache_line_size = None
self.__metadata_size = None self.metadata_size_on_disk = self.get_metadata_size_on_disk()
def __get_cache_id(self): def __get_cache_id(self) -> int:
cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.get_device_id()}" device_path = self.__get_cache_device_path()
output = TestRun.executor.run(cmd)
if output.exit_code == 0 and output.stdout.strip(): caches_dict = get_cas_devices_dict()["caches"]
return output.stdout.split()[1]
else: for cache in caches_dict.values():
raise Exception(f"There is no cache started on {self.cache_device.get_device_id()}.") if cache["device_path"] == device_path:
return int(cache["id"])
raise Exception(f"There is no cache started on {device_path}")
def __get_cache_device_path(self) -> str:
return self.cache_device.path if self.cache_device is not None else "-"
def get_core_devices(self): def get_core_devices(self):
return get_cores(self.cache_id) return get_cores(self.cache_id)
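A minimal usage sketch of the reworked constructor (hypothetical test snippet; cache_dev stands for the Device of an already started cache):

    # cache id resolved from casadm list output via get_cas_devices_dict()
    cache = Cache(device=cache_dev)
    # or supplied explicitly when the id is already known, skipping the lookup
    cache = Cache(device=cache_dev, cache_id=1)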
@ -39,11 +47,13 @@ class Cache:
cp = stats.config_stats.cleaning_policy cp = stats.config_stats.cleaning_policy
return CleaningPolicy[cp] return CleaningPolicy[cp]
def get_metadata_size(self): def get_metadata_size_in_ram(self) -> Size:
if self.__metadata_size is None:
stats = self.get_statistics() stats = self.get_statistics()
self.__metadata_size = stats.config_stats.metadata_memory_footprint return stats.config_stats.metadata_memory_footprint
return self.__metadata_size
def get_metadata_size_on_disk(self) -> Size:
cache_name = f"cache{self.cache_id}"
return get_metadata_size_on_device(cache_name=cache_name)
def get_occupancy(self): def get_occupancy(self):
return self.get_statistics().usage_stats.occupancy return self.get_statistics().usage_stats.occupancy
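With the split getters, a test can report the RAM footprint from statistics next to the on-disk size parsed from dmesg; a hypothetical sketch:

    ram_size = cache.get_metadata_size_in_ram()    # from config statistics
    disk_size = cache.get_metadata_size_on_disk()  # from dmesg, for "cache<id>"
    TestRun.LOGGER.info(f"Metadata: {ram_size} in RAM, {disk_size} on disk")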
@ -80,32 +90,34 @@ class Cache:
# Casadm methods: # Casadm methods:
def get_io_class_statistics(self, def get_statistics(
self,
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False,
) -> CacheStats:
return CacheStats(
cache_id=self.cache_id,
filter=stat_filter,
percentage_val=percentage_val,
)
def get_io_class_statistics(
self,
io_class_id: int, io_class_id: int,
stat_filter: List[StatsFilter] = None, stat_filter: List[StatsFilter] = None,
percentage_val: bool = False): percentage_val: bool = False,
stats = get_statistics(self.cache_id, None, io_class_id, ):
stat_filter, percentage_val) return IoClassStats(
return CacheIoClassStats(stats) cache_id=self.cache_id,
filter=stat_filter,
io_class_id=io_class_id,
percentage_val=percentage_val,
)
def get_statistics(self, def flush_cache(self) -> Output:
stat_filter: List[StatsFilter] = None, cmd_output = casadm.flush_cache(cache_id=self.cache_id)
percentage_val: bool = False):
stats = get_statistics(self.cache_id, None, None,
stat_filter, percentage_val)
return CacheStats(stats)
def get_statistics_flat(self,
io_class_id: int = None,
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False):
return get_statistics(self.cache_id, None, io_class_id,
stat_filter, percentage_val)
def flush_cache(self):
casadm.flush(cache_id=self.cache_id)
sync() sync()
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0 return cmd_output
def purge_cache(self): def purge_cache(self):
casadm.purge_cache(cache_id=self.cache_id) casadm.purge_cache(cache_id=self.cache_id)
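get_statistics() and get_io_class_statistics() now build CacheStats / IoClassStats objects directly, and flush_cache() returns the casadm Output; hypothetical usage:

    usage = cache.get_statistics(stat_filter=[StatsFilter.usage]).usage_stats
    io_class_stats = cache.get_io_class_statistics(io_class_id=0)
    flush_output = cache.flush_cache()  # syncs and returns the command Output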
@ -136,47 +148,60 @@ class Cache:
return get_io_class_list(self.cache_id) return get_io_class_list(self.cache_id)
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters): def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
return casadm.set_param_cutoff(self.cache_id, return casadm.set_param_cutoff(
self.cache_id,
threshold=seq_cutoff_param.threshold, threshold=seq_cutoff_param.threshold,
policy=seq_cutoff_param.policy, policy=seq_cutoff_param.policy,
promotion_count=seq_cutoff_param.promotion_count) promotion_count=seq_cutoff_param.promotion_count,
)
def set_seq_cutoff_threshold(self, threshold: Size): def set_seq_cutoff_threshold(self, threshold: Size):
return casadm.set_param_cutoff(self.cache_id, return casadm.set_param_cutoff(self.cache_id, threshold=threshold, policy=None)
threshold=threshold,
policy=None)
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy): def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
return casadm.set_param_cutoff(self.cache_id, return casadm.set_param_cutoff(self.cache_id, threshold=None, policy=policy)
threshold=None,
policy=policy)
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy): def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
return casadm.set_param_cleaning(self.cache_id, cleaning_policy) return casadm.set_param_cleaning(self.cache_id, cleaning_policy)
def set_params_acp(self, acp_params: FlushParametersAcp): def set_params_acp(self, acp_params: FlushParametersAcp):
return casadm.set_param_cleaning_acp(self.cache_id, return casadm.set_param_cleaning_acp(
self.cache_id,
(
int(acp_params.wake_up_time.total_milliseconds()) int(acp_params.wake_up_time.total_milliseconds())
if acp_params.wake_up_time else None, if acp_params.wake_up_time
int(acp_params.flush_max_buffers) else None
if acp_params.flush_max_buffers else None) ),
int(acp_params.flush_max_buffers) if acp_params.flush_max_buffers else None,
)
def set_params_alru(self, alru_params: FlushParametersAlru): def set_params_alru(self, alru_params: FlushParametersAlru):
return casadm.set_param_cleaning_alru( return casadm.set_param_cleaning_alru(
self.cache_id, self.cache_id,
(
int(alru_params.wake_up_time.total_seconds()) int(alru_params.wake_up_time.total_seconds())
if alru_params.wake_up_time is not None else None, if alru_params.wake_up_time is not None
else None
),
(
int(alru_params.staleness_time.total_seconds()) int(alru_params.staleness_time.total_seconds())
if alru_params.staleness_time is not None else None, if alru_params.staleness_time is not None
alru_params.flush_max_buffers else None
if alru_params.flush_max_buffers is not None else None, ),
(alru_params.flush_max_buffers if alru_params.flush_max_buffers is not None else None),
(
int(alru_params.activity_threshold.total_milliseconds()) int(alru_params.activity_threshold.total_milliseconds())
if alru_params.activity_threshold is not None else None) if alru_params.activity_threshold is not None
else None
),
)
def get_cache_config(self): def get_cache_config(self):
return CacheConfig(self.get_cache_line_size(), return CacheConfig(
self.get_cache_line_size(),
self.get_cache_mode(), self.get_cache_mode(),
self.get_cleaning_policy()) self.get_cleaning_policy(),
)
def standby_detach(self, shortcut: bool = False): def standby_detach(self, shortcut: bool = False):
return casadm.standby_detach_cache(cache_id=self.cache_id, shortcut=shortcut) return casadm.standby_detach_cache(cache_id=self.cache_id, shortcut=shortcut)
@ -185,3 +210,6 @@ class Cache:
return casadm.standby_activate_cache( return casadm.standby_activate_cache(
cache_id=self.cache_id, cache_dev=device, shortcut=shortcut cache_id=self.cache_id, cache_dev=device, shortcut=shortcut
) )
def has_volatile_metadata(self) -> bool:
return self.get_metadata_size_on_disk() == Size.zero()
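The new helper makes it easy to branch on caches whose metadata is kept only in RAM (hypothetical snippet):

    if cache.has_volatile_metadata():
        # on-disk metadata size is zero, so nothing can be loaded back from disk
        TestRun.LOGGER.info(f"Cache {cache.cache_id} keeps metadata only in RAM")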


@ -1,9 +1,10 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
from aenum import Enum, IntFlag from enum import Enum, IntFlag
from test_utils.os_utils import get_kernel_module_parameter from test_utils.os_utils import get_kernel_module_parameter
from test_utils.size import Size, Unit from test_utils.size import Size, Unit
@ -56,21 +57,15 @@ class CacheMode(Enum):
@staticmethod @staticmethod
def with_traits(flags: CacheModeTrait): def with_traits(flags: CacheModeTrait):
return [ return [m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))]
m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))
]
@staticmethod @staticmethod
def without_traits(flags: CacheModeTrait): def without_traits(flags: CacheModeTrait):
return [ return [m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))]
m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))
]
@staticmethod @staticmethod
def with_any_trait(flags: CacheModeTrait): def with_any_trait(flags: CacheModeTrait):
return [ return [m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))]
m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))
]
class SeqCutOffPolicy(Enum): class SeqCutOffPolicy(Enum):
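The trait helpers are now one-liners; a hypothetical filtering sketch (CacheModeTrait flag names assumed from this module):

    lazy_write_modes = CacheMode.with_traits(CacheModeTrait.LazyWrites)
    no_insert_modes = CacheMode.without_traits(
        CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite
    )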
@ -90,7 +85,6 @@ class SeqCutOffPolicy(Enum):
class MetadataMode(Enum): class MetadataMode(Enum):
normal = "normal" normal = "normal"
atomic = "atomic"
DEFAULT = normal DEFAULT = normal
def __str__(self): def __str__(self):
@ -152,18 +146,16 @@ class FlushParametersAlru:
) )
def __str__(self): def __str__(self):
ret = ["activity threshold: " ret = [
+ (f"{self.activity_threshold}" if self.activity_threshold is not None "activity threshold: "
else "default"), + (f"{self.activity_threshold}" if self.activity_threshold is not None else "default"),
"flush max buffers: " "flush max buffers: "
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None + (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
else "default"),
"staleness time: " "staleness time: "
+ (f"{self.staleness_time}" if self.staleness_time is not None + (f"{self.staleness_time}" if self.staleness_time is not None else "default"),
else "default"),
"wake up time: " "wake up time: "
+ (f"{self.wake_up_time}" if self.wake_up_time is not None + (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
else "default")] ]
return " | ".join(ret) return " | ".join(ret)
@staticmethod @staticmethod
@ -197,12 +189,12 @@ class FlushParametersAcp:
) )
def __str__(self): def __str__(self):
ret = ["flush max buffers: " ret = [
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None "flush max buffers: "
else "default"), + (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
"wake up time: " "wake up time: "
+ (f"{self.wake_up_time}" if self.wake_up_time is not None + (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
else "default")] ]
return " | ".join(ret) return " | ".join(ret)
@staticmethod @staticmethod
@ -238,7 +230,7 @@ class SeqCutOffParameters:
return SeqCutOffParameters( return SeqCutOffParameters(
threshold=Size(1024, Unit.KibiByte), threshold=Size(1024, Unit.KibiByte),
policy=SeqCutOffPolicy.full, policy=SeqCutOffPolicy.full,
promotion_count=8 promotion_count=8,
) )
@ -248,10 +240,7 @@ class PromotionParametersNhit:
self.trigger = trigger self.trigger = trigger
def __eq__(self, other): def __eq__(self, other):
return ( return self.threshold == other.threshold and self.trigger == other.trigger
self.threshold == other.threshold
and self.trigger == other.trigger
)
@staticmethod @staticmethod
def nhit_params_range(): def nhit_params_range():
@ -293,7 +282,7 @@ class KernelParameters:
use_io_scheduler: UseIoScheduler = None, use_io_scheduler: UseIoScheduler = None,
seq_cut_off_mb: int = None, seq_cut_off_mb: int = None,
max_writeback_queue_size: int = None, max_writeback_queue_size: int = None,
writeback_queue_unblock_size: int = None writeback_queue_unblock_size: int = None,
): ):
self.unaligned_io = unaligned_io self.unaligned_io = unaligned_io
self.use_io_scheduler = use_io_scheduler self.use_io_scheduler = use_io_scheduler
@ -312,16 +301,17 @@ class KernelParameters:
self.use_io_scheduler, other.use_io_scheduler, UseIoScheduler.DEFAULT self.use_io_scheduler, other.use_io_scheduler, UseIoScheduler.DEFAULT
) )
and equal_or_default( and equal_or_default(
self.seq_cut_off_mb, other.seq_cut_off_mb, self.seq_cut_off_mb, other.seq_cut_off_mb, self.seq_cut_off_mb_DEFAULT
self.seq_cut_off_mb_DEFAULT
) )
and equal_or_default( and equal_or_default(
self.max_writeback_queue_size, other.max_writeback_queue_size, self.max_writeback_queue_size,
self.max_writeback_queue_size_DEFAULT other.max_writeback_queue_size,
self.max_writeback_queue_size_DEFAULT,
) )
and equal_or_default( and equal_or_default(
self.writeback_queue_unblock_size, other.writeback_queue_unblock_size, self.writeback_queue_unblock_size,
self.writeback_queue_unblock_size_DEFAULT other.writeback_queue_unblock_size,
self.writeback_queue_unblock_size_DEFAULT,
) )
) )
@ -332,7 +322,7 @@ class KernelParameters:
UseIoScheduler.DEFAULT, UseIoScheduler.DEFAULT,
cls.seq_cut_off_mb_DEFAULT, cls.seq_cut_off_mb_DEFAULT,
cls.max_writeback_queue_size_DEFAULT, cls.max_writeback_queue_size_DEFAULT,
cls.writeback_queue_unblock_size_DEFAULT cls.writeback_queue_unblock_size_DEFAULT,
) )
@staticmethod @staticmethod
@ -343,7 +333,7 @@ class KernelParameters:
UseIoScheduler(int(get_kernel_module_parameter(module, "use_io_scheduler"))), UseIoScheduler(int(get_kernel_module_parameter(module, "use_io_scheduler"))),
int(get_kernel_module_parameter(module, "seq_cut_off_mb")), int(get_kernel_module_parameter(module, "seq_cut_off_mb")),
int(get_kernel_module_parameter(module, "max_writeback_queue_size")), int(get_kernel_module_parameter(module, "max_writeback_queue_size")),
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size")) int(get_kernel_module_parameter(module, "writeback_queue_unblock_size")),
) )
def get_parameter_dictionary(self): def get_parameter_dictionary(self):
@ -354,10 +344,15 @@ class KernelParameters:
params["use_io_scheduler"] = str(self.use_io_scheduler.value) params["use_io_scheduler"] = str(self.use_io_scheduler.value)
if self.seq_cut_off_mb not in [None, self.seq_cut_off_mb_DEFAULT]: if self.seq_cut_off_mb not in [None, self.seq_cut_off_mb_DEFAULT]:
params["seq_cut_off_mb"] = str(self.seq_cut_off_mb) params["seq_cut_off_mb"] = str(self.seq_cut_off_mb)
if self.max_writeback_queue_size not in [None, self.max_writeback_queue_size_DEFAULT]: if self.max_writeback_queue_size not in [
None,
self.max_writeback_queue_size_DEFAULT,
]:
params["max_writeback_queue_size"] = str(self.max_writeback_queue_size) params["max_writeback_queue_size"] = str(self.max_writeback_queue_size)
if (self.writeback_queue_unblock_size not in if self.writeback_queue_unblock_size not in [
[None, self.writeback_queue_unblock_size_DEFAULT]): None,
self.writeback_queue_unblock_size_DEFAULT,
]:
params["writeback_queue_unblock_size"] = str(self.writeback_queue_unblock_size) params["writeback_queue_unblock_size"] = str(self.writeback_queue_unblock_size)
return params return params
@ -370,7 +365,7 @@ class CacheConfig:
cache_line_size=CacheLineSize.DEFAULT, cache_line_size=CacheLineSize.DEFAULT,
cache_mode=CacheMode.DEFAULT, cache_mode=CacheMode.DEFAULT,
cleaning_policy=CleaningPolicy.DEFAULT, cleaning_policy=CleaningPolicy.DEFAULT,
kernel_parameters=None kernel_parameters=None,
): ):
self.cache_line_size = cache_line_size self.cache_line_size = cache_line_size
self.cache_mode = cache_mode self.cache_mode = cache_mode
@ -383,7 +378,9 @@ class CacheConfig:
and self.cache_mode == other.cache_mode and self.cache_mode == other.cache_mode
and self.cleaning_policy == other.cleaning_policy and self.cleaning_policy == other.cleaning_policy
and equal_or_default( and equal_or_default(
self.kernel_parameters, other.kernel_parameters, KernelParameters.DEFAULT self.kernel_parameters,
other.kernel_parameters,
KernelParameters.DEFAULT,
) )
) )
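CacheConfig equality still treats a missing kernel_parameters as KernelParameters.DEFAULT, so a config read back from a running cache can be checked against an expected one; a hypothetical sketch (enum member names assumed):

    expected = CacheConfig(
        cache_line_size=CacheLineSize.LINE_4KiB,
        cache_mode=CacheMode.WT,
        cleaning_policy=CleaningPolicy.alru,
    )
    if cache.get_cache_config() != expected:
        TestRun.LOGGER.error("Cache configuration differs from the expected one")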


@ -1,9 +1,10 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
from aenum import Enum from enum import Enum
from core.test_run import TestRun from core.test_run import TestRun
from test_utils import os_utils from test_utils import os_utils
from test_utils.os_utils import ModuleRemoveMethod from test_utils.os_utils import ModuleRemoveMethod
@ -19,8 +20,7 @@ def reload_all_cas_modules():
def unload_all_cas_modules(): def unload_all_cas_modules():
os_utils.unload_kernel_module(CasModule.cache.value, os_utils.unload_kernel_module(CasModule.cache.value, os_utils.ModuleRemoveMethod.rmmod)
os_utils.ModuleRemoveMethod.rmmod)
def is_cas_management_dev_present(): def is_cas_management_dev_present():


@ -36,7 +36,8 @@ class Packages:
class _Rpm(RpmSet): class _Rpm(RpmSet):
def __init__(self, packages_dir: str = ""): def __init__(self, packages_paths: list, packages_dir: str = ""):
super().__init__(packages_paths)
self.packages_dir = packages_dir self.packages_dir = packages_dir
self.packages = get_packages_list("rpm", self.packages_dir) self.packages = get_packages_list("rpm", self.packages_dir)
@ -65,7 +66,8 @@ class _Rpm(RpmSet):
class _Deb(DebSet): class _Deb(DebSet):
def __init__(self, packages_dir: str = ""): def __init__(self, packages_paths: list, packages_dir: str = ""):
super().__init__(packages_paths)
self.packages_dir = packages_dir self.packages_dir = packages_dir
self.packages = get_packages_list("deb", self.packages_dir) self.packages = get_packages_list("deb", self.packages_dir)
@ -98,7 +100,8 @@ def get_packages_list(package_type: str, packages_dir: str):
return [] return []
return [ return [
package for package in find_all_files(packages_dir, recursive=False) package
for package in find_all_files(packages_dir, recursive=False)
# include only binary packages (ready to be processed by package manager) # include only binary packages (ready to be processed by package manager)
if package.endswith(package_type.lower()) if package.endswith(package_type.lower())
and not package.endswith("src." + package_type.lower()) and not package.endswith("src." + package_type.lower())


@ -1,55 +1,88 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
from typing import List from typing import List
from api.cas.cache import Cache from api.cas.cache import Cache
from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy, CleaningPolicy, \ from api.cas.cache_config import (
KernelParameters CacheLineSize,
CacheMode,
SeqCutOffPolicy,
CleaningPolicy,
KernelParameters,
)
from api.cas.casadm_params import OutputFormat, StatsFilter
from api.cas.cli import *
from api.cas.core import Core from api.cas.core import Core
from core.test_run import TestRun from core.test_run import TestRun
from storage_devices.device import Device from storage_devices.device import Device
from test_utils.os_utils import reload_kernel_module from test_utils.os_utils import reload_kernel_module
from test_utils.output import CmdException from test_utils.output import CmdException, Output
from test_utils.size import Size, Unit from test_utils.size import Size, Unit
from .casadm_params import *
from .casctl import stop as casctl_stop
from .cli import * # casadm commands
def help(shortcut: bool = False): def help(shortcut: bool = False):
return TestRun.executor.run(help_cmd(shortcut)) return TestRun.executor.run(help_cmd(shortcut))
def start_cache(cache_dev: Device, cache_mode: CacheMode = None, def start_cache(
cache_line_size: CacheLineSize = None, cache_id: int = None, cache_dev: Device,
force: bool = False, load: bool = False, shortcut: bool = False, cache_mode: CacheMode = None,
kernel_params: KernelParameters = KernelParameters()): cache_line_size: CacheLineSize = None,
cache_id: int = None,
force: bool = False,
load: bool = False,
shortcut: bool = False,
kernel_params: KernelParameters = KernelParameters(),
):
if kernel_params != KernelParameters.read_current_settings(): if kernel_params != KernelParameters.read_current_settings():
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary()) reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
_cache_line_size = None if cache_line_size is None else str( _cache_line_size = (
int(cache_line_size.value.get_value(Unit.KibiByte))) None
if cache_line_size is None
else str(int(cache_line_size.value.get_value(Unit.KibiByte)))
)
_cache_id = None if cache_id is None else str(cache_id) _cache_id = None if cache_id is None else str(cache_id)
_cache_mode = None if cache_mode is None else cache_mode.name.lower() _cache_mode = None if cache_mode is None else cache_mode.name.lower()
output = TestRun.executor.run(start_cmd( output = TestRun.executor.run(
cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size, start_cmd(
cache_id=_cache_id, force=force, load=load, shortcut=shortcut)) cache_dev=cache_dev.path,
cache_mode=_cache_mode,
cache_line_size=_cache_line_size,
cache_id=_cache_id,
force=force,
load=load,
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to start cache.", output) raise CmdException("Failed to start cache.", output)
return Cache(cache_dev) return Cache(cache_dev)
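The reflowed start_cache keeps its behaviour: optional mode, cache line size and id are converted to the CLI's string forms before start_cmd is run; a hypothetical call (enum members assumed):

    cache = casadm.start_cache(
        cache_dev=cache_dev,
        cache_mode=CacheMode.WB,
        cache_line_size=CacheLineSize.LINE_64KiB,
        cache_id=1,
        force=True,
    )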
def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSize, def standby_init(
force: bool = False, shortcut: bool = False, cache_dev: Device,
kernel_params: KernelParameters = KernelParameters()): cache_id: int,
cache_line_size: CacheLineSize,
force: bool = False,
shortcut: bool = False,
kernel_params: KernelParameters = KernelParameters(),
):
if kernel_params != KernelParameters.read_current_settings(): if kernel_params != KernelParameters.read_current_settings():
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary()) reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
_cache_line_size = None if cache_line_size is None else str( _cache_line_size = (
int(cache_line_size.value.get_value(Unit.KibiByte))) None
if cache_line_size is None
else str(int(cache_line_size.value.get_value(Unit.KibiByte)))
)
output = TestRun.executor.run( output = TestRun.executor.run(
standby_init_cmd( standby_init_cmd(
@ -66,18 +99,14 @@ def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSiz
def standby_load(cache_dev: Device, shortcut: bool = False): def standby_load(cache_dev: Device, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to load standby cache.", output) raise CmdException("Failed to load standby cache.", output)
return Cache(cache_dev) return Cache(cache_dev)
def standby_detach_cache(cache_id: int, shortcut: bool = False): def standby_detach_cache(cache_id: int, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut))
standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to detach standby cache.", output) raise CmdException("Failed to detach standby cache.", output)
return output return output
@ -85,9 +114,7 @@ def standby_detach_cache(cache_id: int, shortcut: bool = False):
def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False): def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
standby_activate_cmd( standby_activate_cmd(cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut)
cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut
)
) )
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to activate standby cache.", output) raise CmdException("Failed to activate standby cache.", output)
@ -96,7 +123,8 @@ def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = Fa
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False): def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut)) stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to stop cache.", output) raise CmdException("Failed to stop cache.", output)
return output return output
@ -105,8 +133,13 @@ def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = Fals
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False): def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id) _core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run( output = TestRun.executor.run(
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path, add_core_cmd(
core_id=_core_id, shortcut=shortcut)) cache_id=str(cache.cache_id),
core_dev=core_dev.path,
core_id=_core_id,
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to add core.", output) raise CmdException("Failed to add core.", output)
core = Core(core_dev.path, cache.cache_id) core = Core(core_dev.path, cache.cache_id)
@ -115,8 +148,10 @@ def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False): def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
remove_core_cmd(cache_id=str(cache_id), core_id=str(core_id), remove_core_cmd(
force=force, shortcut=shortcut)) cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to remove core.", output) raise CmdException("Failed to remove core.", output)
@ -124,22 +159,24 @@ def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool
def remove_inactive(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False): def remove_inactive(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
remove_inactive_cmd( remove_inactive_cmd(
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut)) cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to remove inactive core.", output) raise CmdException("Failed to remove inactive core.", output)
def remove_detached(core_device: Device, shortcut: bool = False): def remove_detached(core_device: Device, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut)) remove_detached_cmd(core_device=core_device.path, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to remove detached core.", output) raise CmdException("Failed to remove detached core.", output)
return output return output
def try_add(core_device: Device, cache_id: int, core_id: int): def try_add(core_device: Device, cache_id: int, core_id: int):
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path, output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path, str(core_id)))
str(core_id)))
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to execute try add script command.", output) raise CmdException("Failed to execute try add script command.", output)
return Core(core_device.path, cache_id) return Core(core_device.path, cache_id)
@ -176,36 +213,49 @@ def remove_core_with_script_command(cache_id: int, core_id: int, no_flush: bool
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False): def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id) _core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run( output = TestRun.executor.run(
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut)) reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to reset counters.", output) raise CmdException("Failed to reset counters.", output)
return output return output
def flush(cache_id: int, core_id: int = None, shortcut: bool = False): def flush_cache(cache_id: int, shortcut: bool = False) -> Output:
if core_id is None:
command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut) command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut)
else:
command = flush_core_cmd(cache_id=str(cache_id), core_id=str(core_id), shortcut=shortcut)
output = TestRun.executor.run(command) output = TestRun.executor.run(command)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Flushing failed.", output) raise CmdException("Flushing cache failed.", output)
return output
def flush_core(
cache_id: int, core_id: int, shortcut: bool = False
) -> Output:
command = flush_core_cmd(
cache_id=str(cache_id),
core_id=str(core_id),
shortcut=shortcut,
)
output = TestRun.executor.run(command)
if output.exit_code != 0:
raise CmdException("Flushing core failed.", output)
return output return output
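The old flush(cache_id, core_id=None) helper is replaced by two explicit commands; hypothetical usage:

    casadm.flush_cache(cache_id=1)             # flush the whole cache
    casadm.flush_core(cache_id=1, core_id=1)   # flush a single core only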
def load_cache(device: Device, shortcut: bool = False): def load_cache(device: Device, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(load_cmd(cache_dev=device.path, shortcut=shortcut))
load_cmd(cache_dev=device.path, shortcut=shortcut))
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to load cache.", output) raise CmdException("Failed to load cache.", output)
return Cache(device) return Cache(device)
def list_caches(output_format: OutputFormat = None, by_id_path: bool = True, def list_caches(
shortcut: bool = False): output_format: OutputFormat = None, by_id_path: bool = True, shortcut: bool = False
):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(
list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut)) list_caches_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to list caches.", output) raise CmdException("Failed to list caches.", output)
return output return output
@ -213,8 +263,7 @@ def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
def print_version(output_format: OutputFormat = None, shortcut: bool = False): def print_version(output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(version_cmd(output_format=_output_format, shortcut=shortcut))
version_cmd(output_format=_output_format, shortcut=shortcut))
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to print version.", output) raise CmdException("Failed to print version.", output)
return output return output
@ -222,37 +271,43 @@ def print_version(output_format: OutputFormat = None, shortcut: bool = False):
def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False): def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut)) zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Failed to wipe metadata.", output) raise CmdException("Failed to wipe metadata.", output)
return output return output
def stop_all_caches(): def stop_all_caches():
if "No caches running" in list_caches().stdout: from .casadm_parser import get_caches
caches = get_caches()
if not caches:
return return
TestRun.LOGGER.info("Stop all caches") for cache in caches:
stop_output = casctl_stop() stop_cache(cache_id=cache.cache_id)
caches_output = list_caches()
if "No caches running" not in caches_output.stdout:
raise CmdException(f"Error while stopping caches. "
f"Listing caches: {caches_output}", stop_output)
def remove_all_detached_cores(): def remove_all_detached_cores():
from api.cas import casadm_parser from api.cas import casadm_parser
devices = casadm_parser.get_cas_devices_dict() devices = casadm_parser.get_cas_devices_dict()
for dev in devices["core_pool"]: for dev in devices["core_pool"]:
TestRun.executor.run(remove_detached_cmd(dev["device"])) TestRun.executor.run(remove_detached_cmd(dev["device"]))
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False, def print_statistics(
io_class_id: int = None, filter: List[StatsFilter] = None, cache_id: int,
output_format: OutputFormat = None, by_id_path: bool = True, core_id: int = None,
shortcut: bool = False): io_class_id: int = None,
_output_format = None if output_format is None else output_format.name filter: List[StatsFilter] = None,
_core_id = None if core_id is None else str(core_id) output_format: OutputFormat = None,
_io_class_id = None if io_class_id is None else str(io_class_id) by_id_path: bool = True,
shortcut: bool = False,
):
_output_format = output_format.name if output_format else None
_core_id = str(core_id) if core_id else None
_io_class_id = str(io_class_id) if io_class_id else None
if filter is None: if filter is None:
_filter = filter _filter = filter
else: else:
@ -260,17 +315,21 @@ def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = Fa
_filter = ",".join(names) _filter = ",".join(names)
output = TestRun.executor.run( output = TestRun.executor.run(
print_statistics_cmd( print_statistics_cmd(
cache_id=str(cache_id), core_id=_core_id, cache_id=str(cache_id),
per_io_class=per_io_class, io_class_id=_io_class_id, core_id=_core_id,
filter=_filter, output_format=_output_format, io_class_id=_io_class_id,
by_id_path=by_id_path, shortcut=shortcut)) filter=_filter,
output_format=_output_format,
by_id_path=by_id_path,
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Printing statistics failed.", output) raise CmdException("Printing statistics failed.", output)
return output return output
def set_cache_mode(cache_mode: CacheMode, cache_id: int, def set_cache_mode(cache_mode: CacheMode, cache_id: int, flush=None, shortcut: bool = False):
flush=None, shortcut: bool = False):
flush_cache = None flush_cache = None
if flush is True: if flush is True:
flush_cache = "yes" flush_cache = "yes"
@ -278,8 +337,13 @@ def set_cache_mode(cache_mode: CacheMode, cache_id: int,
flush_cache = "no" flush_cache = "no"
output = TestRun.executor.run( output = TestRun.executor.run(
set_cache_mode_cmd(cache_mode=cache_mode.name.lower(), cache_id=str(cache_id), set_cache_mode_cmd(
flush_cache=flush_cache, shortcut=shortcut)) cache_mode=cache_mode.name.lower(),
cache_id=str(cache_id),
flush_cache=flush_cache,
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Set cache mode command failed.", output) raise CmdException("Set cache mode command failed.", output)
return output return output
@ -287,7 +351,8 @@ def set_cache_mode(cache_mode: CacheMode, cache_id: int,
def load_io_classes(cache_id: int, file: str, shortcut: bool = False): def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut)) load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Load IO class command failed.", output) raise CmdException("Load IO class command failed.", output)
return output return output
@ -296,19 +361,28 @@ def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False): def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(
list_io_classes_cmd(cache_id=str(cache_id), list_io_classes_cmd(cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut)
output_format=_output_format, shortcut=shortcut)) )
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("List IO class command failed.", output) raise CmdException("List IO class command failed.", output)
return output return output
def get_param_cutoff(cache_id: int, core_id: int, def get_param_cutoff(
output_format: OutputFormat = None, shortcut: bool = False): cache_id: int,
core_id: int,
output_format: OutputFormat = None,
shortcut: bool = False,
):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(
get_param_cutoff_cmd(cache_id=str(cache_id), core_id=str(core_id), get_param_cutoff_cmd(
output_format=_output_format, shortcut=shortcut)) cache_id=str(cache_id),
core_id=str(core_id),
output_format=_output_format,
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Getting sequential cutoff params failed.", output) raise CmdException("Getting sequential cutoff params failed.", output)
return output return output
@ -317,37 +391,51 @@ def get_param_cutoff(cache_id: int, core_id: int,
def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False): def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(
get_param_cleaning_cmd(cache_id=str(cache_id), output_format=_output_format, get_param_cleaning_cmd(
shortcut=shortcut)) cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Getting cleaning policy params failed.", output) raise CmdException("Getting cleaning policy params failed.", output)
return output return output
def get_param_cleaning_alru(cache_id: int, output_format: OutputFormat = None, def get_param_cleaning_alru(
shortcut: bool = False): cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(
get_param_cleaning_alru_cmd(cache_id=str(cache_id), output_format=_output_format, get_param_cleaning_alru_cmd(
shortcut=shortcut)) cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Getting alru cleaning policy params failed.", output) raise CmdException("Getting alru cleaning policy params failed.", output)
return output return output
def get_param_cleaning_acp(cache_id: int, output_format: OutputFormat = None, def get_param_cleaning_acp(
shortcut: bool = False): cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
):
_output_format = None if output_format is None else output_format.name _output_format = None if output_format is None else output_format.name
output = TestRun.executor.run( output = TestRun.executor.run(
get_param_cleaning_acp_cmd(cache_id=str(cache_id), output_format=_output_format, get_param_cleaning_acp_cmd(
shortcut=shortcut)) cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Getting acp cleaning policy params failed.", output) raise CmdException("Getting acp cleaning policy params failed.", output)
return output return output
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None, def set_param_cutoff(
policy: SeqCutOffPolicy = None, promotion_count: int = None): cache_id: int,
core_id: int = None,
threshold: Size = None,
policy: SeqCutOffPolicy = None,
promotion_count: int = None,
shortcut: bool = False,
):
_core_id = None if core_id is None else str(core_id) _core_id = None if core_id is None else str(core_id)
_threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte))) _threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte)))
_policy = None if policy is None else policy.name _policy = None if policy is None else policy.name
@ -357,7 +445,8 @@ def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
core_id=_core_id, core_id=_core_id,
threshold=_threshold, threshold=_threshold,
policy=_policy, policy=_policy,
promotion_count=_promotion_count promotion_count=_promotion_count,
shortcut=shortcut,
) )
output = TestRun.executor.run(command) output = TestRun.executor.run(command)
if output.exit_code != 0: if output.exit_code != 0:
@ -365,34 +454,52 @@ def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
return output return output
def set_param_cleaning(cache_id: int, policy: CleaningPolicy): def set_param_cleaning(cache_id: int, policy: CleaningPolicy, shortcut: bool = False):
output = TestRun.executor.run( output = TestRun.executor.run(
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name)) set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name, shortcut=shortcut)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Error while setting cleaning policy.", output) raise CmdException("Error while setting cleaning policy.", output)
return output return output
def set_param_cleaning_alru(cache_id: int, wake_up: int = None, staleness_time: int = None, def set_param_cleaning_alru(
flush_max_buffers: int = None, activity_threshold: int = None): cache_id: int,
wake_up: int = None,
staleness_time: int = None,
flush_max_buffers: int = None,
activity_threshold: int = None,
shortcut: bool = False,
):
output = TestRun.executor.run( output = TestRun.executor.run(
set_param_cleaning_alru_cmd( set_param_cleaning_alru_cmd(
cache_id=cache_id, cache_id=str(cache_id),
wake_up=wake_up, wake_up=str(wake_up),
staleness_time=staleness_time, staleness_time=str(staleness_time),
flush_max_buffers=flush_max_buffers, flush_max_buffers=str(flush_max_buffers),
activity_threshold=activity_threshold)) activity_threshold=str(activity_threshold),
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Error while setting alru cleaning policy parameters.", output) raise CmdException("Error while setting alru cleaning policy parameters.", output)
return output return output
def set_param_cleaning_acp(cache_id: int, wake_up: int = None, flush_max_buffers: int = None): def set_param_cleaning_acp(
cache_id: int,
wake_up: int = None,
flush_max_buffers: int = None,
shortcut: bool = False,
):
output = TestRun.executor.run( output = TestRun.executor.run(
set_param_cleaning_acp_cmd( set_param_cleaning_acp_cmd(
cache_id=str(cache_id), cache_id=str(cache_id),
wake_up=str(wake_up) if wake_up is not None else None, wake_up=str(wake_up) if wake_up is not None else None,
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers else None)) flush_max_buffers=str(flush_max_buffers) if flush_max_buffers else None,
shortcut=shortcut,
)
)
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException("Error while setting acp cleaning policy parameters.", output) raise CmdException("Error while setting acp cleaning policy parameters.", output)
return output return output
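All set_param_* helpers now accept a shortcut flag and stringify their numeric arguments before building the command; a hypothetical call (cleaning policy names as defined in cache_config):

    casadm.set_param_cleaning(cache_id=1, policy=CleaningPolicy.acp, shortcut=True)
    casadm.set_param_cleaning_acp(cache_id=1, wake_up=10, flush_max_buffers=100)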


@ -1,9 +1,10 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
from aenum import Enum from enum import Enum
class OutputFormat(Enum): class OutputFormat(Enum):


@ -1,12 +1,13 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
import csv import csv
import io import io
import json import json
import re
from datetime import timedelta, datetime from datetime import timedelta, datetime
from typing import List from typing import List
@ -18,7 +19,6 @@ from api.cas.version import CasVersion
from core.test_run_utils import TestRun from core.test_run_utils import TestRun
from storage_devices.device import Device from storage_devices.device import Device
from test_utils.output import CmdException from test_utils.output import CmdException
from test_utils.size import parse_unit
class Stats(dict): class Stats(dict):
@ -26,188 +26,78 @@ class Stats(dict):
return json.dumps(self, default=lambda o: str(o), indent=2) return json.dumps(self, default=lambda o: str(o), indent=2)
def parse_stats_unit(unit: str):
if unit is None:
return ""
unit = re.search(r".*[^\]]", unit).group()
if unit == "s":
return "s"
elif unit == "%":
return "%"
elif unit == "Requests":
return "requests"
else:
return parse_unit(unit)
def get_filter(filter: List[StatsFilter]): def get_filter(filter: List[StatsFilter]):
"""Prepare list of statistic sections which should be retrieved and parsed. """ """Prepare list of statistic sections which should be retrieved and parsed."""
if filter is None or StatsFilter.all in filter: if filter is None or StatsFilter.all in filter:
_filter = [ _filter = [f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)]
f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)
]
else: else:
_filter = [ _filter = [f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)]
f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)
]
return _filter return _filter
def get_statistics( def get_caches() -> list:
cache_id: int,
core_id: int = None,
io_class_id: int = None,
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
stats = Stats()
_filter = get_filter(filter)
per_io_class = True if io_class_id is not None else False
# No need to retrieve all stats if user specified only 'conf' flag
if filter != [StatsFilter.conf]:
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
per_io_class=per_io_class,
io_class_id=io_class_id,
filter=_filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
if filter is None or StatsFilter.conf in filter or StatsFilter.all in filter:
# Conf statistics have different unit or may have no unit at all. For parsing
# convenience they are gathered separately. As this is only configuration stats
# there is no risk they are divergent.
conf_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
per_io_class=per_io_class,
io_class_id=io_class_id,
filter=[StatsFilter.conf],
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys = conf_stats[0]
stat_values = conf_stats[1]
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
# Some of configuration stats have no unit
try:
stat_name, stat_unit = name.split(" [")
except ValueError:
stat_name = name
stat_unit = None
stat_name = stat_name.lower()
# 'dirty for' and 'cache size' stats occurs twice
if stat_name in stats:
continue
stat_unit = parse_stats_unit(stat_unit)
if isinstance(stat_unit, Unit):
stats[stat_name] = Size(float(val), stat_unit)
elif stat_unit == "s":
stats[stat_name] = timedelta(seconds=int(val))
elif stat_unit == "":
# Some of stats without unit can be a number like IDs,
# some of them can be string like device path
try:
stats[stat_name] = float(val)
except ValueError:
stats[stat_name] = val
# No need to parse all stats if user specified only 'conf' flag
if filter == [StatsFilter.conf]:
return stats
stat_keys = csv_stats[0]
stat_values = csv_stats[1]
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
if percentage_val and " [%]" in name:
stats[name.split(" [")[0].lower()] = float(val)
elif not percentage_val and "[%]" not in name:
stat_name, stat_unit = name.split(" [")
stat_unit = parse_stats_unit(stat_unit)
stat_name = stat_name.lower()
if isinstance(stat_unit, Unit):
stats[stat_name] = Size(float(val), stat_unit)
elif stat_unit == "requests":
stats[stat_name] = float(val)
else:
raise ValueError(f"Invalid unit {stat_unit}")
return stats
def get_caches(): # This method does not return inactive or detached CAS devices
from api.cas.cache import Cache from api.cas.cache import Cache
caches_dict = get_cas_devices_dict()["caches"]
caches_list = [] caches_list = []
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
for line in lines: for cache in caches_dict.values():
args = line.split(',') caches_list.append(
if args[0] == "cache": Cache(
current_cache = Cache(Device(args[2])) device=(Device(cache["device_path"]) if cache["device_path"] != "-" else None),
caches_list.append(current_cache) cache_id=cache["id"],
)
)
return caches_list return caches_list
def get_cores(cache_id: int): def get_cores(cache_id: int) -> list:
from api.cas.core import Core, CoreStatus from api.cas.core import Core, CoreStatus
cores_list = []
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n') cores_dict = get_cas_devices_dict()["cores"].values()
is_proper_core_line = False
for line in lines: def is_active(core):
args = line.split(',') return CoreStatus[core["status"].lower()] == CoreStatus.active
if args[0] == "core" and is_proper_core_line:
core_status_str = args[3].lower() return [
is_valid_status = CoreStatus[core_status_str].value[0] <= 1 Core(core["device_path"], core["cache_id"])
if is_valid_status: for core in cores_dict
cores_list.append(Core(args[2], cache_id)) if is_active(core) and core["cache_id"] == cache_id
if args[0] == "cache": ]
is_proper_core_line = True if int(args[1]) == cache_id else False
return cores_list
def get_cas_devices_dict(): def get_cas_devices_dict() -> dict:
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split('\n'))) device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split("\n")))
devices = {"core_pool": [], "caches": {}, "cores": {}} devices = {"caches": {}, "cores": {}, "core_pool": {}}
cache_id = -1
core_pool = False core_pool = False
prev_cache_id = -1
for device in device_list: for device in device_list:
if device["type"] == "core pool":
core_pool = True
continue
if device["type"] == "cache": if device["type"] == "cache":
core_pool = False params = [
prev_cache_id = int(device["id"]) ("id", int(device["id"])),
devices["caches"].update( ("device_path", device["disk"]),
{ ("status", device["status"]),
int(device["id"]): { ]
"device": device["disk"], devices["caches"][int(device["id"])] = dict([(key, value) for key, value in params])
"status": device["status"], cache_id = int(device["id"])
}
}
)
elif device["type"] == "core": elif device["type"] == "core":
core = {"device": device["disk"], "status": device["status"]} params = [
("cache_id", cache_id),
("device_path", device["disk"]),
("status", device["status"]),
]
if core_pool: if core_pool:
devices["core_pool"].append(core) params.append(("core_pool", device))
else: devices["core_pool"][(cache_id, int(device["id"]))] = dict(
core.update({"cache_id": prev_cache_id}) [(key, value) for key, value in params]
devices["cores"].update(
{(prev_cache_id, int(device["id"])): core}
) )
else:
devices["cores"][(cache_id, int(device["id"]))] = dict(
[(key, value) for key, value in params]
)
return devices return devices
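The parser now keys caches by id and cores (and core-pool entries) by (cache_id, core_id) tuples, exposing device_path instead of device; a hypothetical walk over the result:

    devices = get_cas_devices_dict()
    for cache_id, cache in devices["caches"].items():
        TestRun.LOGGER.info(f"cache {cache_id}: {cache['device_path']} ({cache['status']})")
    for (cache_id, core_id), core in devices["cores"].items():
        TestRun.LOGGER.info(f"core {core_id} of cache {cache_id}: {core['device_path']}")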
@ -215,20 +105,26 @@ def get_flushing_progress(cache_id: int, core_id: int = None):
casadm_output = casadm.list_caches(OutputFormat.csv) casadm_output = casadm.list_caches(OutputFormat.csv)
lines = casadm_output.stdout.splitlines() lines = casadm_output.stdout.splitlines()
for line in lines: for line in lines:
line_elements = line.split(',') line_elements = line.split(",")
if core_id is not None and line_elements[0] == "core" \ if (
and int(line_elements[1]) == core_id \ core_id is not None
or core_id is None and line_elements[0] == "cache" \ and line_elements[0] == "core"
and int(line_elements[1]) == cache_id: and int(line_elements[1]) == core_id
or core_id is None
and line_elements[0] == "cache"
and int(line_elements[1]) == cache_id
):
try: try:
flush_line_elements = line_elements[3].split() flush_line_elements = line_elements[3].split()
flush_percent = flush_line_elements[1][1:] flush_percent = flush_line_elements[1][1:]
return float(flush_percent) return float(flush_percent)
except Exception: except Exception:
break break
raise CmdException(f"There is no flushing progress in casadm list output. (cache {cache_id}" raise CmdException(
f"There is no flushing progress in casadm list output. (cache {cache_id}"
f"{' core ' + str(core_id) if core_id is not None else ''})", f"{' core ' + str(core_id) if core_id is not None else ''})",
casadm_output) casadm_output,
)
def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)): def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
@ -243,50 +139,53 @@ def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
def get_flush_parameters_alru(cache_id: int): def get_flush_parameters_alru(cache_id: int):
casadm_output = casadm.get_param_cleaning_alru(cache_id, casadm_output = casadm.get_param_cleaning_alru(
casadm.OutputFormat.csv).stdout.splitlines() cache_id, casadm.OutputFormat.csv
).stdout.splitlines()
flush_parameters = FlushParametersAlru() flush_parameters = FlushParametersAlru()
for line in casadm_output: for line in casadm_output:
if 'max buffers' in line: if "max buffers" in line:
flush_parameters.flush_max_buffers = int(line.split(',')[1]) flush_parameters.flush_max_buffers = int(line.split(",")[1])
if 'Activity threshold' in line: if "Activity threshold" in line:
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(',')[1])) flush_parameters.activity_threshold = Time(milliseconds=int(line.split(",")[1]))
if 'Stale buffer time' in line: if "Stale buffer time" in line:
flush_parameters.staleness_time = Time(seconds=int(line.split(',')[1])) flush_parameters.staleness_time = Time(seconds=int(line.split(",")[1]))
if 'Wake up time' in line: if "Wake up time" in line:
flush_parameters.wake_up_time = Time(seconds=int(line.split(',')[1])) flush_parameters.wake_up_time = Time(seconds=int(line.split(",")[1]))
return flush_parameters return flush_parameters
def get_flush_parameters_acp(cache_id: int): def get_flush_parameters_acp(cache_id: int):
casadm_output = casadm.get_param_cleaning_acp(cache_id, casadm_output = casadm.get_param_cleaning_acp(
casadm.OutputFormat.csv).stdout.splitlines() cache_id, casadm.OutputFormat.csv
).stdout.splitlines()
flush_parameters = FlushParametersAcp() flush_parameters = FlushParametersAcp()
for line in casadm_output: for line in casadm_output:
if 'max buffers' in line: if "max buffers" in line:
flush_parameters.flush_max_buffers = int(line.split(',')[1]) flush_parameters.flush_max_buffers = int(line.split(",")[1])
if 'Wake up time' in line: if "Wake up time" in line:
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(',')[1])) flush_parameters.wake_up_time = Time(milliseconds=int(line.split(",")[1]))
return flush_parameters return flush_parameters
def get_seq_cut_off_parameters(cache_id: int, core_id: int): def get_seq_cut_off_parameters(cache_id: int, core_id: int):
casadm_output = casadm.get_param_cutoff( casadm_output = casadm.get_param_cutoff(
cache_id, core_id, casadm.OutputFormat.csv).stdout.splitlines() cache_id, core_id, casadm.OutputFormat.csv
).stdout.splitlines()
seq_cut_off_params = SeqCutOffParameters() seq_cut_off_params = SeqCutOffParameters()
for line in casadm_output: for line in casadm_output:
if 'Sequential cutoff threshold' in line: if "Sequential cutoff threshold" in line:
seq_cut_off_params.threshold = Size(int(line.split(',')[1]), Unit.KibiByte) seq_cut_off_params.threshold = Size(int(line.split(",")[1]), Unit.KibiByte)
if 'Sequential cutoff policy' in line: if "Sequential cutoff policy" in line:
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(',')[1]) seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(",")[1])
if 'Sequential cutoff promotion request count threshold' in line: if "Sequential cutoff promotion request count threshold" in line:
seq_cut_off_params.promotion_count = int(line.split(',')[1]) seq_cut_off_params.promotion_count = int(line.split(",")[1])
return seq_cut_off_params return seq_cut_off_params
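The cutoff parser follows the same pattern; an invented line and the value derived from it, the KiB-to-Size conversion being the part worth remembering:
line = "Sequential cutoff threshold [KiB],1024"  # invented casadm CSV line
threshold = Size(int(line.split(",")[1]), Unit.KibiByte)  # == Size(1024, Unit.KibiByte)
# "Sequential cutoff policy,full" maps to SeqCutOffPolicy.from_name("full") the same way.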
def get_casadm_version(): def get_casadm_version():
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split('\n') casadm_output = casadm.print_version(OutputFormat.csv).stdout.split("\n")
version_str = casadm_output[1].split(',')[-1] version_str = casadm_output[1].split(",")[-1]
return CasVersion.from_version_string(version_str) return CasVersion.from_version_string(version_str)
@ -305,10 +204,12 @@ def get_core_info_by_path(core_disk_path):
output = casadm.list_caches(OutputFormat.csv, by_id_path=True) output = casadm.list_caches(OutputFormat.csv, by_id_path=True)
reader = csv.DictReader(io.StringIO(output.stdout)) reader = csv.DictReader(io.StringIO(output.stdout))
for row in reader: for row in reader:
if row['type'] == "core" and row['disk'] == core_disk_path: if row["type"] == "core" and row["disk"] == core_disk_path:
return {"core_id": row['id'], return {
"core_device": row['disk'], "core_id": row["id"],
"status": row['status'], "core_device": row["disk"],
"exp_obj": row['device']} "status": row["status"],
"exp_obj": row["device"],
}
return None return None
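A sketch of the DictReader row shape the helper above relies on; only the keys it actually reads are listed and every value is invented:
row = {
    "type": "core",
    "id": "1",
    "disk": "/dev/disk/by-id/example-core",  # hypothetical by-id path
    "status": "Active",
    "device": "/dev/cas1-1",                 # hypothetical exported object
}
# A matching row comes back as {"core_id": ..., "core_device": ..., "status": ..., "exp_obj": ...};
# when no row matches, the function returns None.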

View File

@ -1,5 +1,6 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -11,88 +12,115 @@ casadm_bin = "casadm"
casctl = "casctl" casctl = "casctl"
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False): def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False) -> str:
command = f" -A -i {cache_id} -d {core_dev}" if shortcut \ command = " -A " if shortcut else " --add-core"
else f" --add-core --cache-id {cache_id} --core-device {core_dev}" command += (" -i " if shortcut else " --cache-id ") + cache_id
if core_id is not None: command += (" -d " if shortcut else " --core-device ") + core_dev
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str = None):
command = f"{casadm_bin} --script --add-core --try-add --cache-id {cache_id} " \
f"--core-device {core_dev}"
if core_id: if core_id:
command += f" --core-id {core_id}" command += (" -j " if shortcut else " --core-id ") + core_id
return command return casadm_bin + command
def script_purge_cache_cmd(cache_id: str): def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str = None) -> str:
return f"{casadm_bin} --script --purge-cache --cache-id {cache_id}" command = " --script --add-core --try-add"
command += " --cache-id " + cache_id
command += " --core-device " + core_dev
if core_id:
command += " --core-id " + core_id
return casadm_bin + command
def script_purge_core_cmd(cache_id: str, core_id: str): def script_purge_cache_cmd(cache_id: str) -> str:
return f"{casadm_bin} --script --purge-core --cache-id {cache_id} --core-id {core_id}" command = "--script --purge-cache"
command += " --cache-id " + cache_id
return casadm_bin + command
def script_detach_core_cmd(cache_id: str, core_id: str): def script_purge_core_cmd(cache_id: str, core_id: str) -> str:
return f"{casadm_bin} --script --remove-core --detach --cache-id {cache_id} " \ command = "--script --purge-core"
f"--core-id {core_id}" command += " --cache-id " + cache_id
command += " --core-id " + core_id
return casadm_bin + command
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False): def script_detach_core_cmd(cache_id: str, core_id: str) -> str:
command = f"{casadm_bin} --script --remove-core --cache-id {cache_id} --core-id {core_id}" command = "--script --remove-core --detach"
command += " --cache-id " + cache_id
command += " --core-id " + core_id
return casadm_bin + command
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False) -> str:
command = "--script --remove-core"
command += " --cache-id " + cache_id
command += " --core-id " + core_id
if no_flush: if no_flush:
command += ' --no-flush' command += " --no-flush"
return command return casadm_bin + command
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False): def remove_core_cmd(
command = f" -R -i {cache_id} -j {core_id}" if shortcut \ cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
else f" --remove-core --cache-id {cache_id} --core-id {core_id}" ) -> str:
command = " -R " if shortcut else " --remove-core"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -j " if shortcut else " --core-id ") + core_id
if force: if force:
command += " -f" if shortcut else " --force" command += " -f" if shortcut else " --force"
return casadm_bin + command return casadm_bin + command
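Doctest-style composition of the rewritten builder (ids invented); the shortcut form produces a harmless double space after "-R":
remove_core_cmd("1", "2")
# -> "casadm --remove-core --cache-id 1 --core-id 2"
remove_core_cmd("1", "2", force=True, shortcut=True)
# -> "casadm -R  -i 1 -j 2 -f"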
def remove_inactive_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False): def remove_inactive_cmd(
command = f" --remove-inactive {'-i' if shortcut else '--cache-id'} {cache_id} " \ cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
f"{'-j' if shortcut else '--core-id'} {core_id}" ) -> str:
command = " --remove-inactive"
command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -j " if shortcut else " --core-id ") + core_id
if force: if force:
command += " -f" if shortcut else " --force" command += " -f" if shortcut else " --force"
return casadm_bin + command return casadm_bin + command
def remove_detached_cmd(core_device: str, shortcut: bool = False): def remove_detached_cmd(core_device: str, shortcut: bool = False) -> str:
command = " --remove-detached" + (" -d " if shortcut else " --device ") + core_device command = " --remove-detached"
command += (" -d " if shortcut else " --device ") + core_device
return casadm_bin + command return casadm_bin + command
def help_cmd(shortcut: bool = False): def help_cmd(shortcut: bool = False) -> str:
return casadm_bin + (" -H" if shortcut else " --help") command = " -H" if shortcut else " --help"
return casadm_bin + command
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False): def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False) -> str:
command = (" -Z -i " if shortcut else " --reset-counters --cache-id ") + cache_id command = " -Z" if shortcut else " --reset-counters"
command += (" -i " if shortcut else " --cache-id ") + cache_id
if core_id is not None: if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command return casadm_bin + command
def flush_cache_cmd(cache_id: str, shortcut: bool = False): def flush_cache_cmd(cache_id: str, shortcut: bool = False) -> str:
command = (" -F -i " if shortcut else " --flush-cache --cache-id ") + cache_id command = " -F" if shortcut else " --flush-cache"
command += (" -i " if shortcut else " --cache-id ") + cache_id
return casadm_bin + command return casadm_bin + command
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False): def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False) -> str:
command = (f" -F -i {cache_id} -j {core_id}" if shortcut command = " -F" if shortcut else " --flush-cache"
else f" --flush-cache --cache-id {cache_id} --core-id {core_id}") command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command return casadm_bin + command
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None, def start_cmd(
cache_id: str = None, force: bool = False, cache_dev: str,
load: bool = False, shortcut: bool = False): cache_mode: str = None,
cache_line_size: str = None,
cache_id: str = None,
force: bool = False,
load: bool = False,
shortcut: bool = False,
) -> str:
command = " -S" if shortcut else " --start-cache" command = " -S" if shortcut else " --start-cache"
command += (" -d " if shortcut else " --cache-device ") + cache_dev command += (" -d " if shortcut else " --cache-device ") + cache_dev
if cache_mode is not None: if cache_mode is not None:
@ -108,8 +136,13 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = Non
return casadm_bin + command return casadm_bin + command
def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str, def standby_init_cmd(
force: bool = False, shortcut: bool = False): cache_dev: str,
cache_id: str,
cache_line_size: str,
force: bool = False,
shortcut: bool = False,
) -> str:
command = " --standby --init" command = " --standby --init"
command += (" -d " if shortcut else " --cache-device ") + cache_dev command += (" -d " if shortcut else " --cache-device ") + cache_dev
command += (" -i " if shortcut else " --cache-id ") + cache_id command += (" -i " if shortcut else " --cache-id ") + cache_id
@ -119,229 +152,275 @@ def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
return casadm_bin + command return casadm_bin + command
def standby_load_cmd(cache_dev: str, shortcut: bool = False): def standby_load_cmd(cache_dev: str, shortcut: bool = False) -> str:
command = " --standby --load" command = " --standby --load"
command += (" -d " if shortcut else " --cache-device ") + cache_dev command += (" -d " if shortcut else " --cache-device ") + cache_dev
return casadm_bin + command return casadm_bin + command
def standby_detach_cmd(cache_id: str, shortcut: bool = False): def standby_detach_cmd(cache_id: str, shortcut: bool = False) -> str:
command = " --standby --detach" command = " --standby --detach"
command += (" -i " if shortcut else " --cache-id ") + cache_id command += (" -i " if shortcut else " --cache-id ") + cache_id
return casadm_bin + command return casadm_bin + command
def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False): def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False) -> str:
command = " --standby --activate" command = " --standby --activate"
command += (" -d " if shortcut else " --cache-device ") + cache_dev command += (" -d " if shortcut else " --cache-device ") + cache_dev
command += (" -i " if shortcut else " --cache-id ") + cache_id command += (" -i " if shortcut else " --cache-id ") + cache_id
return casadm_bin + command return casadm_bin + command
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False, def print_statistics_cmd(
io_class_id: str = None, filter: str = None, cache_id: str,
output_format: str = None, by_id_path: bool = True, core_id: str = None,
shortcut: bool = False): io_class_id: str = None,
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id filter: str = None,
if core_id is not None: output_format: str = None,
by_id_path: bool = True,
shortcut: bool = False,
) -> str:
command = " -P" if shortcut else " --stats"
command += (" -i " if shortcut else " --cache-id ") + cache_id
if core_id:
command += (" -j " if shortcut else " --core-id ") + core_id command += (" -j " if shortcut else " --core-id ") + core_id
if per_io_class: if io_class_id:
command += " -d" if shortcut else " --io-class-id" command += (" -d " if shortcut else " --io-class-id ") + io_class_id
if io_class_id is not None: if filter:
command += " " + io_class_id
elif io_class_id is not None:
raise Exception("Per io class flag not set but ID given.")
if filter is not None:
command += (" -f " if shortcut else " --filter ") + filter command += (" -f " if shortcut else " --filter ") + filter
if output_format is not None: if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path: if by_id_path:
command += (" -b " if shortcut else " --by-id-path ") command += " -b " if shortcut else " --by-id-path "
return casadm_bin + command return casadm_bin + command
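Composition example for the statistics builder (arguments invented); with the truthiness checks replacing "is not None", ids are expected as non-empty strings:
print_statistics_cmd(cache_id="1", io_class_id="0", filter="usage", output_format="csv")
# -> "casadm --stats --cache-id 1 --io-class-id 0 --filter usage --output-format csv --by-id-path "
#    (the trailing space comes from the --by-id-path branch)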
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False): def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False) -> str:
command = " --zero-metadata" command = " --zero-metadata"
command += (" -d " if shortcut else " --device ") + cache_dev command += (" -d " if shortcut else " --device ") + cache_dev
if force: if force:
command += (" -f" if shortcut else " --force") command += " -f" if shortcut else " --force"
return casadm_bin + command return casadm_bin + command
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False): def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False) -> str:
command = " -T " if shortcut else " --stop-cache" command = " -T" if shortcut else " --stop-cache"
command += (" -i " if shortcut else " --cache-id ") + cache_id command += (" -i " if shortcut else " --cache-id ") + cache_id
if no_data_flush: if no_data_flush:
command += " --no-data-flush" command += " --no-data-flush"
return casadm_bin + command return casadm_bin + command
def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False): def list_caches_cmd(
output_format: str = None, by_id_path: bool = True, shortcut: bool = False
) -> str:
command = " -L" if shortcut else " --list-caches" command = " -L" if shortcut else " --list-caches"
if output_format == "table" or output_format == "csv": if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path: if by_id_path:
command += (" -b " if shortcut else " --by-id-path ") command += " -b" if shortcut else " --by-id-path"
return casadm_bin + command return casadm_bin + command
def load_cmd(cache_dev: str, shortcut: bool = False): def load_cmd(cache_dev: str, shortcut: bool = False) -> str:
return start_cmd(cache_dev, load=True, shortcut=shortcut) return start_cmd(cache_dev=cache_dev, load=True, shortcut=shortcut)
def version_cmd(output_format: str = None, shortcut: bool = False): def version_cmd(output_format: str = None, shortcut: bool = False) -> str:
command = " -V" if shortcut else " --version" command = " -V" if shortcut else " --version"
if output_format == "table" or output_format == "csv": if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command return casadm_bin + command
def set_cache_mode_cmd(cache_mode: str, cache_id: str, def set_cache_mode_cmd(
flush_cache: str = None, shortcut: bool = False): cache_mode: str, cache_id: str, flush_cache: str = None, shortcut: bool = False
command = f" -Q -c {cache_mode} -i {cache_id}" if shortcut else \ ) -> str:
f" --set-cache-mode --cache-mode {cache_mode} --cache-id {cache_id}" command = (" -Q -c" if shortcut else " --set-cache-mode --cache-mode ") + cache_mode
command += (" -i " if shortcut else " --cache-id ") + cache_id
if flush_cache: if flush_cache:
command += (" -f " if shortcut else " --flush-cache ") + flush_cache command += (" -f " if shortcut else " --flush-cache ") + flush_cache
return casadm_bin + command return casadm_bin + command
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False): def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False) -> str:
command = f" -C -C -i {cache_id} -f {file}" if shortcut else \ command = " -C -C" if shortcut else " --io-class --load-config"
f" --io-class --load-config --cache-id {cache_id} --file {file}" command += (" -i " if shortcut else " --cache-id ") + cache_id
command += (" -f " if shortcut else " --file ") + file
return casadm_bin + command return casadm_bin + command
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False): def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False) -> str:
command = f" -C -L -i {cache_id} -o {output_format}" if shortcut else \ command = " -C -L" if shortcut else " --io-class --list"
f" --io-class --list --cache-id {cache_id} --output-format {output_format}" command += (" -i " if shortcut else " --cache-id ") + cache_id
return casadm_bin + command
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
additional_params: str = None, shortcut: bool = False):
command = f" -G -n {namespace} -i {cache_id}" if shortcut else\
f" --get-param --name {namespace} --cache-id {cache_id}"
if additional_params is not None:
command += additional_params
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command return casadm_bin + command
def get_param_cutoff_cmd(cache_id: str, core_id: str, def _get_param_cmd(
output_format: str = None, shortcut: bool = False): name: str,
add_param = (" -j " if shortcut else " --core-id ") + core_id cache_id: str,
return _get_param_cmd(namespace="seq-cutoff", cache_id=cache_id, output_format=output_format, output_format: str = None,
additional_params=add_param, shortcut=shortcut) shortcut: bool = False,
) -> str:
command = (" -G -n" if shortcut else " --get-param --name ") + name
command += (" -i " if shortcut else " --cache-id ") + cache_id
if output_format:
command += (" -o " if shortcut else " --output-format ") + output_format
return command
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False): def get_param_cutoff_cmd(
return _get_param_cmd(namespace="cleaning", cache_id=cache_id, cache_id: str, core_id: str, output_format: str = None, shortcut: bool = False
output_format=output_format, shortcut=shortcut) ) -> str:
name = "seq-cutoff"
command = _get_param_cmd(
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False): name=name,
return _get_param_cmd(namespace="cleaning-alru", cache_id=cache_id, cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
output_format=output_format,
shortcut=shortcut,
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False): )
return _get_param_cmd(namespace="cleaning-acp", cache_id=cache_id, command += (" -j " if shortcut else " --core-id ") + core_id
output_format=output_format, shortcut=shortcut)
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
shortcut: bool = False):
command = f" -X -n {namespace} -i {cache_id}" if shortcut else\
f" --set-param --name {namespace} --cache-id {cache_id}"
command += additional_params
return casadm_bin + command return casadm_bin + command
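How the private helper and its public wrapper compose (ids invented): _get_param_cmd returns only the option string, and the wrapper appends --core-id and prepends the binary:
get_param_cutoff_cmd(cache_id="1", core_id="2", output_format="csv")
# -> "casadm --get-param --name seq-cutoff --cache-id 1 --output-format csv --core-id 2"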
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None, def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False) -> str:
policy: str = None, promotion_count: str = None, shortcut: bool = False): name = "cleaning"
add_params = "" command = _get_param_cmd(
if core_id is not None: name=name, cache_id=cache_id, output_format=output_format, shortcut=shortcut
add_params += (" -j " if shortcut else " --core-id ") + str(core_id) )
if threshold is not None: return casadm_bin + command
add_params += (" -t " if shortcut else " --threshold ") + str(threshold)
if policy is not None:
add_params += (" -p " if shortcut else " --policy ") + policy
if promotion_count is not None:
add_params += " --promotion-count " + str(promotion_count)
return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False): def get_param_cleaning_alru_cmd(
add_params = (" -p " if shortcut else " --policy ") + policy cache_id: str, output_format: str = None, shortcut: bool = False
return _set_param_cmd(namespace="promotion", cache_id=cache_id, ) -> str:
additional_params=add_params, shortcut=shortcut) name = "cleaning-alru"
command = _get_param_cmd(
name=name, cache_id=cache_id, output_format=output_format, shortcut=shortcut
)
return casadm_bin + command
def get_param_cleaning_acp_cmd(
cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
name = "cleaning-acp"
command = _get_param_cmd(
name=name, cache_id=cache_id, output_format=output_format, shortcut=shortcut
)
return casadm_bin + command
def _set_param_cmd(name: str, cache_id: str, shortcut: bool = False) -> str:
command = (" X -n" if shortcut else " --set-param --name ") + name
command += (" -i " if shortcut else " --cache-id ") + cache_id
return command
def set_param_cutoff_cmd(
cache_id: str,
core_id: str = None,
threshold: str = None,
policy: str = None,
promotion_count: str = None,
shortcut: bool = False,
) -> str:
name = "seq-cutoff"
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
if core_id:
command += (" -j " if shortcut else " --core-id ") + core_id
if threshold:
command += (" -t " if shortcut else " --threshold ") + threshold
if policy:
command += (" -p " if shortcut else " --policy ") + policy
if promotion_count:
command += " --promotion-count " + promotion_count
return casadm_bin + command
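The set-param counterpart composes the same way (values invented):
set_param_cutoff_cmd(cache_id="1", core_id="2", threshold="4096", policy="never")
# -> "casadm --set-param --name seq-cutoff --cache-id 1 --core-id 2 --threshold 4096 --policy never"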
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
name = "promotion"
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
command += (" -p " if shortcut else " --policy ") + policy
return casadm_bin + command
def set_param_promotion_nhit_cmd( def set_param_promotion_nhit_cmd(
cache_id: str, threshold=None, trigger=None, shortcut: bool = False cache_id: str, threshold: str = None, trigger: str = None, shortcut: bool = False
): ) -> str:
add_params = "" name = "promotion-nhit"
if threshold is not None: command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
add_params += (" -t " if shortcut else " --threshold ") + str(threshold) if threshold:
command += (" -t " if shortcut else " --threshold ") + threshold
if trigger is not None: if trigger is not None:
add_params += (" -o " if shortcut else " --trigger ") + str(trigger) command += (" -o " if shortcut else " --trigger ") + trigger
return _set_param_cmd(namespace="promotion-nhit", cache_id=cache_id, return casadm_bin + command
additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False): def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
add_params = (" -p " if shortcut else " --policy ") + policy name = "cleaning"
return _set_param_cmd(namespace="cleaning", cache_id=cache_id, command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
additional_params=add_params, shortcut=shortcut) command += (" -p " if shortcut else " --policy ") + policy
return casadm_bin + command
def set_param_cleaning_alru_cmd(cache_id, wake_up=None, staleness_time=None, def set_param_cleaning_alru_cmd(
flush_max_buffers=None, activity_threshold=None, cache_id: str,
shortcut: bool = False): wake_up: str = None,
add_param = "" staleness_time: str = None,
flush_max_buffers: str = None,
activity_threshold: str = None,
shortcut: bool = False,
) -> str:
name = "cleaning-alru"
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
if wake_up:
command += (" -w " if shortcut else " --wake-up ") + wake_up
if staleness_time:
command += (" -s " if shortcut else " --staleness-time ") + staleness_time
if flush_max_buffers:
command += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
if activity_threshold:
command += (" -t " if shortcut else " --activity-threshold ") + activity_threshold
return casadm_bin + command
def set_param_cleaning_acp_cmd(
cache_id: str,
wake_up: str = None,
flush_max_buffers: str = None,
shortcut: bool = False,
) -> str:
name = "cleaning-acp"
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
if wake_up is not None: if wake_up is not None:
add_param += (" -w " if shortcut else " --wake-up ") + str(wake_up) command += (" -w " if shortcut else " --wake-up ") + wake_up
if staleness_time is not None:
add_param += (" -s " if shortcut else " --staleness-time ") + str(staleness_time)
if flush_max_buffers is not None: if flush_max_buffers is not None:
add_param += (" -b " if shortcut else " --flush-max-buffers ") + str(flush_max_buffers) command += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
if activity_threshold is not None: return casadm_bin + command
add_param += (" -t " if shortcut else " --activity-threshold ") + str(activity_threshold)
return _set_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
additional_params=add_param, shortcut=shortcut)
def set_param_cleaning_acp_cmd(cache_id: str, wake_up: str = None, def ctl_help(shortcut: bool = False) -> str:
flush_max_buffers: str = None, shortcut: bool = False): command = " --help" if shortcut else " -h"
add_param = "" return casctl + command
if wake_up is not None:
add_param += (" -w " if shortcut else " --wake-up ") + wake_up
if flush_max_buffers is not None:
add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
return _set_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
additional_params=add_param, shortcut=shortcut)
def ctl_help(shortcut: bool = False): def ctl_start() -> str:
return casctl + " --help" if shortcut else " -h" command = " start"
return casctl + command
def ctl_start(): def ctl_stop(flush: bool = False) -> str:
return casctl + " start" command = " stop"
def ctl_stop(flush: bool = False):
command = casctl + " stop"
if flush: if flush:
command += " --flush" command += " --flush"
return command return casctl + command
def ctl_init(force: bool = False): def ctl_init(force: bool = False) -> str:
command = casctl + " init" command = " init"
if force: if force:
command += " --force" command += " --force"
return command return casctl + command
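The casctl wrappers compose analogously, e.g.:
ctl_init(force=True)  # -> "casctl init --force"
ctl_stop(flush=True)  # -> "casctl stop --flush"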

View File

@ -9,6 +9,8 @@ casadm_help = [
r"Usage: casadm \<command\> \[option\.\.\.\]", r"Usage: casadm \<command\> \[option\.\.\.\]",
r"Available commands:", r"Available commands:",
r"-S --start-cache Start new cache instance or load using metadata", r"-S --start-cache Start new cache instance or load using metadata",
r"--attach-cache Attach cache device",
r"--detach-cache Detach cache device",
r"-T --stop-cache Stop cache instance", r"-T --stop-cache Stop cache instance",
r"-X --set-param Set various runtime parameters", r"-X --set-param Set various runtime parameters",
r"-G --get-param Get various runtime parameters", r"-G --get-param Get various runtime parameters",
@ -29,21 +31,19 @@ casadm_help = [
r"e\.g\.", r"e\.g\.",
r"casadm --start-cache --help", r"casadm --start-cache --help",
r"For more information, please refer to manual, Admin Guide \(man casadm\)", r"For more information, please refer to manual, Admin Guide \(man casadm\)",
r"or go to support page \<https://open-cas\.github\.io\>\." r"or go to support page \<https://open-cas\.github\.io\>\.",
] ]
help_help = [ help_help = [r"Usage: casadm --help", r"Print help"]
r"Usage: casadm --help",
r"Print help"
]
version_help = [ version_help = [
r"Usage: casadm --version \[option\.\.\.\]", r"Usage: casadm --version \[option\.\.\.\]",
r"Print CAS version", r"Print CAS version",
r"Options that are valid with --version \(-V\) are:" r"Options that are valid with --version \(-V\) are:"
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}" r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
] ]
ioclass_help = [ ioclass_help = [
r"Usage: casadm --io-class \{--load-config|--list\}", r"Usage: casadm --io-class \{--load-config|--list\}",
r"Manage IO classes", r"Manage IO classes",
@ -56,7 +56,7 @@ ioclass_help = [
r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]", r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]",
r"Options that are valid with --list \(-L\) are:", r"Options that are valid with --list \(-L\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}" r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
] ]
flush_cache_help = [ flush_cache_help = [
@ -64,7 +64,8 @@ flush_cache_help = [
r"Flush all dirty data from the caching device to core devices", r"Flush all dirty data from the caching device to core devices",
r"Options that are valid with --flush-cache \(-F\) are:", r"Options that are valid with --flush-cache \(-F\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache instance" r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache "
r"instance",
] ]
reset_counters_help = [ reset_counters_help = [
@ -73,7 +74,7 @@ reset_counters_help = [
r"Options that are valid with --reset-counters \(-Z\) are:", r"Options that are valid with --reset-counters \(-Z\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache " r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance. If not specified, statistics are reset for all cores in cache instance\." r"instance\. If not specified, statistics are reset for all cores in cache instance\.",
] ]
stats_help = [ stats_help = [
@ -82,26 +83,26 @@ stats_help = [
r"Options that are valid with --stats \(-P\) are:", r"Options that are valid with --stats \(-P\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones " r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones "
r"pertaining to a specific core. If this option is not given, casadm will display statistics " r"pertaining to a specific core\. If this option is not given, casadm will display statistics "
r"pertaining to all cores assigned to given cache instance\.", r"pertaining to all cores assigned to given cache instance\.",
r"-d --io-class-id \[\<ID\>\] Display per IO class statistics", r"-d --io-class-id \[\<ID\>\] Display per IO class statistics",
r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: " r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: "
r"\{all, conf, usage, req, blk, err\}", r"\{all, conf, usage, req, blk, err\}",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}" r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
] ]
list_help = [ list_caches_help = [
r"Usage: casadm --list-caches \[option\.\.\.\]", r"Usage: casadm --list-caches \[option\.\.\.\]",
r"List all cache instances and core devices", r"List all cache instances and core devices",
r"Options that are valid with --list-caches \(-L\) are:", r"Options that are valid with --list-caches \(-L\) are:",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}" r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
] ]
remove_detached_help = [ remove_detached_help = [
r"Usage: casadm --remove-detached --device \<DEVICE\>", r"Usage: casadm --remove-detached --device \<DEVICE\>",
r"Remove core device from core pool", r"Remove core device from core pool",
r"Options that are valid with --remove-detached are:", r"Options that are valid with --remove-detached are:",
r"-d --device \<DEVICE\> Path to core device" r"-d --device \<DEVICE\> Path to core device",
] ]
remove_core_help = [ remove_core_help = [
@ -111,7 +112,7 @@ remove_core_help = [
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache " r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance", r"instance",
r"-f --force Force active core removal without data flush" r"-f --force Force active core removal without data flush",
] ]
add_core_help = [ add_core_help = [
@ -121,18 +122,17 @@ add_core_help = [
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache " r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance", r"instance",
r"-d --core-device \<DEVICE\> Path to core device" r"-d --core-device \<DEVICE\> Path to core device",
] ]
set_cache_mode_help = [ set_cache_mode_help = [
r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]", r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]",
r"Set cache mode", r"Set cache mode",
r"Options that are valid with --set-cache-mode \(-Q\) are:", r"Options that are valid with --set-cache-mode \(-Q\) are:",
r"-c --cache-mode \<NAME\> Cache mode. Available cache modes: \{wt|wb|wa|pt|wo\}", r"-c --cache-mode \<NAME\> Cache mode\. Available cache modes: \{wt|wb|wa|pt|wo\}",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching " r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching "
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode" r"to new mode\. Option is required when switching from Write-Back or Write-Only mode",
] ]
get_params_help = [ get_params_help = [
@ -164,7 +164,7 @@ get_params_help = [
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}", r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
r"Options that are valid with --get-param \(-G\) --name \(-n\) promotion-nhit are:", r"Options that are valid with --get-param \(-G\) --name \(-n\) promotion-nhit are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}" r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
] ]
set_params_help = [ set_params_help = [
@ -182,15 +182,15 @@ set_params_help = [
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache " r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
r"instance", r"instance",
r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]", r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]",
r"-p --policy \<POLICY\> Sequential cutoff policy. Available policies: " r"-p --policy \<POLICY\> Sequential cutoff policy\. Available policies: "
r"\{always|full|never\}", r"\{always|full|never\}",
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:", r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-p --policy \<POLICY\> Cleaning policy type. Available policy types: " r"-p --policy \<POLICY\> Cleaning policy type\. Available policy types: "
r"\{nop|alru|acp\}", r"\{nop|alru|acp\}",
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:", r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-p --policy \<POLICY\> Promotion policy type. Available policy types: " r"-p --policy \<POLICY\> Promotion policy type\. Available policy types: "
r"\{always|nhit\}", r"\{always|nhit\}",
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:", r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
@ -213,15 +213,16 @@ set_params_help = [
r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations " r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations "
r"\<0-10000\>\[ms\] \(default: 10 ms\)", r"\<0-10000\>\[ms\] \(default: 10 ms\)",
r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning " r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning "
r"thread iteration \<1-10000\> \(default: 128\)" r"thread iteration \<1-10000\> \(default: 128\)",
] ]
stop_cache_help = [ stop_cache_help = [
r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]", r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]",
r"Stop cache instance", r"Stop cache instance",
r"Options that are valid with --stop-cache \(-T\) are:", r"Options that are valid with --stop-cache \(-T\) are:",
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>", r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)" r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)",
] ]
start_cache_help = [ start_cache_help = [
@ -238,7 +239,7 @@ start_cache_help = [
r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; " r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; "
r"without this parameter Write-Through will be set by default", r"without this parameter Write-Through will be set by default",
r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: " r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: "
r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)" r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)",
] ]
standby_help = [ standby_help = [
@ -246,17 +247,16 @@ standby_help = [
] ]
zero_metadata_help = [ zero_metadata_help = [
r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option...\]", r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option\.\.\.\]]",
r"Clear metadata from caching device", r"Clear metadata from caching device",
r"Options that are valid with --zero-metadata are:", r"Options that are valid with --zero-metadata are:",
r"-d --device \<DEVICE\> Path to device on which metadata would be cleared", r"-d --device \<DEVICE\> Path to device on which metadata would be cleared",
r"-f --force Ignore potential dirty data on cache device" r"-f --force Ignore potential dirty data on cache device",
] ]
unrecognized_stderr = [ unrecognized_stderr = [
r"Unrecognized command -\S+", r"Unrecognized command -\S+",
] ]
unrecognized_stdout = [ unrecognized_stdout = [r"Try \`casadm --help | -H\' for more information\."]
r"Try \`casadm --help | -H\' for more information\."
]

View File

@ -1,5 +1,6 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -18,32 +19,30 @@ start_cache_with_existing_metadata = [
r"Error inserting cache \d+", r"Error inserting cache \d+",
r"Old metadata found on device\.", r"Old metadata found on device\.",
r"Please load cache metadata using --load option or use --force to", r"Please load cache metadata using --load option or use --force to",
r" discard on-disk metadata and start fresh cache instance\." r" discard on-disk metadata and start fresh cache instance\.",
] ]
start_cache_on_already_used_dev = [ start_cache_on_already_used_dev = [
r"Error inserting cache \d+", r"Error inserting cache \d+",
r"Cache device \'\/dev\/\S+\' is already used as cache\." r"Cache device \'\/dev\/\S+\' is already used as cache\.",
] ]
start_cache_with_existing_id = [ start_cache_with_existing_id = [
r"Error inserting cache \d+", r"Error inserting cache \d+",
r"Cache ID already exists" r"Cache ID already exists",
] ]
standby_init_with_existing_filesystem = [ standby_init_with_existing_filesystem = [
r"A filesystem exists on \S+. Specify the --force option if you wish to add the cache anyway.", r"A filesystem exists on \S+. Specify the --force option if you wish to add the cache anyway.",
r"Note: this may result in loss of data" r"Note: this may result in loss of data",
] ]
error_inserting_cache = [ error_inserting_cache = [r"Error inserting cache \d+"]
r"Error inserting cache \d+"
]
reinitialize_with_force_or_recovery = [ reinitialize_with_force_or_recovery = [
r"Old metadata found on device\.", r"Old metadata found on device\.",
r"Please load cache metadata using --load option or use --force to", r"Please load cache metadata using --load option or use --force to",
r" discard on-disk metadata and start fresh cache instance\." r" discard on-disk metadata and start fresh cache instance\.",
] ]
remove_inactive_core_with_remove_command = [ remove_inactive_core_with_remove_command = [
@ -52,40 +51,36 @@ remove_inactive_core_with_remove_command = [
remove_inactive_dirty_core = [ remove_inactive_dirty_core = [
r"The cache contains dirty data assigned to the core\. If you want to ", r"The cache contains dirty data assigned to the core\. If you want to ",
r"continue, please use --force option\.\nWarning: the data will be lost" r"continue, please use --force option\.\nWarning: the data will be lost",
] ]
stop_cache_incomplete = [ stop_cache_incomplete = [
r"Error while removing cache \d+", r"Error while removing cache \d+",
r"Cache is in incomplete state - at least one core is inactive" r"Cache is in incomplete state - at least one core is inactive",
] ]
stop_cache_errors = [ stop_cache_errors = [
r"Removed cache \d+ with errors", r"Removed cache \d+ with errors",
r"Error while writing to cache device" r"Error while writing to cache device",
] ]
get_stats_ioclass_id_not_configured = [ get_stats_ioclass_id_not_configured = [r"IO class \d+ is not configured\."]
r"IO class \d+ is not configured\."
]
get_stats_ioclass_id_out_of_range = [ get_stats_ioclass_id_out_of_range = [r"Invalid IO class id, must be in the range 0-32\."]
r"Invalid IO class id, must be in the range 0-32\."
]
remove_multilevel_core = [ remove_multilevel_core = [
r"Error while removing core device \d+ from cache instance \d+", r"Error while removing core device \d+ from cache instance \d+",
r"Device opens or mount are pending to this cache" r"Device opens or mount are pending to this cache",
] ]
add_cached_core = [ add_cached_core = [
r"Error while adding core device to cache instance \d+", r"Error while adding core device to cache instance \d+",
r"Core device \'/dev/\S+\' is already cached\." r"Core device \'/dev/\S+\' is already cached\.",
] ]
already_cached_core = [ already_cached_core = [
r"Error while adding core device to cache instance \d+", r"Error while adding core device to cache instance \d+",
r"Device already added as a core" r"Device already added as a core",
] ]
remove_mounted_core = [ remove_mounted_core = [
@ -94,37 +89,31 @@ remove_mounted_core = [
stop_cache_mounted_core = [ stop_cache_mounted_core = [
r"Error while removing cache \d+", r"Error while removing cache \d+",
r"Device opens or mount are pending to this cache" r"Device opens or mount are pending to this cache",
] ]
load_and_force = [ load_and_force = [
r"Use of \'load\' with \'force\', \'cache-id\', \'cache-mode\' or \'cache-line-size\'", r"Use of \'load\' with \'force\', \'cache-id\', \'cache-mode\' or \'cache-line-size\'",
r" simultaneously is forbidden." r" simultaneously is forbidden.",
] ]
try_add_core_sector_size_mismatch = [ try_add_core_sector_size_mismatch = [
r"Error while adding core device to cache instance \d+", r"Error while adding core device to cache instance \d+",
r"Cache device logical sector size is greater than core device logical sector size\.", r"Cache device logical sector size is greater than core device logical sector size\.",
r"Consider changing logical sector size on current cache device", r"Consider changing logical sector size on current cache device",
r"or try other device with the same logical sector size as core device\." r"or try other device with the same logical sector size as core device\.",
] ]
no_caches_running = [ no_caches_running = [r"No caches running"]
r"No caches running"
]
unavailable_device = [ unavailable_device = [
r"Error while opening \'\S+\'exclusively\. This can be due to\n" r"Error while opening \'\S+\'exclusively\. This can be due to\n"
r"cache instance running on this device\. In such case please stop the cache and try again\." r"cache instance running on this device\. In such case please stop the cache and try again\."
] ]
error_handling = [ error_handling = [r"Error during options handling"]
r"Error during options handling"
]
no_cas_metadata = [ no_cas_metadata = [r"Device \'\S+\' does not contain OpenCAS's metadata\."]
r"Device \'\S+\' does not contain OpenCAS's metadata\."
]
cache_dirty_data = [ cache_dirty_data = [
r"Cache instance contains dirty data\. Clearing metadata will result in loss of dirty data\.\n" r"Cache instance contains dirty data\. Clearing metadata will result in loss of dirty data\.\n"
@ -140,21 +129,16 @@ cache_dirty_shutdown = [
r"Alternatively, if you wish to clear metadata anyway, please use \'--force\' option\." r"Alternatively, if you wish to clear metadata anyway, please use \'--force\' option\."
] ]
missing_param = [ missing_param = [r"Option \'.+\' is missing"]
r"Option \'.+\' is missing"
]
disallowed_param = [ disallowed_param = [r"Unrecognized option \S+"]
r"Unrecognized option \S+"
]
operation_forbiden_in_standby = [ operation_forbiden_in_standby = [
r"The operation is not permited while the cache is in the standby mode" r"The operation is not permited while the cache is in the standby mode"
] ]
mutually_exclusive_params_init = [ mutually_exclusive_params_init = [
r"Can\'t use \'load\' and \'init\' options simultaneously\n" r"Can\'t use \'load\' and \'init\' options simultaneously\n" r"Error during options handling"
r"Error during options handling"
] ]
mutually_exclusive_params_load = [ mutually_exclusive_params_load = [
@ -166,30 +150,22 @@ activate_with_different_cache_id = [
r"Cache id specified by user and loaded from metadata are different" r"Cache id specified by user and loaded from metadata are different"
] ]
cache_activated_successfully = [ cache_activated_successfully = [r"Successfully activated cache instance \d+"]
r"Successfully activated cache instance \d+"
]
invalid_core_volume_size = [ invalid_core_volume_size = [r"Core volume size does not match the size stored in cache metadata"]
r"Core volume size does not match the size stored in cache metadata"
]
error_activating_cache = [ error_activating_cache = [r"Error activating cache \d+"]
r"Error activating cache \d+"
]
activate_without_detach = [ activate_without_detach = [
r"Cannot open the device exclusively. Make sure to detach cache before activation." r"Cannot open the device exclusively. Make sure to detach cache before activation."
] ]
cache_line_size_mismatch = [ cache_line_size_mismatch = [r"Cache line size mismatch"]
r"Cache line size mismatch"
]
headerless_io_class_config = [ headerless_io_class_config = [
r'Cannot parse configuration file - unknown column "1"\.\n' r'Cannot parse configuration file - unknown column "1"\.\n'
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n' r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
r'Please consult Admin Guide to check how columns in configuration file should be named\.' r"Please consult Admin Guide to check how columns in configuration file should be named\."
] ]
illegal_io_class_config_L2C1 = [ illegal_io_class_config_L2C1 = [
@ -205,9 +181,7 @@ illegal_io_class_config_L2C4 = [
r"Cannot parse configuration file - error in line 2 in column 4 \(Allocation\)\." r"Cannot parse configuration file - error in line 2 in column 4 \(Allocation\)\."
] ]
illegal_io_class_config_L2 = [ illegal_io_class_config_L2 = [r"Cannot parse configuration file - error in line 2\."]
r"Cannot parse configuration file - error in line 2\."
]
double_io_class_config = [ double_io_class_config = [
r"Double configuration for IO class id \d+\n" r"Double configuration for IO class id \d+\n"
@ -243,14 +217,13 @@ illegal_io_class_invalid_allocation_number = [
] ]
malformed_io_class_header = [ malformed_io_class_header = [
r'Cannot parse configuration file - unknown column \"value_template\"\.\n' r"Cannot parse configuration file - unknown column \"value_template\"\.\n"
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n' r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
r'Please consult Admin Guide to check how columns in configuration file should be named\.' r"Please consult Admin Guide to check how columns in configuration file should be named\."
] ]
unexpected_cls_option = [ unexpected_cls_option = [r"Option '--cache-line-size \(-x\)' is not allowed"]
r"Option '--cache-line-size \(-x\)' is not allowed"
]
def check_stderr_msg(output: Output, expected_messages, negate=False): def check_stderr_msg(output: Output, expected_messages, negate=False):
return __check_string_msg(output.stderr, expected_messages, negate) return __check_string_msg(output.stderr, expected_messages, negate)
@ -268,7 +241,8 @@ def __check_string_msg(text: str, expected_messages, negate=False):
TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.") TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.")
msg_ok = False msg_ok = False
elif matches and negate: elif matches and negate:
TestRun.LOGGER.error(f"Message is incorrect, expected to not find: {msg}\n " TestRun.LOGGER.error(
f"actual: {text}.") f"Message is incorrect, expected to not find: {msg}\n " f"actual: {text}."
)
msg_ok = False msg_ok = False
return msg_ok return msg_ok

View File

@ -1,17 +1,18 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
from datetime import timedelta from datetime import timedelta
from typing import List from typing import List
from enum import Enum
from aenum import Enum
from api.cas import casadm from api.cas import casadm
from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy
from api.cas.casadm_params import OutputFormat, StatsFilter from api.cas.casadm_params import StatsFilter
from api.cas.casadm_parser import get_statistics, get_seq_cut_off_parameters, get_core_info_by_path from api.cas.casadm_parser import get_seq_cut_off_parameters, get_core_info_by_path
from api.cas.statistics import CoreStats, CoreIoClassStats from api.cas.statistics import CoreStats, IoClassStats
from core.test_run_utils import TestRun from core.test_run_utils import TestRun
from storage_devices.device import Device from storage_devices.device import Device
from test_tools import fs_utils, disk_utils from test_tools import fs_utils, disk_utils
@ -20,9 +21,9 @@ from test_utils.size import Unit, Size
class CoreStatus(Enum): class CoreStatus(Enum):
empty = 0, empty = 0
active = 1, active = 1
inactive = 2, inactive = 2
detached = 3 detached = 3
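Dropping the trailing commas is behavioral, not cosmetic: with "empty = 0," the member value is the tuple (0,), so lookups by plain int fail. A minimal stdlib illustration (aenum, used above, should behave the same here):
from enum import Enum

class Status(Enum):
    empty = 0,   # value == (0,)
    active = 1   # value == 1

assert Status.empty.value == (0,)
assert Status((0,)) is Status.empty
assert Status(1) is Status.active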
@ -51,27 +52,28 @@ class Core(Device):
super().create_filesystem(fs_type, force, blocksize) super().create_filesystem(fs_type, force, blocksize)
self.core_device.filesystem = self.filesystem self.core_device.filesystem = self.filesystem
def get_io_class_statistics(self, def get_io_class_statistics(
self,
io_class_id: int, io_class_id: int,
stat_filter: List[StatsFilter] = None, stat_filter: List[StatsFilter] = None,
percentage_val: bool = False): percentage_val: bool = False,
stats = get_statistics(self.cache_id, self.core_id, io_class_id, ):
stat_filter, percentage_val) return IoClassStats(
return CoreIoClassStats(stats) cache_id=self.cache_id,
filter=stat_filter,
io_class_id=io_class_id,
percentage_val=percentage_val,
)
def get_statistics(self, def get_statistics(
stat_filter: List[StatsFilter] = None, self, stat_filter: List[StatsFilter] = None, percentage_val: bool = False
percentage_val: bool = False): ) -> CoreStats:
stats = get_statistics(self.cache_id, self.core_id, None, return CoreStats(
stat_filter, percentage_val) cache_id=self.cache_id,
return CoreStats(stats) core_id=self.core_id,
filter=stat_filter,
def get_statistics_flat(self, percentage_val=percentage_val,
io_class_id: int = None, )
stat_filter: List[StatsFilter] = None,
percentage_val: bool = False):
return get_statistics(self.cache_id, self.core_id, io_class_id,
stat_filter, percentage_val)
def get_status(self): def get_status(self):
return CoreStatus[self.__get_core_info()["status"].lower()] return CoreStatus[self.__get_core_info()["status"].lower()]
@ -106,31 +108,30 @@ class Core(Device):
return casadm.reset_counters(self.cache_id, self.core_id) return casadm.reset_counters(self.cache_id, self.core_id)
def flush_core(self): def flush_core(self):
casadm.flush(self.cache_id, self.core_id) casadm.flush_core(self.cache_id, self.core_id)
sync() sync()
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
def purge_core(self): def purge_core(self):
casadm.purge_core(self.cache_id, self.core_id) casadm.purge_core(self.cache_id, self.core_id)
sync() sync()
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters): def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
return casadm.set_param_cutoff(self.cache_id, self.core_id, return casadm.set_param_cutoff(
self.cache_id,
self.core_id,
seq_cutoff_param.threshold, seq_cutoff_param.threshold,
seq_cutoff_param.policy, seq_cutoff_param.policy,
seq_cutoff_param.promotion_count) seq_cutoff_param.promotion_count,
)
def set_seq_cutoff_threshold(self, threshold: Size): def set_seq_cutoff_threshold(self, threshold: Size):
return casadm.set_param_cutoff(self.cache_id, self.core_id, return casadm.set_param_cutoff(self.cache_id, self.core_id, threshold=threshold)
threshold=threshold)
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy): def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
return casadm.set_param_cutoff(self.cache_id, self.core_id, return casadm.set_param_cutoff(self.cache_id, self.core_id, policy=policy)
policy=policy)
def set_seq_cutoff_promotion_count(self, promotion_count: int): def set_seq_cutoff_promotion_count(self, promotion_count: int):
return casadm.set_param_cutoff(self.cache_id, self.core_id, return casadm.set_param_cutoff(self.cache_id, self.core_id, promotion_count=promotion_count)
promotion_count=promotion_count)
def check_if_is_present_in_os(self, should_be_visible=True): def check_if_is_present_in_os(self, should_be_visible=True):
device_in_system_message = "CAS device exists in OS." device_in_system_message = "CAS device exists in OS."
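Below is a minimal, hypothetical usage sketch of the refactored Core API. The core object, the imports and the parameter values are illustrative assumptions, not part of this commit:

    # Illustrative only: assumes `core` was returned by casadm.add_core() and that
    # StatsFilter, SeqCutOffParameters, SeqCutOffPolicy, Size and Unit are imported
    # as elsewhere in the test framework.
    usage = core.get_statistics(stat_filter=[StatsFilter.usage]).usage_stats
    io_class_stats = core.get_io_class_statistics(io_class_id=1, percentage_val=True)

    core.set_seq_cutoff_parameters(
        SeqCutOffParameters(  # keyword constructor assumed
            threshold=Size(1, Unit.MebiByte),
            policy=SeqCutOffPolicy.full,
            promotion_count=8,
        )
    )
    core.flush_core()  # flush dirty data belonging to this core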


@ -5,35 +5,36 @@
import re import re
from test_utils.dmesg import get_dmesg
from test_utils.size import Size, Unit from test_utils.size import Size, Unit
def get_metadata_size_on_device(dmesg): def get_metadata_size_on_device(cache_name: str) -> Size:
for s in dmesg.split("\n"): dmesg_reversed = list(reversed(get_dmesg().split("\n")))
m = re.search(r'Metadata size on device: ([0-9]*) kiB', s) cache_dmesg = "\n".join(line for line in dmesg_reversed if cache_name in line)
if m: try:
return Size(int(m.groups()[0]), Unit.KibiByte) return _get_metadata_info(dmesg=cache_dmesg, section_name="Metadata size on device")
except ValueError:
raise ValueError("Can't find the metadata size in the provided dmesg output") raise ValueError("Can't find the metadata size in dmesg output")
def _get_metadata_info(dmesg, section_name): def _get_metadata_info(dmesg, section_name) -> Size:
for s in dmesg.split("\n"): for s in dmesg.split("\n"):
if section_name in s: if section_name in s:
size, unit = re.search("[0-9]* (B|kiB)", s).group().split() size, unit = re.search("\\d+ (B|kiB)", s).group().split()
unit = Unit.KibiByte if unit == "kiB" else Unit.Byte unit = Unit.KibiByte if unit == "kiB" else Unit.Byte
return Size(int(re.search("[0-9]*", size).group()), unit) return Size(int(re.search("\\d+", size).group()), unit)
raise ValueError(f'"{section_name}" entry doesn\'t exist in the given dmesg output') raise ValueError(f'"{section_name}" entry doesn\'t exist in the given dmesg output')
def get_md_section_size(section_name, dmesg): def get_md_section_size(section_name, dmesg) -> Size:
section_name = section_name.strip() section_name = section_name.strip()
section_name += " size" section_name += " size"
return _get_metadata_info(dmesg, section_name) return _get_metadata_info(dmesg, section_name)
def get_md_section_offset(section_name, dmesg): def get_md_section_offset(section_name, dmesg) -> Size:
section_name = section_name.strip() section_name = section_name.strip()
section_name += " offset" section_name += " offset"
return _get_metadata_info(dmesg, section_name) return _get_metadata_info(dmesg, section_name)
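A short, hypothetical sketch of how the dmesg helpers above might be used; the cache name and the metadata section name are illustrative assumptions:

    # Illustrative only; imports mirror the ones used in this module.
    from api.cas.dmesg import get_metadata_size_on_device, get_md_section_size
    from test_utils.dmesg import get_dmesg

    on_disk_size = get_metadata_size_on_device(cache_name="cache1")
    # "Super block config" is a hypothetical section name; " size" is appended internally.
    section_size = get_md_section_size("Super block config", get_dmesg())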


@ -1,5 +1,6 @@
# #
# Copyright(c) 2019-2022 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -62,25 +63,21 @@ def get_current_commit_hash(from_dut: bool = False):
executor = TestRun.executor if from_dut else LocalExecutor() executor = TestRun.executor if from_dut else LocalExecutor()
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
return executor.run( return executor.run(f"cd {repo_path} &&" f'git show HEAD -s --pretty=format:"%H"').stdout
f"cd {repo_path} &&"
f'git show HEAD -s --pretty=format:"%H"').stdout
def get_current_commit_message(): def get_current_commit_message():
local_executor = LocalExecutor() local_executor = LocalExecutor()
return local_executor.run( return local_executor.run(
f"cd {TestRun.usr.repo_dir} &&" f"cd {TestRun.usr.repo_dir} &&" f'git show HEAD -s --pretty=format:"%B"'
f'git show HEAD -s --pretty=format:"%B"').stdout ).stdout
def get_commit_hash(cas_version, from_dut: bool = False): def get_commit_hash(cas_version, from_dut: bool = False):
executor = TestRun.executor if from_dut else LocalExecutor() executor = TestRun.executor if from_dut else LocalExecutor()
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
output = executor.run( output = executor.run(f"cd {repo_path} && " f"git rev-parse {cas_version}")
f"cd {repo_path} && "
f"git rev-parse {cas_version}")
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException(f"Failed to resolve '{cas_version}' to commit hash", output) raise CmdException(f"Failed to resolve '{cas_version}' to commit hash", output)
@ -104,13 +101,13 @@ def checkout_cas_version(cas_version):
TestRun.LOGGER.info(f"Checkout CAS to {commit_hash}") TestRun.LOGGER.info(f"Checkout CAS to {commit_hash}")
output = TestRun.executor.run( output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && " f"cd {TestRun.usr.working_dir} && " f"git checkout --force {commit_hash}"
f"git checkout --force {commit_hash}") )
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException(f"Failed to checkout to {commit_hash}", output) raise CmdException(f"Failed to checkout to {commit_hash}", output)
output = TestRun.executor.run( output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && " f"cd {TestRun.usr.working_dir} && " f"git submodule update --force"
f"git submodule update --force") )
if output.exit_code != 0: if output.exit_code != 0:
raise CmdException(f"Failed to update submodules", output) raise CmdException(f"Failed to update submodules", output)
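A hypothetical snippet combining the git helpers above; the ref name is an assumption:

    # Illustrative only: read the current commit on the DUT, then switch the working copy.
    current_hash = get_current_commit_hash(from_dut=True)
    checkout_cas_version("master")  # raises CmdException if the ref cannot be resolved or checked out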


@ -1,256 +1,218 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
# Order in arrays is important!
config_stats_cache = [ import csv
"cache id", "cache size", "cache device", "exported object", "core devices",
"inactive core devices", "write policy", "cleaning policy", "promotion policy", from datetime import timedelta
"cache line size", "metadata memory footprint", "dirty for", "status" from typing import List
]
config_stats_core = [ from api.cas import casadm
"core id", "core device", "exported object", "core size", "dirty for", "status", from api.cas.casadm_params import StatsFilter
"seq cutoff threshold", "seq cutoff policy" from test_utils.size import Size, Unit
]
config_stats_ioclass = ["io class id", "io class name", "eviction priority", "max size"]
usage_stats = ["occupancy", "free", "clean", "dirty"]
usage_stats_ioclass = ["occupancy", "clean", "dirty"]
inactive_usage_stats = ["inactive occupancy", "inactive clean", "inactive dirty"]
request_stats = [
"read hits", "read partial misses", "read full misses", "read total",
"write hits", "write partial misses", "write full misses", "write total",
"pass-through reads", "pass-through writes",
"serviced requests", "total requests"
]
block_stats_cache = [
"reads from core(s)", "writes to core(s)", "total to/from core(s)",
"reads from cache", "writes to cache", "total to/from cache",
"reads from exported object(s)", "writes to exported object(s)",
"total to/from exported object(s)"
]
block_stats_core = [stat.replace("(s)", "") for stat in block_stats_cache]
error_stats = [
"cache read errors", "cache write errors", "cache total errors",
"core read errors", "core write errors", "core total errors",
"total errors"
]
class CacheStats: class CacheStats:
stats_list = [ def __init__(
"config_stats", self,
"usage_stats", cache_id: int,
"inactive_usage_stats", filter: List[StatsFilter] = None,
"request_stats", percentage_val: bool = False,
"block_stats", ):
"error_stats",
]
def __init__(self, stats): if filter is None:
try: filters = [
self.config_stats = CacheConfigStats( StatsFilter.conf,
*[stats[stat] for stat in config_stats_cache] StatsFilter.usage,
) StatsFilter.req,
except KeyError: StatsFilter.blk,
pass StatsFilter.err,
try: ]
self.usage_stats = UsageStats( else:
*[stats[stat] for stat in usage_stats] filters = filter
)
except KeyError: csv_stats = casadm.print_statistics(
pass cache_id=cache_id,
try: filter=filter,
self.inactive_usage_stats = InactiveUsageStats( output_format=casadm.OutputFormat.csv,
*[stats[stat] for stat in inactive_usage_stats] ).stdout.splitlines()
)
except KeyError: stat_keys, stat_values = csv.reader(csv_stats)
pass
try: # Unify names in block stats for core and cache:
self.request_stats = RequestStats( # cache stats: Reads from core(s)
*[stats[stat] for stat in request_stats] # core stats: Reads from core
) stat_keys = [x.replace("(s)", "") for x in stat_keys]
except KeyError: stats_dict = dict(zip(stat_keys, stat_values))
pass
try: for filter in filters:
self.block_stats = BlockStats( match filter:
*[stats[stat] for stat in block_stats_cache] case StatsFilter.conf:
) self.config_stats = CacheConfigStats(stats_dict)
except KeyError: case StatsFilter.usage:
pass self.usage_stats = UsageStats(stats_dict, percentage_val)
try: case StatsFilter.req:
self.error_stats = ErrorStats( self.request_stats = RequestStats(stats_dict, percentage_val)
*[stats[stat] for stat in error_stats] case StatsFilter.blk:
) self.block_stats_cache = BlockStats(stats_dict, percentage_val)
except KeyError: case StatsFilter.err:
pass self.error_stats = ErrorStats(stats_dict, percentage_val)
def __str__(self): def __str__(self):
status = "" # stats_list contains all Class.__str__ methods initialized in CacheStats
for stats_item in self.stats_list: stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
current_stat = getattr(self, stats_item, None) return "\n".join(stats_list)
if current_stat:
status += f"--- Cache {current_stat}"
return status
def __eq__(self, other): def __eq__(self, other):
if not other: # check if all variables initialized in self (CacheStats) match those in other (CacheStats)
return False return [getattr(self, stats_item) for stats_item in self.__dict__] == [
for stats_item in self.stats_list: getattr(other, stats_item) for stats_item in other.__dict__
if getattr(self, stats_item, None) != getattr(other, stats_item, None): ]
return False
return True
class CoreStats: class CoreStats:
stats_list = [ def __init__(
"config_stats", self,
"usage_stats", cache_id: int,
"request_stats", core_id: int,
"block_stats", filter: List[StatsFilter] = None,
"error_stats", percentage_val: bool = False,
] ):
def __init__(self, stats): if filter is None:
try: filters = [
self.config_stats = CoreConfigStats( StatsFilter.conf,
*[stats[stat] for stat in config_stats_core] StatsFilter.usage,
) StatsFilter.req,
except KeyError: StatsFilter.blk,
pass StatsFilter.err,
try: ]
self.usage_stats = UsageStats( else:
*[stats[stat] for stat in usage_stats] filters = filter
)
except KeyError: csv_stats = casadm.print_statistics(
pass cache_id=cache_id,
try: core_id=core_id,
self.request_stats = RequestStats( filter=filter,
*[stats[stat] for stat in request_stats] output_format=casadm.OutputFormat.csv,
) ).stdout.splitlines()
except KeyError:
pass stat_keys, stat_values = csv.reader(csv_stats)
try:
self.block_stats = BlockStats( # Unify names in block stats for core and cache:
*[stats[stat] for stat in block_stats_core] # cache stats: Reads from core(s)
) # core stats: Reads from core
except KeyError: stat_keys = [x.replace("(s)", "") for x in stat_keys]
pass stats_dict = dict(zip(stat_keys, stat_values))
try:
self.error_stats = ErrorStats( for filter in filters:
*[stats[stat] for stat in error_stats] match filter:
) case StatsFilter.conf:
except KeyError: self.config_stats = CoreConfigStats(stats_dict)
pass case StatsFilter.usage:
self.usage_stats = UsageStats(stats_dict, percentage_val)
case StatsFilter.req:
self.request_stats = RequestStats(stats_dict, percentage_val)
case StatsFilter.blk:
self.block_stats_cache = BlockStats(stats_dict, percentage_val)
case StatsFilter.err:
self.error_stats = ErrorStats(stats_dict, percentage_val)
def __str__(self): def __str__(self):
status = "" # stats_list contains all Class.__str__ methods initialized in CacheStats
for stats_item in self.stats_list: stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
current_stat = getattr(self, stats_item, None) return "\n".join(stats_list)
if current_stat:
status += f"--- Core {current_stat}"
return status
def __eq__(self, other): def __eq__(self, other):
if not other: # check if all variables initialized in self (CoreStats) match those in other (CoreStats)
return False return [getattr(self, stats_item) for stats_item in self.__dict__] == [
for stats_item in self.stats_list: getattr(other, stats_item) for stats_item in other.__dict__
if getattr(self, stats_item, None) != getattr(other, stats_item, None): ]
return False
return True
class IoClassStats: class IoClassStats:
stats_list = [
"config_stats",
"usage_stats",
"request_stats",
"block_stats",
]
def __init__(self, stats, block_stats_list):
try:
self.config_stats = IoClassConfigStats(
*[stats[stat] for stat in config_stats_ioclass]
)
except KeyError:
pass
try:
self.usage_stats = IoClassUsageStats(
*[stats[stat] for stat in usage_stats_ioclass]
)
except KeyError:
pass
try:
self.request_stats = RequestStats(
*[stats[stat] for stat in request_stats]
)
except KeyError:
pass
try:
self.block_stats = BlockStats(
*[stats[stat] for stat in block_stats_list]
)
except KeyError:
pass
def __str__(self): def __str__(self):
status = "" # stats_list contains all Class.__str__ methods initialized in CacheStats
for stats_item in self.stats_list: stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
current_stat = getattr(self, stats_item, None) return "\n".join(stats_list)
if current_stat:
status += f"--- IO class {current_stat}" def __init__(
return status self,
cache_id: int,
io_class_id: int,
core_id: int = None,
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
]
else:
filters = filter
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
io_class_id=io_class_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache:
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
case StatsFilter.conf:
self.config_stats = IoClassConfigStats(stats_dict, percentage_val)
case StatsFilter.usage:
self.usage_stats = IoClassUsageStats(stats_dict, percentage_val)
case StatsFilter.req:
self.request_stats = RequestStats(stats_dict, percentage_val)
case StatsFilter.blk:
self.block_stats_cache = BlockStats(stats_dict, percentage_val)
def __eq__(self, other): def __eq__(self, other):
if not other: # check if all variables initialized in self (IoClassStats) match those in other (IoClassStats)
return False return [getattr(self, stats_item) for stats_item in self.__dict__] == [
for stats_item in self.stats_list: getattr(other, stats_item) for stats_item in other.__dict__
if getattr(self, stats_item, None) != getattr(other, stats_item, None): ]
return False
return True
class CacheIoClassStats(IoClassStats):
def __init__(self, stats):
super().__init__(stats, block_stats_cache)
class CoreIoClassStats(IoClassStats):
def __init__(self, stats):
super().__init__(stats, block_stats_core)
class CacheConfigStats: class CacheConfigStats:
def __init__( def __init__(self, stats_dict):
self, self.cache_id = stats_dict["Cache Id"]
cache_id, self.cache_size = parse_value(
cache_size, value=stats_dict["Cache Size [4KiB Blocks]"], unit_type="[4KiB Blocks]"
cache_dev, )
exp_obj, self.cache_dev = stats_dict["Cache Device"]
core_dev, self.exp_obj = stats_dict["Exported Object"]
inactive_core_dev, self.core_dev = stats_dict["Core Devices"]
write_policy, self.inactive_core_devices = stats_dict["Inactive Core Devices"]
cleaning_policy, self.write_policy = stats_dict["Write Policy"]
promotion_policy, self.cleaning_policy = stats_dict["Cleaning Policy"]
cache_line_size, self.promotion_policy = stats_dict["Promotion Policy"]
metadata_memory_footprint, self.cache_line_size = parse_value(
dirty_for, value=stats_dict["Cache line size [KiB]"], unit_type="[KiB]"
status, )
): self.metadata_memory_footprint = parse_value(
self.cache_id = cache_id value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type="[MiB]"
self.cache_size = cache_size )
self.cache_dev = cache_dev self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
self.exp_obj = exp_obj self.status = stats_dict["Status"]
self.core_dev = core_dev
self.inactive_core_dev = inactive_core_dev
self.write_policy = write_policy
self.cleaning_policy = cleaning_policy
self.promotion_policy = promotion_policy
self.cache_line_size = cache_line_size
self.metadata_memory_footprint = metadata_memory_footprint
self.dirty_for = dirty_for
self.status = status
def __str__(self): def __str__(self):
return ( return (
@ -260,10 +222,10 @@ class CacheConfigStats:
f"Cache device: {self.cache_dev}\n" f"Cache device: {self.cache_dev}\n"
f"Exported object: {self.exp_obj}\n" f"Exported object: {self.exp_obj}\n"
f"Core devices: {self.core_dev}\n" f"Core devices: {self.core_dev}\n"
f"Inactive core devices: {self.inactive_core_dev}\n" f"Inactive Core Devices: {self.inactive_core_devices}\n"
f"Write policy: {self.write_policy}\n" f"Write Policy: {self.write_policy}\n"
f"Cleaning policy: {self.cleaning_policy}\n" f"Cleaning Policy: {self.cleaning_policy}\n"
f"Promotion policy: {self.promotion_policy}\n" f"Promotion Policy: {self.promotion_policy}\n"
f"Cache line size: {self.cache_line_size}\n" f"Cache line size: {self.cache_line_size}\n"
f"Metadata memory footprint: {self.metadata_memory_footprint}\n" f"Metadata memory footprint: {self.metadata_memory_footprint}\n"
f"Dirty for: {self.dirty_for}\n" f"Dirty for: {self.dirty_for}\n"
@ -279,7 +241,7 @@ class CacheConfigStats:
and self.cache_dev == other.cache_dev and self.cache_dev == other.cache_dev
and self.exp_obj == other.exp_obj and self.exp_obj == other.exp_obj
and self.core_dev == other.core_dev and self.core_dev == other.core_dev
and self.inactive_core_dev == other.inactive_core_dev and self.inactive_core_devices == other.inactive_core_devices
and self.write_policy == other.write_policy and self.write_policy == other.write_policy
and self.cleaning_policy == other.cleaning_policy and self.cleaning_policy == other.cleaning_policy
and self.promotion_policy == other.promotion_policy and self.promotion_policy == other.promotion_policy
@ -291,25 +253,19 @@ class CacheConfigStats:
class CoreConfigStats: class CoreConfigStats:
def __init__( def __init__(self, stats_dict):
self, self.core_id = stats_dict["Core Id"]
core_id, self.core_dev = stats_dict["Core Device"]
core_dev, self.exp_obj = stats_dict["Exported Object"]
exp_obj, self.core_size = parse_value(
core_size, value=stats_dict["Core Size [4KiB Blocks]"], unit_type="[4KiB Blocks]"
dirty_for, )
status, self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
seq_cutoff_threshold, self.status = stats_dict["Status"]
seq_cutoff_policy, self.seq_cutoff_threshold = parse_value(
): value=stats_dict["Seq cutoff threshold [KiB]"], unit_type="[KiB]"
self.core_id = core_id )
self.core_dev = core_dev self.seq_cutoff_policy = stats_dict["Seq cutoff policy"]
self.exp_obj = exp_obj
self.core_size = core_size
self.dirty_for = dirty_for
self.status = status
self.seq_cutoff_threshold = seq_cutoff_threshold
self.seq_cutoff_policy = seq_cutoff_policy
def __str__(self): def __str__(self):
return ( return (
@ -340,13 +296,11 @@ class CoreConfigStats:
class IoClassConfigStats: class IoClassConfigStats:
def __init__( def __init__(self, stats_dict):
self, io_class_id, io_class_name, eviction_priority, selective_allocation self.io_class_id = stats_dict["IO class ID"]
): self.io_class_name = stats_dict["IO class name"]
self.io_class_id = io_class_id self.eviction_priority = stats_dict["Eviction priority"]
self.io_class_name = io_class_name self.max_size = stats_dict["Max size"]
self.eviction_priority = eviction_priority
self.selective_allocation = selective_allocation
def __str__(self): def __str__(self):
return ( return (
@ -354,7 +308,7 @@ class IoClassConfigStats:
f"IO class ID: {self.io_class_id}\n" f"IO class ID: {self.io_class_id}\n"
f"IO class name: {self.io_class_name}\n" f"IO class name: {self.io_class_name}\n"
f"Eviction priority: {self.eviction_priority}\n" f"Eviction priority: {self.eviction_priority}\n"
f"Selective allocation: {self.selective_allocation}\n" f"Max size: {self.max_size}\n"
) )
def __eq__(self, other): def __eq__(self, other):
@ -364,16 +318,17 @@ class IoClassConfigStats:
self.io_class_id == other.io_class_id self.io_class_id == other.io_class_id
and self.io_class_name == other.io_class_name and self.io_class_name == other.io_class_name
and self.eviction_priority == other.eviction_priority and self.eviction_priority == other.eviction_priority
and self.selective_allocation == other.selective_allocation and self.max_size == other.max_size
) )
class UsageStats: class UsageStats:
def __init__(self, occupancy, free, clean, dirty): def __init__(self, stats_dict, percentage_val):
self.occupancy = occupancy unit = "[%]" if percentage_val else "[4KiB Blocks]"
self.free = free self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
self.clean = clean self.free = parse_value(value=stats_dict[f"Free {unit}"], unit_type=unit)
self.dirty = dirty self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
def __str__(self): def __str__(self):
return ( return (
@ -405,7 +360,7 @@ class UsageStats:
self.occupancy + other.occupancy, self.occupancy + other.occupancy,
self.free + other.free, self.free + other.free,
self.clean + other.clean, self.clean + other.clean,
self.dirty + other.dirty self.dirty + other.dirty,
) )
def __iadd__(self, other): def __iadd__(self, other):
@ -417,10 +372,11 @@ class UsageStats:
class IoClassUsageStats: class IoClassUsageStats:
def __init__(self, occupancy, clean, dirty): def __init__(self, stats_dict, percentage_val):
self.occupancy = occupancy unit = "[%]" if percentage_val else "[4KiB Blocks]"
self.clean = clean self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
self.dirty = dirty self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
def __str__(self): def __str__(self):
return ( return (
@ -449,7 +405,7 @@ class IoClassUsageStats:
return UsageStats( return UsageStats(
self.occupancy + other.occupancy, self.occupancy + other.occupancy,
self.clean + other.clean, self.clean + other.clean,
self.dirty + other.dirty self.dirty + other.dirty,
) )
def __iadd__(self, other): def __iadd__(self, other):
@ -484,31 +440,26 @@ class InactiveUsageStats:
class RequestStats: class RequestStats:
def __init__( def __init__(self, stats_dict, percentage_val):
self, unit = "[%]" if percentage_val else "[Requests]"
read_hits,
read_part_misses,
read_full_misses,
read_total,
write_hits,
write_part_misses,
write_full_misses,
write_total,
pass_through_reads,
pass_through_writes,
requests_serviced,
requests_total,
):
self.read = RequestStatsChunk( self.read = RequestStatsChunk(
read_hits, read_part_misses, read_full_misses, read_total stats_dict=stats_dict, percentage_val=percentage_val, operation="Read"
) )
self.write = RequestStatsChunk( self.write = RequestStatsChunk(
write_hits, write_part_misses, write_full_misses, write_total stats_dict=stats_dict, percentage_val=percentage_val, operation="Write"
)
self.pass_through_reads = parse_value(
value=stats_dict[f"Pass-Through reads {unit}"], unit_type=unit
)
self.pass_through_writes = parse_value(
value=stats_dict[f"Pass-Through writes {unit}"], unit_type=unit
)
self.requests_serviced = parse_value(
value=stats_dict[f"Serviced requests {unit}"], unit_type=unit
)
self.requests_total = parse_value(
value=stats_dict[f"Total requests {unit}"], unit_type=unit
) )
self.pass_through_reads = pass_through_reads
self.pass_through_writes = pass_through_writes
self.requests_serviced = requests_serviced
self.requests_total = requests_total
def __str__(self): def __str__(self):
return ( return (
@ -535,11 +486,16 @@ class RequestStats:
class RequestStatsChunk: class RequestStatsChunk:
def __init__(self, hits, part_misses, full_misses, total): def __init__(self, stats_dict, percentage_val, operation: str):
self.hits = hits unit = "[%]" if percentage_val else "[Requests]"
self.part_misses = part_misses self.hits = parse_value(value=stats_dict[f"{operation} hits {unit}"], unit_type=unit)
self.full_misses = full_misses self.part_misses = parse_value(
self.total = total value=stats_dict[f"{operation} partial misses {unit}"], unit_type=unit
)
self.full_misses = parse_value(
value=stats_dict[f"{operation} full misses {unit}"], unit_type=unit
)
self.total = parse_value(value=stats_dict[f"{operation} total {unit}"], unit_type=unit)
def __str__(self): def __str__(self):
return ( return (
@ -561,21 +517,18 @@ class RequestStatsChunk:
class BlockStats: class BlockStats:
def __init__( def __init__(self, stats_dict, percentage_val):
self, self.core = BasicStatsChunk(
core_reads, stats_dict=stats_dict, percentage_val=percentage_val, device="core"
core_writes, )
core_total, self.cache = BasicStatsChunk(
cache_reads, stats_dict=stats_dict, percentage_val=percentage_val, device="cache"
cache_writes, )
cache_total, self.exp_obj = BasicStatsChunk(
exp_obj_reads, stats_dict=stats_dict,
exp_obj_writes, percentage_val=percentage_val,
exp_obj_total, device="exported object",
): )
self.core = BasicStatsChunk(core_reads, core_writes, core_total)
self.cache = BasicStatsChunk(cache_reads, cache_writes, cache_total)
self.exp_obj = BasicStatsChunk(exp_obj_reads, exp_obj_writes, exp_obj_total)
def __str__(self): def __str__(self):
return ( return (
@ -589,30 +542,20 @@ class BlockStats:
if not other: if not other:
return False return False
return ( return (
self.core == other.core self.core == other.core and self.cache == other.cache and self.exp_obj == other.exp_obj
and self.cache == other.cache
and self.exp_obj == other.exp_obj
) )
class ErrorStats: class ErrorStats:
def __init__( def __init__(self, stats_dict, percentage_val):
self, unit = "[%]" if percentage_val else "[Requests]"
cache_read_errors, self.cache = BasicStatsChunkError(
cache_write_errors, stats_dict=stats_dict, percentage_val=percentage_val, device="Cache"
cache_total_errors,
core_read_errors,
core_write_errors,
core_total_errors,
total_errors,
):
self.cache = BasicStatsChunk(
cache_read_errors, cache_write_errors, cache_total_errors
) )
self.core = BasicStatsChunk( self.core = BasicStatsChunkError(
core_read_errors, core_write_errors, core_total_errors stats_dict=stats_dict, percentage_val=percentage_val, device="Core"
) )
self.total_errors = total_errors self.total_errors = parse_value(value=stats_dict[f"Total errors {unit}"], unit_type=unit)
def __str__(self): def __str__(self):
return ( return (
@ -633,10 +576,11 @@ class ErrorStats:
class BasicStatsChunk: class BasicStatsChunk:
def __init__(self, reads, writes, total): def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
self.reads = reads unit = "[%]" if percentage_val else "[4KiB Blocks]"
self.writes = writes self.reads = parse_value(value=stats_dict[f"Reads from {device} {unit}"], unit_type=unit)
self.total = total self.writes = parse_value(value=stats_dict[f"Writes to {device} {unit}"], unit_type=unit)
self.total = parse_value(value=stats_dict[f"Total to/from {device} {unit}"], unit_type=unit)
def __str__(self): def __str__(self):
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n" return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
@ -645,7 +589,44 @@ class BasicStatsChunk:
if not other: if not other:
return False return False
return ( return (
self.reads == other.reads self.reads == other.reads and self.writes == other.writes and self.total == other.total
and self.writes == other.writes
and self.total == other.total
) )
class BasicStatsChunkError:
def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
unit = "[%]" if percentage_val else "[Requests]"
self.reads = parse_value(value=stats_dict[f"{device} read errors {unit}"], unit_type=unit)
self.writes = parse_value(value=stats_dict[f"{device} write errors {unit}"], unit_type=unit)
self.total = parse_value(value=stats_dict[f"{device} total errors {unit}"], unit_type=unit)
def __str__(self):
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
def __eq__(self, other):
if not other:
return False
return (
self.reads == other.reads and self.writes == other.writes and self.total == other.total
)
def parse_value(value: str, unit_type: str) -> int | float | Size | timedelta | str:
match unit_type:
case "[Requests]":
stat_unit = int(value)
case "[%]":
stat_unit = float(value)
case "[4KiB Blocks]":
stat_unit = Size(float(value), Unit.Blocks4096)
case "[MiB]":
stat_unit = Size(float(value), Unit.MebiByte)
case "[KiB]":
stat_unit = Size(float(value), Unit.KibiByte)
case "[GiB]":
stat_unit = Size(float(value), Unit.GibiByte)
case "[s]":
stat_unit = timedelta(seconds=float(value))
case _:
stat_unit = value
return stat_unit
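Finally, a minimal, hypothetical sketch of how a test could consume the CSV-backed statistics classes and parse_value(); the cache id and the expected values are assumptions:

    # Illustrative only; CacheStats, StatsFilter, Size and Unit as used above.
    stats = CacheStats(cache_id=1, filter=[StatsFilter.usage, StatsFilter.err])
    assert stats.usage_stats.dirty.get_value(Unit.Blocks4096) == 0
    assert stats.error_stats.total_errors == 0

    dirty_for = parse_value(value="81.5", unit_type="[s]")            # -> timedelta(seconds=81.5)
    occupancy = parse_value(value="1024", unit_type="[4KiB Blocks]")  # -> Size(1024, Unit.Blocks4096)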