Merge pull request #1488 from Kamoppl/kamilg/update_cas_api
test-api: Update cas api
This commit is contained in:
commit
d324f541a1
@ -1,54 +1,64 @@
|
||||
#
|
||||
# Copyright(c) 2019-2021 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
from api.cas.casadm_parser import *
|
||||
from api.cas.cli import *
|
||||
from api.cas.core import Core
|
||||
from api.cas.dmesg import get_metadata_size_on_device
|
||||
from api.cas.statistics import CacheStats, CacheIoClassStats
|
||||
from test_utils.os_utils import *
|
||||
from test_utils.output import Output
|
||||
|
||||
|
||||
class Cache:
|
||||
def __init__(self, device: Device):
|
||||
def __init__(self, device: Device, cache_id: int = None) -> None:
|
||||
self.cache_device = device
|
||||
self.cache_id = int(self.__get_cache_id())
|
||||
self.cache_id = cache_id if cache_id else self.__get_cache_id()
|
||||
self.__cache_line_size = None
|
||||
self.__metadata_size = None
|
||||
self.metadata_size_on_disk = self.get_metadata_size_on_disk()
|
||||
|
||||
def __get_cache_id(self):
|
||||
cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.get_device_id()}"
|
||||
output = TestRun.executor.run(cmd)
|
||||
if output.exit_code == 0 and output.stdout.strip():
|
||||
return output.stdout.split()[1]
|
||||
else:
|
||||
raise Exception(f"There is no cache started on {self.cache_device.get_device_id()}.")
|
||||
def __get_cache_id(self) -> int:
|
||||
device_path = self.__get_cache_device_path()
|
||||
|
||||
def get_core_devices(self):
|
||||
caches_dict = get_cas_devices_dict()["caches"]
|
||||
|
||||
for cache in caches_dict.values():
|
||||
if cache["device_path"] == device_path:
|
||||
return int(cache["id"])
|
||||
|
||||
raise Exception(f"There is no cache started on {device_path}")
|
||||
|
||||
def __get_cache_device_path(self) -> str:
|
||||
return self.cache_device.path if self.cache_device is not None else "-"
|
||||
|
||||
def get_core_devices(self) -> list:
|
||||
return get_cores(self.cache_id)
|
||||
|
||||
def get_cache_line_size(self):
|
||||
def get_cache_line_size(self) -> CacheLineSize:
|
||||
if self.__cache_line_size is None:
|
||||
stats = self.get_statistics()
|
||||
stats_line_size = stats.config_stats.cache_line_size
|
||||
self.__cache_line_size = CacheLineSize(stats_line_size)
|
||||
return self.__cache_line_size
|
||||
|
||||
def get_cleaning_policy(self):
|
||||
def get_cleaning_policy(self) -> CleaningPolicy:
|
||||
stats = self.get_statistics()
|
||||
cp = stats.config_stats.cleaning_policy
|
||||
return CleaningPolicy[cp]
|
||||
|
||||
def get_metadata_size(self):
|
||||
if self.__metadata_size is None:
|
||||
stats = self.get_statistics()
|
||||
self.__metadata_size = stats.config_stats.metadata_memory_footprint
|
||||
return self.__metadata_size
|
||||
def get_metadata_size_in_ram(self) -> Size:
|
||||
stats = self.get_statistics()
|
||||
return stats.config_stats.metadata_memory_footprint
|
||||
|
||||
def get_metadata_size_on_disk(self) -> Size:
|
||||
return get_metadata_size_on_device(cache_id=self.cache_id)
|
||||
|
||||
def get_occupancy(self):
|
||||
return self.get_statistics().usage_stats.occupancy
|
||||
|
||||
def get_status(self):
|
||||
def get_status(self) -> CacheStatus:
|
||||
status = (
|
||||
self.get_statistics(stat_filter=[StatsFilter.conf])
|
||||
.config_stats.status.replace(" ", "_")
|
||||
@ -57,131 +67,142 @@ class Cache:
|
||||
return CacheStatus[status]
|
||||
|
||||
@property
|
||||
def size(self):
|
||||
def size(self) -> Size:
|
||||
return self.get_statistics().config_stats.cache_size
|
||||
|
||||
def get_cache_mode(self):
|
||||
def get_cache_mode(self) -> CacheMode:
|
||||
return CacheMode[self.get_statistics().config_stats.write_policy.upper()]
|
||||
|
||||
def get_dirty_blocks(self):
|
||||
def get_dirty_blocks(self) -> Size:
|
||||
return self.get_statistics().usage_stats.dirty
|
||||
|
||||
def get_dirty_for(self):
|
||||
def get_dirty_for(self) -> timedelta:
|
||||
return self.get_statistics().config_stats.dirty_for
|
||||
|
||||
def get_clean_blocks(self):
|
||||
def get_clean_blocks(self) -> Size:
|
||||
return self.get_statistics().usage_stats.clean
|
||||
|
||||
def get_flush_parameters_alru(self):
|
||||
def get_flush_parameters_alru(self) -> FlushParametersAlru:
|
||||
return get_flush_parameters_alru(self.cache_id)
|
||||
|
||||
def get_flush_parameters_acp(self):
|
||||
def get_flush_parameters_acp(self) -> FlushParametersAcp:
|
||||
return get_flush_parameters_acp(self.cache_id)
|
||||
|
||||
# Casadm methods:
|
||||
|
||||
def get_io_class_statistics(self,
|
||||
io_class_id: int,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False):
|
||||
stats = get_statistics(self.cache_id, None, io_class_id,
|
||||
stat_filter, percentage_val)
|
||||
return CacheIoClassStats(stats)
|
||||
def get_statistics(
|
||||
self,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
) -> CacheStats:
|
||||
return CacheStats(
|
||||
cache_id=self.cache_id,
|
||||
filter=stat_filter,
|
||||
percentage_val=percentage_val,
|
||||
)
|
||||
|
||||
def get_statistics(self,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False):
|
||||
stats = get_statistics(self.cache_id, None, None,
|
||||
stat_filter, percentage_val)
|
||||
return CacheStats(stats)
|
||||
def get_io_class_statistics(
|
||||
self,
|
||||
io_class_id: int = None,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
) -> CacheIoClassStats:
|
||||
return CacheIoClassStats(
|
||||
cache_id=self.cache_id,
|
||||
filter=stat_filter,
|
||||
io_class_id=io_class_id,
|
||||
percentage_val=percentage_val,
|
||||
)
|
||||
|
||||
def get_statistics_flat(self,
|
||||
io_class_id: int = None,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False):
|
||||
return get_statistics(self.cache_id, None, io_class_id,
|
||||
stat_filter, percentage_val)
|
||||
|
||||
def flush_cache(self):
|
||||
casadm.flush(cache_id=self.cache_id)
|
||||
def flush_cache(self) -> Output:
|
||||
output = casadm.flush_cache(cache_id=self.cache_id)
|
||||
sync()
|
||||
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
|
||||
return output
|
||||
|
||||
def purge_cache(self):
|
||||
casadm.purge_cache(cache_id=self.cache_id)
|
||||
def purge_cache(self) -> Output:
|
||||
output = casadm.purge_cache(cache_id=self.cache_id)
|
||||
sync()
|
||||
return output
|
||||
|
||||
def stop(self, no_data_flush: bool = False):
|
||||
def stop(self, no_data_flush: bool = False) -> Output:
|
||||
return casadm.stop_cache(self.cache_id, no_data_flush)
|
||||
|
||||
def add_core(self, core_dev, core_id: int = None):
|
||||
def add_core(self, core_dev, core_id: int = None) -> Core:
|
||||
return casadm.add_core(self, core_dev, core_id)
|
||||
|
||||
def remove_core(self, core_id: int, force: bool = False):
|
||||
def remove_core(self, core_id: int, force: bool = False) -> Output:
|
||||
return casadm.remove_core(self.cache_id, core_id, force)
|
||||
|
||||
def remove_inactive_core(self, core_id: int, force: bool = False):
|
||||
def remove_inactive_core(self, core_id: int, force: bool = False) -> Output:
|
||||
return casadm.remove_inactive(self.cache_id, core_id, force)
|
||||
|
||||
def reset_counters(self):
|
||||
def reset_counters(self) -> Output:
|
||||
return casadm.reset_counters(self.cache_id)
|
||||
|
||||
def set_cache_mode(self, cache_mode: CacheMode, flush=None):
|
||||
def set_cache_mode(self, cache_mode: CacheMode, flush=None) -> Output:
|
||||
return casadm.set_cache_mode(cache_mode, self.cache_id, flush)
|
||||
|
||||
def load_io_class(self, file_path: str):
|
||||
def load_io_class(self, file_path: str) -> Output:
|
||||
return casadm.load_io_classes(self.cache_id, file_path)
|
||||
|
||||
def list_io_classes(self):
|
||||
def list_io_classes(self) -> list:
|
||||
return get_io_class_list(self.cache_id)
|
||||
|
||||
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
|
||||
return casadm.set_param_cutoff(self.cache_id,
|
||||
threshold=seq_cutoff_param.threshold,
|
||||
policy=seq_cutoff_param.policy,
|
||||
promotion_count=seq_cutoff_param.promotion_count)
|
||||
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters) -> Output:
|
||||
return casadm.set_param_cutoff(
|
||||
self.cache_id,
|
||||
threshold=seq_cutoff_param.threshold,
|
||||
policy=seq_cutoff_param.policy,
|
||||
promotion_count=seq_cutoff_param.promotion_count,
|
||||
)
|
||||
|
||||
def set_seq_cutoff_threshold(self, threshold: Size):
|
||||
return casadm.set_param_cutoff(self.cache_id,
|
||||
threshold=threshold,
|
||||
policy=None)
|
||||
def set_seq_cutoff_threshold(self, threshold: Size) -> Output:
|
||||
return casadm.set_param_cutoff(self.cache_id, threshold=threshold, policy=None)
|
||||
|
||||
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
|
||||
return casadm.set_param_cutoff(self.cache_id,
|
||||
threshold=None,
|
||||
policy=policy)
|
||||
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy) -> Output:
|
||||
return casadm.set_param_cutoff(self.cache_id, threshold=None, policy=policy)
|
||||
|
||||
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
|
||||
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy) -> Output:
|
||||
return casadm.set_param_cleaning(self.cache_id, cleaning_policy)
|
||||
|
||||
def set_params_acp(self, acp_params: FlushParametersAcp):
|
||||
return casadm.set_param_cleaning_acp(self.cache_id,
|
||||
int(acp_params.wake_up_time.total_milliseconds())
|
||||
if acp_params.wake_up_time else None,
|
||||
int(acp_params.flush_max_buffers)
|
||||
if acp_params.flush_max_buffers else None)
|
||||
def set_params_acp(self, acp_params: FlushParametersAcp) -> Output:
|
||||
return casadm.set_param_cleaning_acp(
|
||||
self.cache_id,
|
||||
int(acp_params.wake_up_time.total_milliseconds()) if acp_params.wake_up_time else None,
|
||||
int(acp_params.flush_max_buffers) if acp_params.flush_max_buffers else None,
|
||||
)
|
||||
|
||||
def set_params_alru(self, alru_params: FlushParametersAlru):
|
||||
def set_params_alru(self, alru_params: FlushParametersAlru) -> Output:
|
||||
return casadm.set_param_cleaning_alru(
|
||||
self.cache_id,
|
||||
int(alru_params.wake_up_time.total_seconds())
|
||||
if alru_params.wake_up_time is not None else None,
|
||||
int(alru_params.staleness_time.total_seconds())
|
||||
if alru_params.staleness_time is not None else None,
|
||||
alru_params.flush_max_buffers
|
||||
if alru_params.flush_max_buffers is not None else None,
|
||||
int(alru_params.activity_threshold.total_milliseconds())
|
||||
if alru_params.activity_threshold is not None else None)
|
||||
(int(alru_params.wake_up_time.total_seconds()) if alru_params.wake_up_time else None),
|
||||
(
|
||||
int(alru_params.staleness_time.total_seconds())
|
||||
if alru_params.staleness_time
|
||||
else None
|
||||
),
|
||||
(alru_params.flush_max_buffers if alru_params.flush_max_buffers else None),
|
||||
(
|
||||
int(alru_params.activity_threshold.total_milliseconds())
|
||||
if alru_params.activity_threshold
|
||||
else None
|
||||
),
|
||||
)
|
||||
|
||||
def get_cache_config(self):
|
||||
return CacheConfig(self.get_cache_line_size(),
|
||||
self.get_cache_mode(),
|
||||
self.get_cleaning_policy())
|
||||
def get_cache_config(self) -> CacheConfig:
|
||||
return CacheConfig(
|
||||
self.get_cache_line_size(),
|
||||
self.get_cache_mode(),
|
||||
self.get_cleaning_policy(),
|
||||
)
|
||||
|
||||
def standby_detach(self, shortcut: bool = False):
|
||||
def standby_detach(self, shortcut: bool = False) -> Output:
|
||||
return casadm.standby_detach_cache(cache_id=self.cache_id, shortcut=shortcut)
|
||||
|
||||
def standby_activate(self, device, shortcut: bool = False):
|
||||
def standby_activate(self, device, shortcut: bool = False) -> Output:
|
||||
return casadm.standby_activate_cache(
|
||||
cache_id=self.cache_id, cache_dev=device, shortcut=shortcut
|
||||
)
|
||||
|
||||
def has_volatile_metadata(self) -> bool:
|
||||
return self.get_metadata_size_on_disk() == Size.zero()
|
||||
|
@ -1,9 +1,10 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
from aenum import Enum, IntFlag
|
||||
from enum import Enum, IntFlag
|
||||
|
||||
from test_utils.os_utils import get_kernel_module_parameter
|
||||
from test_utils.size import Size, Unit
|
||||
@ -40,37 +41,34 @@ class CacheMode(Enum):
|
||||
return self.value
|
||||
|
||||
@staticmethod
|
||||
def get_traits(cache_mode):
|
||||
if cache_mode == CacheMode.PT:
|
||||
return CacheModeTrait(0)
|
||||
elif cache_mode == CacheMode.WT:
|
||||
return CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite
|
||||
elif cache_mode == CacheMode.WB:
|
||||
return (
|
||||
CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
|
||||
)
|
||||
elif cache_mode == CacheMode.WO:
|
||||
return CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
|
||||
elif cache_mode == CacheMode.WA:
|
||||
return CacheModeTrait.InsertRead
|
||||
def get_traits(cache_mode) -> CacheModeTrait:
|
||||
match cache_mode:
|
||||
case CacheMode.PT:
|
||||
return CacheModeTrait(0)
|
||||
case CacheMode.WT:
|
||||
return CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite
|
||||
case CacheMode.WB:
|
||||
return (
|
||||
CacheModeTrait.InsertRead
|
||||
| CacheModeTrait.InsertWrite
|
||||
| CacheModeTrait.LazyWrites
|
||||
)
|
||||
case CacheMode.WO:
|
||||
return CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
|
||||
case CacheMode.WA:
|
||||
return CacheModeTrait.InsertRead
|
||||
|
||||
@staticmethod
|
||||
def with_traits(flags: CacheModeTrait):
|
||||
return [
|
||||
m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))
|
||||
]
|
||||
def with_traits(flags: CacheModeTrait) -> list:
|
||||
return [m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))]
|
||||
|
||||
@staticmethod
|
||||
def without_traits(flags: CacheModeTrait):
|
||||
return [
|
||||
m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))
|
||||
]
|
||||
def without_traits(flags: CacheModeTrait) -> list:
|
||||
return [m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))]
|
||||
|
||||
@staticmethod
|
||||
def with_any_trait(flags: CacheModeTrait):
|
||||
return [
|
||||
m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))
|
||||
]
|
||||
def with_any_trait(flags: CacheModeTrait) -> list:
|
||||
return [m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))]
|
||||
|
||||
|
||||
class SeqCutOffPolicy(Enum):
|
||||
@ -90,7 +88,6 @@ class SeqCutOffPolicy(Enum):
|
||||
|
||||
class MetadataMode(Enum):
|
||||
normal = "normal"
|
||||
atomic = "atomic"
|
||||
DEFAULT = normal
|
||||
|
||||
def __str__(self):
|
||||
@ -133,10 +130,10 @@ class CacheStatus(Enum):
|
||||
class FlushParametersAlru:
|
||||
def __init__(
|
||||
self,
|
||||
activity_threshold=None,
|
||||
flush_max_buffers=None,
|
||||
staleness_time=None,
|
||||
wake_up_time=None,
|
||||
activity_threshold: Time = None,
|
||||
flush_max_buffers: int = None,
|
||||
staleness_time: Time = None,
|
||||
wake_up_time: Time = None,
|
||||
):
|
||||
self.activity_threshold = activity_threshold
|
||||
self.flush_max_buffers = flush_max_buffers
|
||||
@ -152,18 +149,16 @@ class FlushParametersAlru:
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
ret = ["activity threshold: "
|
||||
+ (f"{self.activity_threshold}" if self.activity_threshold is not None
|
||||
else "default"),
|
||||
"flush max buffers: "
|
||||
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None
|
||||
else "default"),
|
||||
"staleness time: "
|
||||
+ (f"{self.staleness_time}" if self.staleness_time is not None
|
||||
else "default"),
|
||||
"wake up time: "
|
||||
+ (f"{self.wake_up_time}" if self.wake_up_time is not None
|
||||
else "default")]
|
||||
ret = [
|
||||
"activity threshold: "
|
||||
+ (f"{self.activity_threshold}" if self.activity_threshold is not None else "default"),
|
||||
"flush max buffers: "
|
||||
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
|
||||
"staleness time: "
|
||||
+ (f"{self.staleness_time}" if self.staleness_time is not None else "default"),
|
||||
"wake up time: "
|
||||
+ (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
|
||||
]
|
||||
return " | ".join(ret)
|
||||
|
||||
@staticmethod
|
||||
@ -197,12 +192,12 @@ class FlushParametersAcp:
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
ret = ["flush max buffers: "
|
||||
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None
|
||||
else "default"),
|
||||
"wake up time: "
|
||||
+ (f"{self.wake_up_time}" if self.wake_up_time is not None
|
||||
else "default")]
|
||||
ret = [
|
||||
"flush max buffers: "
|
||||
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
|
||||
"wake up time: "
|
||||
+ (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
|
||||
]
|
||||
return " | ".join(ret)
|
||||
|
||||
@staticmethod
|
||||
@ -221,7 +216,9 @@ class FlushParametersAcp:
|
||||
|
||||
|
||||
class SeqCutOffParameters:
|
||||
def __init__(self, policy=None, threshold=None, promotion_count=None):
|
||||
def __init__(
|
||||
self, policy: CleaningPolicy = None, threshold: Size = None, promotion_count: int = None
|
||||
):
|
||||
self.policy = policy
|
||||
self.threshold = threshold
|
||||
self.promotion_count = promotion_count
|
||||
@ -238,20 +235,17 @@ class SeqCutOffParameters:
|
||||
return SeqCutOffParameters(
|
||||
threshold=Size(1024, Unit.KibiByte),
|
||||
policy=SeqCutOffPolicy.full,
|
||||
promotion_count=8
|
||||
promotion_count=8,
|
||||
)
|
||||
|
||||
|
||||
class PromotionParametersNhit:
|
||||
def __init__(self, threshold=None, trigger=None):
|
||||
def __init__(self, threshold: Size = None, trigger: int = None):
|
||||
self.threshold = threshold
|
||||
self.trigger = trigger
|
||||
|
||||
def __eq__(self, other):
|
||||
return (
|
||||
self.threshold == other.threshold
|
||||
and self.trigger == other.trigger
|
||||
)
|
||||
return self.threshold == other.threshold and self.trigger == other.trigger
|
||||
|
||||
@staticmethod
|
||||
def nhit_params_range():
|
||||
@ -270,8 +264,8 @@ class PromotionParametersNhit:
|
||||
|
||||
# Specify how IO requests unaligned to 4KiB should be handled
|
||||
class UnalignedIo(Enum):
|
||||
PT = 0 # use PT mode
|
||||
cache = 1 # use current cache mode
|
||||
PT = 0 # use PT mode
|
||||
cache = 1 # use current cache mode
|
||||
DEFAULT = cache
|
||||
|
||||
|
||||
@ -288,12 +282,12 @@ class KernelParameters:
|
||||
writeback_queue_unblock_size_DEFAULT = 60000
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
unaligned_io: UnalignedIo = None,
|
||||
use_io_scheduler: UseIoScheduler = None,
|
||||
seq_cut_off_mb: int = None,
|
||||
max_writeback_queue_size: int = None,
|
||||
writeback_queue_unblock_size: int = None
|
||||
self,
|
||||
unaligned_io: UnalignedIo = None,
|
||||
use_io_scheduler: UseIoScheduler = None,
|
||||
seq_cut_off_mb: int = None,
|
||||
max_writeback_queue_size: int = None,
|
||||
writeback_queue_unblock_size: int = None,
|
||||
):
|
||||
self.unaligned_io = unaligned_io
|
||||
self.use_io_scheduler = use_io_scheduler
|
||||
@ -312,16 +306,17 @@ class KernelParameters:
|
||||
self.use_io_scheduler, other.use_io_scheduler, UseIoScheduler.DEFAULT
|
||||
)
|
||||
and equal_or_default(
|
||||
self.seq_cut_off_mb, other.seq_cut_off_mb,
|
||||
self.seq_cut_off_mb_DEFAULT
|
||||
self.seq_cut_off_mb, other.seq_cut_off_mb, self.seq_cut_off_mb_DEFAULT
|
||||
)
|
||||
and equal_or_default(
|
||||
self.max_writeback_queue_size, other.max_writeback_queue_size,
|
||||
self.max_writeback_queue_size_DEFAULT
|
||||
self.max_writeback_queue_size,
|
||||
other.max_writeback_queue_size,
|
||||
self.max_writeback_queue_size_DEFAULT,
|
||||
)
|
||||
and equal_or_default(
|
||||
self.writeback_queue_unblock_size, other.writeback_queue_unblock_size,
|
||||
self.writeback_queue_unblock_size_DEFAULT
|
||||
self.writeback_queue_unblock_size,
|
||||
other.writeback_queue_unblock_size,
|
||||
self.writeback_queue_unblock_size_DEFAULT,
|
||||
)
|
||||
)
|
||||
|
||||
@ -332,7 +327,7 @@ class KernelParameters:
|
||||
UseIoScheduler.DEFAULT,
|
||||
cls.seq_cut_off_mb_DEFAULT,
|
||||
cls.max_writeback_queue_size_DEFAULT,
|
||||
cls.writeback_queue_unblock_size_DEFAULT
|
||||
cls.writeback_queue_unblock_size_DEFAULT,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
@ -343,7 +338,7 @@ class KernelParameters:
|
||||
UseIoScheduler(int(get_kernel_module_parameter(module, "use_io_scheduler"))),
|
||||
int(get_kernel_module_parameter(module, "seq_cut_off_mb")),
|
||||
int(get_kernel_module_parameter(module, "max_writeback_queue_size")),
|
||||
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size"))
|
||||
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size")),
|
||||
)
|
||||
|
||||
def get_parameter_dictionary(self):
|
||||
@ -354,10 +349,15 @@ class KernelParameters:
|
||||
params["use_io_scheduler"] = str(self.use_io_scheduler.value)
|
||||
if self.seq_cut_off_mb not in [None, self.seq_cut_off_mb_DEFAULT]:
|
||||
params["seq_cut_off_mb"] = str(self.seq_cut_off_mb)
|
||||
if self.max_writeback_queue_size not in [None, self.max_writeback_queue_size_DEFAULT]:
|
||||
if self.max_writeback_queue_size not in [
|
||||
None,
|
||||
self.max_writeback_queue_size_DEFAULT,
|
||||
]:
|
||||
params["max_writeback_queue_size"] = str(self.max_writeback_queue_size)
|
||||
if (self.writeback_queue_unblock_size not in
|
||||
[None, self.writeback_queue_unblock_size_DEFAULT]):
|
||||
if self.writeback_queue_unblock_size not in [
|
||||
None,
|
||||
self.writeback_queue_unblock_size_DEFAULT,
|
||||
]:
|
||||
params["writeback_queue_unblock_size"] = str(self.writeback_queue_unblock_size)
|
||||
return params
|
||||
|
||||
@ -367,10 +367,10 @@ class KernelParameters:
|
||||
class CacheConfig:
|
||||
def __init__(
|
||||
self,
|
||||
cache_line_size=CacheLineSize.DEFAULT,
|
||||
cache_mode=CacheMode.DEFAULT,
|
||||
cleaning_policy=CleaningPolicy.DEFAULT,
|
||||
kernel_parameters=None
|
||||
cache_line_size: CacheLineSize = CacheLineSize.DEFAULT,
|
||||
cache_mode: CacheMode = CacheMode.DEFAULT,
|
||||
cleaning_policy: CleaningPolicy = CleaningPolicy.DEFAULT,
|
||||
kernel_parameters=None,
|
||||
):
|
||||
self.cache_line_size = cache_line_size
|
||||
self.cache_mode = cache_mode
|
||||
@ -383,7 +383,9 @@ class CacheConfig:
|
||||
and self.cache_mode == other.cache_mode
|
||||
and self.cleaning_policy == other.cleaning_policy
|
||||
and equal_or_default(
|
||||
self.kernel_parameters, other.kernel_parameters, KernelParameters.DEFAULT
|
||||
self.kernel_parameters,
|
||||
other.kernel_parameters,
|
||||
KernelParameters.DEFAULT,
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -1,9 +1,10 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
from aenum import Enum
|
||||
from enum import Enum
|
||||
from core.test_run import TestRun
|
||||
from test_utils import os_utils
|
||||
from test_utils.os_utils import ModuleRemoveMethod
|
||||
@ -19,8 +20,7 @@ def reload_all_cas_modules():
|
||||
|
||||
|
||||
def unload_all_cas_modules():
|
||||
os_utils.unload_kernel_module(CasModule.cache.value,
|
||||
os_utils.ModuleRemoveMethod.rmmod)
|
||||
os_utils.unload_kernel_module(CasModule.cache.value, os_utils.ModuleRemoveMethod.rmmod)
|
||||
|
||||
|
||||
def is_cas_management_dev_present():
|
||||
|
@ -36,7 +36,8 @@ class Packages:
|
||||
|
||||
|
||||
class _Rpm(RpmSet):
|
||||
def __init__(self, packages_dir: str = ""):
|
||||
def __init__(self, packages_paths: list, packages_dir: str = ""):
|
||||
super().__init__(packages_paths)
|
||||
self.packages_dir = packages_dir
|
||||
self.packages = get_packages_list("rpm", self.packages_dir)
|
||||
|
||||
@ -65,7 +66,8 @@ class _Rpm(RpmSet):
|
||||
|
||||
|
||||
class _Deb(DebSet):
|
||||
def __init__(self, packages_dir: str = ""):
|
||||
def __init__(self, packages_paths: list, packages_dir: str = ""):
|
||||
super().__init__(packages_paths)
|
||||
self.packages_dir = packages_dir
|
||||
self.packages = get_packages_list("deb", self.packages_dir)
|
||||
|
||||
@ -98,7 +100,8 @@ def get_packages_list(package_type: str, packages_dir: str):
|
||||
return []
|
||||
|
||||
return [
|
||||
package for package in find_all_files(packages_dir, recursive=False)
|
||||
package
|
||||
for package in find_all_files(packages_dir, recursive=False)
|
||||
# include only binary packages (ready to be processed by package manager)
|
||||
if package.endswith(package_type.lower())
|
||||
and not package.endswith("src." + package_type.lower())
|
@ -1,5 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -8,7 +9,12 @@ from datetime import timedelta
|
||||
from string import Template
|
||||
from textwrap import dedent
|
||||
|
||||
from test_tools.fs_utils import check_if_directory_exists, create_directory, write_file, remove
|
||||
from test_tools.fs_utils import (
|
||||
check_if_directory_exists,
|
||||
create_directory,
|
||||
write_file,
|
||||
remove,
|
||||
)
|
||||
from test_utils.systemd import reload_daemon
|
||||
|
||||
opencas_drop_in_directory = Path("/etc/systemd/system/open-cas.service.d/")
|
||||
|
@ -1,55 +1,419 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
from typing import List
|
||||
|
||||
from api.cas.cache import Cache
|
||||
from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy, CleaningPolicy, \
|
||||
KernelParameters
|
||||
from api.cas.cache_config import (
|
||||
CacheLineSize,
|
||||
CacheMode,
|
||||
SeqCutOffPolicy,
|
||||
CleaningPolicy,
|
||||
KernelParameters,
|
||||
)
|
||||
from api.cas.casadm_params import OutputFormat, StatsFilter
|
||||
from api.cas.cli import *
|
||||
from api.cas.core import Core
|
||||
from core.test_run import TestRun
|
||||
from storage_devices.device import Device
|
||||
from test_utils.os_utils import reload_kernel_module
|
||||
from test_utils.output import CmdException
|
||||
from test_utils.output import CmdException, Output
|
||||
from test_utils.size import Size, Unit
|
||||
from .casadm_params import *
|
||||
from .casctl import stop as casctl_stop
|
||||
from .cli import *
|
||||
|
||||
|
||||
def help(shortcut: bool = False):
|
||||
return TestRun.executor.run(help_cmd(shortcut))
|
||||
# casadm commands
|
||||
|
||||
|
||||
def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
|
||||
cache_line_size: CacheLineSize = None, cache_id: int = None,
|
||||
force: bool = False, load: bool = False, shortcut: bool = False,
|
||||
kernel_params: KernelParameters = KernelParameters()):
|
||||
def start_cache(
|
||||
cache_dev: Device,
|
||||
cache_mode: CacheMode = None,
|
||||
cache_line_size: CacheLineSize = None,
|
||||
cache_id: int = None,
|
||||
force: bool = False,
|
||||
load: bool = False,
|
||||
shortcut: bool = False,
|
||||
kernel_params: KernelParameters = KernelParameters(),
|
||||
) -> Cache:
|
||||
if kernel_params != KernelParameters.read_current_settings():
|
||||
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
|
||||
|
||||
_cache_line_size = None if cache_line_size is None else str(
|
||||
int(cache_line_size.value.get_value(Unit.KibiByte)))
|
||||
_cache_id = None if cache_id is None else str(cache_id)
|
||||
_cache_mode = None if cache_mode is None else cache_mode.name.lower()
|
||||
output = TestRun.executor.run(start_cmd(
|
||||
cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
|
||||
cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
|
||||
_cache_line_size = (
|
||||
str(int(cache_line_size.value.get_value(Unit.KibiByte)))
|
||||
if cache_line_size is not None
|
||||
else None
|
||||
)
|
||||
_cache_id = str(cache_id) if cache_id is not None else None
|
||||
_cache_mode = cache_mode.name.lower() if cache_mode else None
|
||||
output = TestRun.executor.run(
|
||||
start_cmd(
|
||||
cache_dev=cache_dev.path,
|
||||
cache_mode=_cache_mode,
|
||||
cache_line_size=_cache_line_size,
|
||||
cache_id=_cache_id,
|
||||
force=force,
|
||||
load=load,
|
||||
shortcut=shortcut,
|
||||
)
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to start cache.", output)
|
||||
return Cache(cache_dev)
|
||||
|
||||
|
||||
def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSize,
|
||||
force: bool = False, shortcut: bool = False,
|
||||
kernel_params: KernelParameters = KernelParameters()):
|
||||
def load_cache(device: Device, shortcut: bool = False) -> Cache:
|
||||
output = TestRun.executor.run(load_cmd(cache_dev=device.path, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to load cache.", output)
|
||||
return Cache(device)
|
||||
|
||||
|
||||
def attach_cache(cache_id: int, device: Device, force: bool, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(
|
||||
attach_cache_cmd(
|
||||
cache_dev=device.path, cache_id=str(cache_id), force=force, shortcut=shortcut
|
||||
)
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to attach cache.", output)
|
||||
return output
|
||||
|
||||
|
||||
def detach_cache(cache_id: int, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(detach_cache_cmd(cache_id=str(cache_id), shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to detach cache.", output)
|
||||
return output
|
||||
|
||||
|
||||
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(
|
||||
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut)
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to stop cache.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cutoff(
|
||||
cache_id: int,
|
||||
core_id: int = None,
|
||||
threshold: Size = None,
|
||||
policy: SeqCutOffPolicy = None,
|
||||
promotion_count: int = None,
|
||||
shortcut: bool = False,
|
||||
) -> Output:
|
||||
_core_id = str(core_id) if core_id is not None else None
|
||||
_threshold = str(int(threshold.get_value(Unit.KibiByte))) if threshold else None
|
||||
_policy = policy.name if policy else None
|
||||
_promotion_count = str(promotion_count) if promotion_count is not None else None
|
||||
command = set_param_cutoff_cmd(
|
||||
cache_id=str(cache_id),
|
||||
core_id=_core_id,
|
||||
threshold=_threshold,
|
||||
policy=_policy,
|
||||
promotion_count=_promotion_count,
|
||||
shortcut=shortcut,
|
||||
)
|
||||
output = TestRun.executor.run(command)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Error while setting sequential cut-off params.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cleaning(cache_id: int, policy: CleaningPolicy, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(
|
||||
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name, shortcut=shortcut)
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Error while setting cleaning policy.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cleaning_alru(
    cache_id: int,
    wake_up: int = None,
    staleness_time: int = None,
    flush_max_buffers: int = None,
    activity_threshold: int = None,
    shortcut: bool = False,
) -> Output:
    """Tune ALRU cleaning policy parameters; None leaves a parameter unchanged.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = set_param_cleaning_alru_cmd(
        cache_id=str(cache_id),
        wake_up=str(wake_up) if wake_up is not None else None,
        staleness_time=str(staleness_time) if staleness_time is not None else None,
        flush_max_buffers=str(flush_max_buffers) if flush_max_buffers is not None else None,
        activity_threshold=str(activity_threshold) if activity_threshold is not None else None,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Error while setting alru cleaning policy parameters.", result)
    return result
|
||||
|
||||
|
||||
def set_param_cleaning_acp(
    cache_id: int, wake_up: int = None, flush_max_buffers: int = None, shortcut: bool = False
) -> Output:
    """Tune ACP cleaning policy parameters; None leaves a parameter unchanged.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = set_param_cleaning_acp_cmd(
        cache_id=str(cache_id),
        wake_up=str(wake_up) if wake_up is not None else None,
        flush_max_buffers=str(flush_max_buffers) if flush_max_buffers is not None else None,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Error while setting acp cleaning policy parameters.", result)
    return result
|
||||
|
||||
|
||||
def get_param_cutoff(
    cache_id: int, core_id: int, output_format: OutputFormat = None, shortcut: bool = False
) -> Output:
    """Read the sequential cut-off parameters for a given cache/core pair.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = get_param_cutoff_cmd(
        cache_id=str(cache_id),
        core_id=str(core_id),
        output_format=output_format.name if output_format else None,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Getting sequential cutoff params failed.", result)
    return result
|
||||
|
||||
|
||||
def get_param_cleaning(
    cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
) -> Output:
    """Read the currently selected cleaning policy for cache `cache_id`.

    Return annotation added for consistency with the other casadm wrappers.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    _output_format = output_format.name if output_format else None
    output = TestRun.executor.run(
        get_param_cleaning_cmd(
            cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
        )
    )
    if output.exit_code != 0:
        raise CmdException("Getting cleaning policy failed.", output)
    return output
|
||||
|
||||
|
||||
def get_param_cleaning_alru(
    cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
) -> Output:
    """Read the ALRU cleaning policy parameters for cache `cache_id`.

    Return annotation added for consistency with the other casadm wrappers.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    _output_format = output_format.name if output_format else None
    output = TestRun.executor.run(
        get_param_cleaning_alru_cmd(
            cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
        )
    )
    if output.exit_code != 0:
        raise CmdException("Getting alru cleaning policy params failed.", output)
    return output
|
||||
|
||||
|
||||
def get_param_cleaning_acp(
    cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
) -> Output:
    """Read the ACP cleaning policy parameters for cache `cache_id`.

    Return annotation added for consistency with the other casadm wrappers.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    _output_format = output_format.name if output_format else None
    output = TestRun.executor.run(
        get_param_cleaning_acp_cmd(
            cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
        )
    )
    if output.exit_code != 0:
        raise CmdException("Getting acp cleaning policy params failed.", output)
    return output
|
||||
|
||||
|
||||
def set_cache_mode(
    cache_mode: CacheMode, cache_id: int, flush=None, shortcut: bool = False
) -> Output:
    """Switch the cache mode of a running cache instance.

    `flush` is tri-state: True -> `--flush-cache yes`, False -> `--flush-cache no`,
    None -> flag omitted (casadm default behavior).

    :raises CmdException: when casadm exits with a non-zero status.
    """
    # BUG FIX: the previous `if flush:` guard made the "no" branch of the
    # ternary unreachable — flush=False silently omitted the flag instead of
    # passing "no". Check against None explicitly to honor all three states.
    flush_cache = None if flush is None else ("yes" if flush else "no")
    output = TestRun.executor.run(
        set_cache_mode_cmd(
            cache_mode=cache_mode.name.lower(),
            cache_id=str(cache_id),
            flush_cache=flush_cache,
            shortcut=shortcut,
        )
    )
    if output.exit_code != 0:
        raise CmdException("Set cache mode command failed.", output)
    return output
|
||||
|
||||
|
||||
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False) -> Core:
    """Add `core_dev` as a core to `cache` and return the resulting Core object.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = add_core_cmd(
        cache_id=str(cache.cache_id),
        core_dev=core_dev.path,
        core_id=str(core_id) if core_id is not None else None,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to add core.", result)
    return Core(core_dev.path, cache.cache_id)
|
||||
|
||||
|
||||
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False) -> Output:
    """Remove a core from a cache; `force` skips flushing dirty data.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = remove_core_cmd(
        cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to remove core.", result)
    return result
|
||||
|
||||
|
||||
def remove_inactive(
    cache_id: int, core_id: int, force: bool = False, shortcut: bool = False
) -> Output:
    """Remove an inactive core from a cache; `force` drops it despite dirty data.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = remove_inactive_cmd(
        cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to remove inactive core.", result)
    return result
|
||||
|
||||
|
||||
def remove_detached(core_device: Device, shortcut: bool = False) -> Output:
    """Remove a detached core device from the core pool.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = remove_detached_cmd(core_device=core_device.path, shortcut=shortcut)
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to remove detached core.", result)
    return result
|
||||
|
||||
|
||||
def list_caches(
    output_format: OutputFormat = None, by_id_path: bool = True, shortcut: bool = False
) -> Output:
    """List all CAS devices; `by_id_path` selects /dev/disk/by-id paths.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = list_caches_cmd(
        output_format=output_format.name if output_format else None,
        by_id_path=by_id_path,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to list caches.", result)
    return result
|
||||
|
||||
|
||||
def print_statistics(
    cache_id: int,
    core_id: int = None,
    io_class_id: int = None,
    filter: List[StatsFilter] = None,
    output_format: OutputFormat = None,
    by_id_path: bool = True,
    shortcut: bool = False,
) -> Output:
    """Print statistics for a cache (optionally narrowed to a core or IO class).

    `filter` selects statistic sections; None prints everything.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    # casadm takes the section filter as a comma-separated list of names.
    _filter = ",".join(item.name for item in filter) if filter is not None else None
    command = print_statistics_cmd(
        cache_id=str(cache_id),
        core_id=str(core_id) if core_id is not None else None,
        io_class_id=str(io_class_id) if io_class_id is not None else None,
        filter=_filter,
        output_format=output_format.name if output_format else None,
        by_id_path=by_id_path,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Printing statistics failed.", result)
    return result
|
||||
|
||||
|
||||
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False) -> Output:
    """Reset statistics counters for a cache or, if given, a single core.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = reset_counters_cmd(
        cache_id=str(cache_id),
        core_id=str(core_id) if core_id is not None else None,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to reset counters.", result)
    return result
|
||||
|
||||
|
||||
def flush_cache(cache_id: int, shortcut: bool = False) -> Output:
    """Flush all dirty data from cache instance `cache_id` to backing storage.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    result = TestRun.executor.run(flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut))
    if result.exit_code != 0:
        raise CmdException("Flushing cache failed.", result)
    return result
|
||||
|
||||
|
||||
def flush_core(cache_id: int, core_id: int, shortcut: bool = False) -> Output:
    """Flush dirty data belonging to a single core to backing storage.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    result = TestRun.executor.run(
        flush_core_cmd(cache_id=str(cache_id), core_id=str(core_id), shortcut=shortcut)
    )
    if result.exit_code != 0:
        raise CmdException("Flushing core failed.", result)
    return result
|
||||
|
||||
|
||||
def load_io_classes(cache_id: int, file: str, shortcut: bool = False) -> Output:
    """Load an IO class configuration file into cache `cache_id`.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut)
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Load IO class command failed.", result)
    return result
|
||||
|
||||
|
||||
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False) -> Output:
    """List the IO class configuration of cache `cache_id`.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = list_io_classes_cmd(
        cache_id=str(cache_id),
        output_format=output_format.name if output_format else None,
        shortcut=shortcut,
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("List IO class command failed.", result)
    return result
|
||||
|
||||
|
||||
def print_version(output_format: OutputFormat = None, shortcut: bool = False) -> Output:
    """Print the CAS version information.

    :raises CmdException: when casadm exits with a non-zero status.
    """
    command = version_cmd(
        output_format=output_format.name if output_format else None, shortcut=shortcut
    )
    result = TestRun.executor.run(command)
    if result.exit_code != 0:
        raise CmdException("Failed to print version.", result)
    return result
|
||||
|
||||
|
||||
def help(shortcut: bool = False) -> Output:
    """Run the casadm help command and return its raw output (no exit-code check)."""
    command = help_cmd(shortcut)
    return TestRun.executor.run(command)
|
||||
|
||||
|
||||
def standby_init(
|
||||
cache_dev: Device,
|
||||
cache_id: int,
|
||||
cache_line_size: CacheLineSize,
|
||||
force: bool = False,
|
||||
shortcut: bool = False,
|
||||
kernel_params: KernelParameters = KernelParameters(),
|
||||
) -> Cache:
|
||||
if kernel_params != KernelParameters.read_current_settings():
|
||||
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
|
||||
|
||||
_cache_line_size = None if cache_line_size is None else str(
|
||||
int(cache_line_size.value.get_value(Unit.KibiByte)))
|
||||
_cache_line_size = str(int(cache_line_size.value.get_value(Unit.KibiByte)))
|
||||
|
||||
output = TestRun.executor.run(
|
||||
standby_init_cmd(
|
||||
@ -65,334 +429,92 @@ def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSiz
|
||||
return Cache(cache_dev)
|
||||
|
||||
|
||||
def standby_load(cache_dev: Device, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut)
|
||||
)
|
||||
def standby_load(cache_dev: Device, shortcut: bool = False) -> Cache:
|
||||
output = TestRun.executor.run(standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to load standby cache.", output)
|
||||
return Cache(cache_dev)
|
||||
|
||||
|
||||
def standby_detach_cache(cache_id: int, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut)
|
||||
)
|
||||
def standby_detach_cache(cache_id: int, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to detach standby cache.", output)
|
||||
return output
|
||||
|
||||
|
||||
def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False):
|
||||
def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(
|
||||
standby_activate_cmd(
|
||||
cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut
|
||||
)
|
||||
standby_activate_cmd(cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut)
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to activate standby cache.", output)
|
||||
return output
|
||||
|
||||
|
||||
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
|
||||
def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False) -> Output:
|
||||
output = TestRun.executor.run(
|
||||
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut))
|
||||
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut)
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to stop cache.", output)
|
||||
raise CmdException("Failed to wipe metadata.", output)
|
||||
return output
|
||||
|
||||
|
||||
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
|
||||
_core_id = None if core_id is None else str(core_id)
|
||||
output = TestRun.executor.run(
|
||||
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path,
|
||||
core_id=_core_id, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to add core.", output)
|
||||
core = Core(core_dev.path, cache.cache_id)
|
||||
return core
|
||||
# script command
|
||||
|
||||
|
||||
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
remove_core_cmd(cache_id=str(cache_id), core_id=str(core_id),
|
||||
force=force, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to remove core.", output)
|
||||
|
||||
|
||||
def remove_inactive(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
remove_inactive_cmd(
|
||||
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to remove inactive core.", output)
|
||||
|
||||
|
||||
def remove_detached(core_device: Device, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to remove detached core.", output)
|
||||
return output
|
||||
|
||||
|
||||
def try_add(core_device: Device, cache_id: int, core_id: int):
|
||||
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path,
|
||||
str(core_id)))
|
||||
def try_add(core_device: Device, cache_id: int, core_id: int) -> Core:
|
||||
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path, str(core_id)))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to execute try add script command.", output)
|
||||
return Core(core_device.path, cache_id)
|
||||
|
||||
|
||||
def purge_cache(cache_id: int):
|
||||
def purge_cache(cache_id: int) -> Output:
|
||||
output = TestRun.executor.run(script_purge_cache_cmd(str(cache_id)))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Purge cache failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def purge_core(cache_id: int, core_id: int):
|
||||
def purge_core(cache_id: int, core_id: int) -> Output:
|
||||
output = TestRun.executor.run(script_purge_core_cmd(str(cache_id), str(core_id)))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Purge core failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def detach_core(cache_id: int, core_id: int):
|
||||
def detach_core(cache_id: int, core_id: int) -> Output:
|
||||
output = TestRun.executor.run(script_detach_core_cmd(str(cache_id), str(core_id)))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to execute detach core script command.", output)
|
||||
return output
|
||||
|
||||
|
||||
def remove_core_with_script_command(cache_id: int, core_id: int, no_flush: bool = False):
|
||||
def remove_core_with_script_command(cache_id: int, core_id: int, no_flush: bool = False) -> Output:
|
||||
output = TestRun.executor.run(script_remove_core_cmd(str(cache_id), str(core_id), no_flush))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to execute remove core script command.", output)
|
||||
return output
|
||||
|
||||
|
||||
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False):
|
||||
_core_id = None if core_id is None else str(core_id)
|
||||
output = TestRun.executor.run(
|
||||
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to reset counters.", output)
|
||||
return output
|
||||
# casadm custom commands
|
||||
|
||||
|
||||
def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
|
||||
if core_id is None:
|
||||
command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut)
|
||||
else:
|
||||
command = flush_core_cmd(cache_id=str(cache_id), core_id=str(core_id), shortcut=shortcut)
|
||||
output = TestRun.executor.run(command)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Flushing failed.", output)
|
||||
return output
|
||||
def stop_all_caches() -> None:
|
||||
from api.cas.casadm_parser import get_caches
|
||||
|
||||
|
||||
def load_cache(device: Device, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
load_cmd(cache_dev=device.path, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to load cache.", output)
|
||||
return Cache(device)
|
||||
|
||||
|
||||
def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
|
||||
shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to list caches.", output)
|
||||
return output
|
||||
|
||||
|
||||
def print_version(output_format: OutputFormat = None, shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
version_cmd(output_format=_output_format, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to print version.", output)
|
||||
return output
|
||||
|
||||
|
||||
def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to wipe metadata.", output)
|
||||
return output
|
||||
|
||||
|
||||
def stop_all_caches():
|
||||
if "No caches running" in list_caches().stdout:
|
||||
caches = get_caches()
|
||||
if not caches:
|
||||
return
|
||||
TestRun.LOGGER.info("Stop all caches")
|
||||
stop_output = casctl_stop()
|
||||
caches_output = list_caches()
|
||||
if "No caches running" not in caches_output.stdout:
|
||||
raise CmdException(f"Error while stopping caches. "
|
||||
f"Listing caches: {caches_output}", stop_output)
|
||||
for cache in caches:
|
||||
stop_cache(cache_id=cache.cache_id, no_data_flush=True)
|
||||
|
||||
|
||||
def remove_all_detached_cores():
|
||||
from api.cas import casadm_parser
|
||||
devices = casadm_parser.get_cas_devices_dict()
|
||||
def remove_all_detached_cores() -> None:
|
||||
from api.cas.casadm_parser import get_cas_devices_dict
|
||||
|
||||
devices = get_cas_devices_dict()
|
||||
for dev in devices["core_pool"]:
|
||||
TestRun.executor.run(remove_detached_cmd(dev["device"]))
|
||||
|
||||
|
||||
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
|
||||
io_class_id: int = None, filter: List[StatsFilter] = None,
|
||||
output_format: OutputFormat = None, by_id_path: bool = True,
|
||||
shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
_core_id = None if core_id is None else str(core_id)
|
||||
_io_class_id = None if io_class_id is None else str(io_class_id)
|
||||
if filter is None:
|
||||
_filter = filter
|
||||
else:
|
||||
names = (x.name for x in filter)
|
||||
_filter = ",".join(names)
|
||||
output = TestRun.executor.run(
|
||||
print_statistics_cmd(
|
||||
cache_id=str(cache_id), core_id=_core_id,
|
||||
per_io_class=per_io_class, io_class_id=_io_class_id,
|
||||
filter=_filter, output_format=_output_format,
|
||||
by_id_path=by_id_path, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Printing statistics failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_cache_mode(cache_mode: CacheMode, cache_id: int,
|
||||
flush=None, shortcut: bool = False):
|
||||
flush_cache = None
|
||||
if flush is True:
|
||||
flush_cache = "yes"
|
||||
elif flush is False:
|
||||
flush_cache = "no"
|
||||
|
||||
output = TestRun.executor.run(
|
||||
set_cache_mode_cmd(cache_mode=cache_mode.name.lower(), cache_id=str(cache_id),
|
||||
flush_cache=flush_cache, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Set cache mode command failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
|
||||
output = TestRun.executor.run(
|
||||
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Load IO class command failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
list_io_classes_cmd(cache_id=str(cache_id),
|
||||
output_format=_output_format, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("List IO class command failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def get_param_cutoff(cache_id: int, core_id: int,
|
||||
output_format: OutputFormat = None, shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
get_param_cutoff_cmd(cache_id=str(cache_id), core_id=str(core_id),
|
||||
output_format=_output_format, shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Getting sequential cutoff params failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
get_param_cleaning_cmd(cache_id=str(cache_id), output_format=_output_format,
|
||||
shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Getting cleaning policy params failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def get_param_cleaning_alru(cache_id: int, output_format: OutputFormat = None,
|
||||
shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
get_param_cleaning_alru_cmd(cache_id=str(cache_id), output_format=_output_format,
|
||||
shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Getting alru cleaning policy params failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def get_param_cleaning_acp(cache_id: int, output_format: OutputFormat = None,
|
||||
shortcut: bool = False):
|
||||
_output_format = None if output_format is None else output_format.name
|
||||
output = TestRun.executor.run(
|
||||
get_param_cleaning_acp_cmd(cache_id=str(cache_id), output_format=_output_format,
|
||||
shortcut=shortcut))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Getting acp cleaning policy params failed.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
|
||||
policy: SeqCutOffPolicy = None, promotion_count: int = None):
|
||||
_core_id = None if core_id is None else str(core_id)
|
||||
_threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte)))
|
||||
_policy = None if policy is None else policy.name
|
||||
_promotion_count = None if promotion_count is None else str(promotion_count)
|
||||
command = set_param_cutoff_cmd(
|
||||
cache_id=str(cache_id),
|
||||
core_id=_core_id,
|
||||
threshold=_threshold,
|
||||
policy=_policy,
|
||||
promotion_count=_promotion_count
|
||||
)
|
||||
output = TestRun.executor.run(command)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Error while setting sequential cut-off params.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cleaning(cache_id: int, policy: CleaningPolicy):
|
||||
output = TestRun.executor.run(
|
||||
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Error while setting cleaning policy.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cleaning_alru(cache_id: int, wake_up: int = None, staleness_time: int = None,
|
||||
flush_max_buffers: int = None, activity_threshold: int = None):
|
||||
output = TestRun.executor.run(
|
||||
set_param_cleaning_alru_cmd(
|
||||
cache_id=cache_id,
|
||||
wake_up=wake_up,
|
||||
staleness_time=staleness_time,
|
||||
flush_max_buffers=flush_max_buffers,
|
||||
activity_threshold=activity_threshold))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Error while setting alru cleaning policy parameters.", output)
|
||||
return output
|
||||
|
||||
|
||||
def set_param_cleaning_acp(cache_id: int, wake_up: int = None, flush_max_buffers: int = None):
|
||||
output = TestRun.executor.run(
|
||||
set_param_cleaning_acp_cmd(
|
||||
cache_id=str(cache_id),
|
||||
wake_up=str(wake_up) if wake_up is not None else None,
|
||||
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers else None))
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Error while setting acp cleaning policy parameters.", output)
|
||||
return output
|
||||
|
@ -1,9 +1,22 @@
|
||||
#
|
||||
# Copyright(c) 2019-2021 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
from aenum import Enum
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class ParamName(Enum):
|
||||
seq_cutoff = "seq-cutoff"
|
||||
cleaning = "cleaning"
|
||||
cleaning_alru = "cleaning-alru"
|
||||
cleaning_acp = "cleaning-acp"
|
||||
promotion = "promotion"
|
||||
promotion_nhit = "promotion-nhit"
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
|
||||
class OutputFormat(Enum):
|
||||
|
@ -1,12 +1,13 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import csv
|
||||
import io
|
||||
import json
|
||||
import re
|
||||
|
||||
from datetime import timedelta, datetime
|
||||
from typing import List
|
||||
|
||||
@ -18,7 +19,6 @@ from api.cas.version import CasVersion
|
||||
from core.test_run_utils import TestRun
|
||||
from storage_devices.device import Device
|
||||
from test_utils.output import CmdException
|
||||
from test_utils.size import parse_unit
|
||||
|
||||
|
||||
class Stats(dict):
|
||||
@ -26,188 +26,79 @@ class Stats(dict):
|
||||
return json.dumps(self, default=lambda o: str(o), indent=2)
|
||||
|
||||
|
||||
def parse_stats_unit(unit: str):
|
||||
if unit is None:
|
||||
return ""
|
||||
|
||||
unit = re.search(r".*[^\]]", unit).group()
|
||||
|
||||
if unit == "s":
|
||||
return "s"
|
||||
elif unit == "%":
|
||||
return "%"
|
||||
elif unit == "Requests":
|
||||
return "requests"
|
||||
else:
|
||||
return parse_unit(unit)
|
||||
|
||||
|
||||
def get_filter(filter: List[StatsFilter]):
|
||||
"""Prepare list of statistic sections which should be retrieved and parsed. """
|
||||
"""Prepare list of statistic sections which should be retrieved and parsed."""
|
||||
if filter is None or StatsFilter.all in filter:
|
||||
_filter = [
|
||||
f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)
|
||||
]
|
||||
_filter = [f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)]
|
||||
else:
|
||||
_filter = [
|
||||
f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)
|
||||
]
|
||||
_filter = [f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)]
|
||||
|
||||
return _filter
|
||||
|
||||
|
||||
def get_statistics(
|
||||
cache_id: int,
|
||||
core_id: int = None,
|
||||
io_class_id: int = None,
|
||||
filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
):
|
||||
stats = Stats()
|
||||
|
||||
_filter = get_filter(filter)
|
||||
|
||||
per_io_class = True if io_class_id is not None else False
|
||||
|
||||
# No need to retrieve all stats if user specified only 'conf' flag
|
||||
if filter != [StatsFilter.conf]:
|
||||
csv_stats = casadm.print_statistics(
|
||||
cache_id=cache_id,
|
||||
core_id=core_id,
|
||||
per_io_class=per_io_class,
|
||||
io_class_id=io_class_id,
|
||||
filter=_filter,
|
||||
output_format=casadm.OutputFormat.csv,
|
||||
).stdout.splitlines()
|
||||
|
||||
if filter is None or StatsFilter.conf in filter or StatsFilter.all in filter:
|
||||
# Conf statistics have different unit or may have no unit at all. For parsing
|
||||
# convenience they are gathered separately. As this is only configuration stats
|
||||
# there is no risk they are divergent.
|
||||
conf_stats = casadm.print_statistics(
|
||||
cache_id=cache_id,
|
||||
core_id=core_id,
|
||||
per_io_class=per_io_class,
|
||||
io_class_id=io_class_id,
|
||||
filter=[StatsFilter.conf],
|
||||
output_format=casadm.OutputFormat.csv,
|
||||
).stdout.splitlines()
|
||||
stat_keys = conf_stats[0]
|
||||
stat_values = conf_stats[1]
|
||||
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
|
||||
# Some of configuration stats have no unit
|
||||
try:
|
||||
stat_name, stat_unit = name.split(" [")
|
||||
except ValueError:
|
||||
stat_name = name
|
||||
stat_unit = None
|
||||
|
||||
stat_name = stat_name.lower()
|
||||
|
||||
# 'dirty for' and 'cache size' stats occurs twice
|
||||
if stat_name in stats:
|
||||
continue
|
||||
|
||||
stat_unit = parse_stats_unit(stat_unit)
|
||||
|
||||
if isinstance(stat_unit, Unit):
|
||||
stats[stat_name] = Size(float(val), stat_unit)
|
||||
elif stat_unit == "s":
|
||||
stats[stat_name] = timedelta(seconds=int(val))
|
||||
elif stat_unit == "":
|
||||
# Some of stats without unit can be a number like IDs,
|
||||
# some of them can be string like device path
|
||||
try:
|
||||
stats[stat_name] = float(val)
|
||||
except ValueError:
|
||||
stats[stat_name] = val
|
||||
|
||||
# No need to parse all stats if user specified only 'conf' flag
|
||||
if filter == [StatsFilter.conf]:
|
||||
return stats
|
||||
|
||||
stat_keys = csv_stats[0]
|
||||
stat_values = csv_stats[1]
|
||||
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
|
||||
if percentage_val and " [%]" in name:
|
||||
stats[name.split(" [")[0].lower()] = float(val)
|
||||
elif not percentage_val and "[%]" not in name:
|
||||
stat_name, stat_unit = name.split(" [")
|
||||
|
||||
stat_unit = parse_stats_unit(stat_unit)
|
||||
|
||||
stat_name = stat_name.lower()
|
||||
|
||||
if isinstance(stat_unit, Unit):
|
||||
stats[stat_name] = Size(float(val), stat_unit)
|
||||
elif stat_unit == "requests":
|
||||
stats[stat_name] = float(val)
|
||||
else:
|
||||
raise ValueError(f"Invalid unit {stat_unit}")
|
||||
|
||||
return stats
|
||||
|
||||
|
||||
def get_caches(): # This method does not return inactive or detached CAS devices
|
||||
def get_caches() -> list:
|
||||
from api.cas.cache import Cache
|
||||
|
||||
caches_dict = get_cas_devices_dict()["caches"]
|
||||
caches_list = []
|
||||
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
|
||||
for line in lines:
|
||||
args = line.split(',')
|
||||
if args[0] == "cache":
|
||||
current_cache = Cache(Device(args[2]))
|
||||
caches_list.append(current_cache)
|
||||
|
||||
for cache in caches_dict.values():
|
||||
caches_list.append(
|
||||
Cache(
|
||||
device=(Device(cache["device_path"]) if cache["device_path"] != "-" else None),
|
||||
cache_id=cache["id"],
|
||||
)
|
||||
)
|
||||
|
||||
return caches_list
|
||||
|
||||
|
||||
def get_cores(cache_id: int):
|
||||
def get_cores(cache_id: int) -> list:
|
||||
from api.cas.core import Core, CoreStatus
|
||||
cores_list = []
|
||||
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
|
||||
is_proper_core_line = False
|
||||
for line in lines:
|
||||
args = line.split(',')
|
||||
if args[0] == "core" and is_proper_core_line:
|
||||
core_status_str = args[3].lower()
|
||||
is_valid_status = CoreStatus[core_status_str].value[0] <= 1
|
||||
if is_valid_status:
|
||||
cores_list.append(Core(args[2], cache_id))
|
||||
if args[0] == "cache":
|
||||
is_proper_core_line = True if int(args[1]) == cache_id else False
|
||||
return cores_list
|
||||
|
||||
cores_dict = get_cas_devices_dict()["cores"].values()
|
||||
|
||||
def is_active(core):
|
||||
return CoreStatus[core["status"].lower()] == CoreStatus.active
|
||||
|
||||
return [
|
||||
Core(core["device_path"], core["cache_id"])
|
||||
for core in cores_dict
|
||||
if is_active(core) and core["cache_id"] == cache_id
|
||||
]
|
||||
|
||||
|
||||
def get_cas_devices_dict():
|
||||
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split('\n')))
|
||||
devices = {"core_pool": [], "caches": {}, "cores": {}}
|
||||
def get_cas_devices_dict() -> dict:
|
||||
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split("\n")))
|
||||
devices = {"caches": {}, "cores": {}, "core_pool": {}}
|
||||
cache_id = -1
|
||||
core_pool = False
|
||||
prev_cache_id = -1
|
||||
|
||||
for device in device_list:
|
||||
if device["type"] == "core pool":
|
||||
core_pool = True
|
||||
continue
|
||||
|
||||
if device["type"] == "cache":
|
||||
core_pool = False
|
||||
prev_cache_id = int(device["id"])
|
||||
devices["caches"].update(
|
||||
{
|
||||
int(device["id"]): {
|
||||
"device": device["disk"],
|
||||
"status": device["status"],
|
||||
}
|
||||
}
|
||||
)
|
||||
cache_id = int(device["id"])
|
||||
params = [
|
||||
("id", cache_id),
|
||||
("device_path", device["disk"]),
|
||||
("status", device["status"]),
|
||||
]
|
||||
devices["caches"][cache_id] = dict([(key, value) for key, value in params])
|
||||
|
||||
elif device["type"] == "core":
|
||||
core = {"device": device["disk"], "status": device["status"]}
|
||||
params = [
|
||||
("cache_id", cache_id),
|
||||
("device_path", device["disk"]),
|
||||
("status", device["status"]),
|
||||
]
|
||||
if core_pool:
|
||||
devices["core_pool"].append(core)
|
||||
else:
|
||||
core.update({"cache_id": prev_cache_id})
|
||||
devices["cores"].update(
|
||||
{(prev_cache_id, int(device["id"])): core}
|
||||
params.append(("core_pool", device))
|
||||
devices["core_pool"][(cache_id, int(device["id"]))] = dict(
|
||||
[(key, value) for key, value in params]
|
||||
)
|
||||
else:
|
||||
devices["cores"][(cache_id, int(device["id"]))] = dict(
|
||||
[(key, value) for key, value in params]
|
||||
)
|
||||
|
||||
return devices
|
||||
|
||||
|
||||
@ -215,20 +106,26 @@ def get_flushing_progress(cache_id: int, core_id: int = None):
|
||||
casadm_output = casadm.list_caches(OutputFormat.csv)
|
||||
lines = casadm_output.stdout.splitlines()
|
||||
for line in lines:
|
||||
line_elements = line.split(',')
|
||||
if core_id is not None and line_elements[0] == "core" \
|
||||
and int(line_elements[1]) == core_id \
|
||||
or core_id is None and line_elements[0] == "cache" \
|
||||
and int(line_elements[1]) == cache_id:
|
||||
line_elements = line.split(",")
|
||||
if (
|
||||
core_id is not None
|
||||
and line_elements[0] == "core"
|
||||
and int(line_elements[1]) == core_id
|
||||
or core_id is None
|
||||
and line_elements[0] == "cache"
|
||||
and int(line_elements[1]) == cache_id
|
||||
):
|
||||
try:
|
||||
flush_line_elements = line_elements[3].split()
|
||||
flush_percent = flush_line_elements[1][1:]
|
||||
return float(flush_percent)
|
||||
except Exception:
|
||||
break
|
||||
raise CmdException(f"There is no flushing progress in casadm list output. (cache {cache_id}"
|
||||
f"{' core ' + str(core_id) if core_id is not None else ''})",
|
||||
casadm_output)
|
||||
raise CmdException(
|
||||
f"There is no flushing progress in casadm list output. (cache {cache_id}"
|
||||
f"{' core ' + str(core_id) if core_id is not None else ''})",
|
||||
casadm_output,
|
||||
)
|
||||
|
||||
|
||||
def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
|
||||
@ -243,54 +140,57 @@ def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
|
||||
|
||||
|
||||
def get_flush_parameters_alru(cache_id: int):
|
||||
casadm_output = casadm.get_param_cleaning_alru(cache_id,
|
||||
casadm.OutputFormat.csv).stdout.splitlines()
|
||||
casadm_output = casadm.get_param_cleaning_alru(
|
||||
cache_id, casadm.OutputFormat.csv
|
||||
).stdout.splitlines()
|
||||
flush_parameters = FlushParametersAlru()
|
||||
for line in casadm_output:
|
||||
if 'max buffers' in line:
|
||||
flush_parameters.flush_max_buffers = int(line.split(',')[1])
|
||||
if 'Activity threshold' in line:
|
||||
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(',')[1]))
|
||||
if 'Stale buffer time' in line:
|
||||
flush_parameters.staleness_time = Time(seconds=int(line.split(',')[1]))
|
||||
if 'Wake up time' in line:
|
||||
flush_parameters.wake_up_time = Time(seconds=int(line.split(',')[1]))
|
||||
if "max buffers" in line:
|
||||
flush_parameters.flush_max_buffers = int(line.split(",")[1])
|
||||
if "Activity threshold" in line:
|
||||
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(",")[1]))
|
||||
if "Stale buffer time" in line:
|
||||
flush_parameters.staleness_time = Time(seconds=int(line.split(",")[1]))
|
||||
if "Wake up time" in line:
|
||||
flush_parameters.wake_up_time = Time(seconds=int(line.split(",")[1]))
|
||||
return flush_parameters
|
||||
|
||||
|
||||
def get_flush_parameters_acp(cache_id: int):
|
||||
casadm_output = casadm.get_param_cleaning_acp(cache_id,
|
||||
casadm.OutputFormat.csv).stdout.splitlines()
|
||||
casadm_output = casadm.get_param_cleaning_acp(
|
||||
cache_id, casadm.OutputFormat.csv
|
||||
).stdout.splitlines()
|
||||
flush_parameters = FlushParametersAcp()
|
||||
for line in casadm_output:
|
||||
if 'max buffers' in line:
|
||||
flush_parameters.flush_max_buffers = int(line.split(',')[1])
|
||||
if 'Wake up time' in line:
|
||||
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(',')[1]))
|
||||
if "max buffers" in line:
|
||||
flush_parameters.flush_max_buffers = int(line.split(",")[1])
|
||||
if "Wake up time" in line:
|
||||
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(",")[1]))
|
||||
return flush_parameters
|
||||
|
||||
|
||||
def get_seq_cut_off_parameters(cache_id: int, core_id: int):
|
||||
casadm_output = casadm.get_param_cutoff(
|
||||
cache_id, core_id, casadm.OutputFormat.csv).stdout.splitlines()
|
||||
cache_id, core_id, casadm.OutputFormat.csv
|
||||
).stdout.splitlines()
|
||||
seq_cut_off_params = SeqCutOffParameters()
|
||||
for line in casadm_output:
|
||||
if 'Sequential cutoff threshold' in line:
|
||||
seq_cut_off_params.threshold = Size(int(line.split(',')[1]), Unit.KibiByte)
|
||||
if 'Sequential cutoff policy' in line:
|
||||
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(',')[1])
|
||||
if 'Sequential cutoff promotion request count threshold' in line:
|
||||
seq_cut_off_params.promotion_count = int(line.split(',')[1])
|
||||
if "Sequential cutoff threshold" in line:
|
||||
seq_cut_off_params.threshold = Size(int(line.split(",")[1]), Unit.KibiByte)
|
||||
if "Sequential cutoff policy" in line:
|
||||
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(",")[1])
|
||||
if "Sequential cutoff promotion request count threshold" in line:
|
||||
seq_cut_off_params.promotion_count = int(line.split(",")[1])
|
||||
return seq_cut_off_params
|
||||
|
||||
|
||||
def get_casadm_version():
|
||||
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split('\n')
|
||||
version_str = casadm_output[1].split(',')[-1]
|
||||
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split("\n")
|
||||
version_str = casadm_output[1].split(",")[-1]
|
||||
return CasVersion.from_version_string(version_str)
|
||||
|
||||
|
||||
def get_io_class_list(cache_id: int):
|
||||
def get_io_class_list(cache_id: int) -> list:
|
||||
ret = []
|
||||
casadm_output = casadm.list_io_classes(cache_id, OutputFormat.csv).stdout.splitlines()
|
||||
casadm_output.pop(0) # Remove header
|
||||
@ -301,14 +201,16 @@ def get_io_class_list(cache_id: int):
|
||||
return ret
|
||||
|
||||
|
||||
def get_core_info_by_path(core_disk_path):
|
||||
def get_core_info_by_path(core_disk_path) -> dict | None:
|
||||
output = casadm.list_caches(OutputFormat.csv, by_id_path=True)
|
||||
reader = csv.DictReader(io.StringIO(output.stdout))
|
||||
for row in reader:
|
||||
if row['type'] == "core" and row['disk'] == core_disk_path:
|
||||
return {"core_id": row['id'],
|
||||
"core_device": row['disk'],
|
||||
"status": row['status'],
|
||||
"exp_obj": row['device']}
|
||||
if row["type"] == "core" and row["disk"] == core_disk_path:
|
||||
return {
|
||||
"core_id": row["id"],
|
||||
"core_device": row["disk"],
|
||||
"status": row["status"],
|
||||
"exp_obj": row["device"],
|
||||
}
|
||||
|
||||
return None
|
||||
|
@ -1,5 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -11,88 +12,15 @@ casadm_bin = "casadm"
|
||||
casctl = "casctl"
|
||||
|
||||
|
||||
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False):
|
||||
command = f" -A -i {cache_id} -d {core_dev}" if shortcut \
|
||||
else f" --add-core --cache-id {cache_id} --core-device {core_dev}"
|
||||
if core_id is not None:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str = None):
|
||||
command = f"{casadm_bin} --script --add-core --try-add --cache-id {cache_id} " \
|
||||
f"--core-device {core_dev}"
|
||||
if core_id:
|
||||
command += f" --core-id {core_id}"
|
||||
return command
|
||||
|
||||
|
||||
def script_purge_cache_cmd(cache_id: str):
|
||||
return f"{casadm_bin} --script --purge-cache --cache-id {cache_id}"
|
||||
|
||||
|
||||
def script_purge_core_cmd(cache_id: str, core_id: str):
|
||||
return f"{casadm_bin} --script --purge-core --cache-id {cache_id} --core-id {core_id}"
|
||||
|
||||
|
||||
def script_detach_core_cmd(cache_id: str, core_id: str):
|
||||
return f"{casadm_bin} --script --remove-core --detach --cache-id {cache_id} " \
|
||||
f"--core-id {core_id}"
|
||||
|
||||
|
||||
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False):
|
||||
command = f"{casadm_bin} --script --remove-core --cache-id {cache_id} --core-id {core_id}"
|
||||
if no_flush:
|
||||
command += ' --no-flush'
|
||||
return command
|
||||
|
||||
|
||||
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
|
||||
command = f" -R -i {cache_id} -j {core_id}" if shortcut \
|
||||
else f" --remove-core --cache-id {cache_id} --core-id {core_id}"
|
||||
if force:
|
||||
command += " -f" if shortcut else " --force"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def remove_inactive_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
|
||||
command = f" --remove-inactive {'-i' if shortcut else '--cache-id'} {cache_id} " \
|
||||
f"{'-j' if shortcut else '--core-id'} {core_id}"
|
||||
if force:
|
||||
command += " -f" if shortcut else " --force"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def remove_detached_cmd(core_device: str, shortcut: bool = False):
|
||||
command = " --remove-detached" + (" -d " if shortcut else " --device ") + core_device
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def help_cmd(shortcut: bool = False):
|
||||
return casadm_bin + (" -H" if shortcut else " --help")
|
||||
|
||||
|
||||
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False):
|
||||
command = (" -Z -i " if shortcut else " --reset-counters --cache-id ") + cache_id
|
||||
if core_id is not None:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def flush_cache_cmd(cache_id: str, shortcut: bool = False):
|
||||
command = (" -F -i " if shortcut else " --flush-cache --cache-id ") + cache_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False):
|
||||
command = (f" -F -i {cache_id} -j {core_id}" if shortcut
|
||||
else f" --flush-cache --cache-id {cache_id} --core-id {core_id}")
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
|
||||
cache_id: str = None, force: bool = False,
|
||||
load: bool = False, shortcut: bool = False):
|
||||
def start_cmd(
|
||||
cache_dev: str,
|
||||
cache_mode: str = None,
|
||||
cache_line_size: str = None,
|
||||
cache_id: str = None,
|
||||
force: bool = False,
|
||||
load: bool = False,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
command = " -S" if shortcut else " --start-cache"
|
||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||
if cache_mode is not None:
|
||||
@ -108,8 +36,341 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = Non
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
|
||||
force: bool = False, shortcut: bool = False):
|
||||
def load_cmd(cache_dev: str, shortcut: bool = False) -> str:
|
||||
return start_cmd(cache_dev=cache_dev, load=True, shortcut=shortcut)
|
||||
|
||||
|
||||
def attach_cache_cmd(
|
||||
cache_dev: str, cache_id: str, force: bool = False, shortcut: bool = False
|
||||
) -> str:
|
||||
command = " --attach-cache"
|
||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if force:
|
||||
command += " -f" if shortcut else " --force"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def detach_cache_cmd(cache_id: str, shortcut: bool = False) -> str:
|
||||
command = " --detach-cache"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False) -> str:
|
||||
command = " -T" if shortcut else " --stop-cache"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if no_data_flush:
|
||||
command += " --no-data-flush"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def _set_param_cmd(name: str, cache_id: str, shortcut: bool = False) -> str:
|
||||
command = (" X -n" if shortcut else " --set-param --name ") + name
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
return command
|
||||
|
||||
|
||||
def set_param_cutoff_cmd(
|
||||
cache_id: str,
|
||||
core_id: str = None,
|
||||
threshold: str = None,
|
||||
policy: str = None,
|
||||
promotion_count: str = None,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
name = "seq-cutoff"
|
||||
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||
if core_id:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
if threshold:
|
||||
command += (" -t " if shortcut else " --threshold ") + threshold
|
||||
if policy:
|
||||
command += (" -p " if shortcut else " --policy ") + policy
|
||||
if promotion_count:
|
||||
command += " --promotion-count " + promotion_count
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
|
||||
name = "promotion"
|
||||
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||
command += (" -p " if shortcut else " --policy ") + policy
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_param_promotion_nhit_cmd(
|
||||
cache_id: str, threshold: str = None, trigger: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
name = "promotion-nhit"
|
||||
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||
if threshold:
|
||||
command += (" -t " if shortcut else " --threshold ") + threshold
|
||||
if trigger is not None:
|
||||
command += (" -o " if shortcut else " --trigger ") + trigger
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
|
||||
name = "cleaning"
|
||||
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||
command += (" -p " if shortcut else " --policy ") + policy
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_param_cleaning_alru_cmd(
|
||||
cache_id: str,
|
||||
wake_up: str = None,
|
||||
staleness_time: str = None,
|
||||
flush_max_buffers: str = None,
|
||||
activity_threshold: str = None,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
name = "cleaning-alru"
|
||||
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||
if wake_up:
|
||||
command += (" -w " if shortcut else " --wake-up ") + wake_up
|
||||
if staleness_time:
|
||||
command += (" -s " if shortcut else " --staleness-time ") + staleness_time
|
||||
if flush_max_buffers:
|
||||
command += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
|
||||
if activity_threshold:
|
||||
command += (" -t " if shortcut else " --activity-threshold ") + activity_threshold
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_param_cleaning_acp_cmd(
|
||||
cache_id: str,
|
||||
wake_up: str = None,
|
||||
flush_max_buffers: str = None,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
name = "cleaning-acp"
|
||||
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||
if wake_up is not None:
|
||||
command += (" -w " if shortcut else " --wake-up ") + wake_up
|
||||
if flush_max_buffers is not None:
|
||||
command += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def _get_param_cmd(
|
||||
name: str,
|
||||
cache_id: str,
|
||||
output_format: str = None,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
command = (" -G -n" if shortcut else " --get-param --name ") + name
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if output_format:
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
return command
|
||||
|
||||
|
||||
def get_param_cutoff_cmd(
|
||||
cache_id: str, core_id: str, output_format: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
name = "seq-cutoff"
|
||||
command = _get_param_cmd(
|
||||
name=name,
|
||||
cache_id=cache_id,
|
||||
output_format=output_format,
|
||||
shortcut=shortcut,
|
||||
)
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def get_param_promotion_cmd(
|
||||
cache_id: str, output_format: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
name = "promotion"
|
||||
command = _get_param_cmd(
|
||||
name=name,
|
||||
cache_id=cache_id,
|
||||
output_format=output_format,
|
||||
shortcut=shortcut,
|
||||
)
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def get_param_promotion_nhit_cmd(
|
||||
cache_id: str, output_format: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
name = "promotion-nhit"
|
||||
command = _get_param_cmd(
|
||||
name=name,
|
||||
cache_id=cache_id,
|
||||
output_format=output_format,
|
||||
shortcut=shortcut,
|
||||
)
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False) -> str:
|
||||
name = "cleaning"
|
||||
command = _get_param_cmd(
|
||||
name=name, cache_id=cache_id, output_format=output_format, shortcut=shortcut
|
||||
)
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def get_param_cleaning_alru_cmd(
|
||||
cache_id: str, output_format: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
name = "cleaning-alru"
|
||||
command = _get_param_cmd(
|
||||
name=name, cache_id=cache_id, output_format=output_format, shortcut=shortcut
|
||||
)
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def get_param_cleaning_acp_cmd(
|
||||
cache_id: str, output_format: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
name = "cleaning-acp"
|
||||
command = _get_param_cmd(
|
||||
name=name, cache_id=cache_id, output_format=output_format, shortcut=shortcut
|
||||
)
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_cache_mode_cmd(
|
||||
cache_mode: str, cache_id: str, flush_cache: str = None, shortcut: bool = False
|
||||
) -> str:
|
||||
command = (" -Q -c" if shortcut else " --set-cache-mode --cache-mode ") + cache_mode
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if flush_cache:
|
||||
command += (" -f " if shortcut else " --flush-cache ") + flush_cache
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False) -> str:
|
||||
command = " -A " if shortcut else " --add-core"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
command += (" -d " if shortcut else " --core-device ") + core_dev
|
||||
if core_id:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def remove_core_cmd(
|
||||
cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
|
||||
) -> str:
|
||||
command = " -R " if shortcut else " --remove-core"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
if force:
|
||||
command += " -f" if shortcut else " --force"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def remove_inactive_cmd(
|
||||
cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
|
||||
) -> str:
|
||||
command = " --remove-inactive"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
if force:
|
||||
command += " -f" if shortcut else " --force"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def remove_detached_cmd(core_device: str, shortcut: bool = False) -> str:
|
||||
command = " --remove-detached"
|
||||
command += (" -d " if shortcut else " --device ") + core_device
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def list_caches_cmd(
|
||||
output_format: str = None, by_id_path: bool = True, shortcut: bool = False
|
||||
) -> str:
|
||||
command = " -L" if shortcut else " --list-caches"
|
||||
if output_format:
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
if by_id_path:
|
||||
command += " -b" if shortcut else " --by-id-path"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def print_statistics_cmd(
|
||||
cache_id: str,
|
||||
core_id: str = None,
|
||||
io_class_id: str = None,
|
||||
filter: str = None,
|
||||
output_format: str = None,
|
||||
by_id_path: bool = True,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
command = " -P" if shortcut else " --stats"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if core_id:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
if io_class_id:
|
||||
command += (" -d " if shortcut else " --io-class-id ") + io_class_id
|
||||
if filter:
|
||||
command += (" -f " if shortcut else " --filter ") + filter
|
||||
if output_format:
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
if by_id_path:
|
||||
command += " -b " if shortcut else " --by-id-path "
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False) -> str:
|
||||
command = " -Z" if shortcut else " --reset-counters"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if core_id is not None:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def flush_cache_cmd(cache_id: str, shortcut: bool = False) -> str:
|
||||
command = " -F" if shortcut else " --flush-cache"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False) -> str:
|
||||
command = " -F" if shortcut else " --flush-cache"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False) -> str:
|
||||
command = " -C -C" if shortcut else " --io-class --load-config"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
command += (" -f " if shortcut else " --file ") + file
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False) -> str:
|
||||
command = " -C -L" if shortcut else " --io-class --list"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def version_cmd(output_format: str = None, shortcut: bool = False) -> str:
|
||||
command = " -V" if shortcut else " --version"
|
||||
if output_format:
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def help_cmd(shortcut: bool = False) -> str:
|
||||
command = " -H" if shortcut else " --help"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def standby_init_cmd(
|
||||
cache_dev: str,
|
||||
cache_id: str,
|
||||
cache_line_size: str,
|
||||
force: bool = False,
|
||||
shortcut: bool = False,
|
||||
) -> str:
|
||||
command = " --standby --init"
|
||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
@ -119,229 +380,95 @@ def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def standby_load_cmd(cache_dev: str, shortcut: bool = False):
|
||||
def standby_load_cmd(cache_dev: str, shortcut: bool = False) -> str:
|
||||
command = " --standby --load"
|
||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def standby_detach_cmd(cache_id: str, shortcut: bool = False):
|
||||
def standby_detach_cmd(cache_id: str, shortcut: bool = False) -> str:
|
||||
command = " --standby --detach"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False):
|
||||
def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False) -> str:
|
||||
command = " --standby --activate"
|
||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
|
||||
io_class_id: str = None, filter: str = None,
|
||||
output_format: str = None, by_id_path: bool = True,
|
||||
shortcut: bool = False):
|
||||
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
|
||||
if core_id is not None:
|
||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||
if per_io_class:
|
||||
command += " -d" if shortcut else " --io-class-id"
|
||||
if io_class_id is not None:
|
||||
command += " " + io_class_id
|
||||
elif io_class_id is not None:
|
||||
raise Exception("Per io class flag not set but ID given.")
|
||||
if filter is not None:
|
||||
command += (" -f " if shortcut else " --filter ") + filter
|
||||
if output_format is not None:
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
if by_id_path:
|
||||
command += (" -b " if shortcut else " --by-id-path ")
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False):
|
||||
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False) -> str:
|
||||
command = " --zero-metadata"
|
||||
command += (" -d " if shortcut else " --device ") + cache_dev
|
||||
if force:
|
||||
command += (" -f" if shortcut else " --force")
|
||||
command += " -f" if shortcut else " --force"
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False):
|
||||
command = " -T " if shortcut else " --stop-cache"
|
||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||
if no_data_flush:
|
||||
command += " --no-data-flush"
|
||||
return casadm_bin + command
|
||||
# casctl command
|
||||
|
||||
|
||||
def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False):
|
||||
command = " -L" if shortcut else " --list-caches"
|
||||
if output_format == "table" or output_format == "csv":
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
if by_id_path:
|
||||
command += (" -b " if shortcut else " --by-id-path ")
|
||||
return casadm_bin + command
|
||||
def ctl_help(shortcut: bool = False) -> str:
|
||||
command = " --help" if shortcut else " -h"
|
||||
return casctl + command
|
||||
|
||||
|
||||
def load_cmd(cache_dev: str, shortcut: bool = False):
|
||||
return start_cmd(cache_dev, load=True, shortcut=shortcut)
|
||||
def ctl_start() -> str:
|
||||
command = " start"
|
||||
return casctl + command
|
||||
|
||||
|
||||
def version_cmd(output_format: str = None, shortcut: bool = False):
|
||||
command = " -V" if shortcut else " --version"
|
||||
if output_format == "table" or output_format == "csv":
|
||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def set_cache_mode_cmd(cache_mode: str, cache_id: str,
|
||||
flush_cache: str = None, shortcut: bool = False):
|
||||
command = f" -Q -c {cache_mode} -i {cache_id}" if shortcut else \
|
||||
f" --set-cache-mode --cache-mode {cache_mode} --cache-id {cache_id}"
|
||||
if flush_cache:
|
||||
command += (" -f " if shortcut else " --flush-cache ") + flush_cache
|
||||
return casadm_bin + command
|
||||
|
||||
|
||||
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False):
    """Build a casadm command that loads IO class configuration from a file."""
    if shortcut:
        cmd = f" -C -C -i {cache_id} -f {file}"
    else:
        cmd = f" --io-class --load-config --cache-id {cache_id} --file {file}"
    return casadm_bin + cmd
|
||||
|
||||
|
||||
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False):
    """Build a casadm command that lists configured IO classes."""
    if shortcut:
        cmd = f" -C -L -i {cache_id} -o {output_format}"
    else:
        cmd = f" --io-class --list --cache-id {cache_id} --output-format {output_format}"
    return casadm_bin + cmd
|
||||
|
||||
|
||||
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
                   additional_params: str = None, shortcut: bool = False):
    """Build a casadm get-param command for the given parameter namespace.

    :param namespace: parameter group name (e.g. "seq-cutoff", "cleaning")
    :param additional_params: pre-formatted extra options, appended verbatim
    """
    if shortcut:
        cmd = f" -G -n {namespace} -i {cache_id}"
    else:
        cmd = f" --get-param --name {namespace} --cache-id {cache_id}"
    if additional_params is not None:
        cmd += additional_params
    if output_format is not None:
        cmd += (" -o " if shortcut else " --output-format ") + output_format
    return casadm_bin + cmd
|
||||
|
||||
|
||||
def get_param_cutoff_cmd(cache_id: str, core_id: str,
                         output_format: str = None, shortcut: bool = False):
    """Build a get-param command for per-core sequential cutoff settings."""
    core_opt = (" -j " if shortcut else " --core-id ") + core_id
    return _get_param_cmd(
        namespace="seq-cutoff",
        cache_id=cache_id,
        output_format=output_format,
        additional_params=core_opt,
        shortcut=shortcut,
    )
|
||||
|
||||
|
||||
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
    """Build a get-param command for cleaning-policy settings."""
    return _get_param_cmd(
        namespace="cleaning",
        cache_id=cache_id,
        output_format=output_format,
        shortcut=shortcut,
    )
|
||||
|
||||
|
||||
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
    """Build a get-param command for ALRU cleaning-policy settings."""
    return _get_param_cmd(
        namespace="cleaning-alru",
        cache_id=cache_id,
        output_format=output_format,
        shortcut=shortcut,
    )
|
||||
|
||||
|
||||
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
    """Build a get-param command for ACP cleaning-policy settings."""
    return _get_param_cmd(
        namespace="cleaning-acp",
        cache_id=cache_id,
        output_format=output_format,
        shortcut=shortcut,
    )
|
||||
|
||||
|
||||
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
                   shortcut: bool = False):
    """Build a casadm set-param command for the given parameter namespace.

    :param namespace: parameter group name (e.g. "seq-cutoff", "promotion")
    :param additional_params: pre-formatted extra options, appended verbatim

    Fix: additional_params defaults to None but was concatenated
    unconditionally (`command += additional_params`), which raised TypeError
    whenever the argument was omitted; it is now appended only when provided.
    """
    command = (f" -X -n {namespace} -i {cache_id}" if shortcut
               else f" --set-param --name {namespace} --cache-id {cache_id}")
    if additional_params is not None:
        command += additional_params
    return casadm_bin + command
|
||||
|
||||
|
||||
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None,
                         policy: str = None, promotion_count: str = None,
                         shortcut: bool = False):
    """Build a set-param command for sequential cutoff settings.

    Only the options whose arguments are not None are emitted; promotion
    count has no short form, so it always uses the long flag.
    """
    opts = []
    if core_id is not None:
        opts.append((" -j " if shortcut else " --core-id ") + str(core_id))
    if threshold is not None:
        opts.append((" -t " if shortcut else " --threshold ") + str(threshold))
    if policy is not None:
        opts.append((" -p " if shortcut else " --policy ") + policy)
    if promotion_count is not None:
        opts.append(" --promotion-count " + str(promotion_count))
    return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
                          additional_params="".join(opts), shortcut=shortcut)
|
||||
|
||||
|
||||
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False):
    """Build a set-param command selecting the promotion policy."""
    policy_opt = (" -p " if shortcut else " --policy ") + policy
    return _set_param_cmd(namespace="promotion", cache_id=cache_id,
                          additional_params=policy_opt, shortcut=shortcut)
|
||||
|
||||
|
||||
def set_param_promotion_nhit_cmd(
    cache_id: str, threshold=None, trigger=None, shortcut: bool = False
):
    """Build a set-param command for NHIT promotion-policy settings."""
    opts = []
    if threshold is not None:
        opts.append((" -t " if shortcut else " --threshold ") + str(threshold))
    if trigger is not None:
        opts.append((" -o " if shortcut else " --trigger ") + str(trigger))
    return _set_param_cmd(namespace="promotion-nhit", cache_id=cache_id,
                          additional_params="".join(opts), shortcut=shortcut)
|
||||
|
||||
|
||||
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False):
    """Build a set-param command selecting the cleaning policy."""
    policy_opt = (" -p " if shortcut else " --policy ") + policy
    return _set_param_cmd(namespace="cleaning", cache_id=cache_id,
                          additional_params=policy_opt, shortcut=shortcut)
|
||||
|
||||
|
||||
def set_param_cleaning_alru_cmd(cache_id, wake_up=None, staleness_time=None,
                                flush_max_buffers=None, activity_threshold=None,
                                shortcut: bool = False):
    """Build a set-param command for ALRU cleaning-policy settings.

    Each option is emitted only when its argument is not None.
    """
    opts = []
    if wake_up is not None:
        opts.append((" -w " if shortcut else " --wake-up ") + str(wake_up))
    if staleness_time is not None:
        opts.append((" -s " if shortcut else " --staleness-time ") + str(staleness_time))
    if flush_max_buffers is not None:
        opts.append((" -b " if shortcut else " --flush-max-buffers ") + str(flush_max_buffers))
    if activity_threshold is not None:
        opts.append((" -t " if shortcut else " --activity-threshold ") + str(activity_threshold))
    return _set_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
                          additional_params="".join(opts), shortcut=shortcut)
|
||||
|
||||
|
||||
def set_param_cleaning_acp_cmd(cache_id: str, wake_up: str = None,
                               flush_max_buffers: str = None, shortcut: bool = False):
    """Build a set-param command for ACP cleaning-policy settings."""
    opts = []
    if wake_up is not None:
        opts.append((" -w " if shortcut else " --wake-up ") + wake_up)
    if flush_max_buffers is not None:
        opts.append((" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers)
    return _set_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
                          additional_params="".join(opts), shortcut=shortcut)
|
||||
|
||||
|
||||
def ctl_help(shortcut: bool = False):
    """Build a casctl help command line.

    Fixes two defects in `return casctl + " --help" if shortcut else " -h"`:
    operator precedence made the function return the bare flag string
    (without the casctl prefix) when shortcut was False, and the short/long
    flags were inverted relative to the convention used by all other helpers.
    """
    return casctl + (" -h" if shortcut else " --help")
|
||||
|
||||
|
||||
def ctl_start():
    """Build a casctl start command line."""
    command = " start"
    return casctl + command
|
||||
|
||||
|
||||
def ctl_stop(flush: bool = False) -> str:
    """Build a casctl stop command line; optionally flush dirty data first.

    Fix: this span contained two merged (pre/post-change) copies of the
    function body from diff residue; collapsed into the single intended
    definition that prefixes the command with casctl before returning.
    """
    command = " stop"
    if flush:
        command += " --flush"
    return casctl + command
|
||||
|
||||
|
||||
def ctl_init(force: bool = False) -> str:
    """Build a casctl init command line; optionally force initialization.

    Fix: this span contained two merged (pre/post-change) copies of the
    function body from diff residue; collapsed into the single intended
    definition that prefixes the command with casctl before returning.
    """
    command = " init"
    if force:
        command += " --force"
    return casctl + command
|
||||
|
||||
|
||||
# casadm script
|
||||
|
||||
|
||||
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str) -> str:
    """Build a casadm script command that try-adds a core to a cache.

    :param cache_id: id of the target cache instance (as string)
    :param core_dev: path to the core device
    :param core_id: id to assign to the core (as string)
    """
    command = " --script --add-core --try-add"
    command += " --cache-id " + cache_id
    command += " --core-device " + core_dev
    # fix: dropped a pointless f-string prefix on a literal with no placeholders
    command += " --core-id " + core_id
    return casadm_bin + command
|
||||
|
||||
|
||||
def script_purge_cache_cmd(cache_id: str) -> str:
    """Build a casadm script command purging a whole cache instance."""
    return casadm_bin + " --script --purge-cache --cache-id " + cache_id
|
||||
|
||||
|
||||
def script_purge_core_cmd(cache_id: str, core_id: str) -> str:
    """Build a casadm script command purging a single core."""
    return (casadm_bin
            + f" --script --purge-core --cache-id {cache_id} --core-id {core_id}")
|
||||
|
||||
|
||||
def script_detach_core_cmd(cache_id: str, core_id: str) -> str:
    """Build a casadm script command detaching a core from its cache."""
    return (casadm_bin
            + f" --script --remove-core --detach --cache-id {cache_id} --core-id {core_id}")
|
||||
|
||||
|
||||
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False) -> str:
    """Build a casadm script command removing a core.

    :param no_flush: skip flushing dirty data before removal
    """
    command = f" --script --remove-core --cache-id {cache_id} --core-id {core_id}"
    if no_flush:
        command += " --no-flush"
    return casadm_bin + command
|
||||
|
@ -9,6 +9,8 @@ casadm_help = [
|
||||
r"Usage: casadm \<command\> \[option\.\.\.\]",
|
||||
r"Available commands:",
|
||||
r"-S --start-cache Start new cache instance or load using metadata",
|
||||
r"--attach-cache Attach cache device",
|
||||
r"--detach-cache Detach cache device",
|
||||
r"-T --stop-cache Stop cache instance",
|
||||
r"-X --set-param Set various runtime parameters",
|
||||
r"-G --get-param Get various runtime parameters",
|
||||
@ -29,112 +31,101 @@ casadm_help = [
|
||||
r"e\.g\.",
|
||||
r"casadm --start-cache --help",
|
||||
r"For more information, please refer to manual, Admin Guide \(man casadm\)",
|
||||
r"or go to support page \<https://open-cas\.github\.io\>\."
|
||||
r"or go to support page \<https://open-cas\.github\.io\>\.",
|
||||
]
|
||||
|
||||
help_help = [
|
||||
r"Usage: casadm --help",
|
||||
r"Print help"
|
||||
# Expected help output (one regex per printed line) for `casadm --start-cache -H`.
# Adjacent string literals without commas are intentional continuations of one line.
start_cache_help = [
    r"Usage: casadm --start-cache --cache-device \<DEVICE\> \[option\.\.\.\]",
    r"Start new cache instance or load using metadata",
    r"Options that are valid with --start-cache \(-S\) are:",
    r"-d --cache-device \<DEVICE\> Caching device to be used",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\> "
    r"\(if not provided, the first available number will be used\)",
    r"-l --load Load cache metadata from caching device "
    r"\(DANGEROUS - see manual or Admin Guide for details\)",
    r"-f --force Force the creation of cache instance",
    r"-c --cache-mode \<NAME\> Set cache mode from available: \{wt|wb|wa|pt|wo\} "
    r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; "
    r"without this parameter Write-Through will be set by default",
    r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: "
    r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)",
]
|
||||
|
||||
version_help = [
|
||||
r"Usage: casadm --version \[option\.\.\.\]",
|
||||
r"Print CAS version",
|
||||
r"Options that are valid with --version \(-V\) are:"
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
||||
# Expected help output (one regex per printed line) for `casadm --attach-cache -H`.
attach_cache_help = [
    r"Usage: casadm --attach-cache --cache-device \<DEVICE\> --cache-id \<ID\> \[option\.\.\.\]",
    r"Attach cache device",
    r"Options that are valid with --attach-cache are:",
    r"-d --cache-device \<DEVICE\> Caching device to be used",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\> "
    r"\(if not provided, the first available number will be used\)",
    r"-f --force Force attaching the cache device",
]
|
||||
|
||||
ioclass_help = [
|
||||
r"Usage: casadm --io-class \{--load-config|--list\}",
|
||||
r"Manage IO classes",
|
||||
r"Loads configuration for IO classes:",
|
||||
r"Usage: casadm --io-class --load-config --cache-id \<ID\> --file \<FILE\>",
|
||||
r"Options that are valid with --load-config \(-C\) are:",
|
||||
detach_cache_help = [
|
||||
r"Usage: casadm --detach-cache --cache-id \<ID\>",
|
||||
r"Detach cache device",
|
||||
r"Options that are valid with --detach-cache are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-f --file \<FILE\> Configuration file containing IO class definition",
|
||||
r"Lists currently configured IO classes:",
|
||||
r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Options that are valid with --list \(-L\) are:",
|
||||
]
|
||||
|
||||
|
||||
stop_cache_help = [
|
||||
r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Stop cache instance",
|
||||
r"Options that are valid with --stop-cache \(-T\) are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
||||
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)",
|
||||
]
|
||||
|
||||
flush_cache_help = [
|
||||
r"Usage: casadm --flush-cache --cache-id \<ID\>",
|
||||
r"Flush all dirty data from the caching device to core devices",
|
||||
r"Options that are valid with --flush-cache \(-F\) are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache instance"
|
||||
]
|
||||
|
||||
reset_counters_help = [
|
||||
r"Usage: casadm --reset-counters --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Reset cache statistics for core device within cache instance",
|
||||
r"Options that are valid with --reset-counters \(-Z\) are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||
r"instance. If not specified, statistics are reset for all cores in cache instance\."
|
||||
]
|
||||
|
||||
stats_help = [
|
||||
r"Usage: casadm --stats --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Print statistics for cache instance",
|
||||
r"Options that are valid with --stats \(-P\) are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones "
|
||||
r"pertaining to a specific core. If this option is not given, casadm will display statistics "
|
||||
r"pertaining to all cores assigned to given cache instance\.",
|
||||
r"-d --io-class-id \[\<ID\>\] Display per IO class statistics",
|
||||
r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: "
|
||||
r"\{all, conf, usage, req, blk, err\}",
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
||||
]
|
||||
|
||||
list_help = [
|
||||
r"Usage: casadm --list-caches \[option\.\.\.\]",
|
||||
r"List all cache instances and core devices",
|
||||
r"Options that are valid with --list-caches \(-L\) are:",
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
||||
]
|
||||
|
||||
remove_detached_help = [
|
||||
r"Usage: casadm --remove-detached --device \<DEVICE\>",
|
||||
r"Remove core device from core pool",
|
||||
r"Options that are valid with --remove-detached are:",
|
||||
r"-d --device \<DEVICE\> Path to core device"
|
||||
]
|
||||
|
||||
remove_core_help = [
|
||||
r"Usage: casadm --remove-core --cache-id \<ID\> --core-id \<ID\> \[option\.\.\.\]",
|
||||
r"Remove active core device from cache instance",
|
||||
r"Options that are valid with --remove-core \(-R\) are:",
|
||||
set_params_help = [
|
||||
r"Usage: casadm --set-param --name \<NAME\>",
|
||||
r"Set various runtime parameters",
|
||||
r"Valid values of NAME are:",
|
||||
r"seq-cutoff - Sequential cutoff parameters",
|
||||
r"cleaning - Cleaning policy parameters",
|
||||
r"promotion - Promotion policy parameters",
|
||||
r"promotion-nhit - Promotion policy NHIT parameters",
|
||||
r"cleaning-alru - Cleaning policy ALRU parameters",
|
||||
r"cleaning-acp - Cleaning policy ACP parameters",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) seq-cutoff are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||
r"instance",
|
||||
r"-f --force Force active core removal without data flush"
|
||||
]
|
||||
|
||||
add_core_help = [
|
||||
r"Usage: casadm --add-core --cache-id \<ID\> --core-device \<DEVICE\> \[option\.\.\.\]",
|
||||
r"Add core device to cache instance",
|
||||
r"Options that are valid with --add-core \(-A\) are:",
|
||||
r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]",
|
||||
r"-p --policy \<POLICY\> Sequential cutoff policy\. Available policies: "
|
||||
r"\{always|full|never\}",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||
r"instance",
|
||||
r"-d --core-device \<DEVICE\> Path to core device"
|
||||
|
||||
]
|
||||
|
||||
set_cache_mode_help = [
|
||||
r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Set cache mode",
|
||||
r"Options that are valid with --set-cache-mode \(-Q\) are:",
|
||||
r"-c --cache-mode \<NAME\> Cache mode. Available cache modes: \{wt|wb|wa|pt|wo\}",
|
||||
r"-p --policy \<POLICY\> Cleaning policy type\. Available policy types: "
|
||||
r"\{nop|alru|acp\}",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching "
|
||||
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode"
|
||||
r"-p --policy \<POLICY\> Promotion policy type\. Available policy types: "
|
||||
r"\{always|nhit\}",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-t --threshold \<NUMBER\> Number of requests for given core line after which "
|
||||
r"NHIT policy allows insertion into cache \<2-1000\> \(default: 3\)",
|
||||
r"-o --trigger \<NUMBER\> Cache occupancy value over which NHIT promotion "
|
||||
r"is active \<0-100\>\[\%\] \(default: 80\%\)",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-alru are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-w --wake-up \<NUMBER\> Cleaning thread sleep time after an idle wake up "
|
||||
r"\<0-3600\>\[s\] \(default: 20 s\)",
|
||||
r"-s --staleness-time \<NUMBER\> Time that has to pass from the last write operation "
|
||||
r"before a dirty cache block can be scheduled to be flushed \<1-3600\>\[s\] \(default: 120 s\)",
|
||||
r"-b --flush-max-buffers \<NUMBER\> Number of dirty cache blocks to be flushed in one "
|
||||
r"cleaning cycle \<1-10000\> \(default: 100\)",
|
||||
r"-t --activity-threshold \<NUMBER\> Cache idle time before flushing thread can start "
|
||||
r"\<0-1000000\>\[ms\] \(default: 10000 ms\)",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-acp are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations "
|
||||
r"\<0-10000\>\[ms\] \(default: 10 ms\)",
|
||||
r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning "
|
||||
r"thread iteration \<1-10000\> \(default: 128\)",
|
||||
]
|
||||
|
||||
|
||||
get_params_help = [
|
||||
r"Usage: casadm --get-param --name \<NAME\>",
|
||||
r"Get various runtime parameters",
|
||||
@ -164,99 +155,142 @@ get_params_help = [
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||
r"Options that are valid with --get-param \(-G\) --name \(-n\) promotion-nhit are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||
]
|
||||
|
||||
set_params_help = [
|
||||
r"Usage: casadm --set-param --name \<NAME\>",
|
||||
r"Set various runtime parameters",
|
||||
r"Valid values of NAME are:",
|
||||
r"seq-cutoff - Sequential cutoff parameters",
|
||||
r"cleaning - Cleaning policy parameters",
|
||||
r"promotion - Promotion policy parameters",
|
||||
r"promotion-nhit - Promotion policy NHIT parameters",
|
||||
r"cleaning-alru - Cleaning policy ALRU parameters",
|
||||
r"cleaning-acp - Cleaning policy ACP parameters",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) seq-cutoff are:",
|
||||
|
||||
set_cache_mode_help = [
|
||||
r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Set cache mode",
|
||||
r"Options that are valid with --set-cache-mode \(-Q\) are:",
|
||||
r"-c --cache-mode \<NAME\> Cache mode\. Available cache modes: \{wt|wb|wa|pt|wo\}",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching "
|
||||
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode",
|
||||
]
|
||||
|
||||
|
||||
add_core_help = [
|
||||
r"Usage: casadm --add-core --cache-id \<ID\> --core-device \<DEVICE\> \[option\.\.\.\]",
|
||||
r"Add core device to cache instance",
|
||||
r"Options that are valid with --add-core \(-A\) are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||
r"instance",
|
||||
r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]",
|
||||
r"-p --policy \<POLICY\> Sequential cutoff policy. Available policies: "
|
||||
r"\{always|full|never\}",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-p --policy \<POLICY\> Cleaning policy type. Available policy types: "
|
||||
r"\{nop|alru|acp\}",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-p --policy \<POLICY\> Promotion policy type. Available policy types: "
|
||||
r"\{always|nhit\}",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-t --threshold \<NUMBER\> Number of requests for given core line after which "
|
||||
r"NHIT policy allows insertion into cache \<2-1000\> \(default: 3\)",
|
||||
r"-o --trigger \<NUMBER\> Cache occupancy value over which NHIT promotion "
|
||||
r"is active \<0-100\>\[\%\] \(default: 80\%\)",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-alru are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-w --wake-up \<NUMBER\> Cleaning thread sleep time after an idle wake up "
|
||||
r"\<0-3600\>\[s\] \(default: 20 s\)",
|
||||
r"-s --staleness-time \<NUMBER\> Time that has to pass from the last write operation "
|
||||
r"before a dirty cache block can be scheduled to be flushed \<1-3600\>\[s\] \(default: 120 s\)",
|
||||
r"-b --flush-max-buffers \<NUMBER\> Number of dirty cache blocks to be flushed in one "
|
||||
r"cleaning cycle \<1-10000\> \(default: 100\)",
|
||||
r"-t --activity-threshold \<NUMBER\> Cache idle time before flushing thread can start "
|
||||
r"\<0-1000000\>\[ms\] \(default: 10000 ms\)",
|
||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-acp are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations "
|
||||
r"\<0-10000\>\[ms\] \(default: 10 ms\)",
|
||||
r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning "
|
||||
r"thread iteration \<1-10000\> \(default: 128\)"
|
||||
r"-d --core-device \<DEVICE\> Path to core device",
|
||||
]
|
||||
|
||||
stop_cache_help = [
|
||||
r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]",
|
||||
r"Stop cache instance",
|
||||
r"Options that are valid with --stop-cache \(-T\) are:",
|
||||
remove_core_help = [
|
||||
r"Usage: casadm --remove-core --cache-id \<ID\> --core-id \<ID\> \[option\.\.\.\]",
|
||||
r"Remove active core device from cache instance",
|
||||
r"Options that are valid with --remove-core \(-R\) are:",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)"
|
||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||
r"instance",
|
||||
r"-f --force Force active core removal without data flush",
|
||||
]
|
||||
|
||||
start_cache_help = [
|
||||
r"Usage: casadm --start-cache --cache-device \<DEVICE\> \[option\.\.\.\]",
|
||||
r"Start new cache instance or load using metadata",
|
||||
r"Options that are valid with --start-cache \(-S\) are:",
|
||||
r"-d --cache-device \<DEVICE\> Caching device to be used",
|
||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\> "
|
||||
r"\(if not provided, the first available number will be used\)",
|
||||
r"-l --load Load cache metadata from caching device "
|
||||
r"\(DANGEROUS - see manual or Admin Guide for details\)",
|
||||
r"-f --force Force the creation of cache instance",
|
||||
r"-c --cache-mode \<NAME\> Set cache mode from available: \{wt|wb|wa|pt|wo\} "
|
||||
r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; "
|
||||
r"without this parameter Write-Through will be set by default",
|
||||
r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: "
|
||||
r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)"
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --remove-inactive -H`.
remove_inactive_help = [
    r"casadm --remove-inactive --cache-id \<ID\> --core-id \<ID\> \[option\.\.\.\]",
    r"Remove inactive core device from cache instance",
    r"Options that are valid with --remove-inactive are:",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
    r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given "
    r"cache instance",
    r"-f --force Force dirty inactive core removal",
]
|
||||
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --remove-detached -H`.
remove_detached_help = [
    r"Usage: casadm --remove-detached --device \<DEVICE\>",
    r"Remove core device from core pool",
    r"Options that are valid with --remove-detached are:",
    r"-d --device \<DEVICE\> Path to core device",
]
|
||||
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --list-caches -H`.
list_caches_help = [
    r"Usage: casadm --list-caches \[option\.\.\.\]",
    r"List all cache instances and core devices",
    r"Options that are valid with --list-caches \(-L\) are:",
    r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --stats -H`.
# Adjacent string literals without commas are intentional continuations of one line.
stats_help = [
    r"Usage: casadm --stats --cache-id \<ID\> \[option\.\.\.\]",
    r"Print statistics for cache instance",
    r"Options that are valid with --stats \(-P\) are:",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
    r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones "
    r"pertaining to a specific core\. If this option is not given, casadm will display statistics "
    r"pertaining to all cores assigned to given cache instance\.",
    r"-d --io-class-id \[\<ID\>\] Display per IO class statistics",
    r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: "
    r"\{all, conf, usage, req, blk, err\}",
    r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
|
||||
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --reset-counters -H`.
reset_counters_help = [
    r"Usage: casadm --reset-counters --cache-id \<ID\> \[option\.\.\.\]",
    r"Reset cache statistics for core device within cache instance",
    r"Options that are valid with --reset-counters \(-Z\) are:",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
    r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
    r"instance\. If not specified, statistics are reset for all cores in cache instance\.",
]
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --flush-cache -H`.
flush_cache_help = [
    r"Usage: casadm --flush-cache --cache-id \<ID\>",
    r"Flush all dirty data from the caching device to core devices",
    r"Options that are valid with --flush-cache \(-F\) are:",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
    r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache "
    r"instance",
]
|
||||
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --io-class -H`.
ioclass_help = [
    r"Usage: casadm --io-class \{--load-config|--list\}",
    r"Manage IO classes",
    r"Loads configuration for IO classes:",
    r"Usage: casadm --io-class --load-config --cache-id \<ID\> --file \<FILE\>",
    r"Options that are valid with --load-config \(-C\) are:",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
    r"-f --file \<FILE\> Configuration file containing IO class definition",
    r"Lists currently configured IO classes:",
    r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]",
    r"Options that are valid with --list \(-L\) are:",
    r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
    r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
|
||||
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --version -H`.
version_help = [
    r"Usage: casadm --version \[option\.\.\.\]",
    r"Print CAS version",
    # fix: a missing trailing comma after the "are:" line caused implicit
    # string concatenation, silently merging two expected lines into one
    r"Options that are valid with --version \(-V\) are:",
    r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
]
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --help`.
help_help = [r"Usage: casadm --help", r"Print help"]
|
||||
|
||||
|
||||
# Expected output when a standby command is issued on a build without standby support.
standby_help = [
    r"The command is not supported"
]
|
||||
|
||||
# Expected help output (one regex per printed line) for `casadm --zero-metadata -H`.
# NOTE(review): this span held merged pre/post-change variants of the Usage and
# --force lines (diff residue); deduplicated to the post-change, fully-escaped form.
zero_metadata_help = [
    r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option\.\.\.\]",
    r"Clear metadata from caching device",
    r"Options that are valid with --zero-metadata are:",
    r"-d --device \<DEVICE\> Path to device on which metadata would be cleared",
    r"-f --force Ignore potential dirty data on cache device",
]
|
||||
|
||||
# Expected stderr (regex) when casadm is invoked with an unknown command.
unrecognized_stderr = [
    r"Unrecognized command -\S+",
]
|
||||
|
||||
# Expected stdout (regex) when casadm is invoked with an unknown command.
# fix: the span contained two assignments of the same list (pre/post-change
# diff residue); collapsed into a single definition.
unrecognized_stdout = [r"Try \`casadm --help | -H\' for more information\."]
|
||||
|
@ -1,5 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -18,32 +19,30 @@ start_cache_with_existing_metadata = [
|
||||
r"Error inserting cache \d+",
|
||||
r"Old metadata found on device\.",
|
||||
r"Please load cache metadata using --load option or use --force to",
|
||||
r" discard on-disk metadata and start fresh cache instance\."
|
||||
r" discard on-disk metadata and start fresh cache instance\.",
|
||||
]
|
||||
|
||||
# Expected error lines (regex) when starting a cache on a device already used as cache.
# fix: removed the duplicated last element (pre/post-change diff residue); the
# comma-less old line would have silently concatenated with the new one.
start_cache_on_already_used_dev = [
    r"Error inserting cache \d+",
    r"Cache device \'\/dev\/\S+\' is already used as cache\.",
]
|
||||
|
||||
# Expected error lines (regex) when starting a cache with an id that is taken.
# fix: removed the duplicated last element (pre/post-change diff residue).
start_cache_with_existing_id = [
    r"Error inserting cache \d+",
    r"Cache ID already exists",
]
|
||||
|
||||
# Expected warning lines (regex) for standby init on a device with a filesystem.
# fix: removed the duplicated last element (pre/post-change diff residue).
standby_init_with_existing_filesystem = [
    r"A filesystem exists on \S+. Specify the --force option if you wish to add the cache anyway.",
    r"Note: this may result in loss of data",
]
|
||||
|
||||
# Generic cache-insertion error line (regex).
# fix: the span contained two assignments of the same list (pre/post-change
# diff residue); collapsed into a single definition.
error_inserting_cache = [r"Error inserting cache \d+"]
|
||||
|
||||
# Expected error lines (regex) when old metadata requires --load or --force.
# fix: removed the duplicated last element (pre/post-change diff residue).
reinitialize_with_force_or_recovery = [
    r"Old metadata found on device\.",
    r"Please load cache metadata using --load option or use --force to",
    r" discard on-disk metadata and start fresh cache instance\.",
]
|
||||
|
||||
remove_inactive_core_with_remove_command = [
|
||||
@ -52,40 +51,36 @@ remove_inactive_core_with_remove_command = [
|
||||
|
||||
remove_inactive_dirty_core = [
|
||||
r"The cache contains dirty data assigned to the core\. If you want to ",
|
||||
r"continue, please use --force option\.\nWarning: the data will be lost"
|
||||
r"continue, please use --force option\.\nWarning: the data will be lost",
|
||||
]
|
||||
|
||||
stop_cache_incomplete = [
|
||||
r"Error while removing cache \d+",
|
||||
r"Cache is in incomplete state - at least one core is inactive"
|
||||
r"Cache is in incomplete state - at least one core is inactive",
|
||||
]
|
||||
|
||||
stop_cache_errors = [
|
||||
r"Removed cache \d+ with errors",
|
||||
r"Error while writing to cache device"
|
||||
r"Error while writing to cache device",
|
||||
]
|
||||
|
||||
get_stats_ioclass_id_not_configured = [
|
||||
r"IO class \d+ is not configured\."
|
||||
]
|
||||
get_stats_ioclass_id_not_configured = [r"IO class \d+ is not configured\."]
|
||||
|
||||
get_stats_ioclass_id_out_of_range = [
|
||||
r"Invalid IO class id, must be in the range 0-32\."
|
||||
]
|
||||
get_stats_ioclass_id_out_of_range = [r"Invalid IO class id, must be in the range 0-32\."]
|
||||
|
||||
remove_multilevel_core = [
|
||||
r"Error while removing core device \d+ from cache instance \d+",
|
||||
r"Device opens or mount are pending to this cache"
|
||||
r"Device opens or mount are pending to this cache",
|
||||
]
|
||||
|
||||
add_cached_core = [
|
||||
r"Error while adding core device to cache instance \d+",
|
||||
r"Core device \'/dev/\S+\' is already cached\."
|
||||
r"Core device \'/dev/\S+\' is already cached\.",
|
||||
]
|
||||
|
||||
already_cached_core = [
|
||||
r"Error while adding core device to cache instance \d+",
|
||||
r"Device already added as a core"
|
||||
r"Device already added as a core",
|
||||
]
|
||||
|
||||
remove_mounted_core = [
|
||||
@ -94,37 +89,31 @@ remove_mounted_core = [
|
||||
|
||||
stop_cache_mounted_core = [
|
||||
r"Error while removing cache \d+",
|
||||
r"Device opens or mount are pending to this cache"
|
||||
r"Device opens or mount are pending to this cache",
|
||||
]
|
||||
|
||||
load_and_force = [
|
||||
r"Use of \'load\' with \'force\', \'cache-id\', \'cache-mode\' or \'cache-line-size\'",
|
||||
r" simultaneously is forbidden."
|
||||
r" simultaneously is forbidden.",
|
||||
]
|
||||
|
||||
try_add_core_sector_size_mismatch = [
|
||||
r"Error while adding core device to cache instance \d+",
|
||||
r"Cache device logical sector size is greater than core device logical sector size\.",
|
||||
r"Consider changing logical sector size on current cache device",
|
||||
r"or try other device with the same logical sector size as core device\."
|
||||
r"or try other device with the same logical sector size as core device\.",
|
||||
]
|
||||
|
||||
no_caches_running = [
|
||||
r"No caches running"
|
||||
]
|
||||
no_caches_running = [r"No caches running"]
|
||||
|
||||
unavailable_device = [
|
||||
r"Error while opening \'\S+\'exclusively\. This can be due to\n"
|
||||
r"cache instance running on this device\. In such case please stop the cache and try again\."
|
||||
]
|
||||
|
||||
error_handling = [
|
||||
r"Error during options handling"
|
||||
]
|
||||
error_handling = [r"Error during options handling"]
|
||||
|
||||
no_cas_metadata = [
|
||||
r"Device \'\S+\' does not contain OpenCAS's metadata\."
|
||||
]
|
||||
no_cas_metadata = [r"Device \'\S+\' does not contain OpenCAS's metadata\."]
|
||||
|
||||
cache_dirty_data = [
|
||||
r"Cache instance contains dirty data\. Clearing metadata will result in loss of dirty data\.\n"
|
||||
@ -140,21 +129,16 @@ cache_dirty_shutdown = [
|
||||
r"Alternatively, if you wish to clear metadata anyway, please use \'--force\' option\."
|
||||
]
|
||||
|
||||
missing_param = [
|
||||
r"Option \'.+\' is missing"
|
||||
]
|
||||
missing_param = [r"Option \'.+\' is missing"]
|
||||
|
||||
disallowed_param = [
|
||||
r"Unrecognized option \S+"
|
||||
]
|
||||
disallowed_param = [r"Unrecognized option \S+"]
|
||||
|
||||
operation_forbiden_in_standby = [
|
||||
r"The operation is not permited while the cache is in the standby mode"
|
||||
]
|
||||
|
||||
mutually_exclusive_params_init = [
|
||||
r"Can\'t use \'load\' and \'init\' options simultaneously\n"
|
||||
r"Error during options handling"
|
||||
r"Can\'t use \'load\' and \'init\' options simultaneously\nError during options handling"
|
||||
]
|
||||
|
||||
mutually_exclusive_params_load = [
|
||||
@ -166,30 +150,22 @@ activate_with_different_cache_id = [
|
||||
r"Cache id specified by user and loaded from metadata are different"
|
||||
]
|
||||
|
||||
cache_activated_successfully = [
|
||||
r"Successfully activated cache instance \d+"
|
||||
]
|
||||
cache_activated_successfully = [r"Successfully activated cache instance \d+"]
|
||||
|
||||
invalid_core_volume_size = [
|
||||
r"Core volume size does not match the size stored in cache metadata"
|
||||
]
|
||||
invalid_core_volume_size = [r"Core volume size does not match the size stored in cache metadata"]
|
||||
|
||||
error_activating_cache = [
|
||||
r"Error activating cache \d+"
|
||||
]
|
||||
error_activating_cache = [r"Error activating cache \d+"]
|
||||
|
||||
activate_without_detach = [
|
||||
r"Cannot open the device exclusively. Make sure to detach cache before activation."
|
||||
]
|
||||
|
||||
cache_line_size_mismatch = [
|
||||
r"Cache line size mismatch"
|
||||
]
|
||||
cache_line_size_mismatch = [r"Cache line size mismatch"]
|
||||
|
||||
headerless_io_class_config = [
|
||||
r'Cannot parse configuration file - unknown column "1"\.\n'
|
||||
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n'
|
||||
r'Please consult Admin Guide to check how columns in configuration file should be named\.'
|
||||
r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
|
||||
r"Please consult Admin Guide to check how columns in configuration file should be named\."
|
||||
]
|
||||
|
||||
illegal_io_class_config_L2C1 = [
|
||||
@ -205,9 +181,7 @@ illegal_io_class_config_L2C4 = [
|
||||
r"Cannot parse configuration file - error in line 2 in column 4 \(Allocation\)\."
|
||||
]
|
||||
|
||||
illegal_io_class_config_L2 = [
|
||||
r"Cannot parse configuration file - error in line 2\."
|
||||
]
|
||||
illegal_io_class_config_L2 = [r"Cannot parse configuration file - error in line 2\."]
|
||||
|
||||
double_io_class_config = [
|
||||
r"Double configuration for IO class id \d+\n"
|
||||
@ -243,14 +217,13 @@ illegal_io_class_invalid_allocation_number = [
|
||||
]
|
||||
|
||||
malformed_io_class_header = [
|
||||
r'Cannot parse configuration file - unknown column \"value_template\"\.\n'
|
||||
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n'
|
||||
r'Please consult Admin Guide to check how columns in configuration file should be named\.'
|
||||
r"Cannot parse configuration file - unknown column \"value_template\"\.\n"
|
||||
r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
|
||||
r"Please consult Admin Guide to check how columns in configuration file should be named\."
|
||||
]
|
||||
|
||||
unexpected_cls_option = [
|
||||
r"Option '--cache-line-size \(-x\)' is not allowed"
|
||||
]
|
||||
unexpected_cls_option = [r"Option '--cache-line-size \(-x\)' is not allowed"]
|
||||
|
||||
|
||||
def check_stderr_msg(output: Output, expected_messages, negate=False):
|
||||
return __check_string_msg(output.stderr, expected_messages, negate)
|
||||
@ -268,7 +241,8 @@ def __check_string_msg(text: str, expected_messages, negate=False):
|
||||
TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.")
|
||||
msg_ok = False
|
||||
elif matches and negate:
|
||||
TestRun.LOGGER.error(f"Message is incorrect, expected to not find: {msg}\n "
|
||||
f"actual: {text}.")
|
||||
TestRun.LOGGER.error(
|
||||
f"Message is incorrect, expected to not find: {msg}\n " f"actual: {text}."
|
||||
)
|
||||
msg_ok = False
|
||||
return msg_ok
|
||||
|
@ -1,16 +1,17 @@
|
||||
#
|
||||
# Copyright(c) 2019-2021 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
from datetime import timedelta
|
||||
from typing import List
|
||||
|
||||
from aenum import Enum
|
||||
from enum import Enum
|
||||
|
||||
from api.cas import casadm
|
||||
from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy
|
||||
from api.cas.casadm_params import OutputFormat, StatsFilter
|
||||
from api.cas.casadm_parser import get_statistics, get_seq_cut_off_parameters, get_core_info_by_path
|
||||
from api.cas.casadm_params import StatsFilter
|
||||
from api.cas.casadm_parser import get_seq_cut_off_parameters, get_core_info_by_path
|
||||
from api.cas.statistics import CoreStats, CoreIoClassStats
|
||||
from core.test_run_utils import TestRun
|
||||
from storage_devices.device import Device
|
||||
@ -20,9 +21,9 @@ from test_utils.size import Unit, Size
|
||||
|
||||
|
||||
class CoreStatus(Enum):
|
||||
empty = 0,
|
||||
active = 1,
|
||||
inactive = 2,
|
||||
empty = 0
|
||||
active = 1
|
||||
inactive = 2
|
||||
detached = 3
|
||||
|
||||
|
||||
@ -51,27 +52,28 @@ class Core(Device):
|
||||
super().create_filesystem(fs_type, force, blocksize)
|
||||
self.core_device.filesystem = self.filesystem
|
||||
|
||||
def get_io_class_statistics(self,
|
||||
io_class_id: int,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False):
|
||||
stats = get_statistics(self.cache_id, self.core_id, io_class_id,
|
||||
stat_filter, percentage_val)
|
||||
return CoreIoClassStats(stats)
|
||||
def get_io_class_statistics(
|
||||
self,
|
||||
io_class_id: int,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
) -> CoreIoClassStats:
|
||||
return CoreIoClassStats(
|
||||
cache_id=self.cache_id,
|
||||
filter=stat_filter,
|
||||
io_class_id=io_class_id,
|
||||
percentage_val=percentage_val,
|
||||
)
|
||||
|
||||
def get_statistics(self,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False):
|
||||
stats = get_statistics(self.cache_id, self.core_id, None,
|
||||
stat_filter, percentage_val)
|
||||
return CoreStats(stats)
|
||||
|
||||
def get_statistics_flat(self,
|
||||
io_class_id: int = None,
|
||||
stat_filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False):
|
||||
return get_statistics(self.cache_id, self.core_id, io_class_id,
|
||||
stat_filter, percentage_val)
|
||||
def get_statistics(
|
||||
self, stat_filter: List[StatsFilter] = None, percentage_val: bool = False
|
||||
) -> CoreStats:
|
||||
return CoreStats(
|
||||
cache_id=self.cache_id,
|
||||
core_id=self.core_id,
|
||||
filter=stat_filter,
|
||||
percentage_val=percentage_val,
|
||||
)
|
||||
|
||||
def get_status(self):
|
||||
return CoreStatus[self.__get_core_info()["status"].lower()]
|
||||
@ -106,31 +108,30 @@ class Core(Device):
|
||||
return casadm.reset_counters(self.cache_id, self.core_id)
|
||||
|
||||
def flush_core(self):
|
||||
casadm.flush(self.cache_id, self.core_id)
|
||||
casadm.flush_core(self.cache_id, self.core_id)
|
||||
sync()
|
||||
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
|
||||
|
||||
def purge_core(self):
|
||||
casadm.purge_core(self.cache_id, self.core_id)
|
||||
sync()
|
||||
|
||||
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
||||
seq_cutoff_param.threshold,
|
||||
seq_cutoff_param.policy,
|
||||
seq_cutoff_param.promotion_count)
|
||||
return casadm.set_param_cutoff(
|
||||
self.cache_id,
|
||||
self.core_id,
|
||||
seq_cutoff_param.threshold,
|
||||
seq_cutoff_param.policy,
|
||||
seq_cutoff_param.promotion_count,
|
||||
)
|
||||
|
||||
def set_seq_cutoff_threshold(self, threshold: Size):
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
||||
threshold=threshold)
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id, threshold=threshold)
|
||||
|
||||
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
||||
policy=policy)
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id, policy=policy)
|
||||
|
||||
def set_seq_cutoff_promotion_count(self, promotion_count: int):
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
||||
promotion_count=promotion_count)
|
||||
return casadm.set_param_cutoff(self.cache_id, self.core_id, promotion_count=promotion_count)
|
||||
|
||||
def check_if_is_present_in_os(self, should_be_visible=True):
|
||||
device_in_system_message = "CAS device exists in OS."
|
||||
|
@ -1,39 +1,42 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
from test_utils.dmesg import get_dmesg
|
||||
from test_utils.size import Size, Unit
|
||||
|
||||
|
||||
def get_metadata_size_on_device(dmesg):
|
||||
for s in dmesg.split("\n"):
|
||||
m = re.search(r'Metadata size on device: ([0-9]*) kiB', s)
|
||||
if m:
|
||||
return Size(int(m.groups()[0]), Unit.KibiByte)
|
||||
|
||||
raise ValueError("Can't find the metadata size in the provided dmesg output")
|
||||
def get_metadata_size_on_device(cache_id: int) -> Size:
|
||||
dmesg_reversed = list(reversed(get_dmesg().split("\n")))
|
||||
cache_name = "cache" + str(cache_id)
|
||||
cache_dmesg = "\n".join(line for line in dmesg_reversed if re.search(f"{cache_name}:", line))
|
||||
try:
|
||||
return _get_metadata_info(dmesg=cache_dmesg, section_name="Metadata size on device")
|
||||
except ValueError:
|
||||
raise ValueError("Can't find the metadata size in dmesg output")
|
||||
|
||||
|
||||
def _get_metadata_info(dmesg, section_name):
|
||||
def _get_metadata_info(dmesg, section_name) -> Size:
|
||||
for s in dmesg.split("\n"):
|
||||
if section_name in s:
|
||||
size, unit = re.search("[0-9]* (B|kiB)", s).group().split()
|
||||
size, unit = re.search("\\d+ (B|kiB)", s).group().split()
|
||||
unit = Unit.KibiByte if unit == "kiB" else Unit.Byte
|
||||
return Size(int(re.search("[0-9]*", size).group()), unit)
|
||||
return Size(int(re.search("\\d+", size).group()), unit)
|
||||
|
||||
raise ValueError(f'"{section_name}" entry doesn\'t exist in the given dmesg output')
|
||||
|
||||
|
||||
def get_md_section_size(section_name, dmesg):
|
||||
def get_md_section_size(section_name, dmesg) -> Size:
|
||||
section_name = section_name.strip()
|
||||
section_name += " size"
|
||||
return _get_metadata_info(dmesg, section_name)
|
||||
|
||||
|
||||
def get_md_section_offset(section_name, dmesg):
|
||||
def get_md_section_offset(section_name, dmesg) -> Size:
|
||||
section_name = section_name.strip()
|
||||
section_name += " offset"
|
||||
return _get_metadata_info(dmesg, section_name)
|
||||
|
@ -1,116 +0,0 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from core.test_run import TestRun
|
||||
from connection.local_executor import LocalExecutor
|
||||
from test_utils.output import CmdException
|
||||
|
||||
|
||||
def get_submodules_paths(from_dut: bool = False):
|
||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
||||
git_params = "config --file .gitmodules --get-regexp path | cut -d' ' -f2"
|
||||
|
||||
output = executor.run(f"git -C {repo_path} {git_params}")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to get submodules paths", output)
|
||||
|
||||
return output.stdout.splitlines()
|
||||
|
||||
|
||||
def get_repo_files(
|
||||
branch: str = "HEAD",
|
||||
with_submodules: bool = True,
|
||||
with_dirs: bool = False,
|
||||
from_dut: bool = False,
|
||||
):
|
||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
||||
git_params = f"ls-tree -r --name-only --full-tree {branch}"
|
||||
|
||||
output = executor.run(f"git -C {repo_path} {git_params}")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to get repo files list", output)
|
||||
|
||||
files = output.stdout.splitlines()
|
||||
|
||||
if with_submodules:
|
||||
for subm_path in get_submodules_paths(from_dut):
|
||||
output = executor.run(f"git -C {os.path.join(repo_path, subm_path)} {git_params}")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException(f"Failed to get {subm_path} submodule repo files list", output)
|
||||
|
||||
subm_files = [os.path.join(subm_path, file) for file in output.stdout.splitlines()]
|
||||
files.extend(subm_files)
|
||||
|
||||
if with_dirs:
|
||||
# use set() to get unique values only
|
||||
dirs = set(os.path.dirname(file) for file in files)
|
||||
files.extend(dirs)
|
||||
|
||||
# change to absolute paths and remove empty values
|
||||
files = [os.path.realpath(os.path.join(repo_path, file)) for file in files if file]
|
||||
|
||||
return files
|
||||
|
||||
|
||||
def get_current_commit_hash(from_dut: bool = False):
|
||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
||||
|
||||
return executor.run(
|
||||
f"cd {repo_path} &&"
|
||||
f'git show HEAD -s --pretty=format:"%H"').stdout
|
||||
|
||||
|
||||
def get_current_commit_message():
|
||||
local_executor = LocalExecutor()
|
||||
return local_executor.run(
|
||||
f"cd {TestRun.usr.repo_dir} &&"
|
||||
f'git show HEAD -s --pretty=format:"%B"').stdout
|
||||
|
||||
|
||||
def get_commit_hash(cas_version, from_dut: bool = False):
|
||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
||||
|
||||
output = executor.run(
|
||||
f"cd {repo_path} && "
|
||||
f"git rev-parse {cas_version}")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException(f"Failed to resolve '{cas_version}' to commit hash", output)
|
||||
|
||||
TestRun.LOGGER.info(f"Resolved '{cas_version}' as commit {output.stdout}")
|
||||
|
||||
return output.stdout
|
||||
|
||||
|
||||
def get_release_tags():
|
||||
repo_path = os.path.join(TestRun.usr.working_dir, ".git")
|
||||
output = TestRun.executor.run_expect_success(f"git --git-dir={repo_path} tag").stdout
|
||||
|
||||
# Tags containing '-' or '_' are not CAS release versions
|
||||
tags = [v for v in output.splitlines() if "-" not in v and "_" not in v]
|
||||
|
||||
return tags
|
||||
|
||||
|
||||
def checkout_cas_version(cas_version):
|
||||
commit_hash = get_commit_hash(cas_version)
|
||||
TestRun.LOGGER.info(f"Checkout CAS to {commit_hash}")
|
||||
|
||||
output = TestRun.executor.run(
|
||||
f"cd {TestRun.usr.working_dir} && "
|
||||
f"git checkout --force {commit_hash}")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException(f"Failed to checkout to {commit_hash}", output)
|
||||
|
||||
output = TestRun.executor.run(
|
||||
f"cd {TestRun.usr.working_dir} && "
|
||||
f"git submodule update --force")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException(f"Failed to update submodules", output)
|
@ -1,5 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -17,18 +18,24 @@ class InitConfig:
|
||||
self.cache_config_lines = []
|
||||
self.core_config_lines = []
|
||||
|
||||
def add_cache(self, cache_id, cache_device: Device,
|
||||
cache_mode: CacheMode = CacheMode.WT, extra_flags=""):
|
||||
def add_cache(
|
||||
self,
|
||||
cache_id,
|
||||
cache_device: Device,
|
||||
cache_mode: CacheMode = CacheMode.WT,
|
||||
extra_flags="",
|
||||
):
|
||||
self.cache_config_lines.append(
|
||||
CacheConfigLine(cache_id, cache_device, cache_mode, extra_flags))
|
||||
CacheConfigLine(cache_id, cache_device, cache_mode, extra_flags)
|
||||
)
|
||||
|
||||
def add_core(self, cache_id, core_id, core_device: Device, extra_flags=""):
|
||||
self.core_config_lines.append(CoreConfigLine(cache_id, core_id, core_device, extra_flags))
|
||||
|
||||
def remove_config_file(self):
|
||||
@staticmethod
|
||||
def remove_config_file():
|
||||
fs_utils.remove(opencas_conf_path, force=False)
|
||||
|
||||
|
||||
def save_config_file(self):
|
||||
config_lines = []
|
||||
InitConfig.create_default_init_config()
|
||||
@ -40,18 +47,20 @@ class InitConfig:
|
||||
config_lines.append(CoreConfigLine.header)
|
||||
for c in self.core_config_lines:
|
||||
config_lines.append(str(c))
|
||||
fs_utils.write_file(opencas_conf_path, '\n'.join(config_lines), False)
|
||||
fs_utils.write_file(opencas_conf_path, "\n".join(config_lines), False)
|
||||
|
||||
@classmethod
|
||||
def create_init_config_from_running_configuration(
|
||||
cls, cache_extra_flags="", core_extra_flags=""
|
||||
cls, cache_extra_flags="", core_extra_flags=""
|
||||
):
|
||||
init_conf = cls()
|
||||
for cache in casadm_parser.get_caches():
|
||||
init_conf.add_cache(cache.cache_id,
|
||||
cache.cache_device,
|
||||
cache.get_cache_mode(),
|
||||
cache_extra_flags)
|
||||
init_conf.add_cache(
|
||||
cache.cache_id,
|
||||
cache.cache_device,
|
||||
cache.get_cache_mode(),
|
||||
cache_extra_flags,
|
||||
)
|
||||
for core in casadm_parser.get_cores(cache.cache_id):
|
||||
init_conf.add_core(cache.cache_id, core.core_id, core.core_device, core_extra_flags)
|
||||
init_conf.save_config_file()
|
||||
@ -66,17 +75,20 @@ class InitConfig:
|
||||
class CacheConfigLine:
|
||||
header = "[caches]"
|
||||
|
||||
def __init__(self, cache_id, cache_device: Device,
|
||||
cache_mode: CacheMode, extra_flags=""):
|
||||
def __init__(self, cache_id, cache_device: Device, cache_mode: CacheMode, extra_flags=""):
|
||||
self.cache_id = cache_id
|
||||
self.cache_device = cache_device
|
||||
self.cache_mode = cache_mode
|
||||
self.extra_flags = extra_flags
|
||||
|
||||
def __str__(self):
|
||||
params = [str(self.cache_id), self.cache_device.path,
|
||||
self.cache_mode.name, self.extra_flags]
|
||||
return '\t'.join(params)
|
||||
params = [
|
||||
str(self.cache_id),
|
||||
self.cache_device.path,
|
||||
self.cache_mode.name,
|
||||
self.extra_flags,
|
||||
]
|
||||
return "\t".join(params)
|
||||
|
||||
|
||||
class CoreConfigLine:
|
||||
@ -89,6 +101,10 @@ class CoreConfigLine:
|
||||
self.extra_flags = extra_flags
|
||||
|
||||
def __str__(self):
|
||||
params = [str(self.cache_id), str(self.core_id),
|
||||
self.core_device.path, self.extra_flags]
|
||||
return '\t'.join(params)
|
||||
params = [
|
||||
str(self.cache_id),
|
||||
str(self.core_id),
|
||||
self.core_device.path,
|
||||
self.extra_flags,
|
||||
]
|
||||
return "\t".join(params)
|
||||
|
@ -1,17 +1,15 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from tests import conftest
|
||||
from core.test_run import TestRun
|
||||
from api.cas import cas_module, git
|
||||
from api.cas import cas_module
|
||||
from api.cas.version import get_installed_cas_version
|
||||
from test_utils import os_utils
|
||||
from test_utils import os_utils, git
|
||||
from test_utils.output import CmdException
|
||||
|
||||
|
||||
@ -22,27 +20,23 @@ def rsync_opencas_sources():
|
||||
# to make sure path ends with directory separator.
|
||||
# Needed for rsync to copy only contents of a directory
|
||||
# and not the directory itself.
|
||||
os.path.join(TestRun.usr.repo_dir, ''),
|
||||
os.path.join(TestRun.usr.working_dir, ''),
|
||||
os.path.join(TestRun.usr.repo_dir, ""),
|
||||
os.path.join(TestRun.usr.working_dir, ""),
|
||||
exclude_list=["test/functional/results/"],
|
||||
delete=True)
|
||||
delete=True,
|
||||
)
|
||||
|
||||
|
||||
def clean_opencas_repo():
|
||||
TestRun.LOGGER.info("Cleaning Open CAS repo")
|
||||
output = TestRun.executor.run(
|
||||
f"cd {TestRun.usr.working_dir} && "
|
||||
"make distclean")
|
||||
output = TestRun.executor.run(f"cd {TestRun.usr.working_dir} && make distclean")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("make distclean command executed with nonzero status", output)
|
||||
|
||||
|
||||
def build_opencas():
|
||||
TestRun.LOGGER.info("Building Open CAS")
|
||||
output = TestRun.executor.run(
|
||||
f"cd {TestRun.usr.working_dir} && "
|
||||
"./configure && "
|
||||
"make -j")
|
||||
output = TestRun.executor.run(f"cd {TestRun.usr.working_dir} && ./configure && make -j")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Make command executed with nonzero status", output)
|
||||
|
||||
@ -54,8 +48,8 @@ def install_opencas(destdir: str = ""):
|
||||
destdir = os.path.join(TestRun.usr.working_dir, destdir)
|
||||
|
||||
output = TestRun.executor.run(
|
||||
f"cd {TestRun.usr.working_dir} && "
|
||||
f"make {'DESTDIR='+destdir if destdir else ''} install")
|
||||
f"cd {TestRun.usr.working_dir} && make {'DESTDIR='+destdir if destdir else ''} install"
|
||||
)
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Failed to install Open CAS", output)
|
||||
|
||||
@ -78,7 +72,7 @@ def set_up_opencas(version: str = ""):
|
||||
clean_opencas_repo()
|
||||
|
||||
if version:
|
||||
git.checkout_cas_version(version)
|
||||
git.checkout_version(version)
|
||||
|
||||
build_opencas()
|
||||
install_opencas()
|
||||
@ -90,9 +84,7 @@ def uninstall_opencas():
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("Open CAS is not properly installed", output)
|
||||
else:
|
||||
TestRun.executor.run(
|
||||
f"cd {TestRun.usr.working_dir} && "
|
||||
f"make uninstall")
|
||||
TestRun.executor.run(f"cd {TestRun.usr.working_dir} && make uninstall")
|
||||
if output.exit_code != 0:
|
||||
raise CmdException("There was an error during uninstall process", output)
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -8,8 +9,8 @@ import functools
|
||||
import random
|
||||
import re
|
||||
import string
|
||||
from datetime import timedelta
|
||||
|
||||
from datetime import timedelta
|
||||
from packaging import version
|
||||
|
||||
from core.test_run import TestRun
|
||||
@ -30,33 +31,59 @@ IO_CLASS_CONFIG_HEADER = "IO class id,IO class name,Eviction priority,Allocation
|
||||
|
||||
@functools.total_ordering
|
||||
class IoClass:
|
||||
def __init__(self, class_id: int, rule: str = '', priority: int = None,
|
||||
allocation: str = "1.00"):
|
||||
def __init__(
|
||||
self,
|
||||
class_id: int,
|
||||
rule: str = "",
|
||||
priority: int = None,
|
||||
allocation: str = "1.00",
|
||||
):
|
||||
self.id = class_id
|
||||
self.rule = rule
|
||||
self.priority = priority
|
||||
self.allocation = allocation
|
||||
|
||||
def __str__(self):
|
||||
return (f'{self.id},{self.rule},{"" if self.priority is None else self.priority}'
|
||||
f',{self.allocation}')
|
||||
return (
|
||||
f'{self.id},{self.rule},{"" if self.priority is None else self.priority}'
|
||||
f",{self.allocation}"
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
return ((self.id, self.rule, self.priority, self.allocation)
|
||||
== (other.id, other.rule, other.priority, other.allocation))
|
||||
return (
|
||||
self.id,
|
||||
self.rule,
|
||||
self.priority,
|
||||
self.allocation,
|
||||
) == (
|
||||
other.id,
|
||||
other.rule,
|
||||
other.priority,
|
||||
other.allocation,
|
||||
)
|
||||
|
||||
def __lt__(self, other):
|
||||
return ((self.id, self.rule, self.priority, self.allocation)
|
||||
< (other.id, other.rule, other.priority, other.allocation))
|
||||
return (
|
||||
self.id,
|
||||
self.rule,
|
||||
self.priority,
|
||||
self.allocation,
|
||||
) < (
|
||||
other.id,
|
||||
other.rule,
|
||||
other.priority,
|
||||
other.allocation,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_string(ioclass_str: str):
|
||||
parts = [part.strip() for part in re.split('[,|]', ioclass_str.replace('║', ''))]
|
||||
parts = [part.strip() for part in re.split("[,|]", ioclass_str.replace("║", ""))]
|
||||
return IoClass(
|
||||
class_id=int(parts[0]),
|
||||
rule=parts[1],
|
||||
priority=int(parts[2]),
|
||||
allocation="%.2f" % float(parts[3]))
|
||||
allocation="%.2f" % float(parts[3]),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def list_to_csv(ioclass_list: [], add_default_rule: bool = True):
|
||||
@ -64,7 +91,7 @@ class IoClass:
|
||||
if add_default_rule and not len([c for c in list_copy if c.id == 0]):
|
||||
list_copy.insert(0, IoClass.default())
|
||||
list_copy.insert(0, IO_CLASS_CONFIG_HEADER)
|
||||
return '\n'.join(str(c) for c in list_copy)
|
||||
return "\n".join(str(c) for c in list_copy)
|
||||
|
||||
@staticmethod
|
||||
def csv_to_list(csv: str):
|
||||
@ -76,12 +103,15 @@ class IoClass:
|
||||
return ioclass_list
|
||||
|
||||
@staticmethod
|
||||
def save_list_to_config_file(ioclass_list: [],
|
||||
add_default_rule: bool = True,
|
||||
ioclass_config_path: str = default_config_file_path):
|
||||
def save_list_to_config_file(
|
||||
ioclass_list: [],
|
||||
add_default_rule: bool = True,
|
||||
ioclass_config_path: str = default_config_file_path,
|
||||
):
|
||||
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
|
||||
fs_utils.write_file(ioclass_config_path,
|
||||
IoClass.list_to_csv(ioclass_list, add_default_rule))
|
||||
fs_utils.write_file(
|
||||
ioclass_config_path, IoClass.list_to_csv(ioclass_list, add_default_rule)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def default(priority=DEFAULT_IO_CLASS_PRIORITY, allocation="1.00"):
|
||||
@ -93,12 +123,12 @@ class IoClass:
|
||||
"id": "IO class id",
|
||||
"name": "IO class name",
|
||||
"eviction_prio": "Eviction priority",
|
||||
"allocation": "Allocation"
|
||||
"allocation": "Allocation",
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def default_header():
|
||||
return ','.join(IoClass.default_header_dict().values())
|
||||
return ",".join(IoClass.default_header_dict().values())
|
||||
|
||||
@staticmethod
|
||||
def compare_ioclass_lists(list1: [], list2: []):
|
||||
@ -106,18 +136,37 @@ class IoClass:
|
||||
|
||||
@staticmethod
|
||||
def generate_random_ioclass_list(count: int, max_priority: int = MAX_IO_CLASS_PRIORITY):
|
||||
random_list = [IoClass.default(priority=random.randint(0, max_priority),
|
||||
allocation=f"{random.randint(0, 100) / 100:0.2f}")]
|
||||
random_list = [
|
||||
IoClass.default(
|
||||
priority=random.randint(0, max_priority),
|
||||
allocation=f"{random.randint(0, 100) / 100:0.2f}",
|
||||
)
|
||||
]
|
||||
for i in range(1, count):
|
||||
random_list.append(IoClass(i, priority=random.randint(0, max_priority),
|
||||
allocation=f"{random.randint(0, 100) / 100:0.2f}")
|
||||
.set_random_rule())
|
||||
random_list.append(
|
||||
IoClass(
|
||||
i,
|
||||
priority=random.randint(0, max_priority),
|
||||
allocation=f"{random.randint(0, 100) / 100:0.2f}",
|
||||
).set_random_rule()
|
||||
)
|
||||
return random_list
|
||||
|
||||
def set_random_rule(self):
|
||||
rules = ["metadata", "direct", "file_size", "directory", "io_class",
|
||||
"extension", "file_name_prefix", "lba", "pid", "process_name",
|
||||
"file_offset", "request_size"]
|
||||
rules = [
|
||||
"metadata",
|
||||
"direct",
|
||||
"file_size",
|
||||
"directory",
|
||||
"io_class",
|
||||
"extension",
|
||||
"file_name_prefix",
|
||||
"lba",
|
||||
"pid",
|
||||
"process_name",
|
||||
"file_offset",
|
||||
"request_size",
|
||||
]
|
||||
if os_utils.get_kernel_version() >= version.Version("4.13"):
|
||||
rules.append("wlth")
|
||||
|
||||
@ -128,7 +177,7 @@ class IoClass:
|
||||
@staticmethod
|
||||
def add_random_params(rule: str):
|
||||
if rule == "directory":
|
||||
allowed_chars = string.ascii_letters + string.digits + '/'
|
||||
allowed_chars = string.ascii_letters + string.digits + "/"
|
||||
rule += f":/{random_string(random.randint(1, 40), allowed_chars)}"
|
||||
elif rule in ["file_size", "lba", "pid", "file_offset", "request_size", "wlth"]:
|
||||
rule += f":{Operator(random.randrange(len(Operator))).name}:{random.randrange(1000000)}"
|
||||
@ -151,12 +200,10 @@ class Operator(enum.Enum):
|
||||
|
||||
# TODO: replace below methods with methods using IoClass
|
||||
def create_ioclass_config(
|
||||
add_default_rule: bool = True, ioclass_config_path: str = default_config_file_path
|
||||
add_default_rule: bool = True, ioclass_config_path: str = default_config_file_path
|
||||
):
|
||||
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
|
||||
output = TestRun.executor.run(
|
||||
f'echo {IO_CLASS_CONFIG_HEADER} > {ioclass_config_path}'
|
||||
)
|
||||
output = TestRun.executor.run(f"echo {IO_CLASS_CONFIG_HEADER} > {ioclass_config_path}")
|
||||
if output.exit_code != 0:
|
||||
raise Exception(
|
||||
"Failed to create ioclass config file. "
|
||||
@ -180,26 +227,21 @@ def remove_ioclass_config(ioclass_config_path: str = default_config_file_path):
|
||||
output = TestRun.executor.run(f"rm -f {ioclass_config_path}")
|
||||
if output.exit_code != 0:
|
||||
raise Exception(
|
||||
"Failed to remove config file. "
|
||||
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
|
||||
f"Failed to remove config file. stdout: {output.stdout} \n stderr :{output.stderr}"
|
||||
)
|
||||
|
||||
|
||||
def add_ioclass(
|
||||
ioclass_id: int,
|
||||
rule: str,
|
||||
eviction_priority: int,
|
||||
allocation,
|
||||
ioclass_config_path: str = default_config_file_path,
|
||||
ioclass_id: int,
|
||||
rule: str,
|
||||
eviction_priority: int,
|
||||
allocation,
|
||||
ioclass_config_path: str = default_config_file_path,
|
||||
):
|
||||
new_ioclass = f"{ioclass_id},{rule},{eviction_priority},{allocation}"
|
||||
TestRun.LOGGER.info(
|
||||
f"Adding rule {new_ioclass} " + f"to config file {ioclass_config_path}"
|
||||
)
|
||||
TestRun.LOGGER.info(f"Adding rule {new_ioclass} to config file {ioclass_config_path}")
|
||||
|
||||
output = TestRun.executor.run(
|
||||
f'echo "{new_ioclass}" >> {ioclass_config_path}'
|
||||
)
|
||||
output = TestRun.executor.run(f'echo "{new_ioclass}" >> {ioclass_config_path}')
|
||||
if output.exit_code != 0:
|
||||
raise Exception(
|
||||
"Failed to append ioclass to config file. "
|
||||
@ -208,9 +250,7 @@ def add_ioclass(
|
||||
|
||||
|
||||
def get_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_path):
|
||||
TestRun.LOGGER.info(
|
||||
f"Retrieving rule no. {ioclass_id} " + f"from config file {ioclass_config_path}"
|
||||
)
|
||||
TestRun.LOGGER.info(f"Retrieving rule no. {ioclass_id} from config file {ioclass_config_path}")
|
||||
output = TestRun.executor.run(f"cat {ioclass_config_path}")
|
||||
if output.exit_code != 0:
|
||||
raise Exception(
|
||||
@ -225,12 +265,8 @@ def get_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_
|
||||
return ioclass
|
||||
|
||||
|
||||
def remove_ioclass(
|
||||
ioclass_id: int, ioclass_config_path: str = default_config_file_path
|
||||
):
|
||||
TestRun.LOGGER.info(
|
||||
f"Removing rule no.{ioclass_id} " + f"from config file {ioclass_config_path}"
|
||||
)
|
||||
def remove_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_path):
|
||||
TestRun.LOGGER.info(f"Removing rule no.{ioclass_id} from config file {ioclass_config_path}")
|
||||
output = TestRun.executor.run(f"cat {ioclass_config_path}")
|
||||
if output.exit_code != 0:
|
||||
raise Exception(
|
||||
@ -243,9 +279,7 @@ def remove_ioclass(
|
||||
|
||||
# First line in valid config file is always a header, not a rule - it is
|
||||
# already extracted above
|
||||
new_ioclass_config = [
|
||||
x for x in old_ioclass_config[1:] if int(x.split(",")[0]) != ioclass_id
|
||||
]
|
||||
new_ioclass_config = [x for x in old_ioclass_config[1:] if int(x.split(",")[0]) != ioclass_id]
|
||||
|
||||
new_ioclass_config.insert(0, config_header)
|
||||
|
||||
@ -255,9 +289,7 @@ def remove_ioclass(
|
||||
)
|
||||
|
||||
new_ioclass_config_str = "\n".join(new_ioclass_config)
|
||||
output = TestRun.executor.run(
|
||||
f'echo "{new_ioclass_config_str}" > {ioclass_config_path}'
|
||||
)
|
||||
output = TestRun.executor.run(f'echo "{new_ioclass_config_str}" > {ioclass_config_path}')
|
||||
if output.exit_code != 0:
|
||||
raise Exception(
|
||||
"Failed to save new ioclass config. "
|
||||
|
@ -1,5 +1,6 @@
|
||||
#
|
||||
# Copyright(c) 2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -31,8 +32,8 @@ def check_progress_bar(command: str, progress_bar_expected: bool = True):
|
||||
|
||||
percentage = 0
|
||||
while True:
|
||||
output = stdout.channel.recv(1024).decode('utf-8')
|
||||
search = re.search(r'\d+.\d+', output)
|
||||
output = stdout.channel.recv(1024).decode("utf-8")
|
||||
search = re.search(r"\d+.\d+", output)
|
||||
last_percentage = percentage
|
||||
if search:
|
||||
TestRun.LOGGER.info(output)
|
||||
|
@ -1,256 +1,250 @@
|
||||
#
|
||||
# Copyright(c) 2019-2021 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
# Order in arrays is important!
|
||||
config_stats_cache = [
|
||||
"cache id", "cache size", "cache device", "exported object", "core devices",
|
||||
"inactive core devices", "write policy", "cleaning policy", "promotion policy",
|
||||
"cache line size", "metadata memory footprint", "dirty for", "status"
|
||||
]
|
||||
config_stats_core = [
|
||||
"core id", "core device", "exported object", "core size", "dirty for", "status",
|
||||
"seq cutoff threshold", "seq cutoff policy"
|
||||
]
|
||||
config_stats_ioclass = ["io class id", "io class name", "eviction priority", "max size"]
|
||||
usage_stats = ["occupancy", "free", "clean", "dirty"]
|
||||
usage_stats_ioclass = ["occupancy", "clean", "dirty"]
|
||||
inactive_usage_stats = ["inactive occupancy", "inactive clean", "inactive dirty"]
|
||||
request_stats = [
|
||||
"read hits", "read partial misses", "read full misses", "read total",
|
||||
"write hits", "write partial misses", "write full misses", "write total",
|
||||
"pass-through reads", "pass-through writes",
|
||||
"serviced requests", "total requests"
|
||||
]
|
||||
block_stats_cache = [
|
||||
"reads from core(s)", "writes to core(s)", "total to/from core(s)",
|
||||
"reads from cache", "writes to cache", "total to/from cache",
|
||||
"reads from exported object(s)", "writes to exported object(s)",
|
||||
"total to/from exported object(s)"
|
||||
]
|
||||
block_stats_core = [stat.replace("(s)", "") for stat in block_stats_cache]
|
||||
error_stats = [
|
||||
"cache read errors", "cache write errors", "cache total errors",
|
||||
"core read errors", "core write errors", "core total errors",
|
||||
"total errors"
|
||||
]
|
||||
import csv
|
||||
|
||||
from enum import Enum
|
||||
from datetime import timedelta
|
||||
from typing import List
|
||||
|
||||
from api.cas import casadm
|
||||
from api.cas.casadm_params import StatsFilter
|
||||
from test_utils.size import Size, Unit
|
||||
|
||||
|
||||
class UnitType(Enum):
|
||||
requests = "[Requests]"
|
||||
percentage = "[%]"
|
||||
block_4k = "[4KiB Blocks]"
|
||||
mebibyte = "[MiB]"
|
||||
kibibyte = "[KiB]"
|
||||
gibibyte = "[GiB]"
|
||||
seconds = "[s]"
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
|
||||
class OperationType(Enum):
|
||||
read = "Read"
|
||||
write = "Write"
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
|
||||
class CacheStats:
|
||||
stats_list = [
|
||||
"config_stats",
|
||||
"usage_stats",
|
||||
"inactive_usage_stats",
|
||||
"request_stats",
|
||||
"block_stats",
|
||||
"error_stats",
|
||||
]
|
||||
def __init__(
|
||||
self,
|
||||
cache_id: int,
|
||||
filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
):
|
||||
|
||||
def __init__(self, stats):
|
||||
try:
|
||||
self.config_stats = CacheConfigStats(
|
||||
*[stats[stat] for stat in config_stats_cache]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.usage_stats = UsageStats(
|
||||
*[stats[stat] for stat in usage_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.inactive_usage_stats = InactiveUsageStats(
|
||||
*[stats[stat] for stat in inactive_usage_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.request_stats = RequestStats(
|
||||
*[stats[stat] for stat in request_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.block_stats = BlockStats(
|
||||
*[stats[stat] for stat in block_stats_cache]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.error_stats = ErrorStats(
|
||||
*[stats[stat] for stat in error_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
if filter is None:
|
||||
filters = [
|
||||
StatsFilter.conf,
|
||||
StatsFilter.usage,
|
||||
StatsFilter.req,
|
||||
StatsFilter.blk,
|
||||
StatsFilter.err,
|
||||
]
|
||||
else:
|
||||
filters = filter
|
||||
|
||||
csv_stats = casadm.print_statistics(
|
||||
cache_id=cache_id,
|
||||
filter=filter,
|
||||
output_format=casadm.OutputFormat.csv,
|
||||
).stdout.splitlines()
|
||||
|
||||
stat_keys, stat_values = csv.reader(csv_stats)
|
||||
|
||||
# Unify names in block stats for core and cache:
|
||||
# cache stats: Reads from core(s)
|
||||
# core stats: Reads from core
|
||||
stat_keys = [x.replace("(s)", "") for x in stat_keys]
|
||||
stats_dict = dict(zip(stat_keys, stat_values))
|
||||
|
||||
for filter in filters:
|
||||
match filter:
|
||||
case StatsFilter.conf:
|
||||
self.config_stats = CacheConfigStats(stats_dict)
|
||||
case StatsFilter.usage:
|
||||
self.usage_stats = UsageStats(stats_dict, percentage_val)
|
||||
case StatsFilter.req:
|
||||
self.request_stats = RequestStats(stats_dict, percentage_val)
|
||||
case StatsFilter.blk:
|
||||
self.block_stats = BlockStats(stats_dict, percentage_val)
|
||||
case StatsFilter.err:
|
||||
self.error_stats = ErrorStats(stats_dict, percentage_val)
|
||||
|
||||
def __str__(self):
|
||||
status = ""
|
||||
for stats_item in self.stats_list:
|
||||
current_stat = getattr(self, stats_item, None)
|
||||
if current_stat:
|
||||
status += f"--- Cache {current_stat}"
|
||||
return status
|
||||
# stats_list contains all Class.__str__ methods initialized in CacheStats
|
||||
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
|
||||
return "\n".join(stats_list)
|
||||
|
||||
def __eq__(self, other):
|
||||
if not other:
|
||||
return False
|
||||
for stats_item in self.stats_list:
|
||||
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
|
||||
return False
|
||||
return True
|
||||
# check if all initialized variable in self(CacheStats) match other(CacheStats)
|
||||
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
|
||||
getattr(other, stats_item) for stats_item in other.__dict__
|
||||
]
|
||||
|
||||
|
||||
class CoreStats:
|
||||
stats_list = [
|
||||
"config_stats",
|
||||
"usage_stats",
|
||||
"request_stats",
|
||||
"block_stats",
|
||||
"error_stats",
|
||||
]
|
||||
def __init__(
|
||||
self,
|
||||
cache_id: int,
|
||||
core_id: int,
|
||||
filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
):
|
||||
|
||||
def __init__(self, stats):
|
||||
try:
|
||||
self.config_stats = CoreConfigStats(
|
||||
*[stats[stat] for stat in config_stats_core]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.usage_stats = UsageStats(
|
||||
*[stats[stat] for stat in usage_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.request_stats = RequestStats(
|
||||
*[stats[stat] for stat in request_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.block_stats = BlockStats(
|
||||
*[stats[stat] for stat in block_stats_core]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.error_stats = ErrorStats(
|
||||
*[stats[stat] for stat in error_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
if filter is None:
|
||||
filters = [
|
||||
StatsFilter.conf,
|
||||
StatsFilter.usage,
|
||||
StatsFilter.req,
|
||||
StatsFilter.blk,
|
||||
StatsFilter.err,
|
||||
]
|
||||
else:
|
||||
filters = filter
|
||||
|
||||
csv_stats = casadm.print_statistics(
|
||||
cache_id=cache_id,
|
||||
core_id=core_id,
|
||||
filter=filter,
|
||||
output_format=casadm.OutputFormat.csv,
|
||||
).stdout.splitlines()
|
||||
|
||||
stat_keys, stat_values = csv.reader(csv_stats)
|
||||
stats_dict = dict(zip(stat_keys, stat_values))
|
||||
|
||||
for filter in filters:
|
||||
match filter:
|
||||
case StatsFilter.conf:
|
||||
self.config_stats = CoreConfigStats(stats_dict)
|
||||
case StatsFilter.usage:
|
||||
self.usage_stats = UsageStats(stats_dict, percentage_val)
|
||||
case StatsFilter.req:
|
||||
self.request_stats = RequestStats(stats_dict, percentage_val)
|
||||
case StatsFilter.blk:
|
||||
self.block_stats = BlockStats(stats_dict, percentage_val)
|
||||
case StatsFilter.err:
|
||||
self.error_stats = ErrorStats(stats_dict, percentage_val)
|
||||
|
||||
def __str__(self):
|
||||
status = ""
|
||||
for stats_item in self.stats_list:
|
||||
current_stat = getattr(self, stats_item, None)
|
||||
if current_stat:
|
||||
status += f"--- Core {current_stat}"
|
||||
return status
|
||||
# stats_list contains all Class.__str__ methods initialized in CacheStats
|
||||
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
|
||||
return "\n".join(stats_list)
|
||||
|
||||
def __eq__(self, other):
|
||||
if not other:
|
||||
return False
|
||||
for stats_item in self.stats_list:
|
||||
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
|
||||
return False
|
||||
return True
|
||||
# check if all initialized variable in self(CacheStats) match other(CacheStats)
|
||||
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
|
||||
getattr(other, stats_item) for stats_item in other.__dict__
|
||||
]
|
||||
|
||||
|
||||
class IoClassStats:
|
||||
stats_list = [
|
||||
"config_stats",
|
||||
"usage_stats",
|
||||
"request_stats",
|
||||
"block_stats",
|
||||
]
|
||||
class CoreIoClassStats:
|
||||
def __init__(
|
||||
self,
|
||||
cache_id: int,
|
||||
io_class_id: int,
|
||||
core_id: int = None,
|
||||
filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
):
|
||||
if filter is None:
|
||||
filters = [
|
||||
StatsFilter.conf,
|
||||
StatsFilter.usage,
|
||||
StatsFilter.req,
|
||||
StatsFilter.blk,
|
||||
]
|
||||
else:
|
||||
filters = filter
|
||||
|
||||
def __init__(self, stats, block_stats_list):
|
||||
try:
|
||||
self.config_stats = IoClassConfigStats(
|
||||
*[stats[stat] for stat in config_stats_ioclass]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.usage_stats = IoClassUsageStats(
|
||||
*[stats[stat] for stat in usage_stats_ioclass]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.request_stats = RequestStats(
|
||||
*[stats[stat] for stat in request_stats]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
self.block_stats = BlockStats(
|
||||
*[stats[stat] for stat in block_stats_list]
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
csv_stats = casadm.print_statistics(
|
||||
cache_id=cache_id,
|
||||
core_id=core_id,
|
||||
io_class_id=io_class_id,
|
||||
filter=filter,
|
||||
output_format=casadm.OutputFormat.csv,
|
||||
).stdout.splitlines()
|
||||
|
||||
stat_keys, stat_values = csv.reader(csv_stats)
|
||||
|
||||
# Unify names in block stats for core and cache:
|
||||
# cache stats: Reads from core(s)
|
||||
# core stats: Reads from core
|
||||
stat_keys = [x.replace("(s)", "") for x in stat_keys]
|
||||
stats_dict = dict(zip(stat_keys, stat_values))
|
||||
|
||||
for filter in filters:
|
||||
match filter:
|
||||
case StatsFilter.conf:
|
||||
self.config_stats = IoClassConfigStats(stats_dict)
|
||||
case StatsFilter.usage:
|
||||
self.usage_stats = IoClassUsageStats(stats_dict, percentage_val)
|
||||
case StatsFilter.req:
|
||||
self.request_stats = RequestStats(stats_dict, percentage_val)
|
||||
case StatsFilter.blk:
|
||||
self.block_stats = BlockStats(stats_dict, percentage_val)
|
||||
|
||||
def __eq__(self, other):
|
||||
# check if all initialized variable in self(CacheStats) match other(CacheStats)
|
||||
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
|
||||
getattr(other, stats_item) for stats_item in other.__dict__
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
status = ""
|
||||
for stats_item in self.stats_list:
|
||||
current_stat = getattr(self, stats_item, None)
|
||||
if current_stat:
|
||||
status += f"--- IO class {current_stat}"
|
||||
return status
|
||||
|
||||
def __eq__(self, other):
|
||||
if not other:
|
||||
return False
|
||||
for stats_item in self.stats_list:
|
||||
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
|
||||
return False
|
||||
return True
|
||||
# stats_list contains all Class.__str__ methods initialized in CacheStats
|
||||
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
|
||||
return "\n".join(stats_list)
|
||||
|
||||
|
||||
class CacheIoClassStats(IoClassStats):
|
||||
def __init__(self, stats):
|
||||
super().__init__(stats, block_stats_cache)
|
||||
|
||||
|
||||
class CoreIoClassStats(IoClassStats):
|
||||
def __init__(self, stats):
|
||||
super().__init__(stats, block_stats_core)
|
||||
class CacheIoClassStats(CoreIoClassStats):
|
||||
def __init__(
|
||||
self,
|
||||
cache_id: int,
|
||||
io_class_id: int,
|
||||
filter: List[StatsFilter] = None,
|
||||
percentage_val: bool = False,
|
||||
):
|
||||
super().__init__(
|
||||
cache_id=cache_id,
|
||||
io_class_id=io_class_id,
|
||||
core_id=None,
|
||||
filter=filter,
|
||||
percentage_val=percentage_val,
|
||||
)
|
||||
|
||||
|
||||
class CacheConfigStats:
|
||||
def __init__(
|
||||
self,
|
||||
cache_id,
|
||||
cache_size,
|
||||
cache_dev,
|
||||
exp_obj,
|
||||
core_dev,
|
||||
inactive_core_dev,
|
||||
write_policy,
|
||||
cleaning_policy,
|
||||
promotion_policy,
|
||||
cache_line_size,
|
||||
metadata_memory_footprint,
|
||||
dirty_for,
|
||||
status,
|
||||
):
|
||||
self.cache_id = cache_id
|
||||
self.cache_size = cache_size
|
||||
self.cache_dev = cache_dev
|
||||
self.exp_obj = exp_obj
|
||||
self.core_dev = core_dev
|
||||
self.inactive_core_dev = inactive_core_dev
|
||||
self.write_policy = write_policy
|
||||
self.cleaning_policy = cleaning_policy
|
||||
self.promotion_policy = promotion_policy
|
||||
self.cache_line_size = cache_line_size
|
||||
self.metadata_memory_footprint = metadata_memory_footprint
|
||||
self.dirty_for = dirty_for
|
||||
self.status = status
|
||||
def __init__(self, stats_dict):
|
||||
self.cache_id = stats_dict["Cache Id"]
|
||||
self.cache_size = parse_value(
|
||||
value=stats_dict["Cache Size [4KiB Blocks]"], unit_type=UnitType.block_4k
|
||||
)
|
||||
self.cache_dev = stats_dict["Cache Device"]
|
||||
self.exp_obj = stats_dict["Exported Object"]
|
||||
self.core_dev = stats_dict["Core Devices"]
|
||||
self.inactive_core_devices = stats_dict["Inactive Core Devices"]
|
||||
self.write_policy = stats_dict["Write Policy"]
|
||||
self.cleaning_policy = stats_dict["Cleaning Policy"]
|
||||
self.promotion_policy = stats_dict["Promotion Policy"]
|
||||
self.cache_line_size = parse_value(
|
||||
value=stats_dict["Cache line size [KiB]"], unit_type=UnitType.kibibyte
|
||||
)
|
||||
self.metadata_memory_footprint = parse_value(
|
||||
value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type=UnitType.mebibyte
|
||||
)
|
||||
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
|
||||
self.status = stats_dict["Status"]
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -260,10 +254,10 @@ class CacheConfigStats:
|
||||
f"Cache device: {self.cache_dev}\n"
|
||||
f"Exported object: {self.exp_obj}\n"
|
||||
f"Core devices: {self.core_dev}\n"
|
||||
f"Inactive core devices: {self.inactive_core_dev}\n"
|
||||
f"Write policy: {self.write_policy}\n"
|
||||
f"Cleaning policy: {self.cleaning_policy}\n"
|
||||
f"Promotion policy: {self.promotion_policy}\n"
|
||||
f"Inactive Core Devices: {self.inactive_core_devices}\n"
|
||||
f"Write Policy: {self.write_policy}\n"
|
||||
f"Cleaning Policy: {self.cleaning_policy}\n"
|
||||
f"Promotion Policy: {self.promotion_policy}\n"
|
||||
f"Cache line size: {self.cache_line_size}\n"
|
||||
f"Metadata memory footprint: {self.metadata_memory_footprint}\n"
|
||||
f"Dirty for: {self.dirty_for}\n"
|
||||
@ -279,7 +273,7 @@ class CacheConfigStats:
|
||||
and self.cache_dev == other.cache_dev
|
||||
and self.exp_obj == other.exp_obj
|
||||
and self.core_dev == other.core_dev
|
||||
and self.inactive_core_dev == other.inactive_core_dev
|
||||
and self.inactive_core_devices == other.inactive_core_devices
|
||||
and self.write_policy == other.write_policy
|
||||
and self.cleaning_policy == other.cleaning_policy
|
||||
and self.promotion_policy == other.promotion_policy
|
||||
@ -291,25 +285,19 @@ class CacheConfigStats:
|
||||
|
||||
|
||||
class CoreConfigStats:
|
||||
def __init__(
|
||||
self,
|
||||
core_id,
|
||||
core_dev,
|
||||
exp_obj,
|
||||
core_size,
|
||||
dirty_for,
|
||||
status,
|
||||
seq_cutoff_threshold,
|
||||
seq_cutoff_policy,
|
||||
):
|
||||
self.core_id = core_id
|
||||
self.core_dev = core_dev
|
||||
self.exp_obj = exp_obj
|
||||
self.core_size = core_size
|
||||
self.dirty_for = dirty_for
|
||||
self.status = status
|
||||
self.seq_cutoff_threshold = seq_cutoff_threshold
|
||||
self.seq_cutoff_policy = seq_cutoff_policy
|
||||
def __init__(self, stats_dict):
|
||||
self.core_id = stats_dict["Core Id"]
|
||||
self.core_dev = stats_dict["Core Device"]
|
||||
self.exp_obj = stats_dict["Exported Object"]
|
||||
self.core_size = parse_value(
|
||||
value=stats_dict["Core Size [4KiB Blocks]"], unit_type=UnitType.block_4k
|
||||
)
|
||||
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type=UnitType.seconds)
|
||||
self.status = stats_dict["Status"]
|
||||
self.seq_cutoff_threshold = parse_value(
|
||||
value=stats_dict["Seq cutoff threshold [KiB]"], unit_type=UnitType.kibibyte
|
||||
)
|
||||
self.seq_cutoff_policy = stats_dict["Seq cutoff policy"]
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -340,13 +328,11 @@ class CoreConfigStats:
|
||||
|
||||
|
||||
class IoClassConfigStats:
|
||||
def __init__(
|
||||
self, io_class_id, io_class_name, eviction_priority, selective_allocation
|
||||
):
|
||||
self.io_class_id = io_class_id
|
||||
self.io_class_name = io_class_name
|
||||
self.eviction_priority = eviction_priority
|
||||
self.selective_allocation = selective_allocation
|
||||
def __init__(self, stats_dict):
|
||||
self.io_class_id = stats_dict["IO class ID"]
|
||||
self.io_class_name = stats_dict["IO class name"]
|
||||
self.eviction_priority = stats_dict["Eviction priority"]
|
||||
self.max_size = stats_dict["Max size"]
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -354,7 +340,7 @@ class IoClassConfigStats:
|
||||
f"IO class ID: {self.io_class_id}\n"
|
||||
f"IO class name: {self.io_class_name}\n"
|
||||
f"Eviction priority: {self.eviction_priority}\n"
|
||||
f"Selective allocation: {self.selective_allocation}\n"
|
||||
f"Max size: {self.max_size}\n"
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
@ -364,16 +350,17 @@ class IoClassConfigStats:
|
||||
self.io_class_id == other.io_class_id
|
||||
and self.io_class_name == other.io_class_name
|
||||
and self.eviction_priority == other.eviction_priority
|
||||
and self.selective_allocation == other.selective_allocation
|
||||
and self.max_size == other.max_size
|
||||
)
|
||||
|
||||
|
||||
class UsageStats:
|
||||
def __init__(self, occupancy, free, clean, dirty):
|
||||
self.occupancy = occupancy
|
||||
self.free = free
|
||||
self.clean = clean
|
||||
self.dirty = dirty
|
||||
def __init__(self, stats_dict, percentage_val):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.block_4k
|
||||
self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
|
||||
self.free = parse_value(value=stats_dict[f"Free {unit}"], unit_type=unit)
|
||||
self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
|
||||
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -405,7 +392,7 @@ class UsageStats:
|
||||
self.occupancy + other.occupancy,
|
||||
self.free + other.free,
|
||||
self.clean + other.clean,
|
||||
self.dirty + other.dirty
|
||||
self.dirty + other.dirty,
|
||||
)
|
||||
|
||||
def __iadd__(self, other):
|
||||
@ -417,10 +404,11 @@ class UsageStats:
|
||||
|
||||
|
||||
class IoClassUsageStats:
|
||||
def __init__(self, occupancy, clean, dirty):
|
||||
self.occupancy = occupancy
|
||||
self.clean = clean
|
||||
self.dirty = dirty
|
||||
def __init__(self, stats_dict, percentage_val):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.block_4k
|
||||
self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
|
||||
self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
|
||||
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -449,7 +437,7 @@ class IoClassUsageStats:
|
||||
return UsageStats(
|
||||
self.occupancy + other.occupancy,
|
||||
self.clean + other.clean,
|
||||
self.dirty + other.dirty
|
||||
self.dirty + other.dirty,
|
||||
)
|
||||
|
||||
def __iadd__(self, other):
|
||||
@ -484,31 +472,26 @@ class InactiveUsageStats:
|
||||
|
||||
|
||||
class RequestStats:
|
||||
def __init__(
|
||||
self,
|
||||
read_hits,
|
||||
read_part_misses,
|
||||
read_full_misses,
|
||||
read_total,
|
||||
write_hits,
|
||||
write_part_misses,
|
||||
write_full_misses,
|
||||
write_total,
|
||||
pass_through_reads,
|
||||
pass_through_writes,
|
||||
requests_serviced,
|
||||
requests_total,
|
||||
):
|
||||
def __init__(self, stats_dict, percentage_val):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||
self.read = RequestStatsChunk(
|
||||
read_hits, read_part_misses, read_full_misses, read_total
|
||||
stats_dict=stats_dict, percentage_val=percentage_val, operation=OperationType.read
|
||||
)
|
||||
self.write = RequestStatsChunk(
|
||||
write_hits, write_part_misses, write_full_misses, write_total
|
||||
stats_dict=stats_dict, percentage_val=percentage_val, operation=OperationType.write
|
||||
)
|
||||
self.pass_through_reads = parse_value(
|
||||
value=stats_dict[f"Pass-Through reads {unit}"], unit_type=unit
|
||||
)
|
||||
self.pass_through_writes = parse_value(
|
||||
value=stats_dict[f"Pass-Through writes {unit}"], unit_type=unit
|
||||
)
|
||||
self.requests_serviced = parse_value(
|
||||
value=stats_dict[f"Serviced requests {unit}"], unit_type=unit
|
||||
)
|
||||
self.requests_total = parse_value(
|
||||
value=stats_dict[f"Total requests {unit}"], unit_type=unit
|
||||
)
|
||||
self.pass_through_reads = pass_through_reads
|
||||
self.pass_through_writes = pass_through_writes
|
||||
self.requests_serviced = requests_serviced
|
||||
self.requests_total = requests_total
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -535,11 +518,16 @@ class RequestStats:
|
||||
|
||||
|
||||
class RequestStatsChunk:
|
||||
def __init__(self, hits, part_misses, full_misses, total):
|
||||
self.hits = hits
|
||||
self.part_misses = part_misses
|
||||
self.full_misses = full_misses
|
||||
self.total = total
|
||||
def __init__(self, stats_dict, percentage_val: bool, operation: OperationType):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||
self.hits = parse_value(value=stats_dict[f"{operation} hits {unit}"], unit_type=unit)
|
||||
self.part_misses = parse_value(
|
||||
value=stats_dict[f"{operation} partial misses {unit}"], unit_type=unit
|
||||
)
|
||||
self.full_misses = parse_value(
|
||||
value=stats_dict[f"{operation} full misses {unit}"], unit_type=unit
|
||||
)
|
||||
self.total = parse_value(value=stats_dict[f"{operation} total {unit}"], unit_type=unit)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -561,21 +549,18 @@ class RequestStatsChunk:
|
||||
|
||||
|
||||
class BlockStats:
|
||||
def __init__(
|
||||
self,
|
||||
core_reads,
|
||||
core_writes,
|
||||
core_total,
|
||||
cache_reads,
|
||||
cache_writes,
|
||||
cache_total,
|
||||
exp_obj_reads,
|
||||
exp_obj_writes,
|
||||
exp_obj_total,
|
||||
):
|
||||
self.core = BasicStatsChunk(core_reads, core_writes, core_total)
|
||||
self.cache = BasicStatsChunk(cache_reads, cache_writes, cache_total)
|
||||
self.exp_obj = BasicStatsChunk(exp_obj_reads, exp_obj_writes, exp_obj_total)
|
||||
def __init__(self, stats_dict, percentage_val):
|
||||
self.core = BasicStatsChunk(
|
||||
stats_dict=stats_dict, percentage_val=percentage_val, device="core"
|
||||
)
|
||||
self.cache = BasicStatsChunk(
|
||||
stats_dict=stats_dict, percentage_val=percentage_val, device="cache"
|
||||
)
|
||||
self.exp_obj = BasicStatsChunk(
|
||||
stats_dict=stats_dict,
|
||||
percentage_val=percentage_val,
|
||||
device="exported object",
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -589,30 +574,20 @@ class BlockStats:
|
||||
if not other:
|
||||
return False
|
||||
return (
|
||||
self.core == other.core
|
||||
and self.cache == other.cache
|
||||
and self.exp_obj == other.exp_obj
|
||||
self.core == other.core and self.cache == other.cache and self.exp_obj == other.exp_obj
|
||||
)
|
||||
|
||||
|
||||
class ErrorStats:
|
||||
def __init__(
|
||||
self,
|
||||
cache_read_errors,
|
||||
cache_write_errors,
|
||||
cache_total_errors,
|
||||
core_read_errors,
|
||||
core_write_errors,
|
||||
core_total_errors,
|
||||
total_errors,
|
||||
):
|
||||
self.cache = BasicStatsChunk(
|
||||
cache_read_errors, cache_write_errors, cache_total_errors
|
||||
def __init__(self, stats_dict, percentage_val):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||
self.cache = BasicStatsChunkError(
|
||||
stats_dict=stats_dict, percentage_val=percentage_val, device="Cache"
|
||||
)
|
||||
self.core = BasicStatsChunk(
|
||||
core_read_errors, core_write_errors, core_total_errors
|
||||
self.core = BasicStatsChunkError(
|
||||
stats_dict=stats_dict, percentage_val=percentage_val, device="Core"
|
||||
)
|
||||
self.total_errors = total_errors
|
||||
self.total_errors = parse_value(value=stats_dict[f"Total errors {unit}"], unit_type=unit)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
@ -633,10 +608,11 @@ class ErrorStats:
|
||||
|
||||
|
||||
class BasicStatsChunk:
|
||||
def __init__(self, reads, writes, total):
|
||||
self.reads = reads
|
||||
self.writes = writes
|
||||
self.total = total
|
||||
def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.block_4k
|
||||
self.reads = parse_value(value=stats_dict[f"Reads from {device} {unit}"], unit_type=unit)
|
||||
self.writes = parse_value(value=stats_dict[f"Writes to {device} {unit}"], unit_type=unit)
|
||||
self.total = parse_value(value=stats_dict[f"Total to/from {device} {unit}"], unit_type=unit)
|
||||
|
||||
def __str__(self):
|
||||
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
|
||||
@ -645,7 +621,44 @@ class BasicStatsChunk:
|
||||
if not other:
|
||||
return False
|
||||
return (
|
||||
self.reads == other.reads
|
||||
and self.writes == other.writes
|
||||
and self.total == other.total
|
||||
self.reads == other.reads and self.writes == other.writes and self.total == other.total
|
||||
)
|
||||
|
||||
|
||||
class BasicStatsChunkError:
|
||||
def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
|
||||
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||
self.reads = parse_value(value=stats_dict[f"{device} read errors {unit}"], unit_type=unit)
|
||||
self.writes = parse_value(value=stats_dict[f"{device} write errors {unit}"], unit_type=unit)
|
||||
self.total = parse_value(value=stats_dict[f"{device} total errors {unit}"], unit_type=unit)
|
||||
|
||||
def __str__(self):
|
||||
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
|
||||
|
||||
def __eq__(self, other):
|
||||
if not other:
|
||||
return False
|
||||
return (
|
||||
self.reads == other.reads and self.writes == other.writes and self.total == other.total
|
||||
)
|
||||
|
||||
|
||||
def parse_value(value: str, unit_type: UnitType) -> int | float | Size | timedelta | str:
|
||||
match unit_type:
|
||||
case UnitType.requests:
|
||||
stat_unit = int(value)
|
||||
case UnitType.percentage:
|
||||
stat_unit = float(value)
|
||||
case UnitType.block_4k:
|
||||
stat_unit = Size(float(value), Unit.Blocks4096)
|
||||
case UnitType.mebibyte:
|
||||
stat_unit = Size(float(value), Unit.MebiByte)
|
||||
case UnitType.kibibyte:
|
||||
stat_unit = Size(float(value), Unit.KibiByte)
|
||||
case UnitType.gibibyte:
|
||||
stat_unit = Size(float(value), Unit.GibiByte)
|
||||
case UnitType.seconds:
|
||||
stat_unit = timedelta(seconds=float(value))
|
||||
case _:
|
||||
stat_unit = value
|
||||
return stat_unit
|
||||
|
@ -1,11 +1,12 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
from api.cas import git
|
||||
from test_utils import git
|
||||
from core.test_run import TestRun
|
||||
from test_utils.output import CmdException
|
||||
|
||||
@ -20,23 +21,25 @@ class CasVersion:
|
||||
self.base = f"{self.main}.{self.major}.{self.minor}"
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.main}.{self.major}.{self.minor}.{self.pr}" \
|
||||
f"{'.' + self.type if self.type is not None else ''}"
|
||||
return (
|
||||
f"{self.main}.{self.major}.{self.minor}.{self.pr}"
|
||||
f"{'.' + self.type if self.type is not None else ''}"
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return str(self)
|
||||
|
||||
@classmethod
|
||||
def from_git_tag(cls, version_tag):
|
||||
m = re.fullmatch(r'v([0-9]+)\.([0-9]+)\.?([0-9]?)', "v20.3")
|
||||
m = re.fullmatch(r"v([0-9]+)\.([0-9]+)\.?([0-9]?)", "v20.3")
|
||||
main, major, minor = m.groups()
|
||||
if not minor:
|
||||
minor = '0'
|
||||
minor = "0"
|
||||
return cls(main, major, minor, 0, "master")
|
||||
|
||||
@classmethod
|
||||
def from_version_string(cls, version_string):
|
||||
return cls(*version_string.split('.'))
|
||||
return cls(*version_string.split("."))
|
||||
|
||||
|
||||
def get_available_cas_versions():
|
||||
|
@ -1,4 +1,4 @@
|
||||
pytest>=4.4.0,<=6.2.5
|
||||
pytest>=7.0,<=7.4.4
|
||||
multimethod>=1.1
|
||||
paramiko>=2.7.2
|
||||
IPy>=1.00
|
||||
@ -12,8 +12,7 @@ attotime>=0.2.0
|
||||
gitpython>=3.1.7
|
||||
cryptography>=3.4.6
|
||||
psutil>=5.8.0
|
||||
py==1.10.0
|
||||
py==1.11.0
|
||||
portalocker>=2.3.1
|
||||
pytest-asyncio>=0.14.0
|
||||
recordclass>=0.8.4
|
||||
schema==0.7.2
|
||||
|
@ -1,9 +1,11 @@
|
||||
#
|
||||
# Copyright(c) 2019-2022 Intel Corporation
|
||||
# Copyright(c) 2023-2024 Huawei Technologies Co., Ltd.
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
import os
|
||||
import posixpath
|
||||
import sys
|
||||
import traceback
|
||||
from datetime import timedelta
|
||||
@ -14,10 +16,11 @@ import yaml
|
||||
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
|
||||
|
||||
from core.test_run import Blocked
|
||||
from core.test_run_utils import TestRun
|
||||
from api.cas import installer
|
||||
from api.cas import casadm
|
||||
from api.cas import git
|
||||
from test_utils import git
|
||||
from api.cas.cas_service import opencas_drop_in_directory
|
||||
from storage_devices.raid import Raid
|
||||
from storage_devices.ramdisk import RamDisk
|
||||
@ -91,7 +94,7 @@ def pytest_runtest_setup(item):
|
||||
TestRun.presetup()
|
||||
try:
|
||||
TestRun.executor.wait_for_connection(timedelta(seconds=20))
|
||||
except paramiko.AuthenticationException:
|
||||
except (paramiko.AuthenticationException, Blocked):
|
||||
raise
|
||||
except Exception:
|
||||
try:
|
||||
@ -167,7 +170,9 @@ def pytest_runtest_teardown():
|
||||
for dut in TestRun.duts:
|
||||
with TestRun.use_dut(dut):
|
||||
if TestRun.executor:
|
||||
os.makedirs(os.path.join(TestRun.LOGGER.base_dir, "dut_info", dut.ip),
|
||||
os.makedirs(os.path.join(TestRun.LOGGER.base_dir, "dut_info",
|
||||
dut.ip if dut.ip is not None
|
||||
else dut.config.get("host")),
|
||||
exist_ok=True)
|
||||
TestRun.LOGGER.get_additional_logs()
|
||||
Log.destroy()
|
||||
@ -187,7 +192,6 @@ def pytest_addoption(parser):
|
||||
parser.addoption("--dut-config", action="append", type=str)
|
||||
parser.addoption("--log-path", action="store",
|
||||
default=f"{os.path.join(os.path.dirname(__file__), '../results')}")
|
||||
parser.addoption("--force-reinstall", action="store_true", default=False)
|
||||
parser.addoption("--fuzzy-iter-count", action="store")
|
||||
|
||||
|
||||
@ -213,10 +217,6 @@ def unmount_cas_devices():
|
||||
)
|
||||
|
||||
|
||||
def get_force_param(item):
|
||||
return item.config.getoption("--force-reinstall")
|
||||
|
||||
|
||||
def __drbd_cleanup():
|
||||
from storage_devices.drbd import Drbd
|
||||
Drbd.down_all()
|
||||
@ -266,33 +266,24 @@ def base_prepare(item):
|
||||
raid.remove_partitions()
|
||||
raid.stop()
|
||||
for device in raid.array_devices:
|
||||
Mdadm.zero_superblock(os.path.join('/dev', device.get_device_id()))
|
||||
Mdadm.zero_superblock(posixpath.join('/dev', device.get_device_id()))
|
||||
Udev.settle()
|
||||
|
||||
RamDisk.remove_all()
|
||||
|
||||
for disk in TestRun.dut.disks:
|
||||
disk_serial = get_disk_serial_number(disk.path)
|
||||
if disk.serial_number != disk_serial:
|
||||
if disk.serial_number and disk.serial_number != disk_serial:
|
||||
raise Exception(
|
||||
f"Serial for {disk.path} doesn't match the one from the config."
|
||||
f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
|
||||
)
|
||||
|
||||
disk.umount_all_partitions()
|
||||
Mdadm.zero_superblock(os.path.join('/dev', disk.get_device_id()))
|
||||
Mdadm.zero_superblock(posixpath.join('/dev', disk.get_device_id()))
|
||||
TestRun.executor.run_expect_success("udevadm settle")
|
||||
disk.remove_partitions()
|
||||
create_partition_table(disk, PartitionTable.gpt)
|
||||
|
||||
cas_version = TestRun.config.get("cas_version") or git.get_current_commit_hash()
|
||||
if get_force_param(item) and not TestRun.usr.already_updated:
|
||||
installer.rsync_opencas_sources()
|
||||
installer.reinstall_opencas(cas_version)
|
||||
elif not installer.check_if_installed(cas_version):
|
||||
installer.rsync_opencas_sources()
|
||||
installer.set_up_opencas(cas_version)
|
||||
|
||||
TestRun.usr.already_updated = True
|
||||
TestRun.LOGGER.add_build_info(f'Commit hash:')
|
||||
TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
|
||||
|
Loading…
Reference in New Issue
Block a user