Merge pull request #1488 from Kamoppl/kamilg/update_cas_api
test-api: Update cas api
This commit is contained in:
commit
d324f541a1
@ -1,54 +1,64 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
from api.cas.casadm_parser import *
|
from api.cas.casadm_parser import *
|
||||||
from api.cas.cli import *
|
from api.cas.core import Core
|
||||||
|
from api.cas.dmesg import get_metadata_size_on_device
|
||||||
from api.cas.statistics import CacheStats, CacheIoClassStats
|
from api.cas.statistics import CacheStats, CacheIoClassStats
|
||||||
from test_utils.os_utils import *
|
from test_utils.os_utils import *
|
||||||
|
from test_utils.output import Output
|
||||||
|
|
||||||
|
|
||||||
class Cache:
|
class Cache:
|
||||||
def __init__(self, device: Device):
|
def __init__(self, device: Device, cache_id: int = None) -> None:
|
||||||
self.cache_device = device
|
self.cache_device = device
|
||||||
self.cache_id = int(self.__get_cache_id())
|
self.cache_id = cache_id if cache_id else self.__get_cache_id()
|
||||||
self.__cache_line_size = None
|
self.__cache_line_size = None
|
||||||
self.__metadata_size = None
|
self.metadata_size_on_disk = self.get_metadata_size_on_disk()
|
||||||
|
|
||||||
def __get_cache_id(self):
|
def __get_cache_id(self) -> int:
|
||||||
cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.get_device_id()}"
|
device_path = self.__get_cache_device_path()
|
||||||
output = TestRun.executor.run(cmd)
|
|
||||||
if output.exit_code == 0 and output.stdout.strip():
|
|
||||||
return output.stdout.split()[1]
|
|
||||||
else:
|
|
||||||
raise Exception(f"There is no cache started on {self.cache_device.get_device_id()}.")
|
|
||||||
|
|
||||||
def get_core_devices(self):
|
caches_dict = get_cas_devices_dict()["caches"]
|
||||||
|
|
||||||
|
for cache in caches_dict.values():
|
||||||
|
if cache["device_path"] == device_path:
|
||||||
|
return int(cache["id"])
|
||||||
|
|
||||||
|
raise Exception(f"There is no cache started on {device_path}")
|
||||||
|
|
||||||
|
def __get_cache_device_path(self) -> str:
|
||||||
|
return self.cache_device.path if self.cache_device is not None else "-"
|
||||||
|
|
||||||
|
def get_core_devices(self) -> list:
|
||||||
return get_cores(self.cache_id)
|
return get_cores(self.cache_id)
|
||||||
|
|
||||||
def get_cache_line_size(self):
|
def get_cache_line_size(self) -> CacheLineSize:
|
||||||
if self.__cache_line_size is None:
|
if self.__cache_line_size is None:
|
||||||
stats = self.get_statistics()
|
stats = self.get_statistics()
|
||||||
stats_line_size = stats.config_stats.cache_line_size
|
stats_line_size = stats.config_stats.cache_line_size
|
||||||
self.__cache_line_size = CacheLineSize(stats_line_size)
|
self.__cache_line_size = CacheLineSize(stats_line_size)
|
||||||
return self.__cache_line_size
|
return self.__cache_line_size
|
||||||
|
|
||||||
def get_cleaning_policy(self):
|
def get_cleaning_policy(self) -> CleaningPolicy:
|
||||||
stats = self.get_statistics()
|
stats = self.get_statistics()
|
||||||
cp = stats.config_stats.cleaning_policy
|
cp = stats.config_stats.cleaning_policy
|
||||||
return CleaningPolicy[cp]
|
return CleaningPolicy[cp]
|
||||||
|
|
||||||
def get_metadata_size(self):
|
def get_metadata_size_in_ram(self) -> Size:
|
||||||
if self.__metadata_size is None:
|
|
||||||
stats = self.get_statistics()
|
stats = self.get_statistics()
|
||||||
self.__metadata_size = stats.config_stats.metadata_memory_footprint
|
return stats.config_stats.metadata_memory_footprint
|
||||||
return self.__metadata_size
|
|
||||||
|
def get_metadata_size_on_disk(self) -> Size:
|
||||||
|
return get_metadata_size_on_device(cache_id=self.cache_id)
|
||||||
|
|
||||||
def get_occupancy(self):
|
def get_occupancy(self):
|
||||||
return self.get_statistics().usage_stats.occupancy
|
return self.get_statistics().usage_stats.occupancy
|
||||||
|
|
||||||
def get_status(self):
|
def get_status(self) -> CacheStatus:
|
||||||
status = (
|
status = (
|
||||||
self.get_statistics(stat_filter=[StatsFilter.conf])
|
self.get_statistics(stat_filter=[StatsFilter.conf])
|
||||||
.config_stats.status.replace(" ", "_")
|
.config_stats.status.replace(" ", "_")
|
||||||
@ -57,131 +67,142 @@ class Cache:
|
|||||||
return CacheStatus[status]
|
return CacheStatus[status]
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def size(self):
|
def size(self) -> Size:
|
||||||
return self.get_statistics().config_stats.cache_size
|
return self.get_statistics().config_stats.cache_size
|
||||||
|
|
||||||
def get_cache_mode(self):
|
def get_cache_mode(self) -> CacheMode:
|
||||||
return CacheMode[self.get_statistics().config_stats.write_policy.upper()]
|
return CacheMode[self.get_statistics().config_stats.write_policy.upper()]
|
||||||
|
|
||||||
def get_dirty_blocks(self):
|
def get_dirty_blocks(self) -> Size:
|
||||||
return self.get_statistics().usage_stats.dirty
|
return self.get_statistics().usage_stats.dirty
|
||||||
|
|
||||||
def get_dirty_for(self):
|
def get_dirty_for(self) -> timedelta:
|
||||||
return self.get_statistics().config_stats.dirty_for
|
return self.get_statistics().config_stats.dirty_for
|
||||||
|
|
||||||
def get_clean_blocks(self):
|
def get_clean_blocks(self) -> Size:
|
||||||
return self.get_statistics().usage_stats.clean
|
return self.get_statistics().usage_stats.clean
|
||||||
|
|
||||||
def get_flush_parameters_alru(self):
|
def get_flush_parameters_alru(self) -> FlushParametersAlru:
|
||||||
return get_flush_parameters_alru(self.cache_id)
|
return get_flush_parameters_alru(self.cache_id)
|
||||||
|
|
||||||
def get_flush_parameters_acp(self):
|
def get_flush_parameters_acp(self) -> FlushParametersAcp:
|
||||||
return get_flush_parameters_acp(self.cache_id)
|
return get_flush_parameters_acp(self.cache_id)
|
||||||
|
|
||||||
# Casadm methods:
|
# Casadm methods:
|
||||||
|
|
||||||
def get_io_class_statistics(self,
|
def get_statistics(
|
||||||
io_class_id: int,
|
self,
|
||||||
stat_filter: List[StatsFilter] = None,
|
stat_filter: List[StatsFilter] = None,
|
||||||
percentage_val: bool = False):
|
percentage_val: bool = False,
|
||||||
stats = get_statistics(self.cache_id, None, io_class_id,
|
) -> CacheStats:
|
||||||
stat_filter, percentage_val)
|
return CacheStats(
|
||||||
return CacheIoClassStats(stats)
|
cache_id=self.cache_id,
|
||||||
|
filter=stat_filter,
|
||||||
|
percentage_val=percentage_val,
|
||||||
|
)
|
||||||
|
|
||||||
def get_statistics(self,
|
def get_io_class_statistics(
|
||||||
stat_filter: List[StatsFilter] = None,
|
self,
|
||||||
percentage_val: bool = False):
|
|
||||||
stats = get_statistics(self.cache_id, None, None,
|
|
||||||
stat_filter, percentage_val)
|
|
||||||
return CacheStats(stats)
|
|
||||||
|
|
||||||
def get_statistics_flat(self,
|
|
||||||
io_class_id: int = None,
|
io_class_id: int = None,
|
||||||
stat_filter: List[StatsFilter] = None,
|
stat_filter: List[StatsFilter] = None,
|
||||||
percentage_val: bool = False):
|
percentage_val: bool = False,
|
||||||
return get_statistics(self.cache_id, None, io_class_id,
|
) -> CacheIoClassStats:
|
||||||
stat_filter, percentage_val)
|
return CacheIoClassStats(
|
||||||
|
cache_id=self.cache_id,
|
||||||
|
filter=stat_filter,
|
||||||
|
io_class_id=io_class_id,
|
||||||
|
percentage_val=percentage_val,
|
||||||
|
)
|
||||||
|
|
||||||
def flush_cache(self):
|
def flush_cache(self) -> Output:
|
||||||
casadm.flush(cache_id=self.cache_id)
|
output = casadm.flush_cache(cache_id=self.cache_id)
|
||||||
sync()
|
sync()
|
||||||
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
|
return output
|
||||||
|
|
||||||
def purge_cache(self):
|
def purge_cache(self) -> Output:
|
||||||
casadm.purge_cache(cache_id=self.cache_id)
|
output = casadm.purge_cache(cache_id=self.cache_id)
|
||||||
sync()
|
sync()
|
||||||
|
return output
|
||||||
|
|
||||||
def stop(self, no_data_flush: bool = False):
|
def stop(self, no_data_flush: bool = False) -> Output:
|
||||||
return casadm.stop_cache(self.cache_id, no_data_flush)
|
return casadm.stop_cache(self.cache_id, no_data_flush)
|
||||||
|
|
||||||
def add_core(self, core_dev, core_id: int = None):
|
def add_core(self, core_dev, core_id: int = None) -> Core:
|
||||||
return casadm.add_core(self, core_dev, core_id)
|
return casadm.add_core(self, core_dev, core_id)
|
||||||
|
|
||||||
def remove_core(self, core_id: int, force: bool = False):
|
def remove_core(self, core_id: int, force: bool = False) -> Output:
|
||||||
return casadm.remove_core(self.cache_id, core_id, force)
|
return casadm.remove_core(self.cache_id, core_id, force)
|
||||||
|
|
||||||
def remove_inactive_core(self, core_id: int, force: bool = False):
|
def remove_inactive_core(self, core_id: int, force: bool = False) -> Output:
|
||||||
return casadm.remove_inactive(self.cache_id, core_id, force)
|
return casadm.remove_inactive(self.cache_id, core_id, force)
|
||||||
|
|
||||||
def reset_counters(self):
|
def reset_counters(self) -> Output:
|
||||||
return casadm.reset_counters(self.cache_id)
|
return casadm.reset_counters(self.cache_id)
|
||||||
|
|
||||||
def set_cache_mode(self, cache_mode: CacheMode, flush=None):
|
def set_cache_mode(self, cache_mode: CacheMode, flush=None) -> Output:
|
||||||
return casadm.set_cache_mode(cache_mode, self.cache_id, flush)
|
return casadm.set_cache_mode(cache_mode, self.cache_id, flush)
|
||||||
|
|
||||||
def load_io_class(self, file_path: str):
|
def load_io_class(self, file_path: str) -> Output:
|
||||||
return casadm.load_io_classes(self.cache_id, file_path)
|
return casadm.load_io_classes(self.cache_id, file_path)
|
||||||
|
|
||||||
def list_io_classes(self):
|
def list_io_classes(self) -> list:
|
||||||
return get_io_class_list(self.cache_id)
|
return get_io_class_list(self.cache_id)
|
||||||
|
|
||||||
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
|
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters) -> Output:
|
||||||
return casadm.set_param_cutoff(self.cache_id,
|
return casadm.set_param_cutoff(
|
||||||
|
self.cache_id,
|
||||||
threshold=seq_cutoff_param.threshold,
|
threshold=seq_cutoff_param.threshold,
|
||||||
policy=seq_cutoff_param.policy,
|
policy=seq_cutoff_param.policy,
|
||||||
promotion_count=seq_cutoff_param.promotion_count)
|
promotion_count=seq_cutoff_param.promotion_count,
|
||||||
|
)
|
||||||
|
|
||||||
def set_seq_cutoff_threshold(self, threshold: Size):
|
def set_seq_cutoff_threshold(self, threshold: Size) -> Output:
|
||||||
return casadm.set_param_cutoff(self.cache_id,
|
return casadm.set_param_cutoff(self.cache_id, threshold=threshold, policy=None)
|
||||||
threshold=threshold,
|
|
||||||
policy=None)
|
|
||||||
|
|
||||||
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
|
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy) -> Output:
|
||||||
return casadm.set_param_cutoff(self.cache_id,
|
return casadm.set_param_cutoff(self.cache_id, threshold=None, policy=policy)
|
||||||
threshold=None,
|
|
||||||
policy=policy)
|
|
||||||
|
|
||||||
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
|
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy) -> Output:
|
||||||
return casadm.set_param_cleaning(self.cache_id, cleaning_policy)
|
return casadm.set_param_cleaning(self.cache_id, cleaning_policy)
|
||||||
|
|
||||||
def set_params_acp(self, acp_params: FlushParametersAcp):
|
def set_params_acp(self, acp_params: FlushParametersAcp) -> Output:
|
||||||
return casadm.set_param_cleaning_acp(self.cache_id,
|
return casadm.set_param_cleaning_acp(
|
||||||
int(acp_params.wake_up_time.total_milliseconds())
|
self.cache_id,
|
||||||
if acp_params.wake_up_time else None,
|
int(acp_params.wake_up_time.total_milliseconds()) if acp_params.wake_up_time else None,
|
||||||
int(acp_params.flush_max_buffers)
|
int(acp_params.flush_max_buffers) if acp_params.flush_max_buffers else None,
|
||||||
if acp_params.flush_max_buffers else None)
|
)
|
||||||
|
|
||||||
def set_params_alru(self, alru_params: FlushParametersAlru):
|
def set_params_alru(self, alru_params: FlushParametersAlru) -> Output:
|
||||||
return casadm.set_param_cleaning_alru(
|
return casadm.set_param_cleaning_alru(
|
||||||
self.cache_id,
|
self.cache_id,
|
||||||
int(alru_params.wake_up_time.total_seconds())
|
(int(alru_params.wake_up_time.total_seconds()) if alru_params.wake_up_time else None),
|
||||||
if alru_params.wake_up_time is not None else None,
|
(
|
||||||
int(alru_params.staleness_time.total_seconds())
|
int(alru_params.staleness_time.total_seconds())
|
||||||
if alru_params.staleness_time is not None else None,
|
if alru_params.staleness_time
|
||||||
alru_params.flush_max_buffers
|
else None
|
||||||
if alru_params.flush_max_buffers is not None else None,
|
),
|
||||||
|
(alru_params.flush_max_buffers if alru_params.flush_max_buffers else None),
|
||||||
|
(
|
||||||
int(alru_params.activity_threshold.total_milliseconds())
|
int(alru_params.activity_threshold.total_milliseconds())
|
||||||
if alru_params.activity_threshold is not None else None)
|
if alru_params.activity_threshold
|
||||||
|
else None
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
def get_cache_config(self):
|
def get_cache_config(self) -> CacheConfig:
|
||||||
return CacheConfig(self.get_cache_line_size(),
|
return CacheConfig(
|
||||||
|
self.get_cache_line_size(),
|
||||||
self.get_cache_mode(),
|
self.get_cache_mode(),
|
||||||
self.get_cleaning_policy())
|
self.get_cleaning_policy(),
|
||||||
|
)
|
||||||
|
|
||||||
def standby_detach(self, shortcut: bool = False):
|
def standby_detach(self, shortcut: bool = False) -> Output:
|
||||||
return casadm.standby_detach_cache(cache_id=self.cache_id, shortcut=shortcut)
|
return casadm.standby_detach_cache(cache_id=self.cache_id, shortcut=shortcut)
|
||||||
|
|
||||||
def standby_activate(self, device, shortcut: bool = False):
|
def standby_activate(self, device, shortcut: bool = False) -> Output:
|
||||||
return casadm.standby_activate_cache(
|
return casadm.standby_activate_cache(
|
||||||
cache_id=self.cache_id, cache_dev=device, shortcut=shortcut
|
cache_id=self.cache_id, cache_dev=device, shortcut=shortcut
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def has_volatile_metadata(self) -> bool:
|
||||||
|
return self.get_metadata_size_on_disk() == Size.zero()
|
||||||
|
@ -1,9 +1,10 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
from aenum import Enum, IntFlag
|
from enum import Enum, IntFlag
|
||||||
|
|
||||||
from test_utils.os_utils import get_kernel_module_parameter
|
from test_utils.os_utils import get_kernel_module_parameter
|
||||||
from test_utils.size import Size, Unit
|
from test_utils.size import Size, Unit
|
||||||
@ -40,37 +41,34 @@ class CacheMode(Enum):
|
|||||||
return self.value
|
return self.value
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_traits(cache_mode):
|
def get_traits(cache_mode) -> CacheModeTrait:
|
||||||
if cache_mode == CacheMode.PT:
|
match cache_mode:
|
||||||
|
case CacheMode.PT:
|
||||||
return CacheModeTrait(0)
|
return CacheModeTrait(0)
|
||||||
elif cache_mode == CacheMode.WT:
|
case CacheMode.WT:
|
||||||
return CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite
|
return CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite
|
||||||
elif cache_mode == CacheMode.WB:
|
case CacheMode.WB:
|
||||||
return (
|
return (
|
||||||
CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
|
CacheModeTrait.InsertRead
|
||||||
|
| CacheModeTrait.InsertWrite
|
||||||
|
| CacheModeTrait.LazyWrites
|
||||||
)
|
)
|
||||||
elif cache_mode == CacheMode.WO:
|
case CacheMode.WO:
|
||||||
return CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
|
return CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites
|
||||||
elif cache_mode == CacheMode.WA:
|
case CacheMode.WA:
|
||||||
return CacheModeTrait.InsertRead
|
return CacheModeTrait.InsertRead
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def with_traits(flags: CacheModeTrait):
|
def with_traits(flags: CacheModeTrait) -> list:
|
||||||
return [
|
return [m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))]
|
||||||
m for m in CacheMode if all(map(lambda t: t in CacheMode.get_traits(m), flags))
|
|
||||||
]
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def without_traits(flags: CacheModeTrait):
|
def without_traits(flags: CacheModeTrait) -> list:
|
||||||
return [
|
return [m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))]
|
||||||
m for m in CacheMode if not any(map(lambda t: t in CacheMode.get_traits(m), flags))
|
|
||||||
]
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def with_any_trait(flags: CacheModeTrait):
|
def with_any_trait(flags: CacheModeTrait) -> list:
|
||||||
return [
|
return [m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))]
|
||||||
m for m in CacheMode if any(map(lambda t: t in CacheMode.get_traits(m), flags))
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
class SeqCutOffPolicy(Enum):
|
class SeqCutOffPolicy(Enum):
|
||||||
@ -90,7 +88,6 @@ class SeqCutOffPolicy(Enum):
|
|||||||
|
|
||||||
class MetadataMode(Enum):
|
class MetadataMode(Enum):
|
||||||
normal = "normal"
|
normal = "normal"
|
||||||
atomic = "atomic"
|
|
||||||
DEFAULT = normal
|
DEFAULT = normal
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
@ -133,10 +130,10 @@ class CacheStatus(Enum):
|
|||||||
class FlushParametersAlru:
|
class FlushParametersAlru:
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
activity_threshold=None,
|
activity_threshold: Time = None,
|
||||||
flush_max_buffers=None,
|
flush_max_buffers: int = None,
|
||||||
staleness_time=None,
|
staleness_time: Time = None,
|
||||||
wake_up_time=None,
|
wake_up_time: Time = None,
|
||||||
):
|
):
|
||||||
self.activity_threshold = activity_threshold
|
self.activity_threshold = activity_threshold
|
||||||
self.flush_max_buffers = flush_max_buffers
|
self.flush_max_buffers = flush_max_buffers
|
||||||
@ -152,18 +149,16 @@ class FlushParametersAlru:
|
|||||||
)
|
)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
ret = ["activity threshold: "
|
ret = [
|
||||||
+ (f"{self.activity_threshold}" if self.activity_threshold is not None
|
"activity threshold: "
|
||||||
else "default"),
|
+ (f"{self.activity_threshold}" if self.activity_threshold is not None else "default"),
|
||||||
"flush max buffers: "
|
"flush max buffers: "
|
||||||
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None
|
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
|
||||||
else "default"),
|
|
||||||
"staleness time: "
|
"staleness time: "
|
||||||
+ (f"{self.staleness_time}" if self.staleness_time is not None
|
+ (f"{self.staleness_time}" if self.staleness_time is not None else "default"),
|
||||||
else "default"),
|
|
||||||
"wake up time: "
|
"wake up time: "
|
||||||
+ (f"{self.wake_up_time}" if self.wake_up_time is not None
|
+ (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
|
||||||
else "default")]
|
]
|
||||||
return " | ".join(ret)
|
return " | ".join(ret)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -197,12 +192,12 @@ class FlushParametersAcp:
|
|||||||
)
|
)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
ret = ["flush max buffers: "
|
ret = [
|
||||||
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None
|
"flush max buffers: "
|
||||||
else "default"),
|
+ (f"{self.flush_max_buffers}" if self.flush_max_buffers is not None else "default"),
|
||||||
"wake up time: "
|
"wake up time: "
|
||||||
+ (f"{self.wake_up_time}" if self.wake_up_time is not None
|
+ (f"{self.wake_up_time}" if self.wake_up_time is not None else "default"),
|
||||||
else "default")]
|
]
|
||||||
return " | ".join(ret)
|
return " | ".join(ret)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -221,7 +216,9 @@ class FlushParametersAcp:
|
|||||||
|
|
||||||
|
|
||||||
class SeqCutOffParameters:
|
class SeqCutOffParameters:
|
||||||
def __init__(self, policy=None, threshold=None, promotion_count=None):
|
def __init__(
|
||||||
|
self, policy: CleaningPolicy = None, threshold: Size = None, promotion_count: int = None
|
||||||
|
):
|
||||||
self.policy = policy
|
self.policy = policy
|
||||||
self.threshold = threshold
|
self.threshold = threshold
|
||||||
self.promotion_count = promotion_count
|
self.promotion_count = promotion_count
|
||||||
@ -238,20 +235,17 @@ class SeqCutOffParameters:
|
|||||||
return SeqCutOffParameters(
|
return SeqCutOffParameters(
|
||||||
threshold=Size(1024, Unit.KibiByte),
|
threshold=Size(1024, Unit.KibiByte),
|
||||||
policy=SeqCutOffPolicy.full,
|
policy=SeqCutOffPolicy.full,
|
||||||
promotion_count=8
|
promotion_count=8,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class PromotionParametersNhit:
|
class PromotionParametersNhit:
|
||||||
def __init__(self, threshold=None, trigger=None):
|
def __init__(self, threshold: Size = None, trigger: int = None):
|
||||||
self.threshold = threshold
|
self.threshold = threshold
|
||||||
self.trigger = trigger
|
self.trigger = trigger
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
return (
|
return self.threshold == other.threshold and self.trigger == other.trigger
|
||||||
self.threshold == other.threshold
|
|
||||||
and self.trigger == other.trigger
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def nhit_params_range():
|
def nhit_params_range():
|
||||||
@ -293,7 +287,7 @@ class KernelParameters:
|
|||||||
use_io_scheduler: UseIoScheduler = None,
|
use_io_scheduler: UseIoScheduler = None,
|
||||||
seq_cut_off_mb: int = None,
|
seq_cut_off_mb: int = None,
|
||||||
max_writeback_queue_size: int = None,
|
max_writeback_queue_size: int = None,
|
||||||
writeback_queue_unblock_size: int = None
|
writeback_queue_unblock_size: int = None,
|
||||||
):
|
):
|
||||||
self.unaligned_io = unaligned_io
|
self.unaligned_io = unaligned_io
|
||||||
self.use_io_scheduler = use_io_scheduler
|
self.use_io_scheduler = use_io_scheduler
|
||||||
@ -312,16 +306,17 @@ class KernelParameters:
|
|||||||
self.use_io_scheduler, other.use_io_scheduler, UseIoScheduler.DEFAULT
|
self.use_io_scheduler, other.use_io_scheduler, UseIoScheduler.DEFAULT
|
||||||
)
|
)
|
||||||
and equal_or_default(
|
and equal_or_default(
|
||||||
self.seq_cut_off_mb, other.seq_cut_off_mb,
|
self.seq_cut_off_mb, other.seq_cut_off_mb, self.seq_cut_off_mb_DEFAULT
|
||||||
self.seq_cut_off_mb_DEFAULT
|
|
||||||
)
|
)
|
||||||
and equal_or_default(
|
and equal_or_default(
|
||||||
self.max_writeback_queue_size, other.max_writeback_queue_size,
|
self.max_writeback_queue_size,
|
||||||
self.max_writeback_queue_size_DEFAULT
|
other.max_writeback_queue_size,
|
||||||
|
self.max_writeback_queue_size_DEFAULT,
|
||||||
)
|
)
|
||||||
and equal_or_default(
|
and equal_or_default(
|
||||||
self.writeback_queue_unblock_size, other.writeback_queue_unblock_size,
|
self.writeback_queue_unblock_size,
|
||||||
self.writeback_queue_unblock_size_DEFAULT
|
other.writeback_queue_unblock_size,
|
||||||
|
self.writeback_queue_unblock_size_DEFAULT,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -332,7 +327,7 @@ class KernelParameters:
|
|||||||
UseIoScheduler.DEFAULT,
|
UseIoScheduler.DEFAULT,
|
||||||
cls.seq_cut_off_mb_DEFAULT,
|
cls.seq_cut_off_mb_DEFAULT,
|
||||||
cls.max_writeback_queue_size_DEFAULT,
|
cls.max_writeback_queue_size_DEFAULT,
|
||||||
cls.writeback_queue_unblock_size_DEFAULT
|
cls.writeback_queue_unblock_size_DEFAULT,
|
||||||
)
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -343,7 +338,7 @@ class KernelParameters:
|
|||||||
UseIoScheduler(int(get_kernel_module_parameter(module, "use_io_scheduler"))),
|
UseIoScheduler(int(get_kernel_module_parameter(module, "use_io_scheduler"))),
|
||||||
int(get_kernel_module_parameter(module, "seq_cut_off_mb")),
|
int(get_kernel_module_parameter(module, "seq_cut_off_mb")),
|
||||||
int(get_kernel_module_parameter(module, "max_writeback_queue_size")),
|
int(get_kernel_module_parameter(module, "max_writeback_queue_size")),
|
||||||
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size"))
|
int(get_kernel_module_parameter(module, "writeback_queue_unblock_size")),
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_parameter_dictionary(self):
|
def get_parameter_dictionary(self):
|
||||||
@ -354,10 +349,15 @@ class KernelParameters:
|
|||||||
params["use_io_scheduler"] = str(self.use_io_scheduler.value)
|
params["use_io_scheduler"] = str(self.use_io_scheduler.value)
|
||||||
if self.seq_cut_off_mb not in [None, self.seq_cut_off_mb_DEFAULT]:
|
if self.seq_cut_off_mb not in [None, self.seq_cut_off_mb_DEFAULT]:
|
||||||
params["seq_cut_off_mb"] = str(self.seq_cut_off_mb)
|
params["seq_cut_off_mb"] = str(self.seq_cut_off_mb)
|
||||||
if self.max_writeback_queue_size not in [None, self.max_writeback_queue_size_DEFAULT]:
|
if self.max_writeback_queue_size not in [
|
||||||
|
None,
|
||||||
|
self.max_writeback_queue_size_DEFAULT,
|
||||||
|
]:
|
||||||
params["max_writeback_queue_size"] = str(self.max_writeback_queue_size)
|
params["max_writeback_queue_size"] = str(self.max_writeback_queue_size)
|
||||||
if (self.writeback_queue_unblock_size not in
|
if self.writeback_queue_unblock_size not in [
|
||||||
[None, self.writeback_queue_unblock_size_DEFAULT]):
|
None,
|
||||||
|
self.writeback_queue_unblock_size_DEFAULT,
|
||||||
|
]:
|
||||||
params["writeback_queue_unblock_size"] = str(self.writeback_queue_unblock_size)
|
params["writeback_queue_unblock_size"] = str(self.writeback_queue_unblock_size)
|
||||||
return params
|
return params
|
||||||
|
|
||||||
@ -367,10 +367,10 @@ class KernelParameters:
|
|||||||
class CacheConfig:
|
class CacheConfig:
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
cache_line_size=CacheLineSize.DEFAULT,
|
cache_line_size: CacheLineSize = CacheLineSize.DEFAULT,
|
||||||
cache_mode=CacheMode.DEFAULT,
|
cache_mode: CacheMode = CacheMode.DEFAULT,
|
||||||
cleaning_policy=CleaningPolicy.DEFAULT,
|
cleaning_policy: CleaningPolicy = CleaningPolicy.DEFAULT,
|
||||||
kernel_parameters=None
|
kernel_parameters=None,
|
||||||
):
|
):
|
||||||
self.cache_line_size = cache_line_size
|
self.cache_line_size = cache_line_size
|
||||||
self.cache_mode = cache_mode
|
self.cache_mode = cache_mode
|
||||||
@ -383,7 +383,9 @@ class CacheConfig:
|
|||||||
and self.cache_mode == other.cache_mode
|
and self.cache_mode == other.cache_mode
|
||||||
and self.cleaning_policy == other.cleaning_policy
|
and self.cleaning_policy == other.cleaning_policy
|
||||||
and equal_or_default(
|
and equal_or_default(
|
||||||
self.kernel_parameters, other.kernel_parameters, KernelParameters.DEFAULT
|
self.kernel_parameters,
|
||||||
|
other.kernel_parameters,
|
||||||
|
KernelParameters.DEFAULT,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -1,9 +1,10 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
from aenum import Enum
|
from enum import Enum
|
||||||
from core.test_run import TestRun
|
from core.test_run import TestRun
|
||||||
from test_utils import os_utils
|
from test_utils import os_utils
|
||||||
from test_utils.os_utils import ModuleRemoveMethod
|
from test_utils.os_utils import ModuleRemoveMethod
|
||||||
@ -19,8 +20,7 @@ def reload_all_cas_modules():
|
|||||||
|
|
||||||
|
|
||||||
def unload_all_cas_modules():
|
def unload_all_cas_modules():
|
||||||
os_utils.unload_kernel_module(CasModule.cache.value,
|
os_utils.unload_kernel_module(CasModule.cache.value, os_utils.ModuleRemoveMethod.rmmod)
|
||||||
os_utils.ModuleRemoveMethod.rmmod)
|
|
||||||
|
|
||||||
|
|
||||||
def is_cas_management_dev_present():
|
def is_cas_management_dev_present():
|
||||||
|
@ -36,7 +36,8 @@ class Packages:
|
|||||||
|
|
||||||
|
|
||||||
class _Rpm(RpmSet):
|
class _Rpm(RpmSet):
|
||||||
def __init__(self, packages_dir: str = ""):
|
def __init__(self, packages_paths: list, packages_dir: str = ""):
|
||||||
|
super().__init__(packages_paths)
|
||||||
self.packages_dir = packages_dir
|
self.packages_dir = packages_dir
|
||||||
self.packages = get_packages_list("rpm", self.packages_dir)
|
self.packages = get_packages_list("rpm", self.packages_dir)
|
||||||
|
|
||||||
@ -65,7 +66,8 @@ class _Rpm(RpmSet):
|
|||||||
|
|
||||||
|
|
||||||
class _Deb(DebSet):
|
class _Deb(DebSet):
|
||||||
def __init__(self, packages_dir: str = ""):
|
def __init__(self, packages_paths: list, packages_dir: str = ""):
|
||||||
|
super().__init__(packages_paths)
|
||||||
self.packages_dir = packages_dir
|
self.packages_dir = packages_dir
|
||||||
self.packages = get_packages_list("deb", self.packages_dir)
|
self.packages = get_packages_list("deb", self.packages_dir)
|
||||||
|
|
||||||
@ -98,7 +100,8 @@ def get_packages_list(package_type: str, packages_dir: str):
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
return [
|
return [
|
||||||
package for package in find_all_files(packages_dir, recursive=False)
|
package
|
||||||
|
for package in find_all_files(packages_dir, recursive=False)
|
||||||
# include only binary packages (ready to be processed by package manager)
|
# include only binary packages (ready to be processed by package manager)
|
||||||
if package.endswith(package_type.lower())
|
if package.endswith(package_type.lower())
|
||||||
and not package.endswith("src." + package_type.lower())
|
and not package.endswith("src." + package_type.lower())
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -8,7 +9,12 @@ from datetime import timedelta
|
|||||||
from string import Template
|
from string import Template
|
||||||
from textwrap import dedent
|
from textwrap import dedent
|
||||||
|
|
||||||
from test_tools.fs_utils import check_if_directory_exists, create_directory, write_file, remove
|
from test_tools.fs_utils import (
|
||||||
|
check_if_directory_exists,
|
||||||
|
create_directory,
|
||||||
|
write_file,
|
||||||
|
remove,
|
||||||
|
)
|
||||||
from test_utils.systemd import reload_daemon
|
from test_utils.systemd import reload_daemon
|
||||||
|
|
||||||
opencas_drop_in_directory = Path("/etc/systemd/system/open-cas.service.d/")
|
opencas_drop_in_directory = Path("/etc/systemd/system/open-cas.service.d/")
|
||||||
|
@ -1,55 +1,419 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
from api.cas.cache import Cache
|
from api.cas.cache import Cache
|
||||||
from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy, CleaningPolicy, \
|
from api.cas.cache_config import (
|
||||||
KernelParameters
|
CacheLineSize,
|
||||||
|
CacheMode,
|
||||||
|
SeqCutOffPolicy,
|
||||||
|
CleaningPolicy,
|
||||||
|
KernelParameters,
|
||||||
|
)
|
||||||
|
from api.cas.casadm_params import OutputFormat, StatsFilter
|
||||||
|
from api.cas.cli import *
|
||||||
from api.cas.core import Core
|
from api.cas.core import Core
|
||||||
from core.test_run import TestRun
|
from core.test_run import TestRun
|
||||||
from storage_devices.device import Device
|
from storage_devices.device import Device
|
||||||
from test_utils.os_utils import reload_kernel_module
|
from test_utils.os_utils import reload_kernel_module
|
||||||
from test_utils.output import CmdException
|
from test_utils.output import CmdException, Output
|
||||||
from test_utils.size import Size, Unit
|
from test_utils.size import Size, Unit
|
||||||
from .casadm_params import *
|
|
||||||
from .casctl import stop as casctl_stop
|
|
||||||
from .cli import *
|
|
||||||
|
|
||||||
|
|
||||||
def help(shortcut: bool = False):
|
# casadm commands
|
||||||
return TestRun.executor.run(help_cmd(shortcut))
|
|
||||||
|
|
||||||
|
|
||||||
def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
|
def start_cache(
|
||||||
cache_line_size: CacheLineSize = None, cache_id: int = None,
|
cache_dev: Device,
|
||||||
force: bool = False, load: bool = False, shortcut: bool = False,
|
cache_mode: CacheMode = None,
|
||||||
kernel_params: KernelParameters = KernelParameters()):
|
cache_line_size: CacheLineSize = None,
|
||||||
|
cache_id: int = None,
|
||||||
|
force: bool = False,
|
||||||
|
load: bool = False,
|
||||||
|
shortcut: bool = False,
|
||||||
|
kernel_params: KernelParameters = KernelParameters(),
|
||||||
|
) -> Cache:
|
||||||
if kernel_params != KernelParameters.read_current_settings():
|
if kernel_params != KernelParameters.read_current_settings():
|
||||||
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
|
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
|
||||||
|
|
||||||
_cache_line_size = None if cache_line_size is None else str(
|
_cache_line_size = (
|
||||||
int(cache_line_size.value.get_value(Unit.KibiByte)))
|
str(int(cache_line_size.value.get_value(Unit.KibiByte)))
|
||||||
_cache_id = None if cache_id is None else str(cache_id)
|
if cache_line_size is not None
|
||||||
_cache_mode = None if cache_mode is None else cache_mode.name.lower()
|
else None
|
||||||
output = TestRun.executor.run(start_cmd(
|
)
|
||||||
cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
|
_cache_id = str(cache_id) if cache_id is not None else None
|
||||||
cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
|
_cache_mode = cache_mode.name.lower() if cache_mode else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
start_cmd(
|
||||||
|
cache_dev=cache_dev.path,
|
||||||
|
cache_mode=_cache_mode,
|
||||||
|
cache_line_size=_cache_line_size,
|
||||||
|
cache_id=_cache_id,
|
||||||
|
force=force,
|
||||||
|
load=load,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to start cache.", output)
|
raise CmdException("Failed to start cache.", output)
|
||||||
return Cache(cache_dev)
|
return Cache(cache_dev)
|
||||||
|
|
||||||
|
|
||||||
def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSize,
|
def load_cache(device: Device, shortcut: bool = False) -> Cache:
|
||||||
force: bool = False, shortcut: bool = False,
|
output = TestRun.executor.run(load_cmd(cache_dev=device.path, shortcut=shortcut))
|
||||||
kernel_params: KernelParameters = KernelParameters()):
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to load cache.", output)
|
||||||
|
return Cache(device)
|
||||||
|
|
||||||
|
|
||||||
|
def attach_cache(cache_id: int, device: Device, force: bool, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
attach_cache_cmd(
|
||||||
|
cache_dev=device.path, cache_id=str(cache_id), force=force, shortcut=shortcut
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to attach cache.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def detach_cache(cache_id: int, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(detach_cache_cmd(cache_id=str(cache_id), shortcut=shortcut))
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to detach cache.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to stop cache.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cutoff(
|
||||||
|
cache_id: int,
|
||||||
|
core_id: int = None,
|
||||||
|
threshold: Size = None,
|
||||||
|
policy: SeqCutOffPolicy = None,
|
||||||
|
promotion_count: int = None,
|
||||||
|
shortcut: bool = False,
|
||||||
|
) -> Output:
|
||||||
|
_core_id = str(core_id) if core_id is not None else None
|
||||||
|
_threshold = str(int(threshold.get_value(Unit.KibiByte))) if threshold else None
|
||||||
|
_policy = policy.name if policy else None
|
||||||
|
_promotion_count = str(promotion_count) if promotion_count is not None else None
|
||||||
|
command = set_param_cutoff_cmd(
|
||||||
|
cache_id=str(cache_id),
|
||||||
|
core_id=_core_id,
|
||||||
|
threshold=_threshold,
|
||||||
|
policy=_policy,
|
||||||
|
promotion_count=_promotion_count,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
output = TestRun.executor.run(command)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Error while setting sequential cut-off params.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cleaning(cache_id: int, policy: CleaningPolicy, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Error while setting cleaning policy.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cleaning_alru(
|
||||||
|
cache_id: int,
|
||||||
|
wake_up: int = None,
|
||||||
|
staleness_time: int = None,
|
||||||
|
flush_max_buffers: int = None,
|
||||||
|
activity_threshold: int = None,
|
||||||
|
shortcut: bool = False,
|
||||||
|
) -> Output:
|
||||||
|
_wake_up = str(wake_up) if wake_up is not None else None
|
||||||
|
_staleness_time = str(staleness_time) if staleness_time is not None else None
|
||||||
|
_flush_max_buffers = str(flush_max_buffers) if flush_max_buffers is not None else None
|
||||||
|
_activity_threshold = str(activity_threshold) if activity_threshold is not None else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
set_param_cleaning_alru_cmd(
|
||||||
|
cache_id=str(cache_id),
|
||||||
|
wake_up=_wake_up,
|
||||||
|
staleness_time=_staleness_time,
|
||||||
|
flush_max_buffers=_flush_max_buffers,
|
||||||
|
activity_threshold=_activity_threshold,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Error while setting alru cleaning policy parameters.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cleaning_acp(
|
||||||
|
cache_id: int, wake_up: int = None, flush_max_buffers: int = None, shortcut: bool = False
|
||||||
|
) -> Output:
|
||||||
|
_wake_up = str(wake_up) if wake_up is not None else None
|
||||||
|
_flush_max_buffers = str(flush_max_buffers) if flush_max_buffers is not None else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
set_param_cleaning_acp_cmd(
|
||||||
|
cache_id=str(cache_id),
|
||||||
|
wake_up=_wake_up,
|
||||||
|
flush_max_buffers=_flush_max_buffers,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Error while setting acp cleaning policy parameters.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cutoff(
|
||||||
|
cache_id: int, core_id: int, output_format: OutputFormat = None, shortcut: bool = False
|
||||||
|
) -> Output:
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
get_param_cutoff_cmd(
|
||||||
|
cache_id=str(cache_id),
|
||||||
|
core_id=str(core_id),
|
||||||
|
output_format=_output_format,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Getting sequential cutoff params failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False):
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
get_param_cleaning_cmd(
|
||||||
|
cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Getting cleaning policy failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cleaning_alru(
|
||||||
|
cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
|
||||||
|
):
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
get_param_cleaning_alru_cmd(
|
||||||
|
cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Getting alru cleaning policy params failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cleaning_acp(
|
||||||
|
cache_id: int, output_format: OutputFormat = None, shortcut: bool = False
|
||||||
|
):
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
get_param_cleaning_acp_cmd(
|
||||||
|
cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Getting acp cleaning policy params failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def set_cache_mode(
|
||||||
|
cache_mode: CacheMode, cache_id: int, flush=None, shortcut: bool = False
|
||||||
|
) -> Output:
|
||||||
|
flush_cache = None
|
||||||
|
if flush:
|
||||||
|
flush_cache = "yes" if flush else "no"
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
set_cache_mode_cmd(
|
||||||
|
cache_mode=cache_mode.name.lower(),
|
||||||
|
cache_id=str(cache_id),
|
||||||
|
flush_cache=flush_cache,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Set cache mode command failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False) -> Core:
|
||||||
|
_core_id = str(core_id) if core_id is not None else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
add_core_cmd(
|
||||||
|
cache_id=str(cache.cache_id),
|
||||||
|
core_dev=core_dev.path,
|
||||||
|
core_id=_core_id,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to add core.", output)
|
||||||
|
return Core(core_dev.path, cache.cache_id)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
remove_core_cmd(
|
||||||
|
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to remove core.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def remove_inactive(
|
||||||
|
cache_id: int, core_id: int, force: bool = False, shortcut: bool = False
|
||||||
|
) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
remove_inactive_cmd(
|
||||||
|
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to remove inactive core.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def remove_detached(core_device: Device, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to remove detached core.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def list_caches(
|
||||||
|
output_format: OutputFormat = None, by_id_path: bool = True, shortcut: bool = False
|
||||||
|
) -> Output:
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
list_caches_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to list caches.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def print_statistics(
|
||||||
|
cache_id: int,
|
||||||
|
core_id: int = None,
|
||||||
|
io_class_id: int = None,
|
||||||
|
filter: List[StatsFilter] = None,
|
||||||
|
output_format: OutputFormat = None,
|
||||||
|
by_id_path: bool = True,
|
||||||
|
shortcut: bool = False,
|
||||||
|
) -> Output:
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
_io_class_id = str(io_class_id) if io_class_id is not None else None
|
||||||
|
_core_id = str(core_id) if core_id is not None else None
|
||||||
|
if filter is None:
|
||||||
|
_filter = filter
|
||||||
|
else:
|
||||||
|
names = (x.name for x in filter)
|
||||||
|
_filter = ",".join(names)
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
print_statistics_cmd(
|
||||||
|
cache_id=str(cache_id),
|
||||||
|
core_id=_core_id,
|
||||||
|
io_class_id=_io_class_id,
|
||||||
|
filter=_filter,
|
||||||
|
output_format=_output_format,
|
||||||
|
by_id_path=by_id_path,
|
||||||
|
shortcut=shortcut,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Printing statistics failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False) -> Output:
|
||||||
|
_core_id = str(core_id) if core_id is not None else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to reset counters.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def flush_cache(cache_id: int, shortcut: bool = False) -> Output:
|
||||||
|
command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut)
|
||||||
|
output = TestRun.executor.run(command)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Flushing cache failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def flush_core(cache_id: int, core_id: int, shortcut: bool = False) -> Output:
|
||||||
|
command = flush_core_cmd(cache_id=str(cache_id), core_id=str(core_id), shortcut=shortcut)
|
||||||
|
output = TestRun.executor.run(command)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Flushing core failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def load_io_classes(cache_id: int, file: str, shortcut: bool = False) -> Output:
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Load IO class command failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False) -> Output:
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(
|
||||||
|
list_io_classes_cmd(cache_id=str(cache_id), output_format=_output_format, shortcut=shortcut)
|
||||||
|
)
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("List IO class command failed.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def print_version(output_format: OutputFormat = None, shortcut: bool = False) -> Output:
|
||||||
|
_output_format = output_format.name if output_format else None
|
||||||
|
output = TestRun.executor.run(version_cmd(output_format=_output_format, shortcut=shortcut))
|
||||||
|
if output.exit_code != 0:
|
||||||
|
raise CmdException("Failed to print version.", output)
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
def help(shortcut: bool = False) -> Output:
|
||||||
|
return TestRun.executor.run(help_cmd(shortcut))
|
||||||
|
|
||||||
|
|
||||||
|
def standby_init(
|
||||||
|
cache_dev: Device,
|
||||||
|
cache_id: int,
|
||||||
|
cache_line_size: CacheLineSize,
|
||||||
|
force: bool = False,
|
||||||
|
shortcut: bool = False,
|
||||||
|
kernel_params: KernelParameters = KernelParameters(),
|
||||||
|
) -> Cache:
|
||||||
if kernel_params != KernelParameters.read_current_settings():
|
if kernel_params != KernelParameters.read_current_settings():
|
||||||
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
|
reload_kernel_module("cas_cache", kernel_params.get_parameter_dictionary())
|
||||||
|
_cache_line_size = str(int(cache_line_size.value.get_value(Unit.KibiByte)))
|
||||||
_cache_line_size = None if cache_line_size is None else str(
|
|
||||||
int(cache_line_size.value.get_value(Unit.KibiByte)))
|
|
||||||
|
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(
|
||||||
standby_init_cmd(
|
standby_init_cmd(
|
||||||
@ -65,334 +429,92 @@ def standby_init(cache_dev: Device, cache_id: int, cache_line_size: CacheLineSiz
|
|||||||
return Cache(cache_dev)
|
return Cache(cache_dev)
|
||||||
|
|
||||||
|
|
||||||
def standby_load(cache_dev: Device, shortcut: bool = False):
|
def standby_load(cache_dev: Device, shortcut: bool = False) -> Cache:
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
|
||||||
standby_load_cmd(cache_dev=cache_dev.path, shortcut=shortcut)
|
|
||||||
)
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to load standby cache.", output)
|
raise CmdException("Failed to load standby cache.", output)
|
||||||
return Cache(cache_dev)
|
return Cache(cache_dev)
|
||||||
|
|
||||||
|
|
||||||
def standby_detach_cache(cache_id: int, shortcut: bool = False):
|
def standby_detach_cache(cache_id: int, shortcut: bool = False) -> Output:
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut))
|
||||||
standby_detach_cmd(cache_id=str(cache_id), shortcut=shortcut)
|
|
||||||
)
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to detach standby cache.", output)
|
raise CmdException("Failed to detach standby cache.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False):
|
def standby_activate_cache(cache_dev: Device, cache_id: int, shortcut: bool = False) -> Output:
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(
|
||||||
standby_activate_cmd(
|
standby_activate_cmd(cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut)
|
||||||
cache_dev=cache_dev.path, cache_id=str(cache_id), shortcut=shortcut
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to activate standby cache.", output)
|
raise CmdException("Failed to activate standby cache.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
|
def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False) -> Output:
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(
|
||||||
stop_cmd(cache_id=str(cache_id), no_data_flush=no_data_flush, shortcut=shortcut))
|
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut)
|
||||||
|
)
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to stop cache.", output)
|
raise CmdException("Failed to wipe metadata.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
|
# script command
|
||||||
_core_id = None if core_id is None else str(core_id)
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path,
|
|
||||||
core_id=_core_id, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to add core.", output)
|
|
||||||
core = Core(core_dev.path, cache.cache_id)
|
|
||||||
return core
|
|
||||||
|
|
||||||
|
|
||||||
def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
|
def try_add(core_device: Device, cache_id: int, core_id: int) -> Core:
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path, str(core_id)))
|
||||||
remove_core_cmd(cache_id=str(cache_id), core_id=str(core_id),
|
|
||||||
force=force, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to remove core.", output)
|
|
||||||
|
|
||||||
|
|
||||||
def remove_inactive(cache_id: int, core_id: int, force: bool = False, shortcut: bool = False):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
remove_inactive_cmd(
|
|
||||||
cache_id=str(cache_id), core_id=str(core_id), force=force, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to remove inactive core.", output)
|
|
||||||
|
|
||||||
|
|
||||||
def remove_detached(core_device: Device, shortcut: bool = False):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to remove detached core.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def try_add(core_device: Device, cache_id: int, core_id: int):
|
|
||||||
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path,
|
|
||||||
str(core_id)))
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to execute try add script command.", output)
|
raise CmdException("Failed to execute try add script command.", output)
|
||||||
return Core(core_device.path, cache_id)
|
return Core(core_device.path, cache_id)
|
||||||
|
|
||||||
|
|
||||||
def purge_cache(cache_id: int):
|
def purge_cache(cache_id: int) -> Output:
|
||||||
output = TestRun.executor.run(script_purge_cache_cmd(str(cache_id)))
|
output = TestRun.executor.run(script_purge_cache_cmd(str(cache_id)))
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Purge cache failed.", output)
|
raise CmdException("Purge cache failed.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def purge_core(cache_id: int, core_id: int):
|
def purge_core(cache_id: int, core_id: int) -> Output:
|
||||||
output = TestRun.executor.run(script_purge_core_cmd(str(cache_id), str(core_id)))
|
output = TestRun.executor.run(script_purge_core_cmd(str(cache_id), str(core_id)))
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Purge core failed.", output)
|
raise CmdException("Purge core failed.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def detach_core(cache_id: int, core_id: int):
|
def detach_core(cache_id: int, core_id: int) -> Output:
|
||||||
output = TestRun.executor.run(script_detach_core_cmd(str(cache_id), str(core_id)))
|
output = TestRun.executor.run(script_detach_core_cmd(str(cache_id), str(core_id)))
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to execute detach core script command.", output)
|
raise CmdException("Failed to execute detach core script command.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def remove_core_with_script_command(cache_id: int, core_id: int, no_flush: bool = False):
|
def remove_core_with_script_command(cache_id: int, core_id: int, no_flush: bool = False) -> Output:
|
||||||
output = TestRun.executor.run(script_remove_core_cmd(str(cache_id), str(core_id), no_flush))
|
output = TestRun.executor.run(script_remove_core_cmd(str(cache_id), str(core_id), no_flush))
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to execute remove core script command.", output)
|
raise CmdException("Failed to execute remove core script command.", output)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
|
||||||
def reset_counters(cache_id: int, core_id: int = None, shortcut: bool = False):
|
# casadm custom commands
|
||||||
_core_id = None if core_id is None else str(core_id)
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
reset_counters_cmd(cache_id=str(cache_id), core_id=_core_id, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to reset counters.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
|
def stop_all_caches() -> None:
|
||||||
if core_id is None:
|
from api.cas.casadm_parser import get_caches
|
||||||
command = flush_cache_cmd(cache_id=str(cache_id), shortcut=shortcut)
|
|
||||||
else:
|
|
||||||
command = flush_core_cmd(cache_id=str(cache_id), core_id=str(core_id), shortcut=shortcut)
|
|
||||||
output = TestRun.executor.run(command)
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Flushing failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
caches = get_caches()
|
||||||
def load_cache(device: Device, shortcut: bool = False):
|
if not caches:
|
||||||
output = TestRun.executor.run(
|
|
||||||
load_cmd(cache_dev=device.path, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to load cache.", output)
|
|
||||||
return Cache(device)
|
|
||||||
|
|
||||||
|
|
||||||
def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
|
|
||||||
shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to list caches.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def print_version(output_format: OutputFormat = None, shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
version_cmd(output_format=_output_format, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to print version.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def zero_metadata(cache_dev: Device, force: bool = False, shortcut: bool = False):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
zero_metadata_cmd(cache_dev=cache_dev.path, force=force, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to wipe metadata.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def stop_all_caches():
|
|
||||||
if "No caches running" in list_caches().stdout:
|
|
||||||
return
|
return
|
||||||
TestRun.LOGGER.info("Stop all caches")
|
for cache in caches:
|
||||||
stop_output = casctl_stop()
|
stop_cache(cache_id=cache.cache_id, no_data_flush=True)
|
||||||
caches_output = list_caches()
|
|
||||||
if "No caches running" not in caches_output.stdout:
|
|
||||||
raise CmdException(f"Error while stopping caches. "
|
|
||||||
f"Listing caches: {caches_output}", stop_output)
|
|
||||||
|
|
||||||
|
|
||||||
def remove_all_detached_cores():
|
def remove_all_detached_cores() -> None:
|
||||||
from api.cas import casadm_parser
|
from api.cas.casadm_parser import get_cas_devices_dict
|
||||||
devices = casadm_parser.get_cas_devices_dict()
|
|
||||||
|
devices = get_cas_devices_dict()
|
||||||
for dev in devices["core_pool"]:
|
for dev in devices["core_pool"]:
|
||||||
TestRun.executor.run(remove_detached_cmd(dev["device"]))
|
TestRun.executor.run(remove_detached_cmd(dev["device"]))
|
||||||
|
|
||||||
|
|
||||||
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
|
|
||||||
io_class_id: int = None, filter: List[StatsFilter] = None,
|
|
||||||
output_format: OutputFormat = None, by_id_path: bool = True,
|
|
||||||
shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
_core_id = None if core_id is None else str(core_id)
|
|
||||||
_io_class_id = None if io_class_id is None else str(io_class_id)
|
|
||||||
if filter is None:
|
|
||||||
_filter = filter
|
|
||||||
else:
|
|
||||||
names = (x.name for x in filter)
|
|
||||||
_filter = ",".join(names)
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
print_statistics_cmd(
|
|
||||||
cache_id=str(cache_id), core_id=_core_id,
|
|
||||||
per_io_class=per_io_class, io_class_id=_io_class_id,
|
|
||||||
filter=_filter, output_format=_output_format,
|
|
||||||
by_id_path=by_id_path, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Printing statistics failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def set_cache_mode(cache_mode: CacheMode, cache_id: int,
|
|
||||||
flush=None, shortcut: bool = False):
|
|
||||||
flush_cache = None
|
|
||||||
if flush is True:
|
|
||||||
flush_cache = "yes"
|
|
||||||
elif flush is False:
|
|
||||||
flush_cache = "no"
|
|
||||||
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
set_cache_mode_cmd(cache_mode=cache_mode.name.lower(), cache_id=str(cache_id),
|
|
||||||
flush_cache=flush_cache, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Set cache mode command failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def load_io_classes(cache_id: int, file: str, shortcut: bool = False):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
load_io_classes_cmd(cache_id=str(cache_id), file=file, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Load IO class command failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def list_io_classes(cache_id: int, output_format: OutputFormat, shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
list_io_classes_cmd(cache_id=str(cache_id),
|
|
||||||
output_format=_output_format, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("List IO class command failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cutoff(cache_id: int, core_id: int,
|
|
||||||
output_format: OutputFormat = None, shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
get_param_cutoff_cmd(cache_id=str(cache_id), core_id=str(core_id),
|
|
||||||
output_format=_output_format, shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Getting sequential cutoff params failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cleaning(cache_id: int, output_format: OutputFormat = None, shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
get_param_cleaning_cmd(cache_id=str(cache_id), output_format=_output_format,
|
|
||||||
shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Getting cleaning policy params failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cleaning_alru(cache_id: int, output_format: OutputFormat = None,
|
|
||||||
shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
get_param_cleaning_alru_cmd(cache_id=str(cache_id), output_format=_output_format,
|
|
||||||
shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Getting alru cleaning policy params failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cleaning_acp(cache_id: int, output_format: OutputFormat = None,
|
|
||||||
shortcut: bool = False):
|
|
||||||
_output_format = None if output_format is None else output_format.name
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
get_param_cleaning_acp_cmd(cache_id=str(cache_id), output_format=_output_format,
|
|
||||||
shortcut=shortcut))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Getting acp cleaning policy params failed.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
|
|
||||||
policy: SeqCutOffPolicy = None, promotion_count: int = None):
|
|
||||||
_core_id = None if core_id is None else str(core_id)
|
|
||||||
_threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte)))
|
|
||||||
_policy = None if policy is None else policy.name
|
|
||||||
_promotion_count = None if promotion_count is None else str(promotion_count)
|
|
||||||
command = set_param_cutoff_cmd(
|
|
||||||
cache_id=str(cache_id),
|
|
||||||
core_id=_core_id,
|
|
||||||
threshold=_threshold,
|
|
||||||
policy=_policy,
|
|
||||||
promotion_count=_promotion_count
|
|
||||||
)
|
|
||||||
output = TestRun.executor.run(command)
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Error while setting sequential cut-off params.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cleaning(cache_id: int, policy: CleaningPolicy):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
set_param_cleaning_cmd(cache_id=str(cache_id), policy=policy.name))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Error while setting cleaning policy.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cleaning_alru(cache_id: int, wake_up: int = None, staleness_time: int = None,
|
|
||||||
flush_max_buffers: int = None, activity_threshold: int = None):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
set_param_cleaning_alru_cmd(
|
|
||||||
cache_id=cache_id,
|
|
||||||
wake_up=wake_up,
|
|
||||||
staleness_time=staleness_time,
|
|
||||||
flush_max_buffers=flush_max_buffers,
|
|
||||||
activity_threshold=activity_threshold))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Error while setting alru cleaning policy parameters.", output)
|
|
||||||
return output
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cleaning_acp(cache_id: int, wake_up: int = None, flush_max_buffers: int = None):
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
set_param_cleaning_acp_cmd(
|
|
||||||
cache_id=str(cache_id),
|
|
||||||
wake_up=str(wake_up) if wake_up is not None else None,
|
|
||||||
flush_max_buffers=str(flush_max_buffers) if flush_max_buffers else None))
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Error while setting acp cleaning policy parameters.", output)
|
|
||||||
return output
|
|
||||||
|
@ -1,9 +1,22 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
from aenum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
|
|
||||||
|
class ParamName(Enum):
|
||||||
|
seq_cutoff = "seq-cutoff"
|
||||||
|
cleaning = "cleaning"
|
||||||
|
cleaning_alru = "cleaning-alru"
|
||||||
|
cleaning_acp = "cleaning-acp"
|
||||||
|
promotion = "promotion"
|
||||||
|
promotion_nhit = "promotion-nhit"
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.value
|
||||||
|
|
||||||
|
|
||||||
class OutputFormat(Enum):
|
class OutputFormat(Enum):
|
||||||
|
@ -1,12 +1,13 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
import csv
|
import csv
|
||||||
import io
|
import io
|
||||||
import json
|
import json
|
||||||
import re
|
|
||||||
from datetime import timedelta, datetime
|
from datetime import timedelta, datetime
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
@ -18,7 +19,6 @@ from api.cas.version import CasVersion
|
|||||||
from core.test_run_utils import TestRun
|
from core.test_run_utils import TestRun
|
||||||
from storage_devices.device import Device
|
from storage_devices.device import Device
|
||||||
from test_utils.output import CmdException
|
from test_utils.output import CmdException
|
||||||
from test_utils.size import parse_unit
|
|
||||||
|
|
||||||
|
|
||||||
class Stats(dict):
|
class Stats(dict):
|
||||||
@ -26,188 +26,79 @@ class Stats(dict):
|
|||||||
return json.dumps(self, default=lambda o: str(o), indent=2)
|
return json.dumps(self, default=lambda o: str(o), indent=2)
|
||||||
|
|
||||||
|
|
||||||
def parse_stats_unit(unit: str):
|
|
||||||
if unit is None:
|
|
||||||
return ""
|
|
||||||
|
|
||||||
unit = re.search(r".*[^\]]", unit).group()
|
|
||||||
|
|
||||||
if unit == "s":
|
|
||||||
return "s"
|
|
||||||
elif unit == "%":
|
|
||||||
return "%"
|
|
||||||
elif unit == "Requests":
|
|
||||||
return "requests"
|
|
||||||
else:
|
|
||||||
return parse_unit(unit)
|
|
||||||
|
|
||||||
|
|
||||||
def get_filter(filter: List[StatsFilter]):
|
def get_filter(filter: List[StatsFilter]):
|
||||||
"""Prepare list of statistic sections which should be retrieved and parsed."""
|
"""Prepare list of statistic sections which should be retrieved and parsed."""
|
||||||
if filter is None or StatsFilter.all in filter:
|
if filter is None or StatsFilter.all in filter:
|
||||||
_filter = [
|
_filter = [f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)]
|
||||||
f for f in StatsFilter if (f != StatsFilter.all and f != StatsFilter.conf)
|
|
||||||
]
|
|
||||||
else:
|
else:
|
||||||
_filter = [
|
_filter = [f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)]
|
||||||
f for f in filter if (f != StatsFilter.all and f != StatsFilter.conf)
|
|
||||||
]
|
|
||||||
|
|
||||||
return _filter
|
return _filter
|
||||||
|
|
||||||
|
|
||||||
def get_statistics(
|
def get_caches() -> list:
|
||||||
cache_id: int,
|
|
||||||
core_id: int = None,
|
|
||||||
io_class_id: int = None,
|
|
||||||
filter: List[StatsFilter] = None,
|
|
||||||
percentage_val: bool = False,
|
|
||||||
):
|
|
||||||
stats = Stats()
|
|
||||||
|
|
||||||
_filter = get_filter(filter)
|
|
||||||
|
|
||||||
per_io_class = True if io_class_id is not None else False
|
|
||||||
|
|
||||||
# No need to retrieve all stats if user specified only 'conf' flag
|
|
||||||
if filter != [StatsFilter.conf]:
|
|
||||||
csv_stats = casadm.print_statistics(
|
|
||||||
cache_id=cache_id,
|
|
||||||
core_id=core_id,
|
|
||||||
per_io_class=per_io_class,
|
|
||||||
io_class_id=io_class_id,
|
|
||||||
filter=_filter,
|
|
||||||
output_format=casadm.OutputFormat.csv,
|
|
||||||
).stdout.splitlines()
|
|
||||||
|
|
||||||
if filter is None or StatsFilter.conf in filter or StatsFilter.all in filter:
|
|
||||||
# Conf statistics have different unit or may have no unit at all. For parsing
|
|
||||||
# convenience they are gathered separately. As this is only configuration stats
|
|
||||||
# there is no risk they are divergent.
|
|
||||||
conf_stats = casadm.print_statistics(
|
|
||||||
cache_id=cache_id,
|
|
||||||
core_id=core_id,
|
|
||||||
per_io_class=per_io_class,
|
|
||||||
io_class_id=io_class_id,
|
|
||||||
filter=[StatsFilter.conf],
|
|
||||||
output_format=casadm.OutputFormat.csv,
|
|
||||||
).stdout.splitlines()
|
|
||||||
stat_keys = conf_stats[0]
|
|
||||||
stat_values = conf_stats[1]
|
|
||||||
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
|
|
||||||
# Some of configuration stats have no unit
|
|
||||||
try:
|
|
||||||
stat_name, stat_unit = name.split(" [")
|
|
||||||
except ValueError:
|
|
||||||
stat_name = name
|
|
||||||
stat_unit = None
|
|
||||||
|
|
||||||
stat_name = stat_name.lower()
|
|
||||||
|
|
||||||
# 'dirty for' and 'cache size' stats occurs twice
|
|
||||||
if stat_name in stats:
|
|
||||||
continue
|
|
||||||
|
|
||||||
stat_unit = parse_stats_unit(stat_unit)
|
|
||||||
|
|
||||||
if isinstance(stat_unit, Unit):
|
|
||||||
stats[stat_name] = Size(float(val), stat_unit)
|
|
||||||
elif stat_unit == "s":
|
|
||||||
stats[stat_name] = timedelta(seconds=int(val))
|
|
||||||
elif stat_unit == "":
|
|
||||||
# Some of stats without unit can be a number like IDs,
|
|
||||||
# some of them can be string like device path
|
|
||||||
try:
|
|
||||||
stats[stat_name] = float(val)
|
|
||||||
except ValueError:
|
|
||||||
stats[stat_name] = val
|
|
||||||
|
|
||||||
# No need to parse all stats if user specified only 'conf' flag
|
|
||||||
if filter == [StatsFilter.conf]:
|
|
||||||
return stats
|
|
||||||
|
|
||||||
stat_keys = csv_stats[0]
|
|
||||||
stat_values = csv_stats[1]
|
|
||||||
for (name, val) in zip(stat_keys.split(","), stat_values.split(",")):
|
|
||||||
if percentage_val and " [%]" in name:
|
|
||||||
stats[name.split(" [")[0].lower()] = float(val)
|
|
||||||
elif not percentage_val and "[%]" not in name:
|
|
||||||
stat_name, stat_unit = name.split(" [")
|
|
||||||
|
|
||||||
stat_unit = parse_stats_unit(stat_unit)
|
|
||||||
|
|
||||||
stat_name = stat_name.lower()
|
|
||||||
|
|
||||||
if isinstance(stat_unit, Unit):
|
|
||||||
stats[stat_name] = Size(float(val), stat_unit)
|
|
||||||
elif stat_unit == "requests":
|
|
||||||
stats[stat_name] = float(val)
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Invalid unit {stat_unit}")
|
|
||||||
|
|
||||||
return stats
|
|
||||||
|
|
||||||
|
|
||||||
def get_caches(): # This method does not return inactive or detached CAS devices
|
|
||||||
from api.cas.cache import Cache
|
from api.cas.cache import Cache
|
||||||
|
|
||||||
|
caches_dict = get_cas_devices_dict()["caches"]
|
||||||
caches_list = []
|
caches_list = []
|
||||||
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
|
|
||||||
for line in lines:
|
for cache in caches_dict.values():
|
||||||
args = line.split(',')
|
caches_list.append(
|
||||||
if args[0] == "cache":
|
Cache(
|
||||||
current_cache = Cache(Device(args[2]))
|
device=(Device(cache["device_path"]) if cache["device_path"] != "-" else None),
|
||||||
caches_list.append(current_cache)
|
cache_id=cache["id"],
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
return caches_list
|
return caches_list
|
||||||
|
|
||||||
|
|
||||||
def get_cores(cache_id: int):
|
def get_cores(cache_id: int) -> list:
|
||||||
from api.cas.core import Core, CoreStatus
|
from api.cas.core import Core, CoreStatus
|
||||||
cores_list = []
|
|
||||||
lines = casadm.list_caches(OutputFormat.csv).stdout.split('\n')
|
cores_dict = get_cas_devices_dict()["cores"].values()
|
||||||
is_proper_core_line = False
|
|
||||||
for line in lines:
|
def is_active(core):
|
||||||
args = line.split(',')
|
return CoreStatus[core["status"].lower()] == CoreStatus.active
|
||||||
if args[0] == "core" and is_proper_core_line:
|
|
||||||
core_status_str = args[3].lower()
|
return [
|
||||||
is_valid_status = CoreStatus[core_status_str].value[0] <= 1
|
Core(core["device_path"], core["cache_id"])
|
||||||
if is_valid_status:
|
for core in cores_dict
|
||||||
cores_list.append(Core(args[2], cache_id))
|
if is_active(core) and core["cache_id"] == cache_id
|
||||||
if args[0] == "cache":
|
]
|
||||||
is_proper_core_line = True if int(args[1]) == cache_id else False
|
|
||||||
return cores_list
|
|
||||||
|
|
||||||
|
|
||||||
def get_cas_devices_dict():
|
def get_cas_devices_dict() -> dict:
|
||||||
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split('\n')))
|
device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split("\n")))
|
||||||
devices = {"core_pool": [], "caches": {}, "cores": {}}
|
devices = {"caches": {}, "cores": {}, "core_pool": {}}
|
||||||
|
cache_id = -1
|
||||||
core_pool = False
|
core_pool = False
|
||||||
prev_cache_id = -1
|
|
||||||
|
|
||||||
for device in device_list:
|
for device in device_list:
|
||||||
if device["type"] == "core pool":
|
|
||||||
core_pool = True
|
|
||||||
continue
|
|
||||||
|
|
||||||
if device["type"] == "cache":
|
if device["type"] == "cache":
|
||||||
core_pool = False
|
cache_id = int(device["id"])
|
||||||
prev_cache_id = int(device["id"])
|
params = [
|
||||||
devices["caches"].update(
|
("id", cache_id),
|
||||||
{
|
("device_path", device["disk"]),
|
||||||
int(device["id"]): {
|
("status", device["status"]),
|
||||||
"device": device["disk"],
|
]
|
||||||
"status": device["status"],
|
devices["caches"][cache_id] = dict([(key, value) for key, value in params])
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
elif device["type"] == "core":
|
elif device["type"] == "core":
|
||||||
core = {"device": device["disk"], "status": device["status"]}
|
params = [
|
||||||
|
("cache_id", cache_id),
|
||||||
|
("device_path", device["disk"]),
|
||||||
|
("status", device["status"]),
|
||||||
|
]
|
||||||
if core_pool:
|
if core_pool:
|
||||||
devices["core_pool"].append(core)
|
params.append(("core_pool", device))
|
||||||
else:
|
devices["core_pool"][(cache_id, int(device["id"]))] = dict(
|
||||||
core.update({"cache_id": prev_cache_id})
|
[(key, value) for key, value in params]
|
||||||
devices["cores"].update(
|
|
||||||
{(prev_cache_id, int(device["id"])): core}
|
|
||||||
)
|
)
|
||||||
|
else:
|
||||||
|
devices["cores"][(cache_id, int(device["id"]))] = dict(
|
||||||
|
[(key, value) for key, value in params]
|
||||||
|
)
|
||||||
|
|
||||||
return devices
|
return devices
|
||||||
|
|
||||||
|
|
||||||
@ -215,20 +106,26 @@ def get_flushing_progress(cache_id: int, core_id: int = None):
|
|||||||
casadm_output = casadm.list_caches(OutputFormat.csv)
|
casadm_output = casadm.list_caches(OutputFormat.csv)
|
||||||
lines = casadm_output.stdout.splitlines()
|
lines = casadm_output.stdout.splitlines()
|
||||||
for line in lines:
|
for line in lines:
|
||||||
line_elements = line.split(',')
|
line_elements = line.split(",")
|
||||||
if core_id is not None and line_elements[0] == "core" \
|
if (
|
||||||
and int(line_elements[1]) == core_id \
|
core_id is not None
|
||||||
or core_id is None and line_elements[0] == "cache" \
|
and line_elements[0] == "core"
|
||||||
and int(line_elements[1]) == cache_id:
|
and int(line_elements[1]) == core_id
|
||||||
|
or core_id is None
|
||||||
|
and line_elements[0] == "cache"
|
||||||
|
and int(line_elements[1]) == cache_id
|
||||||
|
):
|
||||||
try:
|
try:
|
||||||
flush_line_elements = line_elements[3].split()
|
flush_line_elements = line_elements[3].split()
|
||||||
flush_percent = flush_line_elements[1][1:]
|
flush_percent = flush_line_elements[1][1:]
|
||||||
return float(flush_percent)
|
return float(flush_percent)
|
||||||
except Exception:
|
except Exception:
|
||||||
break
|
break
|
||||||
raise CmdException(f"There is no flushing progress in casadm list output. (cache {cache_id}"
|
raise CmdException(
|
||||||
|
f"There is no flushing progress in casadm list output. (cache {cache_id}"
|
||||||
f"{' core ' + str(core_id) if core_id is not None else ''})",
|
f"{' core ' + str(core_id) if core_id is not None else ''})",
|
||||||
casadm_output)
|
casadm_output,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
|
def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
|
||||||
@ -243,54 +140,57 @@ def wait_for_flushing(cache, core, timeout: timedelta = timedelta(seconds=30)):
|
|||||||
|
|
||||||
|
|
||||||
def get_flush_parameters_alru(cache_id: int):
|
def get_flush_parameters_alru(cache_id: int):
|
||||||
casadm_output = casadm.get_param_cleaning_alru(cache_id,
|
casadm_output = casadm.get_param_cleaning_alru(
|
||||||
casadm.OutputFormat.csv).stdout.splitlines()
|
cache_id, casadm.OutputFormat.csv
|
||||||
|
).stdout.splitlines()
|
||||||
flush_parameters = FlushParametersAlru()
|
flush_parameters = FlushParametersAlru()
|
||||||
for line in casadm_output:
|
for line in casadm_output:
|
||||||
if 'max buffers' in line:
|
if "max buffers" in line:
|
||||||
flush_parameters.flush_max_buffers = int(line.split(',')[1])
|
flush_parameters.flush_max_buffers = int(line.split(",")[1])
|
||||||
if 'Activity threshold' in line:
|
if "Activity threshold" in line:
|
||||||
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(',')[1]))
|
flush_parameters.activity_threshold = Time(milliseconds=int(line.split(",")[1]))
|
||||||
if 'Stale buffer time' in line:
|
if "Stale buffer time" in line:
|
||||||
flush_parameters.staleness_time = Time(seconds=int(line.split(',')[1]))
|
flush_parameters.staleness_time = Time(seconds=int(line.split(",")[1]))
|
||||||
if 'Wake up time' in line:
|
if "Wake up time" in line:
|
||||||
flush_parameters.wake_up_time = Time(seconds=int(line.split(',')[1]))
|
flush_parameters.wake_up_time = Time(seconds=int(line.split(",")[1]))
|
||||||
return flush_parameters
|
return flush_parameters
|
||||||
|
|
||||||
|
|
||||||
def get_flush_parameters_acp(cache_id: int):
|
def get_flush_parameters_acp(cache_id: int):
|
||||||
casadm_output = casadm.get_param_cleaning_acp(cache_id,
|
casadm_output = casadm.get_param_cleaning_acp(
|
||||||
casadm.OutputFormat.csv).stdout.splitlines()
|
cache_id, casadm.OutputFormat.csv
|
||||||
|
).stdout.splitlines()
|
||||||
flush_parameters = FlushParametersAcp()
|
flush_parameters = FlushParametersAcp()
|
||||||
for line in casadm_output:
|
for line in casadm_output:
|
||||||
if 'max buffers' in line:
|
if "max buffers" in line:
|
||||||
flush_parameters.flush_max_buffers = int(line.split(',')[1])
|
flush_parameters.flush_max_buffers = int(line.split(",")[1])
|
||||||
if 'Wake up time' in line:
|
if "Wake up time" in line:
|
||||||
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(',')[1]))
|
flush_parameters.wake_up_time = Time(milliseconds=int(line.split(",")[1]))
|
||||||
return flush_parameters
|
return flush_parameters
|
||||||
|
|
||||||
|
|
||||||
def get_seq_cut_off_parameters(cache_id: int, core_id: int):
|
def get_seq_cut_off_parameters(cache_id: int, core_id: int):
|
||||||
casadm_output = casadm.get_param_cutoff(
|
casadm_output = casadm.get_param_cutoff(
|
||||||
cache_id, core_id, casadm.OutputFormat.csv).stdout.splitlines()
|
cache_id, core_id, casadm.OutputFormat.csv
|
||||||
|
).stdout.splitlines()
|
||||||
seq_cut_off_params = SeqCutOffParameters()
|
seq_cut_off_params = SeqCutOffParameters()
|
||||||
for line in casadm_output:
|
for line in casadm_output:
|
||||||
if 'Sequential cutoff threshold' in line:
|
if "Sequential cutoff threshold" in line:
|
||||||
seq_cut_off_params.threshold = Size(int(line.split(',')[1]), Unit.KibiByte)
|
seq_cut_off_params.threshold = Size(int(line.split(",")[1]), Unit.KibiByte)
|
||||||
if 'Sequential cutoff policy' in line:
|
if "Sequential cutoff policy" in line:
|
||||||
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(',')[1])
|
seq_cut_off_params.policy = SeqCutOffPolicy.from_name(line.split(",")[1])
|
||||||
if 'Sequential cutoff promotion request count threshold' in line:
|
if "Sequential cutoff promotion request count threshold" in line:
|
||||||
seq_cut_off_params.promotion_count = int(line.split(',')[1])
|
seq_cut_off_params.promotion_count = int(line.split(",")[1])
|
||||||
return seq_cut_off_params
|
return seq_cut_off_params
|
||||||
|
|
||||||
|
|
||||||
def get_casadm_version():
|
def get_casadm_version():
|
||||||
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split('\n')
|
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split("\n")
|
||||||
version_str = casadm_output[1].split(',')[-1]
|
version_str = casadm_output[1].split(",")[-1]
|
||||||
return CasVersion.from_version_string(version_str)
|
return CasVersion.from_version_string(version_str)
|
||||||
|
|
||||||
|
|
||||||
def get_io_class_list(cache_id: int):
|
def get_io_class_list(cache_id: int) -> list:
|
||||||
ret = []
|
ret = []
|
||||||
casadm_output = casadm.list_io_classes(cache_id, OutputFormat.csv).stdout.splitlines()
|
casadm_output = casadm.list_io_classes(cache_id, OutputFormat.csv).stdout.splitlines()
|
||||||
casadm_output.pop(0) # Remove header
|
casadm_output.pop(0) # Remove header
|
||||||
@ -301,14 +201,16 @@ def get_io_class_list(cache_id: int):
|
|||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
def get_core_info_by_path(core_disk_path):
|
def get_core_info_by_path(core_disk_path) -> dict | None:
|
||||||
output = casadm.list_caches(OutputFormat.csv, by_id_path=True)
|
output = casadm.list_caches(OutputFormat.csv, by_id_path=True)
|
||||||
reader = csv.DictReader(io.StringIO(output.stdout))
|
reader = csv.DictReader(io.StringIO(output.stdout))
|
||||||
for row in reader:
|
for row in reader:
|
||||||
if row['type'] == "core" and row['disk'] == core_disk_path:
|
if row["type"] == "core" and row["disk"] == core_disk_path:
|
||||||
return {"core_id": row['id'],
|
return {
|
||||||
"core_device": row['disk'],
|
"core_id": row["id"],
|
||||||
"status": row['status'],
|
"core_device": row["disk"],
|
||||||
"exp_obj": row['device']}
|
"status": row["status"],
|
||||||
|
"exp_obj": row["device"],
|
||||||
|
}
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -11,88 +12,15 @@ casadm_bin = "casadm"
|
|||||||
casctl = "casctl"
|
casctl = "casctl"
|
||||||
|
|
||||||
|
|
||||||
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False):
|
def start_cmd(
|
||||||
command = f" -A -i {cache_id} -d {core_dev}" if shortcut \
|
cache_dev: str,
|
||||||
else f" --add-core --cache-id {cache_id} --core-device {core_dev}"
|
cache_mode: str = None,
|
||||||
if core_id is not None:
|
cache_line_size: str = None,
|
||||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
cache_id: str = None,
|
||||||
return casadm_bin + command
|
force: bool = False,
|
||||||
|
load: bool = False,
|
||||||
|
shortcut: bool = False,
|
||||||
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str = None):
|
) -> str:
|
||||||
command = f"{casadm_bin} --script --add-core --try-add --cache-id {cache_id} " \
|
|
||||||
f"--core-device {core_dev}"
|
|
||||||
if core_id:
|
|
||||||
command += f" --core-id {core_id}"
|
|
||||||
return command
|
|
||||||
|
|
||||||
|
|
||||||
def script_purge_cache_cmd(cache_id: str):
|
|
||||||
return f"{casadm_bin} --script --purge-cache --cache-id {cache_id}"
|
|
||||||
|
|
||||||
|
|
||||||
def script_purge_core_cmd(cache_id: str, core_id: str):
|
|
||||||
return f"{casadm_bin} --script --purge-core --cache-id {cache_id} --core-id {core_id}"
|
|
||||||
|
|
||||||
|
|
||||||
def script_detach_core_cmd(cache_id: str, core_id: str):
|
|
||||||
return f"{casadm_bin} --script --remove-core --detach --cache-id {cache_id} " \
|
|
||||||
f"--core-id {core_id}"
|
|
||||||
|
|
||||||
|
|
||||||
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False):
|
|
||||||
command = f"{casadm_bin} --script --remove-core --cache-id {cache_id} --core-id {core_id}"
|
|
||||||
if no_flush:
|
|
||||||
command += ' --no-flush'
|
|
||||||
return command
|
|
||||||
|
|
||||||
|
|
||||||
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
|
|
||||||
command = f" -R -i {cache_id} -j {core_id}" if shortcut \
|
|
||||||
else f" --remove-core --cache-id {cache_id} --core-id {core_id}"
|
|
||||||
if force:
|
|
||||||
command += " -f" if shortcut else " --force"
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def remove_inactive_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
|
|
||||||
command = f" --remove-inactive {'-i' if shortcut else '--cache-id'} {cache_id} " \
|
|
||||||
f"{'-j' if shortcut else '--core-id'} {core_id}"
|
|
||||||
if force:
|
|
||||||
command += " -f" if shortcut else " --force"
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def remove_detached_cmd(core_device: str, shortcut: bool = False):
|
|
||||||
command = " --remove-detached" + (" -d " if shortcut else " --device ") + core_device
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def help_cmd(shortcut: bool = False):
|
|
||||||
return casadm_bin + (" -H" if shortcut else " --help")
|
|
||||||
|
|
||||||
|
|
||||||
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False):
|
|
||||||
command = (" -Z -i " if shortcut else " --reset-counters --cache-id ") + cache_id
|
|
||||||
if core_id is not None:
|
|
||||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def flush_cache_cmd(cache_id: str, shortcut: bool = False):
|
|
||||||
command = (" -F -i " if shortcut else " --flush-cache --cache-id ") + cache_id
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False):
|
|
||||||
command = (f" -F -i {cache_id} -j {core_id}" if shortcut
|
|
||||||
else f" --flush-cache --cache-id {cache_id} --core-id {core_id}")
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
|
|
||||||
cache_id: str = None, force: bool = False,
|
|
||||||
load: bool = False, shortcut: bool = False):
|
|
||||||
command = " -S" if shortcut else " --start-cache"
|
command = " -S" if shortcut else " --start-cache"
|
||||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||||
if cache_mode is not None:
|
if cache_mode is not None:
|
||||||
@ -108,8 +36,341 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = Non
|
|||||||
return casadm_bin + command
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
|
def load_cmd(cache_dev: str, shortcut: bool = False) -> str:
|
||||||
force: bool = False, shortcut: bool = False):
|
return start_cmd(cache_dev=cache_dev, load=True, shortcut=shortcut)
|
||||||
|
|
||||||
|
|
||||||
|
def attach_cache_cmd(
|
||||||
|
cache_dev: str, cache_id: str, force: bool = False, shortcut: bool = False
|
||||||
|
) -> str:
|
||||||
|
command = " --attach-cache"
|
||||||
|
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||||
|
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||||
|
if force:
|
||||||
|
command += " -f" if shortcut else " --force"
|
||||||
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def detach_cache_cmd(cache_id: str, shortcut: bool = False) -> str:
|
||||||
|
command = " --detach-cache"
|
||||||
|
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||||
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False) -> str:
|
||||||
|
command = " -T" if shortcut else " --stop-cache"
|
||||||
|
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||||
|
if no_data_flush:
|
||||||
|
command += " --no-data-flush"
|
||||||
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def _set_param_cmd(name: str, cache_id: str, shortcut: bool = False) -> str:
|
||||||
|
command = (" X -n" if shortcut else " --set-param --name ") + name
|
||||||
|
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||||
|
return command
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cutoff_cmd(
|
||||||
|
cache_id: str,
|
||||||
|
core_id: str = None,
|
||||||
|
threshold: str = None,
|
||||||
|
policy: str = None,
|
||||||
|
promotion_count: str = None,
|
||||||
|
shortcut: bool = False,
|
||||||
|
) -> str:
|
||||||
|
name = "seq-cutoff"
|
||||||
|
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||||
|
if core_id:
|
||||||
|
command += (" -j " if shortcut else " --core-id ") + core_id
|
||||||
|
if threshold:
|
||||||
|
command += (" -t " if shortcut else " --threshold ") + threshold
|
||||||
|
if policy:
|
||||||
|
command += (" -p " if shortcut else " --policy ") + policy
|
||||||
|
if promotion_count:
|
||||||
|
command += " --promotion-count " + promotion_count
|
||||||
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
|
||||||
|
name = "promotion"
|
||||||
|
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||||
|
command += (" -p " if shortcut else " --policy ") + policy
|
||||||
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_promotion_nhit_cmd(
|
||||||
|
cache_id: str, threshold: str = None, trigger: str = None, shortcut: bool = False
|
||||||
|
) -> str:
|
||||||
|
name = "promotion-nhit"
|
||||||
|
command = _set_param_cmd(name=name, cache_id=cache_id, shortcut=shortcut)
|
||||||
|
if threshold:
|
||||||
|
command += (" -t " if shortcut else " --threshold ") + threshold
|
||||||
|
if trigger is not None:
|
||||||
|
command += (" -o " if shortcut else " --trigger ") + trigger
|
||||||
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False) -> str:
    """Return a casadm command selecting the cleaning policy of a cache."""
    base = _set_param_cmd(name="cleaning", cache_id=cache_id, shortcut=shortcut)
    return casadm_bin + base + (" -p " if shortcut else " --policy ") + policy
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cleaning_alru_cmd(
    cache_id: str,
    wake_up: str = None,
    staleness_time: str = None,
    flush_max_buffers: str = None,
    activity_threshold: str = None,
    shortcut: bool = False,
) -> str:
    """Return a casadm command tuning the ALRU cleaning policy.

    Each optional parameter is appended only when provided (non-empty).
    """
    optional = [
        (wake_up, " -w ", " --wake-up "),
        (staleness_time, " -s ", " --staleness-time "),
        (flush_max_buffers, " -b ", " --flush-max-buffers "),
        (activity_threshold, " -t ", " --activity-threshold "),
    ]
    command = _set_param_cmd(name="cleaning-alru", cache_id=cache_id, shortcut=shortcut)
    for value, short_flag, long_flag in optional:
        if value:
            command += (short_flag if shortcut else long_flag) + value
    return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def set_param_cleaning_acp_cmd(
    cache_id: str,
    wake_up: str = None,
    flush_max_buffers: str = None,
    shortcut: bool = False,
) -> str:
    """Return a casadm command tuning the ACP cleaning policy.

    Parameters given as None are omitted from the command line.
    """
    parts = [_set_param_cmd(name="cleaning-acp", cache_id=cache_id, shortcut=shortcut)]
    if wake_up is not None:
        parts.append((" -w " if shortcut else " --wake-up ") + wake_up)
    if flush_max_buffers is not None:
        parts.append((" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers)
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_param_cmd(
|
||||||
|
name: str,
|
||||||
|
cache_id: str,
|
||||||
|
output_format: str = None,
|
||||||
|
shortcut: bool = False,
|
||||||
|
) -> str:
|
||||||
|
command = (" -G -n" if shortcut else " --get-param --name ") + name
|
||||||
|
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||||
|
if output_format:
|
||||||
|
command += (" -o " if shortcut else " --output-format ") + output_format
|
||||||
|
return command
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cutoff_cmd(
    cache_id: str, core_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return a casadm command reading sequential cutoff parameters of one core."""
    base = _get_param_cmd(
        name="seq-cutoff",
        cache_id=cache_id,
        output_format=output_format,
        shortcut=shortcut,
    )
    # Core selector goes after the common prefix (including output format).
    return casadm_bin + base + (" -j " if shortcut else " --core-id ") + core_id
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_promotion_cmd(
    cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return a casadm command reading promotion policy parameters."""
    return casadm_bin + _get_param_cmd(
        name="promotion", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_promotion_nhit_cmd(
    cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return a casadm command reading promotion NHIT policy parameters."""
    return casadm_bin + _get_param_cmd(
        name="promotion-nhit", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False) -> str:
    """Return a casadm command reading the active cleaning policy."""
    return casadm_bin + _get_param_cmd(
        name="cleaning", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cleaning_alru_cmd(
    cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return a casadm command reading ALRU cleaning policy parameters."""
    return casadm_bin + _get_param_cmd(
        name="cleaning-alru", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
|
||||||
|
|
||||||
|
|
||||||
|
def get_param_cleaning_acp_cmd(
    cache_id: str, output_format: str = None, shortcut: bool = False
) -> str:
    """Return a casadm command reading ACP cleaning policy parameters."""
    return casadm_bin + _get_param_cmd(
        name="cleaning-acp", cache_id=cache_id, output_format=output_format, shortcut=shortcut
    )
|
||||||
|
|
||||||
|
|
||||||
|
def set_cache_mode_cmd(
    cache_mode: str, cache_id: str, flush_cache: str = None, shortcut: bool = False
) -> str:
    """Return a casadm command switching the cache mode of a running cache.

    :param cache_mode: target mode (wt|wb|wa|pt|wo)
    :param flush_cache: optional yes/no flag controlling dirty-data flush

    Fix: restore the separating space after "-c" in the shortcut form
    (" -Q -c wb" instead of " -Q -cwb"), matching the pre-refactor layout.
    """
    command = (" -Q -c " if shortcut else " --set-cache-mode --cache-mode ") + cache_mode
    command += (" -i " if shortcut else " --cache-id ") + cache_id
    if flush_cache:
        command += (" -f " if shortcut else " --flush-cache ") + flush_cache
    return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
|
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False) -> str:
    """Return a casadm command adding a core device to a cache instance.

    When core_id is omitted, casadm picks the first free identifier.
    """
    parts = [
        " -A " if shortcut else " --add-core",
        (" -i " if shortcut else " --cache-id ") + cache_id,
        (" -d " if shortcut else " --core-device ") + core_dev,
    ]
    if core_id:
        parts.append((" -j " if shortcut else " --core-id ") + core_id)
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_core_cmd(
    cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
) -> str:
    """Return a casadm command removing an active core from a cache instance."""
    parts = [
        " -R " if shortcut else " --remove-core",
        (" -i " if shortcut else " --cache-id ") + cache_id,
        (" -j " if shortcut else " --core-id ") + core_id,
    ]
    if force:
        # Skip flushing dirty data on removal.
        parts.append(" -f" if shortcut else " --force")
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_inactive_cmd(
    cache_id: str, core_id: str, force: bool = False, shortcut: bool = False
) -> str:
    """Return a casadm command removing an inactive core from a cache instance.

    NOTE: --remove-inactive itself has no short form; `shortcut` only
    switches the id/force flags.
    """
    parts = [
        " --remove-inactive",
        (" -i " if shortcut else " --cache-id ") + cache_id,
        (" -j " if shortcut else " --core-id ") + core_id,
    ]
    if force:
        parts.append(" -f" if shortcut else " --force")
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_detached_cmd(core_device: str, shortcut: bool = False) -> str:
    """Return a casadm command removing a detached core device from the core pool.

    NOTE: --remove-detached has no short form; `shortcut` only switches the
    device flag.
    """
    device_part = (" -d " if shortcut else " --device ") + core_device
    return casadm_bin + " --remove-detached" + device_part
|
||||||
|
|
||||||
|
|
||||||
|
def list_caches_cmd(
    output_format: str = None, by_id_path: bool = True, shortcut: bool = False
) -> str:
    """Return a casadm command listing cache instances and core devices.

    by_id_path (default) makes casadm print /dev/disk/by-id device paths.
    """
    parts = [" -L" if shortcut else " --list-caches"]
    if output_format:
        parts.append((" -o " if shortcut else " --output-format ") + output_format)
    if by_id_path:
        parts.append(" -b" if shortcut else " --by-id-path")
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def print_statistics_cmd(
    cache_id: str,
    core_id: str = None,
    io_class_id: str = None,
    filter: str = None,
    output_format: str = None,
    by_id_path: bool = True,
    shortcut: bool = False,
) -> str:
    """Return a casadm command printing statistics of a cache instance.

    Optional selectors (core, IO class, stats filter, output format) are
    appended only when provided; by_id_path switches device naming to
    /dev/disk/by-id paths.
    """
    parts = [
        " -P" if shortcut else " --stats",
        (" -i " if shortcut else " --cache-id ") + cache_id,
    ]
    if core_id:
        parts.append((" -j " if shortcut else " --core-id ") + core_id)
    if io_class_id:
        parts.append((" -d " if shortcut else " --io-class-id ") + io_class_id)
    if filter:
        parts.append((" -f " if shortcut else " --filter ") + filter)
    if output_format:
        parts.append((" -o " if shortcut else " --output-format ") + output_format)
    if by_id_path:
        parts.append(" -b " if shortcut else " --by-id-path ")
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False) -> str:
    """Return a casadm command resetting statistics counters.

    Without core_id all cores of the cache instance are reset.
    """
    parts = [
        " -Z" if shortcut else " --reset-counters",
        (" -i " if shortcut else " --cache-id ") + cache_id,
    ]
    if core_id is not None:
        parts.append((" -j " if shortcut else " --core-id ") + core_id)
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def flush_cache_cmd(cache_id: str, shortcut: bool = False) -> str:
    """Return a casadm command flushing all dirty data of a cache instance."""
    id_part = (" -i " if shortcut else " --cache-id ") + cache_id
    return casadm_bin + (" -F" if shortcut else " --flush-cache") + id_part
|
||||||
|
|
||||||
|
|
||||||
|
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False) -> str:
    """Return a casadm command flushing dirty data of a single core."""
    cache_part = (" -i " if shortcut else " --cache-id ") + cache_id
    core_part = (" -j " if shortcut else " --core-id ") + core_id
    return casadm_bin + (" -F" if shortcut else " --flush-cache") + cache_part + core_part
|
||||||
|
|
||||||
|
|
||||||
|
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False) -> str:
    """Return a casadm command loading an IO class configuration file."""
    parts = [
        " -C -C" if shortcut else " --io-class --load-config",
        (" -i " if shortcut else " --cache-id ") + cache_id,
        (" -f " if shortcut else " --file ") + file,
    ]
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False) -> str:
    """Return a casadm command listing the configured IO classes."""
    parts = [
        " -C -L" if shortcut else " --io-class --list",
        (" -i " if shortcut else " --cache-id ") + cache_id,
        (" -o " if shortcut else " --output-format ") + output_format,
    ]
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def version_cmd(output_format: str = None, shortcut: bool = False) -> str:
    """Return a casadm command printing the CAS version."""
    fmt_part = ""
    if output_format:
        fmt_part = (" -o " if shortcut else " --output-format ") + output_format
    return casadm_bin + (" -V" if shortcut else " --version") + fmt_part
|
||||||
|
|
||||||
|
|
||||||
|
def help_cmd(shortcut: bool = False) -> str:
    """Return the casadm help command."""
    return casadm_bin + (" -H" if shortcut else " --help")
|
||||||
|
|
||||||
|
|
||||||
|
def standby_init_cmd(
|
||||||
|
cache_dev: str,
|
||||||
|
cache_id: str,
|
||||||
|
cache_line_size: str,
|
||||||
|
force: bool = False,
|
||||||
|
shortcut: bool = False,
|
||||||
|
) -> str:
|
||||||
command = " --standby --init"
|
command = " --standby --init"
|
||||||
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
command += (" -d " if shortcut else " --cache-device ") + cache_dev
|
||||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
||||||
@ -119,229 +380,95 @@ def standby_init_cmd(cache_dev: str, cache_id: str, cache_line_size: str,
|
|||||||
return casadm_bin + command
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
def standby_load_cmd(cache_dev: str, shortcut: bool = False) -> str:
    """Return a casadm command loading a standby cache from a device."""
    device_part = (" -d " if shortcut else " --cache-device ") + cache_dev
    return casadm_bin + " --standby --load" + device_part
|
||||||
|
|
||||||
|
|
||||||
def standby_detach_cmd(cache_id: str, shortcut: bool = False) -> str:
    """Return a casadm command detaching the device of a standby cache."""
    id_part = (" -i " if shortcut else " --cache-id ") + cache_id
    return casadm_bin + " --standby --detach" + id_part
|
||||||
|
|
||||||
|
|
||||||
def standby_activate_cmd(cache_dev: str, cache_id: str, shortcut: bool = False) -> str:
    """Return a casadm command activating a standby cache on the given device."""
    parts = [
        " --standby --activate",
        (" -d " if shortcut else " --cache-device ") + cache_dev,
        (" -i " if shortcut else " --cache-id ") + cache_id,
    ]
    return casadm_bin + "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
|
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False) -> str:
|
||||||
io_class_id: str = None, filter: str = None,
|
|
||||||
output_format: str = None, by_id_path: bool = True,
|
|
||||||
shortcut: bool = False):
|
|
||||||
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
|
|
||||||
if core_id is not None:
|
|
||||||
command += (" -j " if shortcut else " --core-id ") + core_id
|
|
||||||
if per_io_class:
|
|
||||||
command += " -d" if shortcut else " --io-class-id"
|
|
||||||
if io_class_id is not None:
|
|
||||||
command += " " + io_class_id
|
|
||||||
elif io_class_id is not None:
|
|
||||||
raise Exception("Per io class flag not set but ID given.")
|
|
||||||
if filter is not None:
|
|
||||||
command += (" -f " if shortcut else " --filter ") + filter
|
|
||||||
if output_format is not None:
|
|
||||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
|
||||||
if by_id_path:
|
|
||||||
command += (" -b " if shortcut else " --by-id-path ")
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def zero_metadata_cmd(cache_dev: str, force: bool = False, shortcut: bool = False):
|
|
||||||
command = " --zero-metadata"
|
command = " --zero-metadata"
|
||||||
command += (" -d " if shortcut else " --device ") + cache_dev
|
command += (" -d " if shortcut else " --device ") + cache_dev
|
||||||
if force:
|
if force:
|
||||||
command += (" -f" if shortcut else " --force")
|
command += " -f" if shortcut else " --force"
|
||||||
return casadm_bin + command
|
return casadm_bin + command
|
||||||
|
|
||||||
|
|
||||||
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False):
|
# casctl command
|
||||||
command = " -T " if shortcut else " --stop-cache"
|
|
||||||
command += (" -i " if shortcut else " --cache-id ") + cache_id
|
|
||||||
if no_data_flush:
|
|
||||||
command += " --no-data-flush"
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False):
|
def ctl_help(shortcut: bool = False) -> str:
|
||||||
command = " -L" if shortcut else " --list-caches"
|
command = " --help" if shortcut else " -h"
|
||||||
if output_format == "table" or output_format == "csv":
|
return casctl + command
|
||||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
|
||||||
if by_id_path:
|
|
||||||
command += (" -b " if shortcut else " --by-id-path ")
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def load_cmd(cache_dev: str, shortcut: bool = False):
|
def ctl_start() -> str:
|
||||||
return start_cmd(cache_dev, load=True, shortcut=shortcut)
|
command = " start"
|
||||||
|
return casctl + command
|
||||||
|
|
||||||
|
|
||||||
def version_cmd(output_format: str = None, shortcut: bool = False):
|
def ctl_stop(flush: bool = False) -> str:
|
||||||
command = " -V" if shortcut else " --version"
|
command = " stop"
|
||||||
if output_format == "table" or output_format == "csv":
|
|
||||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def set_cache_mode_cmd(cache_mode: str, cache_id: str,
|
|
||||||
flush_cache: str = None, shortcut: bool = False):
|
|
||||||
command = f" -Q -c {cache_mode} -i {cache_id}" if shortcut else \
|
|
||||||
f" --set-cache-mode --cache-mode {cache_mode} --cache-id {cache_id}"
|
|
||||||
if flush_cache:
|
|
||||||
command += (" -f " if shortcut else " --flush-cache ") + flush_cache
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False):
|
|
||||||
command = f" -C -C -i {cache_id} -f {file}" if shortcut else \
|
|
||||||
f" --io-class --load-config --cache-id {cache_id} --file {file}"
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False):
|
|
||||||
command = f" -C -L -i {cache_id} -o {output_format}" if shortcut else \
|
|
||||||
f" --io-class --list --cache-id {cache_id} --output-format {output_format}"
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
|
|
||||||
additional_params: str = None, shortcut: bool = False):
|
|
||||||
command = f" -G -n {namespace} -i {cache_id}" if shortcut else\
|
|
||||||
f" --get-param --name {namespace} --cache-id {cache_id}"
|
|
||||||
if additional_params is not None:
|
|
||||||
command += additional_params
|
|
||||||
if output_format is not None:
|
|
||||||
command += (" -o " if shortcut else " --output-format ") + output_format
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cutoff_cmd(cache_id: str, core_id: str,
|
|
||||||
output_format: str = None, shortcut: bool = False):
|
|
||||||
add_param = (" -j " if shortcut else " --core-id ") + core_id
|
|
||||||
return _get_param_cmd(namespace="seq-cutoff", cache_id=cache_id, output_format=output_format,
|
|
||||||
additional_params=add_param, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
|
|
||||||
return _get_param_cmd(namespace="cleaning", cache_id=cache_id,
|
|
||||||
output_format=output_format, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
|
|
||||||
return _get_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
|
|
||||||
output_format=output_format, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
|
|
||||||
return _get_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
|
|
||||||
output_format=output_format, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
|
|
||||||
shortcut: bool = False):
|
|
||||||
command = f" -X -n {namespace} -i {cache_id}" if shortcut else\
|
|
||||||
f" --set-param --name {namespace} --cache-id {cache_id}"
|
|
||||||
command += additional_params
|
|
||||||
return casadm_bin + command
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None,
|
|
||||||
policy: str = None, promotion_count: str = None, shortcut: bool = False):
|
|
||||||
add_params = ""
|
|
||||||
if core_id is not None:
|
|
||||||
add_params += (" -j " if shortcut else " --core-id ") + str(core_id)
|
|
||||||
if threshold is not None:
|
|
||||||
add_params += (" -t " if shortcut else " --threshold ") + str(threshold)
|
|
||||||
if policy is not None:
|
|
||||||
add_params += (" -p " if shortcut else " --policy ") + policy
|
|
||||||
if promotion_count is not None:
|
|
||||||
add_params += " --promotion-count " + str(promotion_count)
|
|
||||||
return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
|
|
||||||
additional_params=add_params, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_promotion_cmd(cache_id: str, policy: str, shortcut: bool = False):
|
|
||||||
add_params = (" -p " if shortcut else " --policy ") + policy
|
|
||||||
return _set_param_cmd(namespace="promotion", cache_id=cache_id,
|
|
||||||
additional_params=add_params, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_promotion_nhit_cmd(
|
|
||||||
cache_id: str, threshold=None, trigger=None, shortcut: bool = False
|
|
||||||
):
|
|
||||||
add_params = ""
|
|
||||||
if threshold is not None:
|
|
||||||
add_params += (" -t " if shortcut else " --threshold ") + str(threshold)
|
|
||||||
if trigger is not None:
|
|
||||||
add_params += (" -o " if shortcut else " --trigger ") + str(trigger)
|
|
||||||
return _set_param_cmd(namespace="promotion-nhit", cache_id=cache_id,
|
|
||||||
additional_params=add_params, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False):
|
|
||||||
add_params = (" -p " if shortcut else " --policy ") + policy
|
|
||||||
return _set_param_cmd(namespace="cleaning", cache_id=cache_id,
|
|
||||||
additional_params=add_params, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cleaning_alru_cmd(cache_id, wake_up=None, staleness_time=None,
|
|
||||||
flush_max_buffers=None, activity_threshold=None,
|
|
||||||
shortcut: bool = False):
|
|
||||||
add_param = ""
|
|
||||||
if wake_up is not None:
|
|
||||||
add_param += (" -w " if shortcut else " --wake-up ") + str(wake_up)
|
|
||||||
if staleness_time is not None:
|
|
||||||
add_param += (" -s " if shortcut else " --staleness-time ") + str(staleness_time)
|
|
||||||
if flush_max_buffers is not None:
|
|
||||||
add_param += (" -b " if shortcut else " --flush-max-buffers ") + str(flush_max_buffers)
|
|
||||||
if activity_threshold is not None:
|
|
||||||
add_param += (" -t " if shortcut else " --activity-threshold ") + str(activity_threshold)
|
|
||||||
|
|
||||||
return _set_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
|
|
||||||
additional_params=add_param, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def set_param_cleaning_acp_cmd(cache_id: str, wake_up: str = None,
|
|
||||||
flush_max_buffers: str = None, shortcut: bool = False):
|
|
||||||
add_param = ""
|
|
||||||
if wake_up is not None:
|
|
||||||
add_param += (" -w " if shortcut else " --wake-up ") + wake_up
|
|
||||||
if flush_max_buffers is not None:
|
|
||||||
add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
|
|
||||||
return _set_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
|
|
||||||
additional_params=add_param, shortcut=shortcut)
|
|
||||||
|
|
||||||
|
|
||||||
def ctl_help(shortcut: bool = False):
|
|
||||||
return casctl + " --help" if shortcut else " -h"
|
|
||||||
|
|
||||||
|
|
||||||
def ctl_start():
|
|
||||||
return casctl + " start"
|
|
||||||
|
|
||||||
|
|
||||||
def ctl_stop(flush: bool = False):
|
|
||||||
command = casctl + " stop"
|
|
||||||
if flush:
|
if flush:
|
||||||
command += " --flush"
|
command += " --flush"
|
||||||
return command
|
return casctl + command
|
||||||
|
|
||||||
|
|
||||||
def ctl_init(force: bool = False):
|
def ctl_init(force: bool = False) -> str:
|
||||||
command = casctl + " init"
|
command = " init"
|
||||||
if force:
|
if force:
|
||||||
command += " --force"
|
command += " --force"
|
||||||
return command
|
return casctl + command
|
||||||
|
|
||||||
|
|
||||||
|
# casadm script
|
||||||
|
|
||||||
|
|
||||||
|
def script_try_add_cmd(cache_id: str, core_dev: str, core_id: str) -> str:
    """Return a casadm script-mode command try-adding a core to a cache.

    Script mode accepts long options only, so there is no shortcut variant.
    """
    return (
        casadm_bin
        + " --script --add-core --try-add"
        + " --cache-id " + cache_id
        + " --core-device " + core_dev
        + " --core-id " + core_id
    )
|
||||||
|
|
||||||
|
|
||||||
|
def script_purge_cache_cmd(cache_id: str) -> str:
    """Return a casadm script-mode command purging a whole cache instance."""
    return casadm_bin + " --script --purge-cache --cache-id " + cache_id
|
||||||
|
|
||||||
|
|
||||||
|
def script_purge_core_cmd(cache_id: str, core_id: str) -> str:
    """Return a casadm script-mode command purging a single core."""
    return (
        casadm_bin
        + " --script --purge-core"
        + " --cache-id " + cache_id
        + " --core-id " + core_id
    )
|
||||||
|
|
||||||
|
|
||||||
|
def script_detach_core_cmd(cache_id: str, core_id: str) -> str:
    """Return a casadm script-mode command detaching a core from a cache."""
    return (
        casadm_bin
        + " --script --remove-core --detach"
        + " --cache-id " + cache_id
        + " --core-id " + core_id
    )
|
||||||
|
|
||||||
|
|
||||||
|
def script_remove_core_cmd(cache_id: str, core_id: str, no_flush: bool = False) -> str:
    """Return a casadm script-mode command removing a core from a cache.

    no_flush skips flushing dirty data before removal.
    """
    command = " --script --remove-core --cache-id " + cache_id + " --core-id " + core_id
    if no_flush:
        command += " --no-flush"
    return casadm_bin + command
|
||||||
|
@ -9,6 +9,8 @@ casadm_help = [
|
|||||||
r"Usage: casadm \<command\> \[option\.\.\.\]",
|
r"Usage: casadm \<command\> \[option\.\.\.\]",
|
||||||
r"Available commands:",
|
r"Available commands:",
|
||||||
r"-S --start-cache Start new cache instance or load using metadata",
|
r"-S --start-cache Start new cache instance or load using metadata",
|
||||||
|
r"--attach-cache Attach cache device",
|
||||||
|
r"--detach-cache Detach cache device",
|
||||||
r"-T --stop-cache Stop cache instance",
|
r"-T --stop-cache Stop cache instance",
|
||||||
r"-X --set-param Set various runtime parameters",
|
r"-X --set-param Set various runtime parameters",
|
||||||
r"-G --get-param Get various runtime parameters",
|
r"-G --get-param Get various runtime parameters",
|
||||||
@ -29,112 +31,101 @@ casadm_help = [
|
|||||||
r"e\.g\.",
|
r"e\.g\.",
|
||||||
r"casadm --start-cache --help",
|
r"casadm --start-cache --help",
|
||||||
r"For more information, please refer to manual, Admin Guide \(man casadm\)",
|
r"For more information, please refer to manual, Admin Guide \(man casadm\)",
|
||||||
r"or go to support page \<https://open-cas\.github\.io\>\."
|
r"or go to support page \<https://open-cas\.github\.io\>\.",
|
||||||
]
|
]
|
||||||
|
|
||||||
help_help = [
|
start_cache_help = [
|
||||||
r"Usage: casadm --help",
|
r"Usage: casadm --start-cache --cache-device \<DEVICE\> \[option\.\.\.\]",
|
||||||
r"Print help"
|
r"Start new cache instance or load using metadata",
|
||||||
|
r"Options that are valid with --start-cache \(-S\) are:",
|
||||||
|
r"-d --cache-device \<DEVICE\> Caching device to be used",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\> "
|
||||||
|
r"\(if not provided, the first available number will be used\)",
|
||||||
|
r"-l --load Load cache metadata from caching device "
|
||||||
|
r"\(DANGEROUS - see manual or Admin Guide for details\)",
|
||||||
|
r"-f --force Force the creation of cache instance",
|
||||||
|
r"-c --cache-mode \<NAME\> Set cache mode from available: \{wt|wb|wa|pt|wo\} "
|
||||||
|
r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; "
|
||||||
|
r"without this parameter Write-Through will be set by default",
|
||||||
|
r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: "
|
||||||
|
r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)",
|
||||||
]
|
]
|
||||||
|
|
||||||
version_help = [
|
attach_cache_help = [
|
||||||
r"Usage: casadm --version \[option\.\.\.\]",
|
r"Usage: casadm --attach-cache --cache-device \<DEVICE\> --cache-id \<ID\> \[option\.\.\.\]",
|
||||||
r"Print CAS version",
|
r"Attach cache device",
|
||||||
r"Options that are valid with --version \(-V\) are:"
|
r"Options that are valid with --attach-cache are:",
|
||||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
r"-d --cache-device \<DEVICE\> Caching device to be used",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\> "
|
||||||
|
r"\(if not provided, the first available number will be used\)",
|
||||||
|
r"-f --force Force attaching the cache device",
|
||||||
]
|
]
|
||||||
|
detach_cache_help = [
|
||||||
ioclass_help = [
|
r"Usage: casadm --detach-cache --cache-id \<ID\>",
|
||||||
r"Usage: casadm --io-class \{--load-config|--list\}",
|
r"Detach cache device",
|
||||||
r"Manage IO classes",
|
r"Options that are valid with --detach-cache are:",
|
||||||
r"Loads configuration for IO classes:",
|
|
||||||
r"Usage: casadm --io-class --load-config --cache-id \<ID\> --file \<FILE\>",
|
|
||||||
r"Options that are valid with --load-config \(-C\) are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-f --file \<FILE\> Configuration file containing IO class definition",
|
]
|
||||||
r"Lists currently configured IO classes:",
|
|
||||||
r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]",
|
|
||||||
r"Options that are valid with --list \(-L\) are:",
|
stop_cache_help = [
|
||||||
|
r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]",
|
||||||
|
r"Stop cache instance",
|
||||||
|
r"Options that are valid with --stop-cache \(-T\) are:",
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)",
|
||||||
]
|
]
|
||||||
|
|
||||||
flush_cache_help = [
|
set_params_help = [
|
||||||
r"Usage: casadm --flush-cache --cache-id \<ID\>",
|
r"Usage: casadm --set-param --name \<NAME\>",
|
||||||
r"Flush all dirty data from the caching device to core devices",
|
r"Set various runtime parameters",
|
||||||
r"Options that are valid with --flush-cache \(-F\) are:",
|
r"Valid values of NAME are:",
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"seq-cutoff - Sequential cutoff parameters",
|
||||||
r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache instance"
|
r"cleaning - Cleaning policy parameters",
|
||||||
]
|
r"promotion - Promotion policy parameters",
|
||||||
|
r"promotion-nhit - Promotion policy NHIT parameters",
|
||||||
reset_counters_help = [
|
r"cleaning-alru - Cleaning policy ALRU parameters",
|
||||||
r"Usage: casadm --reset-counters --cache-id \<ID\> \[option\.\.\.\]",
|
r"cleaning-acp - Cleaning policy ACP parameters",
|
||||||
r"Reset cache statistics for core device within cache instance",
|
r"Options that are valid with --set-param \(-X\) --name \(-n\) seq-cutoff are:",
|
||||||
r"Options that are valid with --reset-counters \(-Z\) are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
|
||||||
r"instance. If not specified, statistics are reset for all cores in cache instance\."
|
|
||||||
]
|
|
||||||
|
|
||||||
stats_help = [
|
|
||||||
r"Usage: casadm --stats --cache-id \<ID\> \[option\.\.\.\]",
|
|
||||||
r"Print statistics for cache instance",
|
|
||||||
r"Options that are valid with --stats \(-P\) are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones "
|
|
||||||
r"pertaining to a specific core. If this option is not given, casadm will display statistics "
|
|
||||||
r"pertaining to all cores assigned to given cache instance\.",
|
|
||||||
r"-d --io-class-id \[\<ID\>\] Display per IO class statistics",
|
|
||||||
r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: "
|
|
||||||
r"\{all, conf, usage, req, blk, err\}",
|
|
||||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
|
||||||
]
|
|
||||||
|
|
||||||
list_help = [
|
|
||||||
r"Usage: casadm --list-caches \[option\.\.\.\]",
|
|
||||||
r"List all cache instances and core devices",
|
|
||||||
r"Options that are valid with --list-caches \(-L\) are:",
|
|
||||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
|
||||||
]
|
|
||||||
|
|
||||||
remove_detached_help = [
|
|
||||||
r"Usage: casadm --remove-detached --device \<DEVICE\>",
|
|
||||||
r"Remove core device from core pool",
|
|
||||||
r"Options that are valid with --remove-detached are:",
|
|
||||||
r"-d --device \<DEVICE\> Path to core device"
|
|
||||||
]
|
|
||||||
|
|
||||||
remove_core_help = [
|
|
||||||
r"Usage: casadm --remove-core --cache-id \<ID\> --core-id \<ID\> \[option\.\.\.\]",
|
|
||||||
r"Remove active core device from cache instance",
|
|
||||||
r"Options that are valid with --remove-core \(-R\) are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||||
r"instance",
|
r"instance",
|
||||||
r"-f --force Force active core removal without data flush"
|
r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]",
|
||||||
]
|
r"-p --policy \<POLICY\> Sequential cutoff policy\. Available policies: "
|
||||||
|
r"\{always|full|never\}",
|
||||||
add_core_help = [
|
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:",
|
||||||
r"Usage: casadm --add-core --cache-id \<ID\> --core-device \<DEVICE\> \[option\.\.\.\]",
|
|
||||||
r"Add core device to cache instance",
|
|
||||||
r"Options that are valid with --add-core \(-A\) are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
r"-p --policy \<POLICY\> Cleaning policy type\. Available policy types: "
|
||||||
r"instance",
|
r"\{nop|alru|acp\}",
|
||||||
r"-d --core-device \<DEVICE\> Path to core device"
|
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:",
|
||||||
|
|
||||||
]
|
|
||||||
|
|
||||||
set_cache_mode_help = [
|
|
||||||
r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]",
|
|
||||||
r"Set cache mode",
|
|
||||||
r"Options that are valid with --set-cache-mode \(-Q\) are:",
|
|
||||||
r"-c --cache-mode \<NAME\> Cache mode. Available cache modes: \{wt|wb|wa|pt|wo\}",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching "
|
r"-p --policy \<POLICY\> Promotion policy type\. Available policy types: "
|
||||||
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode"
|
r"\{always|nhit\}",
|
||||||
|
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-t --threshold \<NUMBER\> Number of requests for given core line after which "
|
||||||
|
r"NHIT policy allows insertion into cache \<2-1000\> \(default: 3\)",
|
||||||
|
r"-o --trigger \<NUMBER\> Cache occupancy value over which NHIT promotion "
|
||||||
|
r"is active \<0-100\>\[\%\] \(default: 80\%\)",
|
||||||
|
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-alru are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-w --wake-up \<NUMBER\> Cleaning thread sleep time after an idle wake up "
|
||||||
|
r"\<0-3600\>\[s\] \(default: 20 s\)",
|
||||||
|
r"-s --staleness-time \<NUMBER\> Time that has to pass from the last write operation "
|
||||||
|
r"before a dirty cache block can be scheduled to be flushed \<1-3600\>\[s\] \(default: 120 s\)",
|
||||||
|
r"-b --flush-max-buffers \<NUMBER\> Number of dirty cache blocks to be flushed in one "
|
||||||
|
r"cleaning cycle \<1-10000\> \(default: 100\)",
|
||||||
|
r"-t --activity-threshold \<NUMBER\> Cache idle time before flushing thread can start "
|
||||||
|
r"\<0-1000000\>\[ms\] \(default: 10000 ms\)",
|
||||||
|
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-acp are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations "
|
||||||
|
r"\<0-10000\>\[ms\] \(default: 10 ms\)",
|
||||||
|
r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning "
|
||||||
|
r"thread iteration \<1-10000\> \(default: 128\)",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
get_params_help = [
|
get_params_help = [
|
||||||
r"Usage: casadm --get-param --name \<NAME\>",
|
r"Usage: casadm --get-param --name \<NAME\>",
|
||||||
r"Get various runtime parameters",
|
r"Get various runtime parameters",
|
||||||
@ -164,99 +155,142 @@ get_params_help = [
|
|||||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||||
r"Options that are valid with --get-param \(-G\) --name \(-n\) promotion-nhit are:",
|
r"Options that are valid with --get-param \(-G\) --name \(-n\) promotion-nhit are:",
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}"
|
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||||
]
|
]
|
||||||
|
|
||||||
set_params_help = [
|
|
||||||
r"Usage: casadm --set-param --name \<NAME\>",
|
set_cache_mode_help = [
|
||||||
r"Set various runtime parameters",
|
r"Usage: casadm --set-cache-mode --cache-mode \<NAME\> --cache-id \<ID\> \[option\.\.\.\]",
|
||||||
r"Valid values of NAME are:",
|
r"Set cache mode",
|
||||||
r"seq-cutoff - Sequential cutoff parameters",
|
r"Options that are valid with --set-cache-mode \(-Q\) are:",
|
||||||
r"cleaning - Cleaning policy parameters",
|
r"-c --cache-mode \<NAME\> Cache mode\. Available cache modes: \{wt|wb|wa|pt|wo\}",
|
||||||
r"promotion - Promotion policy parameters",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"promotion-nhit - Promotion policy NHIT parameters",
|
r"-f --flush-cache \<yes|no\> Flush all dirty data from cache before switching "
|
||||||
r"cleaning-alru - Cleaning policy ALRU parameters",
|
r"to new mode\. Option is required when switching from Write-Back or Write-Only mode",
|
||||||
r"cleaning-acp - Cleaning policy ACP parameters",
|
]
|
||||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) seq-cutoff are:",
|
|
||||||
|
|
||||||
|
add_core_help = [
|
||||||
|
r"Usage: casadm --add-core --cache-id \<ID\> --core-device \<DEVICE\> \[option\.\.\.\]",
|
||||||
|
r"Add core device to cache instance",
|
||||||
|
r"Options that are valid with --add-core \(-A\) are:",
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||||
r"instance",
|
r"instance",
|
||||||
r"-t --threshold \<KiB\> Sequential cutoff activation threshold \[KiB\]",
|
r"-d --core-device \<DEVICE\> Path to core device",
|
||||||
r"-p --policy \<POLICY\> Sequential cutoff policy. Available policies: "
|
|
||||||
r"\{always|full|never\}",
|
|
||||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r"-p --policy \<POLICY\> Cleaning policy type. Available policy types: "
|
|
||||||
r"\{nop|alru|acp\}",
|
|
||||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r"-p --policy \<POLICY\> Promotion policy type. Available policy types: "
|
|
||||||
r"\{always|nhit\}",
|
|
||||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) promotion-nhit are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r"-t --threshold \<NUMBER\> Number of requests for given core line after which "
|
|
||||||
r"NHIT policy allows insertion into cache \<2-1000\> \(default: 3\)",
|
|
||||||
r"-o --trigger \<NUMBER\> Cache occupancy value over which NHIT promotion "
|
|
||||||
r"is active \<0-100\>\[\%\] \(default: 80\%\)",
|
|
||||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-alru are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r"-w --wake-up \<NUMBER\> Cleaning thread sleep time after an idle wake up "
|
|
||||||
r"\<0-3600\>\[s\] \(default: 20 s\)",
|
|
||||||
r"-s --staleness-time \<NUMBER\> Time that has to pass from the last write operation "
|
|
||||||
r"before a dirty cache block can be scheduled to be flushed \<1-3600\>\[s\] \(default: 120 s\)",
|
|
||||||
r"-b --flush-max-buffers \<NUMBER\> Number of dirty cache blocks to be flushed in one "
|
|
||||||
r"cleaning cycle \<1-10000\> \(default: 100\)",
|
|
||||||
r"-t --activity-threshold \<NUMBER\> Cache idle time before flushing thread can start "
|
|
||||||
r"\<0-1000000\>\[ms\] \(default: 10000 ms\)",
|
|
||||||
r"Options that are valid with --set-param \(-X\) --name \(-n\) cleaning-acp are:",
|
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
|
||||||
r" -w --wake-up \<NUMBER\> Time between ACP cleaning thread iterations "
|
|
||||||
r"\<0-10000\>\[ms\] \(default: 10 ms\)",
|
|
||||||
r"-b --flush-max-buffers \<NUMBER\> Number of cache lines flushed in single ACP cleaning "
|
|
||||||
r"thread iteration \<1-10000\> \(default: 128\)"
|
|
||||||
]
|
]
|
||||||
|
|
||||||
stop_cache_help = [
|
remove_core_help = [
|
||||||
r"Usage: casadm --stop-cache --cache-id \<ID\> \[option\.\.\.\]",
|
r"Usage: casadm --remove-core --cache-id \<ID\> --core-id \<ID\> \[option\.\.\.\]",
|
||||||
r"Stop cache instance",
|
r"Remove active core device from cache instance",
|
||||||
r"Options that are valid with --stop-cache \(-T\) are:",
|
r"Options that are valid with --remove-core \(-R\) are:",
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"-n --no-data-flush Do not flush dirty data \(may be dangerous\)"
|
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||||
|
r"instance",
|
||||||
|
r"-f --force Force active core removal without data flush",
|
||||||
]
|
]
|
||||||
|
|
||||||
start_cache_help = [
|
|
||||||
r"Usage: casadm --start-cache --cache-device \<DEVICE\> \[option\.\.\.\]",
|
remove_inactive_help = [
|
||||||
r"Start new cache instance or load using metadata",
|
r"casadm --remove-inactive --cache-id \<ID\> --core-id \<ID\> \[option\.\.\.\]",
|
||||||
r"Options that are valid with --start-cache \(-S\) are:",
|
r"Remove inactive core device from cache instance",
|
||||||
r"-d --cache-device \<DEVICE\> Caching device to be used",
|
r"Options that are valid with --remove-inactive are:",
|
||||||
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\> "
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
r"\(if not provided, the first available number will be used\)",
|
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given "
|
||||||
r"-l --load Load cache metadata from caching device "
|
r"cache instance",
|
||||||
r"\(DANGEROUS - see manual or Admin Guide for details\)",
|
r"-f --force Force dirty inactive core removal",
|
||||||
r"-f --force Force the creation of cache instance",
|
|
||||||
r"-c --cache-mode \<NAME\> Set cache mode from available: \{wt|wb|wa|pt|wo\} "
|
|
||||||
r"Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only; "
|
|
||||||
r"without this parameter Write-Through will be set by default",
|
|
||||||
r"-x --cache-line-size \<NUMBER\> Set cache line size in kibibytes: "
|
|
||||||
r"\{4,8,16,32,64\}\[KiB\] \(default: 4\)"
|
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
remove_detached_help = [
|
||||||
|
r"Usage: casadm --remove-detached --device \<DEVICE\>",
|
||||||
|
r"Remove core device from core pool",
|
||||||
|
r"Options that are valid with --remove-detached are:",
|
||||||
|
r"-d --device \<DEVICE\> Path to core device",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
list_caches_help = [
|
||||||
|
r"Usage: casadm --list-caches \[option\.\.\.\]",
|
||||||
|
r"List all cache instances and core devices",
|
||||||
|
r"Options that are valid with --list-caches \(-L\) are:",
|
||||||
|
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||||
|
]
|
||||||
|
|
||||||
|
stats_help = [
|
||||||
|
r"Usage: casadm --stats --cache-id \<ID\> \[option\.\.\.\]",
|
||||||
|
r"Print statistics for cache instance",
|
||||||
|
r"Options that are valid with --stats \(-P\) are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-j --core-id \<ID\> Limit display of core-specific statistics to only ones "
|
||||||
|
r"pertaining to a specific core\. If this option is not given, casadm will display statistics "
|
||||||
|
r"pertaining to all cores assigned to given cache instance\.",
|
||||||
|
r"-d --io-class-id \[\<ID\>\] Display per IO class statistics",
|
||||||
|
r"-f --filter \<FILTER-SPEC\> Apply filters from the following set: "
|
||||||
|
r"\{all, conf, usage, req, blk, err\}",
|
||||||
|
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
reset_counters_help = [
|
||||||
|
r"Usage: casadm --reset-counters --cache-id \<ID\> \[option\.\.\.\]",
|
||||||
|
r"Reset cache statistics for core device within cache instance",
|
||||||
|
r"Options that are valid with --reset-counters \(-Z\) are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-j --core-id \<ID\> Identifier of core \<0-4095\> within given cache "
|
||||||
|
r"instance\. If not specified, statistics are reset for all cores in cache instance\.",
|
||||||
|
]
|
||||||
|
|
||||||
|
flush_cache_help = [
|
||||||
|
r"Usage: casadm --flush-cache --cache-id \<ID\>",
|
||||||
|
r"Flush all dirty data from the caching device to core devices",
|
||||||
|
r"Options that are valid with --flush-cache \(-F\) are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-j --core-id \[\<ID\>\] Identifier of core <0-4095> within given cache "
|
||||||
|
r"instance",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
ioclass_help = [
|
||||||
|
r"Usage: casadm --io-class \{--load-config|--list\}",
|
||||||
|
r"Manage IO classes",
|
||||||
|
r"Loads configuration for IO classes:",
|
||||||
|
r"Usage: casadm --io-class --load-config --cache-id \<ID\> --file \<FILE\>",
|
||||||
|
r"Options that are valid with --load-config \(-C\) are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-f --file \<FILE\> Configuration file containing IO class definition",
|
||||||
|
r"Lists currently configured IO classes:",
|
||||||
|
r"Usage: casadm --io-class --list --cache-id \<ID\> \[option\.\.\.\]",
|
||||||
|
r"Options that are valid with --list \(-L\) are:",
|
||||||
|
r"-i --cache-id \<ID\> Identifier of cache instance \<1-16384\>",
|
||||||
|
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
version_help = [
|
||||||
|
r"Usage: casadm --version \[option\.\.\.\]",
|
||||||
|
r"Print CAS version",
|
||||||
|
r"Options that are valid with --version \(-V\) are:"
|
||||||
|
r"-o --output-format \<FORMAT\> Output format: \{table|csv\}",
|
||||||
|
]
|
||||||
|
|
||||||
|
help_help = [r"Usage: casadm --help", r"Print help"]
|
||||||
|
|
||||||
|
|
||||||
standby_help = [
|
standby_help = [
|
||||||
r"The command is not supported"
|
r"The command is not supported"
|
||||||
]
|
]
|
||||||
|
|
||||||
zero_metadata_help = [
|
zero_metadata_help = [
|
||||||
r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option...\]",
|
r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option\.\.\.\]]",
|
||||||
r"Clear metadata from caching device",
|
r"Clear metadata from caching device",
|
||||||
r"Options that are valid with --zero-metadata are:",
|
r"Options that are valid with --zero-metadata are:",
|
||||||
r"-d --device \<DEVICE\> Path to device on which metadata would be cleared",
|
r"-d --device \<DEVICE\> Path to device on which metadata would be cleared",
|
||||||
r"-f --force Ignore potential dirty data on cache device"
|
r"-f --force Ignore potential dirty data on cache device",
|
||||||
]
|
]
|
||||||
|
|
||||||
unrecognized_stderr = [
|
unrecognized_stderr = [
|
||||||
r"Unrecognized command -\S+",
|
r"Unrecognized command -\S+",
|
||||||
]
|
]
|
||||||
|
|
||||||
unrecognized_stdout = [
|
unrecognized_stdout = [r"Try \`casadm --help | -H\' for more information\."]
|
||||||
r"Try \`casadm --help | -H\' for more information\."
|
|
||||||
]
|
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -18,32 +19,30 @@ start_cache_with_existing_metadata = [
|
|||||||
r"Error inserting cache \d+",
|
r"Error inserting cache \d+",
|
||||||
r"Old metadata found on device\.",
|
r"Old metadata found on device\.",
|
||||||
r"Please load cache metadata using --load option or use --force to",
|
r"Please load cache metadata using --load option or use --force to",
|
||||||
r" discard on-disk metadata and start fresh cache instance\."
|
r" discard on-disk metadata and start fresh cache instance\.",
|
||||||
]
|
]
|
||||||
|
|
||||||
start_cache_on_already_used_dev = [
|
start_cache_on_already_used_dev = [
|
||||||
r"Error inserting cache \d+",
|
r"Error inserting cache \d+",
|
||||||
r"Cache device \'\/dev\/\S+\' is already used as cache\."
|
r"Cache device \'\/dev\/\S+\' is already used as cache\.",
|
||||||
]
|
]
|
||||||
|
|
||||||
start_cache_with_existing_id = [
|
start_cache_with_existing_id = [
|
||||||
r"Error inserting cache \d+",
|
r"Error inserting cache \d+",
|
||||||
r"Cache ID already exists"
|
r"Cache ID already exists",
|
||||||
]
|
]
|
||||||
|
|
||||||
standby_init_with_existing_filesystem = [
|
standby_init_with_existing_filesystem = [
|
||||||
r"A filesystem exists on \S+. Specify the --force option if you wish to add the cache anyway.",
|
r"A filesystem exists on \S+. Specify the --force option if you wish to add the cache anyway.",
|
||||||
r"Note: this may result in loss of data"
|
r"Note: this may result in loss of data",
|
||||||
]
|
]
|
||||||
|
|
||||||
error_inserting_cache = [
|
error_inserting_cache = [r"Error inserting cache \d+"]
|
||||||
r"Error inserting cache \d+"
|
|
||||||
]
|
|
||||||
|
|
||||||
reinitialize_with_force_or_recovery = [
|
reinitialize_with_force_or_recovery = [
|
||||||
r"Old metadata found on device\.",
|
r"Old metadata found on device\.",
|
||||||
r"Please load cache metadata using --load option or use --force to",
|
r"Please load cache metadata using --load option or use --force to",
|
||||||
r" discard on-disk metadata and start fresh cache instance\."
|
r" discard on-disk metadata and start fresh cache instance\.",
|
||||||
]
|
]
|
||||||
|
|
||||||
remove_inactive_core_with_remove_command = [
|
remove_inactive_core_with_remove_command = [
|
||||||
@ -52,40 +51,36 @@ remove_inactive_core_with_remove_command = [
|
|||||||
|
|
||||||
remove_inactive_dirty_core = [
|
remove_inactive_dirty_core = [
|
||||||
r"The cache contains dirty data assigned to the core\. If you want to ",
|
r"The cache contains dirty data assigned to the core\. If you want to ",
|
||||||
r"continue, please use --force option\.\nWarning: the data will be lost"
|
r"continue, please use --force option\.\nWarning: the data will be lost",
|
||||||
]
|
]
|
||||||
|
|
||||||
stop_cache_incomplete = [
|
stop_cache_incomplete = [
|
||||||
r"Error while removing cache \d+",
|
r"Error while removing cache \d+",
|
||||||
r"Cache is in incomplete state - at least one core is inactive"
|
r"Cache is in incomplete state - at least one core is inactive",
|
||||||
]
|
]
|
||||||
|
|
||||||
stop_cache_errors = [
|
stop_cache_errors = [
|
||||||
r"Removed cache \d+ with errors",
|
r"Removed cache \d+ with errors",
|
||||||
r"Error while writing to cache device"
|
r"Error while writing to cache device",
|
||||||
]
|
]
|
||||||
|
|
||||||
get_stats_ioclass_id_not_configured = [
|
get_stats_ioclass_id_not_configured = [r"IO class \d+ is not configured\."]
|
||||||
r"IO class \d+ is not configured\."
|
|
||||||
]
|
|
||||||
|
|
||||||
get_stats_ioclass_id_out_of_range = [
|
get_stats_ioclass_id_out_of_range = [r"Invalid IO class id, must be in the range 0-32\."]
|
||||||
r"Invalid IO class id, must be in the range 0-32\."
|
|
||||||
]
|
|
||||||
|
|
||||||
remove_multilevel_core = [
|
remove_multilevel_core = [
|
||||||
r"Error while removing core device \d+ from cache instance \d+",
|
r"Error while removing core device \d+ from cache instance \d+",
|
||||||
r"Device opens or mount are pending to this cache"
|
r"Device opens or mount are pending to this cache",
|
||||||
]
|
]
|
||||||
|
|
||||||
add_cached_core = [
|
add_cached_core = [
|
||||||
r"Error while adding core device to cache instance \d+",
|
r"Error while adding core device to cache instance \d+",
|
||||||
r"Core device \'/dev/\S+\' is already cached\."
|
r"Core device \'/dev/\S+\' is already cached\.",
|
||||||
]
|
]
|
||||||
|
|
||||||
already_cached_core = [
|
already_cached_core = [
|
||||||
r"Error while adding core device to cache instance \d+",
|
r"Error while adding core device to cache instance \d+",
|
||||||
r"Device already added as a core"
|
r"Device already added as a core",
|
||||||
]
|
]
|
||||||
|
|
||||||
remove_mounted_core = [
|
remove_mounted_core = [
|
||||||
@ -94,37 +89,31 @@ remove_mounted_core = [
|
|||||||
|
|
||||||
stop_cache_mounted_core = [
|
stop_cache_mounted_core = [
|
||||||
r"Error while removing cache \d+",
|
r"Error while removing cache \d+",
|
||||||
r"Device opens or mount are pending to this cache"
|
r"Device opens or mount are pending to this cache",
|
||||||
]
|
]
|
||||||
|
|
||||||
load_and_force = [
|
load_and_force = [
|
||||||
r"Use of \'load\' with \'force\', \'cache-id\', \'cache-mode\' or \'cache-line-size\'",
|
r"Use of \'load\' with \'force\', \'cache-id\', \'cache-mode\' or \'cache-line-size\'",
|
||||||
r" simultaneously is forbidden."
|
r" simultaneously is forbidden.",
|
||||||
]
|
]
|
||||||
|
|
||||||
try_add_core_sector_size_mismatch = [
|
try_add_core_sector_size_mismatch = [
|
||||||
r"Error while adding core device to cache instance \d+",
|
r"Error while adding core device to cache instance \d+",
|
||||||
r"Cache device logical sector size is greater than core device logical sector size\.",
|
r"Cache device logical sector size is greater than core device logical sector size\.",
|
||||||
r"Consider changing logical sector size on current cache device",
|
r"Consider changing logical sector size on current cache device",
|
||||||
r"or try other device with the same logical sector size as core device\."
|
r"or try other device with the same logical sector size as core device\.",
|
||||||
]
|
]
|
||||||
|
|
||||||
no_caches_running = [
|
no_caches_running = [r"No caches running"]
|
||||||
r"No caches running"
|
|
||||||
]
|
|
||||||
|
|
||||||
unavailable_device = [
|
unavailable_device = [
|
||||||
r"Error while opening \'\S+\'exclusively\. This can be due to\n"
|
r"Error while opening \'\S+\'exclusively\. This can be due to\n"
|
||||||
r"cache instance running on this device\. In such case please stop the cache and try again\."
|
r"cache instance running on this device\. In such case please stop the cache and try again\."
|
||||||
]
|
]
|
||||||
|
|
||||||
error_handling = [
|
error_handling = [r"Error during options handling"]
|
||||||
r"Error during options handling"
|
|
||||||
]
|
|
||||||
|
|
||||||
no_cas_metadata = [
|
no_cas_metadata = [r"Device \'\S+\' does not contain OpenCAS's metadata\."]
|
||||||
r"Device \'\S+\' does not contain OpenCAS's metadata\."
|
|
||||||
]
|
|
||||||
|
|
||||||
cache_dirty_data = [
|
cache_dirty_data = [
|
||||||
r"Cache instance contains dirty data\. Clearing metadata will result in loss of dirty data\.\n"
|
r"Cache instance contains dirty data\. Clearing metadata will result in loss of dirty data\.\n"
|
||||||
@ -140,21 +129,16 @@ cache_dirty_shutdown = [
|
|||||||
r"Alternatively, if you wish to clear metadata anyway, please use \'--force\' option\."
|
r"Alternatively, if you wish to clear metadata anyway, please use \'--force\' option\."
|
||||||
]
|
]
|
||||||
|
|
||||||
missing_param = [
|
missing_param = [r"Option \'.+\' is missing"]
|
||||||
r"Option \'.+\' is missing"
|
|
||||||
]
|
|
||||||
|
|
||||||
disallowed_param = [
|
disallowed_param = [r"Unrecognized option \S+"]
|
||||||
r"Unrecognized option \S+"
|
|
||||||
]
|
|
||||||
|
|
||||||
operation_forbiden_in_standby = [
|
operation_forbiden_in_standby = [
|
||||||
r"The operation is not permited while the cache is in the standby mode"
|
r"The operation is not permited while the cache is in the standby mode"
|
||||||
]
|
]
|
||||||
|
|
||||||
mutually_exclusive_params_init = [
|
mutually_exclusive_params_init = [
|
||||||
r"Can\'t use \'load\' and \'init\' options simultaneously\n"
|
r"Can\'t use \'load\' and \'init\' options simultaneously\nError during options handling"
|
||||||
r"Error during options handling"
|
|
||||||
]
|
]
|
||||||
|
|
||||||
mutually_exclusive_params_load = [
|
mutually_exclusive_params_load = [
|
||||||
@ -166,30 +150,22 @@ activate_with_different_cache_id = [
|
|||||||
r"Cache id specified by user and loaded from metadata are different"
|
r"Cache id specified by user and loaded from metadata are different"
|
||||||
]
|
]
|
||||||
|
|
||||||
cache_activated_successfully = [
|
cache_activated_successfully = [r"Successfully activated cache instance \d+"]
|
||||||
r"Successfully activated cache instance \d+"
|
|
||||||
]
|
|
||||||
|
|
||||||
invalid_core_volume_size = [
|
invalid_core_volume_size = [r"Core volume size does not match the size stored in cache metadata"]
|
||||||
r"Core volume size does not match the size stored in cache metadata"
|
|
||||||
]
|
|
||||||
|
|
||||||
error_activating_cache = [
|
error_activating_cache = [r"Error activating cache \d+"]
|
||||||
r"Error activating cache \d+"
|
|
||||||
]
|
|
||||||
|
|
||||||
activate_without_detach = [
|
activate_without_detach = [
|
||||||
r"Cannot open the device exclusively. Make sure to detach cache before activation."
|
r"Cannot open the device exclusively. Make sure to detach cache before activation."
|
||||||
]
|
]
|
||||||
|
|
||||||
cache_line_size_mismatch = [
|
cache_line_size_mismatch = [r"Cache line size mismatch"]
|
||||||
r"Cache line size mismatch"
|
|
||||||
]
|
|
||||||
|
|
||||||
headerless_io_class_config = [
|
headerless_io_class_config = [
|
||||||
r'Cannot parse configuration file - unknown column "1"\.\n'
|
r'Cannot parse configuration file - unknown column "1"\.\n'
|
||||||
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n'
|
r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
|
||||||
r'Please consult Admin Guide to check how columns in configuration file should be named\.'
|
r"Please consult Admin Guide to check how columns in configuration file should be named\."
|
||||||
]
|
]
|
||||||
|
|
||||||
illegal_io_class_config_L2C1 = [
|
illegal_io_class_config_L2C1 = [
|
||||||
@ -205,9 +181,7 @@ illegal_io_class_config_L2C4 = [
|
|||||||
r"Cannot parse configuration file - error in line 2 in column 4 \(Allocation\)\."
|
r"Cannot parse configuration file - error in line 2 in column 4 \(Allocation\)\."
|
||||||
]
|
]
|
||||||
|
|
||||||
illegal_io_class_config_L2 = [
|
illegal_io_class_config_L2 = [r"Cannot parse configuration file - error in line 2\."]
|
||||||
r"Cannot parse configuration file - error in line 2\."
|
|
||||||
]
|
|
||||||
|
|
||||||
double_io_class_config = [
|
double_io_class_config = [
|
||||||
r"Double configuration for IO class id \d+\n"
|
r"Double configuration for IO class id \d+\n"
|
||||||
@ -243,14 +217,13 @@ illegal_io_class_invalid_allocation_number = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
malformed_io_class_header = [
|
malformed_io_class_header = [
|
||||||
r'Cannot parse configuration file - unknown column \"value_template\"\.\n'
|
r"Cannot parse configuration file - unknown column \"value_template\"\.\n"
|
||||||
r'Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n'
|
r"Failed to parse I/O classes configuration file header\. It is either malformed or missing\.\n"
|
||||||
r'Please consult Admin Guide to check how columns in configuration file should be named\.'
|
r"Please consult Admin Guide to check how columns in configuration file should be named\."
|
||||||
]
|
]
|
||||||
|
|
||||||
unexpected_cls_option = [
|
unexpected_cls_option = [r"Option '--cache-line-size \(-x\)' is not allowed"]
|
||||||
r"Option '--cache-line-size \(-x\)' is not allowed"
|
|
||||||
]
|
|
||||||
|
|
||||||
def check_stderr_msg(output: Output, expected_messages, negate=False):
|
def check_stderr_msg(output: Output, expected_messages, negate=False):
|
||||||
return __check_string_msg(output.stderr, expected_messages, negate)
|
return __check_string_msg(output.stderr, expected_messages, negate)
|
||||||
@ -268,7 +241,8 @@ def __check_string_msg(text: str, expected_messages, negate=False):
|
|||||||
TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.")
|
TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.")
|
||||||
msg_ok = False
|
msg_ok = False
|
||||||
elif matches and negate:
|
elif matches and negate:
|
||||||
TestRun.LOGGER.error(f"Message is incorrect, expected to not find: {msg}\n "
|
TestRun.LOGGER.error(
|
||||||
f"actual: {text}.")
|
f"Message is incorrect, expected to not find: {msg}\n " f"actual: {text}."
|
||||||
|
)
|
||||||
msg_ok = False
|
msg_ok = False
|
||||||
return msg_ok
|
return msg_ok
|
||||||
|
@ -1,16 +1,17 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
from typing import List
|
from typing import List
|
||||||
|
from enum import Enum
|
||||||
from aenum import Enum
|
|
||||||
|
|
||||||
from api.cas import casadm
|
from api.cas import casadm
|
||||||
from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy
|
from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy
|
||||||
from api.cas.casadm_params import OutputFormat, StatsFilter
|
from api.cas.casadm_params import StatsFilter
|
||||||
from api.cas.casadm_parser import get_statistics, get_seq_cut_off_parameters, get_core_info_by_path
|
from api.cas.casadm_parser import get_seq_cut_off_parameters, get_core_info_by_path
|
||||||
from api.cas.statistics import CoreStats, CoreIoClassStats
|
from api.cas.statistics import CoreStats, CoreIoClassStats
|
||||||
from core.test_run_utils import TestRun
|
from core.test_run_utils import TestRun
|
||||||
from storage_devices.device import Device
|
from storage_devices.device import Device
|
||||||
@ -20,9 +21,9 @@ from test_utils.size import Unit, Size
|
|||||||
|
|
||||||
|
|
||||||
class CoreStatus(Enum):
|
class CoreStatus(Enum):
|
||||||
empty = 0,
|
empty = 0
|
||||||
active = 1,
|
active = 1
|
||||||
inactive = 2,
|
inactive = 2
|
||||||
detached = 3
|
detached = 3
|
||||||
|
|
||||||
|
|
||||||
@ -51,27 +52,28 @@ class Core(Device):
|
|||||||
super().create_filesystem(fs_type, force, blocksize)
|
super().create_filesystem(fs_type, force, blocksize)
|
||||||
self.core_device.filesystem = self.filesystem
|
self.core_device.filesystem = self.filesystem
|
||||||
|
|
||||||
def get_io_class_statistics(self,
|
def get_io_class_statistics(
|
||||||
|
self,
|
||||||
io_class_id: int,
|
io_class_id: int,
|
||||||
stat_filter: List[StatsFilter] = None,
|
stat_filter: List[StatsFilter] = None,
|
||||||
percentage_val: bool = False):
|
percentage_val: bool = False,
|
||||||
stats = get_statistics(self.cache_id, self.core_id, io_class_id,
|
) -> CoreIoClassStats:
|
||||||
stat_filter, percentage_val)
|
return CoreIoClassStats(
|
||||||
return CoreIoClassStats(stats)
|
cache_id=self.cache_id,
|
||||||
|
filter=stat_filter,
|
||||||
|
io_class_id=io_class_id,
|
||||||
|
percentage_val=percentage_val,
|
||||||
|
)
|
||||||
|
|
||||||
def get_statistics(self,
|
def get_statistics(
|
||||||
stat_filter: List[StatsFilter] = None,
|
self, stat_filter: List[StatsFilter] = None, percentage_val: bool = False
|
||||||
percentage_val: bool = False):
|
) -> CoreStats:
|
||||||
stats = get_statistics(self.cache_id, self.core_id, None,
|
return CoreStats(
|
||||||
stat_filter, percentage_val)
|
cache_id=self.cache_id,
|
||||||
return CoreStats(stats)
|
core_id=self.core_id,
|
||||||
|
filter=stat_filter,
|
||||||
def get_statistics_flat(self,
|
percentage_val=percentage_val,
|
||||||
io_class_id: int = None,
|
)
|
||||||
stat_filter: List[StatsFilter] = None,
|
|
||||||
percentage_val: bool = False):
|
|
||||||
return get_statistics(self.cache_id, self.core_id, io_class_id,
|
|
||||||
stat_filter, percentage_val)
|
|
||||||
|
|
||||||
def get_status(self):
|
def get_status(self):
|
||||||
return CoreStatus[self.__get_core_info()["status"].lower()]
|
return CoreStatus[self.__get_core_info()["status"].lower()]
|
||||||
@ -106,31 +108,30 @@ class Core(Device):
|
|||||||
return casadm.reset_counters(self.cache_id, self.core_id)
|
return casadm.reset_counters(self.cache_id, self.core_id)
|
||||||
|
|
||||||
def flush_core(self):
|
def flush_core(self):
|
||||||
casadm.flush(self.cache_id, self.core_id)
|
casadm.flush_core(self.cache_id, self.core_id)
|
||||||
sync()
|
sync()
|
||||||
assert self.get_dirty_blocks().get_value(Unit.Blocks4096) == 0
|
|
||||||
|
|
||||||
def purge_core(self):
|
def purge_core(self):
|
||||||
casadm.purge_core(self.cache_id, self.core_id)
|
casadm.purge_core(self.cache_id, self.core_id)
|
||||||
sync()
|
sync()
|
||||||
|
|
||||||
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
|
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
|
||||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
return casadm.set_param_cutoff(
|
||||||
|
self.cache_id,
|
||||||
|
self.core_id,
|
||||||
seq_cutoff_param.threshold,
|
seq_cutoff_param.threshold,
|
||||||
seq_cutoff_param.policy,
|
seq_cutoff_param.policy,
|
||||||
seq_cutoff_param.promotion_count)
|
seq_cutoff_param.promotion_count,
|
||||||
|
)
|
||||||
|
|
||||||
def set_seq_cutoff_threshold(self, threshold: Size):
|
def set_seq_cutoff_threshold(self, threshold: Size):
|
||||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
return casadm.set_param_cutoff(self.cache_id, self.core_id, threshold=threshold)
|
||||||
threshold=threshold)
|
|
||||||
|
|
||||||
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
|
def set_seq_cutoff_policy(self, policy: SeqCutOffPolicy):
|
||||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
return casadm.set_param_cutoff(self.cache_id, self.core_id, policy=policy)
|
||||||
policy=policy)
|
|
||||||
|
|
||||||
def set_seq_cutoff_promotion_count(self, promotion_count: int):
|
def set_seq_cutoff_promotion_count(self, promotion_count: int):
|
||||||
return casadm.set_param_cutoff(self.cache_id, self.core_id,
|
return casadm.set_param_cutoff(self.cache_id, self.core_id, promotion_count=promotion_count)
|
||||||
promotion_count=promotion_count)
|
|
||||||
|
|
||||||
def check_if_is_present_in_os(self, should_be_visible=True):
|
def check_if_is_present_in_os(self, should_be_visible=True):
|
||||||
device_in_system_message = "CAS device exists in OS."
|
device_in_system_message = "CAS device exists in OS."
|
||||||
|
@ -1,39 +1,42 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
from test_utils.dmesg import get_dmesg
|
||||||
from test_utils.size import Size, Unit
|
from test_utils.size import Size, Unit
|
||||||
|
|
||||||
|
|
||||||
def get_metadata_size_on_device(dmesg):
|
def get_metadata_size_on_device(cache_id: int) -> Size:
|
||||||
for s in dmesg.split("\n"):
|
dmesg_reversed = list(reversed(get_dmesg().split("\n")))
|
||||||
m = re.search(r'Metadata size on device: ([0-9]*) kiB', s)
|
cache_name = "cache" + str(cache_id)
|
||||||
if m:
|
cache_dmesg = "\n".join(line for line in dmesg_reversed if re.search(f"{cache_name}:", line))
|
||||||
return Size(int(m.groups()[0]), Unit.KibiByte)
|
try:
|
||||||
|
return _get_metadata_info(dmesg=cache_dmesg, section_name="Metadata size on device")
|
||||||
raise ValueError("Can't find the metadata size in the provided dmesg output")
|
except ValueError:
|
||||||
|
raise ValueError("Can't find the metadata size in dmesg output")
|
||||||
|
|
||||||
|
|
||||||
def _get_metadata_info(dmesg, section_name):
|
def _get_metadata_info(dmesg, section_name) -> Size:
|
||||||
for s in dmesg.split("\n"):
|
for s in dmesg.split("\n"):
|
||||||
if section_name in s:
|
if section_name in s:
|
||||||
size, unit = re.search("[0-9]* (B|kiB)", s).group().split()
|
size, unit = re.search("\\d+ (B|kiB)", s).group().split()
|
||||||
unit = Unit.KibiByte if unit == "kiB" else Unit.Byte
|
unit = Unit.KibiByte if unit == "kiB" else Unit.Byte
|
||||||
return Size(int(re.search("[0-9]*", size).group()), unit)
|
return Size(int(re.search("\\d+", size).group()), unit)
|
||||||
|
|
||||||
raise ValueError(f'"{section_name}" entry doesn\'t exist in the given dmesg output')
|
raise ValueError(f'"{section_name}" entry doesn\'t exist in the given dmesg output')
|
||||||
|
|
||||||
|
|
||||||
def get_md_section_size(section_name, dmesg):
|
def get_md_section_size(section_name, dmesg) -> Size:
|
||||||
section_name = section_name.strip()
|
section_name = section_name.strip()
|
||||||
section_name += " size"
|
section_name += " size"
|
||||||
return _get_metadata_info(dmesg, section_name)
|
return _get_metadata_info(dmesg, section_name)
|
||||||
|
|
||||||
|
|
||||||
def get_md_section_offset(section_name, dmesg):
|
def get_md_section_offset(section_name, dmesg) -> Size:
|
||||||
section_name = section_name.strip()
|
section_name = section_name.strip()
|
||||||
section_name += " offset"
|
section_name += " offset"
|
||||||
return _get_metadata_info(dmesg, section_name)
|
return _get_metadata_info(dmesg, section_name)
|
||||||
|
@ -1,116 +0,0 @@
|
|||||||
#
|
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
|
||||||
#
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from core.test_run import TestRun
|
|
||||||
from connection.local_executor import LocalExecutor
|
|
||||||
from test_utils.output import CmdException
|
|
||||||
|
|
||||||
|
|
||||||
def get_submodules_paths(from_dut: bool = False):
|
|
||||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
|
||||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
|
||||||
git_params = "config --file .gitmodules --get-regexp path | cut -d' ' -f2"
|
|
||||||
|
|
||||||
output = executor.run(f"git -C {repo_path} {git_params}")
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to get submodules paths", output)
|
|
||||||
|
|
||||||
return output.stdout.splitlines()
|
|
||||||
|
|
||||||
|
|
||||||
def get_repo_files(
|
|
||||||
branch: str = "HEAD",
|
|
||||||
with_submodules: bool = True,
|
|
||||||
with_dirs: bool = False,
|
|
||||||
from_dut: bool = False,
|
|
||||||
):
|
|
||||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
|
||||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
|
||||||
git_params = f"ls-tree -r --name-only --full-tree {branch}"
|
|
||||||
|
|
||||||
output = executor.run(f"git -C {repo_path} {git_params}")
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException("Failed to get repo files list", output)
|
|
||||||
|
|
||||||
files = output.stdout.splitlines()
|
|
||||||
|
|
||||||
if with_submodules:
|
|
||||||
for subm_path in get_submodules_paths(from_dut):
|
|
||||||
output = executor.run(f"git -C {os.path.join(repo_path, subm_path)} {git_params}")
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException(f"Failed to get {subm_path} submodule repo files list", output)
|
|
||||||
|
|
||||||
subm_files = [os.path.join(subm_path, file) for file in output.stdout.splitlines()]
|
|
||||||
files.extend(subm_files)
|
|
||||||
|
|
||||||
if with_dirs:
|
|
||||||
# use set() to get unique values only
|
|
||||||
dirs = set(os.path.dirname(file) for file in files)
|
|
||||||
files.extend(dirs)
|
|
||||||
|
|
||||||
# change to absolute paths and remove empty values
|
|
||||||
files = [os.path.realpath(os.path.join(repo_path, file)) for file in files if file]
|
|
||||||
|
|
||||||
return files
|
|
||||||
|
|
||||||
|
|
||||||
def get_current_commit_hash(from_dut: bool = False):
|
|
||||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
|
||||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
|
||||||
|
|
||||||
return executor.run(
|
|
||||||
f"cd {repo_path} &&"
|
|
||||||
f'git show HEAD -s --pretty=format:"%H"').stdout
|
|
||||||
|
|
||||||
|
|
||||||
def get_current_commit_message():
|
|
||||||
local_executor = LocalExecutor()
|
|
||||||
return local_executor.run(
|
|
||||||
f"cd {TestRun.usr.repo_dir} &&"
|
|
||||||
f'git show HEAD -s --pretty=format:"%B"').stdout
|
|
||||||
|
|
||||||
|
|
||||||
def get_commit_hash(cas_version, from_dut: bool = False):
|
|
||||||
executor = TestRun.executor if from_dut else LocalExecutor()
|
|
||||||
repo_path = TestRun.usr.working_dir if from_dut else TestRun.usr.repo_dir
|
|
||||||
|
|
||||||
output = executor.run(
|
|
||||||
f"cd {repo_path} && "
|
|
||||||
f"git rev-parse {cas_version}")
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException(f"Failed to resolve '{cas_version}' to commit hash", output)
|
|
||||||
|
|
||||||
TestRun.LOGGER.info(f"Resolved '{cas_version}' as commit {output.stdout}")
|
|
||||||
|
|
||||||
return output.stdout
|
|
||||||
|
|
||||||
|
|
||||||
def get_release_tags():
|
|
||||||
repo_path = os.path.join(TestRun.usr.working_dir, ".git")
|
|
||||||
output = TestRun.executor.run_expect_success(f"git --git-dir={repo_path} tag").stdout
|
|
||||||
|
|
||||||
# Tags containing '-' or '_' are not CAS release versions
|
|
||||||
tags = [v for v in output.splitlines() if "-" not in v and "_" not in v]
|
|
||||||
|
|
||||||
return tags
|
|
||||||
|
|
||||||
|
|
||||||
def checkout_cas_version(cas_version):
|
|
||||||
commit_hash = get_commit_hash(cas_version)
|
|
||||||
TestRun.LOGGER.info(f"Checkout CAS to {commit_hash}")
|
|
||||||
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
f"cd {TestRun.usr.working_dir} && "
|
|
||||||
f"git checkout --force {commit_hash}")
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException(f"Failed to checkout to {commit_hash}", output)
|
|
||||||
|
|
||||||
output = TestRun.executor.run(
|
|
||||||
f"cd {TestRun.usr.working_dir} && "
|
|
||||||
f"git submodule update --force")
|
|
||||||
if output.exit_code != 0:
|
|
||||||
raise CmdException(f"Failed to update submodules", output)
|
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -17,18 +18,24 @@ class InitConfig:
|
|||||||
self.cache_config_lines = []
|
self.cache_config_lines = []
|
||||||
self.core_config_lines = []
|
self.core_config_lines = []
|
||||||
|
|
||||||
def add_cache(self, cache_id, cache_device: Device,
|
def add_cache(
|
||||||
cache_mode: CacheMode = CacheMode.WT, extra_flags=""):
|
self,
|
||||||
|
cache_id,
|
||||||
|
cache_device: Device,
|
||||||
|
cache_mode: CacheMode = CacheMode.WT,
|
||||||
|
extra_flags="",
|
||||||
|
):
|
||||||
self.cache_config_lines.append(
|
self.cache_config_lines.append(
|
||||||
CacheConfigLine(cache_id, cache_device, cache_mode, extra_flags))
|
CacheConfigLine(cache_id, cache_device, cache_mode, extra_flags)
|
||||||
|
)
|
||||||
|
|
||||||
def add_core(self, cache_id, core_id, core_device: Device, extra_flags=""):
|
def add_core(self, cache_id, core_id, core_device: Device, extra_flags=""):
|
||||||
self.core_config_lines.append(CoreConfigLine(cache_id, core_id, core_device, extra_flags))
|
self.core_config_lines.append(CoreConfigLine(cache_id, core_id, core_device, extra_flags))
|
||||||
|
|
||||||
def remove_config_file(self):
|
@staticmethod
|
||||||
|
def remove_config_file():
|
||||||
fs_utils.remove(opencas_conf_path, force=False)
|
fs_utils.remove(opencas_conf_path, force=False)
|
||||||
|
|
||||||
|
|
||||||
def save_config_file(self):
|
def save_config_file(self):
|
||||||
config_lines = []
|
config_lines = []
|
||||||
InitConfig.create_default_init_config()
|
InitConfig.create_default_init_config()
|
||||||
@ -40,7 +47,7 @@ class InitConfig:
|
|||||||
config_lines.append(CoreConfigLine.header)
|
config_lines.append(CoreConfigLine.header)
|
||||||
for c in self.core_config_lines:
|
for c in self.core_config_lines:
|
||||||
config_lines.append(str(c))
|
config_lines.append(str(c))
|
||||||
fs_utils.write_file(opencas_conf_path, '\n'.join(config_lines), False)
|
fs_utils.write_file(opencas_conf_path, "\n".join(config_lines), False)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_init_config_from_running_configuration(
|
def create_init_config_from_running_configuration(
|
||||||
@ -48,10 +55,12 @@ class InitConfig:
|
|||||||
):
|
):
|
||||||
init_conf = cls()
|
init_conf = cls()
|
||||||
for cache in casadm_parser.get_caches():
|
for cache in casadm_parser.get_caches():
|
||||||
init_conf.add_cache(cache.cache_id,
|
init_conf.add_cache(
|
||||||
|
cache.cache_id,
|
||||||
cache.cache_device,
|
cache.cache_device,
|
||||||
cache.get_cache_mode(),
|
cache.get_cache_mode(),
|
||||||
cache_extra_flags)
|
cache_extra_flags,
|
||||||
|
)
|
||||||
for core in casadm_parser.get_cores(cache.cache_id):
|
for core in casadm_parser.get_cores(cache.cache_id):
|
||||||
init_conf.add_core(cache.cache_id, core.core_id, core.core_device, core_extra_flags)
|
init_conf.add_core(cache.cache_id, core.core_id, core.core_device, core_extra_flags)
|
||||||
init_conf.save_config_file()
|
init_conf.save_config_file()
|
||||||
@ -66,17 +75,20 @@ class InitConfig:
|
|||||||
class CacheConfigLine:
|
class CacheConfigLine:
|
||||||
header = "[caches]"
|
header = "[caches]"
|
||||||
|
|
||||||
def __init__(self, cache_id, cache_device: Device,
|
def __init__(self, cache_id, cache_device: Device, cache_mode: CacheMode, extra_flags=""):
|
||||||
cache_mode: CacheMode, extra_flags=""):
|
|
||||||
self.cache_id = cache_id
|
self.cache_id = cache_id
|
||||||
self.cache_device = cache_device
|
self.cache_device = cache_device
|
||||||
self.cache_mode = cache_mode
|
self.cache_mode = cache_mode
|
||||||
self.extra_flags = extra_flags
|
self.extra_flags = extra_flags
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
params = [str(self.cache_id), self.cache_device.path,
|
params = [
|
||||||
self.cache_mode.name, self.extra_flags]
|
str(self.cache_id),
|
||||||
return '\t'.join(params)
|
self.cache_device.path,
|
||||||
|
self.cache_mode.name,
|
||||||
|
self.extra_flags,
|
||||||
|
]
|
||||||
|
return "\t".join(params)
|
||||||
|
|
||||||
|
|
||||||
class CoreConfigLine:
|
class CoreConfigLine:
|
||||||
@ -89,6 +101,10 @@ class CoreConfigLine:
|
|||||||
self.extra_flags = extra_flags
|
self.extra_flags = extra_flags
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
params = [str(self.cache_id), str(self.core_id),
|
params = [
|
||||||
self.core_device.path, self.extra_flags]
|
str(self.cache_id),
|
||||||
return '\t'.join(params)
|
str(self.core_id),
|
||||||
|
self.core_device.path,
|
||||||
|
self.extra_flags,
|
||||||
|
]
|
||||||
|
return "\t".join(params)
|
||||||
|
@ -1,17 +1,15 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from tests import conftest
|
|
||||||
from core.test_run import TestRun
|
from core.test_run import TestRun
|
||||||
from api.cas import cas_module, git
|
from api.cas import cas_module
|
||||||
from api.cas.version import get_installed_cas_version
|
from api.cas.version import get_installed_cas_version
|
||||||
from test_utils import os_utils
|
from test_utils import os_utils, git
|
||||||
from test_utils.output import CmdException
|
from test_utils.output import CmdException
|
||||||
|
|
||||||
|
|
||||||
@ -22,27 +20,23 @@ def rsync_opencas_sources():
|
|||||||
# to make sure path ends with directory separator.
|
# to make sure path ends with directory separator.
|
||||||
# Needed for rsync to copy only contents of a directory
|
# Needed for rsync to copy only contents of a directory
|
||||||
# and not the directory itself.
|
# and not the directory itself.
|
||||||
os.path.join(TestRun.usr.repo_dir, ''),
|
os.path.join(TestRun.usr.repo_dir, ""),
|
||||||
os.path.join(TestRun.usr.working_dir, ''),
|
os.path.join(TestRun.usr.working_dir, ""),
|
||||||
exclude_list=["test/functional/results/"],
|
exclude_list=["test/functional/results/"],
|
||||||
delete=True)
|
delete=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def clean_opencas_repo():
|
def clean_opencas_repo():
|
||||||
TestRun.LOGGER.info("Cleaning Open CAS repo")
|
TestRun.LOGGER.info("Cleaning Open CAS repo")
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(f"cd {TestRun.usr.working_dir} && make distclean")
|
||||||
f"cd {TestRun.usr.working_dir} && "
|
|
||||||
"make distclean")
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("make distclean command executed with nonzero status", output)
|
raise CmdException("make distclean command executed with nonzero status", output)
|
||||||
|
|
||||||
|
|
||||||
def build_opencas():
|
def build_opencas():
|
||||||
TestRun.LOGGER.info("Building Open CAS")
|
TestRun.LOGGER.info("Building Open CAS")
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(f"cd {TestRun.usr.working_dir} && ./configure && make -j")
|
||||||
f"cd {TestRun.usr.working_dir} && "
|
|
||||||
"./configure && "
|
|
||||||
"make -j")
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Make command executed with nonzero status", output)
|
raise CmdException("Make command executed with nonzero status", output)
|
||||||
|
|
||||||
@ -54,8 +48,8 @@ def install_opencas(destdir: str = ""):
|
|||||||
destdir = os.path.join(TestRun.usr.working_dir, destdir)
|
destdir = os.path.join(TestRun.usr.working_dir, destdir)
|
||||||
|
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(
|
||||||
f"cd {TestRun.usr.working_dir} && "
|
f"cd {TestRun.usr.working_dir} && make {'DESTDIR='+destdir if destdir else ''} install"
|
||||||
f"make {'DESTDIR='+destdir if destdir else ''} install")
|
)
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Failed to install Open CAS", output)
|
raise CmdException("Failed to install Open CAS", output)
|
||||||
|
|
||||||
@ -78,7 +72,7 @@ def set_up_opencas(version: str = ""):
|
|||||||
clean_opencas_repo()
|
clean_opencas_repo()
|
||||||
|
|
||||||
if version:
|
if version:
|
||||||
git.checkout_cas_version(version)
|
git.checkout_version(version)
|
||||||
|
|
||||||
build_opencas()
|
build_opencas()
|
||||||
install_opencas()
|
install_opencas()
|
||||||
@ -90,9 +84,7 @@ def uninstall_opencas():
|
|||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("Open CAS is not properly installed", output)
|
raise CmdException("Open CAS is not properly installed", output)
|
||||||
else:
|
else:
|
||||||
TestRun.executor.run(
|
TestRun.executor.run(f"cd {TestRun.usr.working_dir} && make uninstall")
|
||||||
f"cd {TestRun.usr.working_dir} && "
|
|
||||||
f"make uninstall")
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise CmdException("There was an error during uninstall process", output)
|
raise CmdException("There was an error during uninstall process", output)
|
||||||
|
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -8,8 +9,8 @@ import functools
|
|||||||
import random
|
import random
|
||||||
import re
|
import re
|
||||||
import string
|
import string
|
||||||
from datetime import timedelta
|
|
||||||
|
|
||||||
|
from datetime import timedelta
|
||||||
from packaging import version
|
from packaging import version
|
||||||
|
|
||||||
from core.test_run import TestRun
|
from core.test_run import TestRun
|
||||||
@ -30,33 +31,59 @@ IO_CLASS_CONFIG_HEADER = "IO class id,IO class name,Eviction priority,Allocation
|
|||||||
|
|
||||||
@functools.total_ordering
|
@functools.total_ordering
|
||||||
class IoClass:
|
class IoClass:
|
||||||
def __init__(self, class_id: int, rule: str = '', priority: int = None,
|
def __init__(
|
||||||
allocation: str = "1.00"):
|
self,
|
||||||
|
class_id: int,
|
||||||
|
rule: str = "",
|
||||||
|
priority: int = None,
|
||||||
|
allocation: str = "1.00",
|
||||||
|
):
|
||||||
self.id = class_id
|
self.id = class_id
|
||||||
self.rule = rule
|
self.rule = rule
|
||||||
self.priority = priority
|
self.priority = priority
|
||||||
self.allocation = allocation
|
self.allocation = allocation
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (f'{self.id},{self.rule},{"" if self.priority is None else self.priority}'
|
return (
|
||||||
f',{self.allocation}')
|
f'{self.id},{self.rule},{"" if self.priority is None else self.priority}'
|
||||||
|
f",{self.allocation}"
|
||||||
|
)
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
return ((self.id, self.rule, self.priority, self.allocation)
|
return (
|
||||||
== (other.id, other.rule, other.priority, other.allocation))
|
self.id,
|
||||||
|
self.rule,
|
||||||
|
self.priority,
|
||||||
|
self.allocation,
|
||||||
|
) == (
|
||||||
|
other.id,
|
||||||
|
other.rule,
|
||||||
|
other.priority,
|
||||||
|
other.allocation,
|
||||||
|
)
|
||||||
|
|
||||||
def __lt__(self, other):
|
def __lt__(self, other):
|
||||||
return ((self.id, self.rule, self.priority, self.allocation)
|
return (
|
||||||
< (other.id, other.rule, other.priority, other.allocation))
|
self.id,
|
||||||
|
self.rule,
|
||||||
|
self.priority,
|
||||||
|
self.allocation,
|
||||||
|
) < (
|
||||||
|
other.id,
|
||||||
|
other.rule,
|
||||||
|
other.priority,
|
||||||
|
other.allocation,
|
||||||
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def from_string(ioclass_str: str):
|
def from_string(ioclass_str: str):
|
||||||
parts = [part.strip() for part in re.split('[,|]', ioclass_str.replace('║', ''))]
|
parts = [part.strip() for part in re.split("[,|]", ioclass_str.replace("║", ""))]
|
||||||
return IoClass(
|
return IoClass(
|
||||||
class_id=int(parts[0]),
|
class_id=int(parts[0]),
|
||||||
rule=parts[1],
|
rule=parts[1],
|
||||||
priority=int(parts[2]),
|
priority=int(parts[2]),
|
||||||
allocation="%.2f" % float(parts[3]))
|
allocation="%.2f" % float(parts[3]),
|
||||||
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def list_to_csv(ioclass_list: [], add_default_rule: bool = True):
|
def list_to_csv(ioclass_list: [], add_default_rule: bool = True):
|
||||||
@ -64,7 +91,7 @@ class IoClass:
|
|||||||
if add_default_rule and not len([c for c in list_copy if c.id == 0]):
|
if add_default_rule and not len([c for c in list_copy if c.id == 0]):
|
||||||
list_copy.insert(0, IoClass.default())
|
list_copy.insert(0, IoClass.default())
|
||||||
list_copy.insert(0, IO_CLASS_CONFIG_HEADER)
|
list_copy.insert(0, IO_CLASS_CONFIG_HEADER)
|
||||||
return '\n'.join(str(c) for c in list_copy)
|
return "\n".join(str(c) for c in list_copy)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def csv_to_list(csv: str):
|
def csv_to_list(csv: str):
|
||||||
@ -76,12 +103,15 @@ class IoClass:
|
|||||||
return ioclass_list
|
return ioclass_list
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def save_list_to_config_file(ioclass_list: [],
|
def save_list_to_config_file(
|
||||||
|
ioclass_list: [],
|
||||||
add_default_rule: bool = True,
|
add_default_rule: bool = True,
|
||||||
ioclass_config_path: str = default_config_file_path):
|
ioclass_config_path: str = default_config_file_path,
|
||||||
|
):
|
||||||
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
|
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
|
||||||
fs_utils.write_file(ioclass_config_path,
|
fs_utils.write_file(
|
||||||
IoClass.list_to_csv(ioclass_list, add_default_rule))
|
ioclass_config_path, IoClass.list_to_csv(ioclass_list, add_default_rule)
|
||||||
|
)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def default(priority=DEFAULT_IO_CLASS_PRIORITY, allocation="1.00"):
|
def default(priority=DEFAULT_IO_CLASS_PRIORITY, allocation="1.00"):
|
||||||
@ -93,12 +123,12 @@ class IoClass:
|
|||||||
"id": "IO class id",
|
"id": "IO class id",
|
||||||
"name": "IO class name",
|
"name": "IO class name",
|
||||||
"eviction_prio": "Eviction priority",
|
"eviction_prio": "Eviction priority",
|
||||||
"allocation": "Allocation"
|
"allocation": "Allocation",
|
||||||
}
|
}
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def default_header():
|
def default_header():
|
||||||
return ','.join(IoClass.default_header_dict().values())
|
return ",".join(IoClass.default_header_dict().values())
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def compare_ioclass_lists(list1: [], list2: []):
|
def compare_ioclass_lists(list1: [], list2: []):
|
||||||
@ -106,18 +136,37 @@ class IoClass:
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def generate_random_ioclass_list(count: int, max_priority: int = MAX_IO_CLASS_PRIORITY):
|
def generate_random_ioclass_list(count: int, max_priority: int = MAX_IO_CLASS_PRIORITY):
|
||||||
random_list = [IoClass.default(priority=random.randint(0, max_priority),
|
random_list = [
|
||||||
allocation=f"{random.randint(0, 100) / 100:0.2f}")]
|
IoClass.default(
|
||||||
|
priority=random.randint(0, max_priority),
|
||||||
|
allocation=f"{random.randint(0, 100) / 100:0.2f}",
|
||||||
|
)
|
||||||
|
]
|
||||||
for i in range(1, count):
|
for i in range(1, count):
|
||||||
random_list.append(IoClass(i, priority=random.randint(0, max_priority),
|
random_list.append(
|
||||||
allocation=f"{random.randint(0, 100) / 100:0.2f}")
|
IoClass(
|
||||||
.set_random_rule())
|
i,
|
||||||
|
priority=random.randint(0, max_priority),
|
||||||
|
allocation=f"{random.randint(0, 100) / 100:0.2f}",
|
||||||
|
).set_random_rule()
|
||||||
|
)
|
||||||
return random_list
|
return random_list
|
||||||
|
|
||||||
def set_random_rule(self):
|
def set_random_rule(self):
|
||||||
rules = ["metadata", "direct", "file_size", "directory", "io_class",
|
rules = [
|
||||||
"extension", "file_name_prefix", "lba", "pid", "process_name",
|
"metadata",
|
||||||
"file_offset", "request_size"]
|
"direct",
|
||||||
|
"file_size",
|
||||||
|
"directory",
|
||||||
|
"io_class",
|
||||||
|
"extension",
|
||||||
|
"file_name_prefix",
|
||||||
|
"lba",
|
||||||
|
"pid",
|
||||||
|
"process_name",
|
||||||
|
"file_offset",
|
||||||
|
"request_size",
|
||||||
|
]
|
||||||
if os_utils.get_kernel_version() >= version.Version("4.13"):
|
if os_utils.get_kernel_version() >= version.Version("4.13"):
|
||||||
rules.append("wlth")
|
rules.append("wlth")
|
||||||
|
|
||||||
@ -128,7 +177,7 @@ class IoClass:
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def add_random_params(rule: str):
|
def add_random_params(rule: str):
|
||||||
if rule == "directory":
|
if rule == "directory":
|
||||||
allowed_chars = string.ascii_letters + string.digits + '/'
|
allowed_chars = string.ascii_letters + string.digits + "/"
|
||||||
rule += f":/{random_string(random.randint(1, 40), allowed_chars)}"
|
rule += f":/{random_string(random.randint(1, 40), allowed_chars)}"
|
||||||
elif rule in ["file_size", "lba", "pid", "file_offset", "request_size", "wlth"]:
|
elif rule in ["file_size", "lba", "pid", "file_offset", "request_size", "wlth"]:
|
||||||
rule += f":{Operator(random.randrange(len(Operator))).name}:{random.randrange(1000000)}"
|
rule += f":{Operator(random.randrange(len(Operator))).name}:{random.randrange(1000000)}"
|
||||||
@ -154,9 +203,7 @@ def create_ioclass_config(
|
|||||||
add_default_rule: bool = True, ioclass_config_path: str = default_config_file_path
|
add_default_rule: bool = True, ioclass_config_path: str = default_config_file_path
|
||||||
):
|
):
|
||||||
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
|
TestRun.LOGGER.info(f"Creating config file {ioclass_config_path}")
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(f"echo {IO_CLASS_CONFIG_HEADER} > {ioclass_config_path}")
|
||||||
f'echo {IO_CLASS_CONFIG_HEADER} > {ioclass_config_path}'
|
|
||||||
)
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Failed to create ioclass config file. "
|
"Failed to create ioclass config file. "
|
||||||
@ -180,8 +227,7 @@ def remove_ioclass_config(ioclass_config_path: str = default_config_file_path):
|
|||||||
output = TestRun.executor.run(f"rm -f {ioclass_config_path}")
|
output = TestRun.executor.run(f"rm -f {ioclass_config_path}")
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Failed to remove config file. "
|
f"Failed to remove config file. stdout: {output.stdout} \n stderr :{output.stderr}"
|
||||||
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -193,13 +239,9 @@ def add_ioclass(
|
|||||||
ioclass_config_path: str = default_config_file_path,
|
ioclass_config_path: str = default_config_file_path,
|
||||||
):
|
):
|
||||||
new_ioclass = f"{ioclass_id},{rule},{eviction_priority},{allocation}"
|
new_ioclass = f"{ioclass_id},{rule},{eviction_priority},{allocation}"
|
||||||
TestRun.LOGGER.info(
|
TestRun.LOGGER.info(f"Adding rule {new_ioclass} to config file {ioclass_config_path}")
|
||||||
f"Adding rule {new_ioclass} " + f"to config file {ioclass_config_path}"
|
|
||||||
)
|
|
||||||
|
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(f'echo "{new_ioclass}" >> {ioclass_config_path}')
|
||||||
f'echo "{new_ioclass}" >> {ioclass_config_path}'
|
|
||||||
)
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Failed to append ioclass to config file. "
|
"Failed to append ioclass to config file. "
|
||||||
@ -208,9 +250,7 @@ def add_ioclass(
|
|||||||
|
|
||||||
|
|
||||||
def get_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_path):
|
def get_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_path):
|
||||||
TestRun.LOGGER.info(
|
TestRun.LOGGER.info(f"Retrieving rule no. {ioclass_id} from config file {ioclass_config_path}")
|
||||||
f"Retrieving rule no. {ioclass_id} " + f"from config file {ioclass_config_path}"
|
|
||||||
)
|
|
||||||
output = TestRun.executor.run(f"cat {ioclass_config_path}")
|
output = TestRun.executor.run(f"cat {ioclass_config_path}")
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
@ -225,12 +265,8 @@ def get_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_
|
|||||||
return ioclass
|
return ioclass
|
||||||
|
|
||||||
|
|
||||||
def remove_ioclass(
|
def remove_ioclass(ioclass_id: int, ioclass_config_path: str = default_config_file_path):
|
||||||
ioclass_id: int, ioclass_config_path: str = default_config_file_path
|
TestRun.LOGGER.info(f"Removing rule no.{ioclass_id} from config file {ioclass_config_path}")
|
||||||
):
|
|
||||||
TestRun.LOGGER.info(
|
|
||||||
f"Removing rule no.{ioclass_id} " + f"from config file {ioclass_config_path}"
|
|
||||||
)
|
|
||||||
output = TestRun.executor.run(f"cat {ioclass_config_path}")
|
output = TestRun.executor.run(f"cat {ioclass_config_path}")
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
@ -243,9 +279,7 @@ def remove_ioclass(
|
|||||||
|
|
||||||
# First line in valid config file is always a header, not a rule - it is
|
# First line in valid config file is always a header, not a rule - it is
|
||||||
# already extracted above
|
# already extracted above
|
||||||
new_ioclass_config = [
|
new_ioclass_config = [x for x in old_ioclass_config[1:] if int(x.split(",")[0]) != ioclass_id]
|
||||||
x for x in old_ioclass_config[1:] if int(x.split(",")[0]) != ioclass_id
|
|
||||||
]
|
|
||||||
|
|
||||||
new_ioclass_config.insert(0, config_header)
|
new_ioclass_config.insert(0, config_header)
|
||||||
|
|
||||||
@ -255,9 +289,7 @@ def remove_ioclass(
|
|||||||
)
|
)
|
||||||
|
|
||||||
new_ioclass_config_str = "\n".join(new_ioclass_config)
|
new_ioclass_config_str = "\n".join(new_ioclass_config)
|
||||||
output = TestRun.executor.run(
|
output = TestRun.executor.run(f'echo "{new_ioclass_config_str}" > {ioclass_config_path}')
|
||||||
f'echo "{new_ioclass_config_str}" > {ioclass_config_path}'
|
|
||||||
)
|
|
||||||
if output.exit_code != 0:
|
if output.exit_code != 0:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
"Failed to save new ioclass config. "
|
"Failed to save new ioclass config. "
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2022 Intel Corporation
|
# Copyright(c) 2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
@ -31,8 +32,8 @@ def check_progress_bar(command: str, progress_bar_expected: bool = True):
|
|||||||
|
|
||||||
percentage = 0
|
percentage = 0
|
||||||
while True:
|
while True:
|
||||||
output = stdout.channel.recv(1024).decode('utf-8')
|
output = stdout.channel.recv(1024).decode("utf-8")
|
||||||
search = re.search(r'\d+.\d+', output)
|
search = re.search(r"\d+.\d+", output)
|
||||||
last_percentage = percentage
|
last_percentage = percentage
|
||||||
if search:
|
if search:
|
||||||
TestRun.LOGGER.info(output)
|
TestRun.LOGGER.info(output)
|
||||||
|
@ -1,256 +1,250 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2021 Intel Corporation
|
# Copyright(c) 2019-2021 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
# Order in arrays is important!
|
import csv
|
||||||
config_stats_cache = [
|
|
||||||
"cache id", "cache size", "cache device", "exported object", "core devices",
|
from enum import Enum
|
||||||
"inactive core devices", "write policy", "cleaning policy", "promotion policy",
|
from datetime import timedelta
|
||||||
"cache line size", "metadata memory footprint", "dirty for", "status"
|
from typing import List
|
||||||
]
|
|
||||||
config_stats_core = [
|
from api.cas import casadm
|
||||||
"core id", "core device", "exported object", "core size", "dirty for", "status",
|
from api.cas.casadm_params import StatsFilter
|
||||||
"seq cutoff threshold", "seq cutoff policy"
|
from test_utils.size import Size, Unit
|
||||||
]
|
|
||||||
config_stats_ioclass = ["io class id", "io class name", "eviction priority", "max size"]
|
|
||||||
usage_stats = ["occupancy", "free", "clean", "dirty"]
|
class UnitType(Enum):
|
||||||
usage_stats_ioclass = ["occupancy", "clean", "dirty"]
|
requests = "[Requests]"
|
||||||
inactive_usage_stats = ["inactive occupancy", "inactive clean", "inactive dirty"]
|
percentage = "[%]"
|
||||||
request_stats = [
|
block_4k = "[4KiB Blocks]"
|
||||||
"read hits", "read partial misses", "read full misses", "read total",
|
mebibyte = "[MiB]"
|
||||||
"write hits", "write partial misses", "write full misses", "write total",
|
kibibyte = "[KiB]"
|
||||||
"pass-through reads", "pass-through writes",
|
gibibyte = "[GiB]"
|
||||||
"serviced requests", "total requests"
|
seconds = "[s]"
|
||||||
]
|
|
||||||
block_stats_cache = [
|
def __str__(self):
|
||||||
"reads from core(s)", "writes to core(s)", "total to/from core(s)",
|
return self.value
|
||||||
"reads from cache", "writes to cache", "total to/from cache",
|
|
||||||
"reads from exported object(s)", "writes to exported object(s)",
|
|
||||||
"total to/from exported object(s)"
|
class OperationType(Enum):
|
||||||
]
|
read = "Read"
|
||||||
block_stats_core = [stat.replace("(s)", "") for stat in block_stats_cache]
|
write = "Write"
|
||||||
error_stats = [
|
|
||||||
"cache read errors", "cache write errors", "cache total errors",
|
def __str__(self):
|
||||||
"core read errors", "core write errors", "core total errors",
|
return self.value
|
||||||
"total errors"
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
class CacheStats:
|
class CacheStats:
|
||||||
stats_list = [
|
def __init__(
|
||||||
"config_stats",
|
self,
|
||||||
"usage_stats",
|
cache_id: int,
|
||||||
"inactive_usage_stats",
|
filter: List[StatsFilter] = None,
|
||||||
"request_stats",
|
percentage_val: bool = False,
|
||||||
"block_stats",
|
):
|
||||||
"error_stats",
|
|
||||||
]
|
|
||||||
|
|
||||||
def __init__(self, stats):
|
if filter is None:
|
||||||
try:
|
filters = [
|
||||||
self.config_stats = CacheConfigStats(
|
StatsFilter.conf,
|
||||||
*[stats[stat] for stat in config_stats_cache]
|
StatsFilter.usage,
|
||||||
)
|
StatsFilter.req,
|
||||||
except KeyError:
|
StatsFilter.blk,
|
||||||
pass
|
StatsFilter.err,
|
||||||
try:
|
]
|
||||||
self.usage_stats = UsageStats(
|
else:
|
||||||
*[stats[stat] for stat in usage_stats]
|
filters = filter
|
||||||
)
|
|
||||||
except KeyError:
|
csv_stats = casadm.print_statistics(
|
||||||
pass
|
cache_id=cache_id,
|
||||||
try:
|
filter=filter,
|
||||||
self.inactive_usage_stats = InactiveUsageStats(
|
output_format=casadm.OutputFormat.csv,
|
||||||
*[stats[stat] for stat in inactive_usage_stats]
|
).stdout.splitlines()
|
||||||
)
|
|
||||||
except KeyError:
|
stat_keys, stat_values = csv.reader(csv_stats)
|
||||||
pass
|
|
||||||
try:
|
# Unify names in block stats for core and cache:
|
||||||
self.request_stats = RequestStats(
|
# cache stats: Reads from core(s)
|
||||||
*[stats[stat] for stat in request_stats]
|
# core stats: Reads from core
|
||||||
)
|
stat_keys = [x.replace("(s)", "") for x in stat_keys]
|
||||||
except KeyError:
|
stats_dict = dict(zip(stat_keys, stat_values))
|
||||||
pass
|
|
||||||
try:
|
for filter in filters:
|
||||||
self.block_stats = BlockStats(
|
match filter:
|
||||||
*[stats[stat] for stat in block_stats_cache]
|
case StatsFilter.conf:
|
||||||
)
|
self.config_stats = CacheConfigStats(stats_dict)
|
||||||
except KeyError:
|
case StatsFilter.usage:
|
||||||
pass
|
self.usage_stats = UsageStats(stats_dict, percentage_val)
|
||||||
try:
|
case StatsFilter.req:
|
||||||
self.error_stats = ErrorStats(
|
self.request_stats = RequestStats(stats_dict, percentage_val)
|
||||||
*[stats[stat] for stat in error_stats]
|
case StatsFilter.blk:
|
||||||
)
|
self.block_stats = BlockStats(stats_dict, percentage_val)
|
||||||
except KeyError:
|
case StatsFilter.err:
|
||||||
pass
|
self.error_stats = ErrorStats(stats_dict, percentage_val)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
status = ""
|
# stats_list contains all Class.__str__ methods initialized in CacheStats
|
||||||
for stats_item in self.stats_list:
|
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
|
||||||
current_stat = getattr(self, stats_item, None)
|
return "\n".join(stats_list)
|
||||||
if current_stat:
|
|
||||||
status += f"--- Cache {current_stat}"
|
|
||||||
return status
|
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
if not other:
|
# check if all initialized variable in self(CacheStats) match other(CacheStats)
|
||||||
return False
|
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
|
||||||
for stats_item in self.stats_list:
|
getattr(other, stats_item) for stats_item in other.__dict__
|
||||||
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
|
]
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
class CoreStats:
|
class CoreStats:
|
||||||
stats_list = [
|
def __init__(
|
||||||
"config_stats",
|
self,
|
||||||
"usage_stats",
|
cache_id: int,
|
||||||
"request_stats",
|
core_id: int,
|
||||||
"block_stats",
|
filter: List[StatsFilter] = None,
|
||||||
"error_stats",
|
percentage_val: bool = False,
|
||||||
]
|
):
|
||||||
|
|
||||||
def __init__(self, stats):
|
if filter is None:
|
||||||
try:
|
filters = [
|
||||||
self.config_stats = CoreConfigStats(
|
StatsFilter.conf,
|
||||||
*[stats[stat] for stat in config_stats_core]
|
StatsFilter.usage,
|
||||||
)
|
StatsFilter.req,
|
||||||
except KeyError:
|
StatsFilter.blk,
|
||||||
pass
|
StatsFilter.err,
|
||||||
try:
|
]
|
||||||
self.usage_stats = UsageStats(
|
else:
|
||||||
*[stats[stat] for stat in usage_stats]
|
filters = filter
|
||||||
)
|
|
||||||
except KeyError:
|
csv_stats = casadm.print_statistics(
|
||||||
pass
|
cache_id=cache_id,
|
||||||
try:
|
core_id=core_id,
|
||||||
self.request_stats = RequestStats(
|
filter=filter,
|
||||||
*[stats[stat] for stat in request_stats]
|
output_format=casadm.OutputFormat.csv,
|
||||||
)
|
).stdout.splitlines()
|
||||||
except KeyError:
|
|
||||||
pass
|
stat_keys, stat_values = csv.reader(csv_stats)
|
||||||
try:
|
stats_dict = dict(zip(stat_keys, stat_values))
|
||||||
self.block_stats = BlockStats(
|
|
||||||
*[stats[stat] for stat in block_stats_core]
|
for filter in filters:
|
||||||
)
|
match filter:
|
||||||
except KeyError:
|
case StatsFilter.conf:
|
||||||
pass
|
self.config_stats = CoreConfigStats(stats_dict)
|
||||||
try:
|
case StatsFilter.usage:
|
||||||
self.error_stats = ErrorStats(
|
self.usage_stats = UsageStats(stats_dict, percentage_val)
|
||||||
*[stats[stat] for stat in error_stats]
|
case StatsFilter.req:
|
||||||
)
|
self.request_stats = RequestStats(stats_dict, percentage_val)
|
||||||
except KeyError:
|
case StatsFilter.blk:
|
||||||
pass
|
self.block_stats = BlockStats(stats_dict, percentage_val)
|
||||||
|
case StatsFilter.err:
|
||||||
|
self.error_stats = ErrorStats(stats_dict, percentage_val)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
status = ""
|
# stats_list contains all Class.__str__ methods initialized in CacheStats
|
||||||
for stats_item in self.stats_list:
|
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
|
||||||
current_stat = getattr(self, stats_item, None)
|
return "\n".join(stats_list)
|
||||||
if current_stat:
|
|
||||||
status += f"--- Core {current_stat}"
|
|
||||||
return status
|
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
if not other:
|
# check if all initialized variable in self(CacheStats) match other(CacheStats)
|
||||||
return False
|
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
|
||||||
for stats_item in self.stats_list:
|
getattr(other, stats_item) for stats_item in other.__dict__
|
||||||
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
class IoClassStats:
|
|
||||||
stats_list = [
|
|
||||||
"config_stats",
|
|
||||||
"usage_stats",
|
|
||||||
"request_stats",
|
|
||||||
"block_stats",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
def __init__(self, stats, block_stats_list):
|
|
||||||
try:
|
|
||||||
self.config_stats = IoClassConfigStats(
|
|
||||||
*[stats[stat] for stat in config_stats_ioclass]
|
|
||||||
)
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
self.usage_stats = IoClassUsageStats(
|
|
||||||
*[stats[stat] for stat in usage_stats_ioclass]
|
|
||||||
)
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
self.request_stats = RequestStats(
|
|
||||||
*[stats[stat] for stat in request_stats]
|
|
||||||
)
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
self.block_stats = BlockStats(
|
|
||||||
*[stats[stat] for stat in block_stats_list]
|
|
||||||
)
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
def __str__(self):
|
class CoreIoClassStats:
|
||||||
status = ""
|
def __init__(
|
||||||
for stats_item in self.stats_list:
|
self,
|
||||||
current_stat = getattr(self, stats_item, None)
|
cache_id: int,
|
||||||
if current_stat:
|
io_class_id: int,
|
||||||
status += f"--- IO class {current_stat}"
|
core_id: int = None,
|
||||||
return status
|
filter: List[StatsFilter] = None,
|
||||||
|
percentage_val: bool = False,
|
||||||
|
):
|
||||||
|
if filter is None:
|
||||||
|
filters = [
|
||||||
|
StatsFilter.conf,
|
||||||
|
StatsFilter.usage,
|
||||||
|
StatsFilter.req,
|
||||||
|
StatsFilter.blk,
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
filters = filter
|
||||||
|
|
||||||
|
csv_stats = casadm.print_statistics(
|
||||||
|
cache_id=cache_id,
|
||||||
|
core_id=core_id,
|
||||||
|
io_class_id=io_class_id,
|
||||||
|
filter=filter,
|
||||||
|
output_format=casadm.OutputFormat.csv,
|
||||||
|
).stdout.splitlines()
|
||||||
|
|
||||||
|
stat_keys, stat_values = csv.reader(csv_stats)
|
||||||
|
|
||||||
|
# Unify names in block stats for core and cache:
|
||||||
|
# cache stats: Reads from core(s)
|
||||||
|
# core stats: Reads from core
|
||||||
|
stat_keys = [x.replace("(s)", "") for x in stat_keys]
|
||||||
|
stats_dict = dict(zip(stat_keys, stat_values))
|
||||||
|
|
||||||
|
for filter in filters:
|
||||||
|
match filter:
|
||||||
|
case StatsFilter.conf:
|
||||||
|
self.config_stats = IoClassConfigStats(stats_dict)
|
||||||
|
case StatsFilter.usage:
|
||||||
|
self.usage_stats = IoClassUsageStats(stats_dict, percentage_val)
|
||||||
|
case StatsFilter.req:
|
||||||
|
self.request_stats = RequestStats(stats_dict, percentage_val)
|
||||||
|
case StatsFilter.blk:
|
||||||
|
self.block_stats = BlockStats(stats_dict, percentage_val)
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
if not other:
|
# check if all initialized variable in self(CacheStats) match other(CacheStats)
|
||||||
return False
|
return [getattr(self, stats_item) for stats_item in self.__dict__] == [
|
||||||
for stats_item in self.stats_list:
|
getattr(other, stats_item) for stats_item in other.__dict__
|
||||||
if getattr(self, stats_item, None) != getattr(other, stats_item, None):
|
]
|
||||||
return False
|
|
||||||
return True
|
def __str__(self):
|
||||||
|
# stats_list contains all Class.__str__ methods initialized in CacheStats
|
||||||
|
stats_list = [str(getattr(self, stats_item)) for stats_item in self.__dict__]
|
||||||
|
return "\n".join(stats_list)
|
||||||
|
|
||||||
|
|
||||||
class CacheIoClassStats(IoClassStats):
|
class CacheIoClassStats(CoreIoClassStats):
|
||||||
def __init__(self, stats):
|
def __init__(
|
||||||
super().__init__(stats, block_stats_cache)
|
self,
|
||||||
|
cache_id: int,
|
||||||
|
io_class_id: int,
|
||||||
class CoreIoClassStats(IoClassStats):
|
filter: List[StatsFilter] = None,
|
||||||
def __init__(self, stats):
|
percentage_val: bool = False,
|
||||||
super().__init__(stats, block_stats_core)
|
):
|
||||||
|
super().__init__(
|
||||||
|
cache_id=cache_id,
|
||||||
|
io_class_id=io_class_id,
|
||||||
|
core_id=None,
|
||||||
|
filter=filter,
|
||||||
|
percentage_val=percentage_val,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class CacheConfigStats:
|
class CacheConfigStats:
|
||||||
def __init__(
|
def __init__(self, stats_dict):
|
||||||
self,
|
self.cache_id = stats_dict["Cache Id"]
|
||||||
cache_id,
|
self.cache_size = parse_value(
|
||||||
cache_size,
|
value=stats_dict["Cache Size [4KiB Blocks]"], unit_type=UnitType.block_4k
|
||||||
cache_dev,
|
)
|
||||||
exp_obj,
|
self.cache_dev = stats_dict["Cache Device"]
|
||||||
core_dev,
|
self.exp_obj = stats_dict["Exported Object"]
|
||||||
inactive_core_dev,
|
self.core_dev = stats_dict["Core Devices"]
|
||||||
write_policy,
|
self.inactive_core_devices = stats_dict["Inactive Core Devices"]
|
||||||
cleaning_policy,
|
self.write_policy = stats_dict["Write Policy"]
|
||||||
promotion_policy,
|
self.cleaning_policy = stats_dict["Cleaning Policy"]
|
||||||
cache_line_size,
|
self.promotion_policy = stats_dict["Promotion Policy"]
|
||||||
metadata_memory_footprint,
|
self.cache_line_size = parse_value(
|
||||||
dirty_for,
|
value=stats_dict["Cache line size [KiB]"], unit_type=UnitType.kibibyte
|
||||||
status,
|
)
|
||||||
):
|
self.metadata_memory_footprint = parse_value(
|
||||||
self.cache_id = cache_id
|
value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type=UnitType.mebibyte
|
||||||
self.cache_size = cache_size
|
)
|
||||||
self.cache_dev = cache_dev
|
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
|
||||||
self.exp_obj = exp_obj
|
self.status = stats_dict["Status"]
|
||||||
self.core_dev = core_dev
|
|
||||||
self.inactive_core_dev = inactive_core_dev
|
|
||||||
self.write_policy = write_policy
|
|
||||||
self.cleaning_policy = cleaning_policy
|
|
||||||
self.promotion_policy = promotion_policy
|
|
||||||
self.cache_line_size = cache_line_size
|
|
||||||
self.metadata_memory_footprint = metadata_memory_footprint
|
|
||||||
self.dirty_for = dirty_for
|
|
||||||
self.status = status
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -260,10 +254,10 @@ class CacheConfigStats:
|
|||||||
f"Cache device: {self.cache_dev}\n"
|
f"Cache device: {self.cache_dev}\n"
|
||||||
f"Exported object: {self.exp_obj}\n"
|
f"Exported object: {self.exp_obj}\n"
|
||||||
f"Core devices: {self.core_dev}\n"
|
f"Core devices: {self.core_dev}\n"
|
||||||
f"Inactive core devices: {self.inactive_core_dev}\n"
|
f"Inactive Core Devices: {self.inactive_core_devices}\n"
|
||||||
f"Write policy: {self.write_policy}\n"
|
f"Write Policy: {self.write_policy}\n"
|
||||||
f"Cleaning policy: {self.cleaning_policy}\n"
|
f"Cleaning Policy: {self.cleaning_policy}\n"
|
||||||
f"Promotion policy: {self.promotion_policy}\n"
|
f"Promotion Policy: {self.promotion_policy}\n"
|
||||||
f"Cache line size: {self.cache_line_size}\n"
|
f"Cache line size: {self.cache_line_size}\n"
|
||||||
f"Metadata memory footprint: {self.metadata_memory_footprint}\n"
|
f"Metadata memory footprint: {self.metadata_memory_footprint}\n"
|
||||||
f"Dirty for: {self.dirty_for}\n"
|
f"Dirty for: {self.dirty_for}\n"
|
||||||
@ -279,7 +273,7 @@ class CacheConfigStats:
|
|||||||
and self.cache_dev == other.cache_dev
|
and self.cache_dev == other.cache_dev
|
||||||
and self.exp_obj == other.exp_obj
|
and self.exp_obj == other.exp_obj
|
||||||
and self.core_dev == other.core_dev
|
and self.core_dev == other.core_dev
|
||||||
and self.inactive_core_dev == other.inactive_core_dev
|
and self.inactive_core_devices == other.inactive_core_devices
|
||||||
and self.write_policy == other.write_policy
|
and self.write_policy == other.write_policy
|
||||||
and self.cleaning_policy == other.cleaning_policy
|
and self.cleaning_policy == other.cleaning_policy
|
||||||
and self.promotion_policy == other.promotion_policy
|
and self.promotion_policy == other.promotion_policy
|
||||||
@ -291,25 +285,19 @@ class CacheConfigStats:
|
|||||||
|
|
||||||
|
|
||||||
class CoreConfigStats:
|
class CoreConfigStats:
|
||||||
def __init__(
|
def __init__(self, stats_dict):
|
||||||
self,
|
self.core_id = stats_dict["Core Id"]
|
||||||
core_id,
|
self.core_dev = stats_dict["Core Device"]
|
||||||
core_dev,
|
self.exp_obj = stats_dict["Exported Object"]
|
||||||
exp_obj,
|
self.core_size = parse_value(
|
||||||
core_size,
|
value=stats_dict["Core Size [4KiB Blocks]"], unit_type=UnitType.block_4k
|
||||||
dirty_for,
|
)
|
||||||
status,
|
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type=UnitType.seconds)
|
||||||
seq_cutoff_threshold,
|
self.status = stats_dict["Status"]
|
||||||
seq_cutoff_policy,
|
self.seq_cutoff_threshold = parse_value(
|
||||||
):
|
value=stats_dict["Seq cutoff threshold [KiB]"], unit_type=UnitType.kibibyte
|
||||||
self.core_id = core_id
|
)
|
||||||
self.core_dev = core_dev
|
self.seq_cutoff_policy = stats_dict["Seq cutoff policy"]
|
||||||
self.exp_obj = exp_obj
|
|
||||||
self.core_size = core_size
|
|
||||||
self.dirty_for = dirty_for
|
|
||||||
self.status = status
|
|
||||||
self.seq_cutoff_threshold = seq_cutoff_threshold
|
|
||||||
self.seq_cutoff_policy = seq_cutoff_policy
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -340,13 +328,11 @@ class CoreConfigStats:
|
|||||||
|
|
||||||
|
|
||||||
class IoClassConfigStats:
|
class IoClassConfigStats:
|
||||||
def __init__(
|
def __init__(self, stats_dict):
|
||||||
self, io_class_id, io_class_name, eviction_priority, selective_allocation
|
self.io_class_id = stats_dict["IO class ID"]
|
||||||
):
|
self.io_class_name = stats_dict["IO class name"]
|
||||||
self.io_class_id = io_class_id
|
self.eviction_priority = stats_dict["Eviction priority"]
|
||||||
self.io_class_name = io_class_name
|
self.max_size = stats_dict["Max size"]
|
||||||
self.eviction_priority = eviction_priority
|
|
||||||
self.selective_allocation = selective_allocation
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -354,7 +340,7 @@ class IoClassConfigStats:
|
|||||||
f"IO class ID: {self.io_class_id}\n"
|
f"IO class ID: {self.io_class_id}\n"
|
||||||
f"IO class name: {self.io_class_name}\n"
|
f"IO class name: {self.io_class_name}\n"
|
||||||
f"Eviction priority: {self.eviction_priority}\n"
|
f"Eviction priority: {self.eviction_priority}\n"
|
||||||
f"Selective allocation: {self.selective_allocation}\n"
|
f"Max size: {self.max_size}\n"
|
||||||
)
|
)
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
@ -364,16 +350,17 @@ class IoClassConfigStats:
|
|||||||
self.io_class_id == other.io_class_id
|
self.io_class_id == other.io_class_id
|
||||||
and self.io_class_name == other.io_class_name
|
and self.io_class_name == other.io_class_name
|
||||||
and self.eviction_priority == other.eviction_priority
|
and self.eviction_priority == other.eviction_priority
|
||||||
and self.selective_allocation == other.selective_allocation
|
and self.max_size == other.max_size
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class UsageStats:
|
class UsageStats:
|
||||||
def __init__(self, occupancy, free, clean, dirty):
|
def __init__(self, stats_dict, percentage_val):
|
||||||
self.occupancy = occupancy
|
unit = UnitType.percentage if percentage_val else UnitType.block_4k
|
||||||
self.free = free
|
self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
|
||||||
self.clean = clean
|
self.free = parse_value(value=stats_dict[f"Free {unit}"], unit_type=unit)
|
||||||
self.dirty = dirty
|
self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
|
||||||
|
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -405,7 +392,7 @@ class UsageStats:
|
|||||||
self.occupancy + other.occupancy,
|
self.occupancy + other.occupancy,
|
||||||
self.free + other.free,
|
self.free + other.free,
|
||||||
self.clean + other.clean,
|
self.clean + other.clean,
|
||||||
self.dirty + other.dirty
|
self.dirty + other.dirty,
|
||||||
)
|
)
|
||||||
|
|
||||||
def __iadd__(self, other):
|
def __iadd__(self, other):
|
||||||
@ -417,10 +404,11 @@ class UsageStats:
|
|||||||
|
|
||||||
|
|
||||||
class IoClassUsageStats:
|
class IoClassUsageStats:
|
||||||
def __init__(self, occupancy, clean, dirty):
|
def __init__(self, stats_dict, percentage_val):
|
||||||
self.occupancy = occupancy
|
unit = UnitType.percentage if percentage_val else UnitType.block_4k
|
||||||
self.clean = clean
|
self.occupancy = parse_value(value=stats_dict[f"Occupancy {unit}"], unit_type=unit)
|
||||||
self.dirty = dirty
|
self.clean = parse_value(value=stats_dict[f"Clean {unit}"], unit_type=unit)
|
||||||
|
self.dirty = parse_value(value=stats_dict[f"Dirty {unit}"], unit_type=unit)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -449,7 +437,7 @@ class IoClassUsageStats:
|
|||||||
return UsageStats(
|
return UsageStats(
|
||||||
self.occupancy + other.occupancy,
|
self.occupancy + other.occupancy,
|
||||||
self.clean + other.clean,
|
self.clean + other.clean,
|
||||||
self.dirty + other.dirty
|
self.dirty + other.dirty,
|
||||||
)
|
)
|
||||||
|
|
||||||
def __iadd__(self, other):
|
def __iadd__(self, other):
|
||||||
@ -484,31 +472,26 @@ class InactiveUsageStats:
|
|||||||
|
|
||||||
|
|
||||||
class RequestStats:
|
class RequestStats:
|
||||||
def __init__(
|
def __init__(self, stats_dict, percentage_val):
|
||||||
self,
|
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||||
read_hits,
|
|
||||||
read_part_misses,
|
|
||||||
read_full_misses,
|
|
||||||
read_total,
|
|
||||||
write_hits,
|
|
||||||
write_part_misses,
|
|
||||||
write_full_misses,
|
|
||||||
write_total,
|
|
||||||
pass_through_reads,
|
|
||||||
pass_through_writes,
|
|
||||||
requests_serviced,
|
|
||||||
requests_total,
|
|
||||||
):
|
|
||||||
self.read = RequestStatsChunk(
|
self.read = RequestStatsChunk(
|
||||||
read_hits, read_part_misses, read_full_misses, read_total
|
stats_dict=stats_dict, percentage_val=percentage_val, operation=OperationType.read
|
||||||
)
|
)
|
||||||
self.write = RequestStatsChunk(
|
self.write = RequestStatsChunk(
|
||||||
write_hits, write_part_misses, write_full_misses, write_total
|
stats_dict=stats_dict, percentage_val=percentage_val, operation=OperationType.write
|
||||||
|
)
|
||||||
|
self.pass_through_reads = parse_value(
|
||||||
|
value=stats_dict[f"Pass-Through reads {unit}"], unit_type=unit
|
||||||
|
)
|
||||||
|
self.pass_through_writes = parse_value(
|
||||||
|
value=stats_dict[f"Pass-Through writes {unit}"], unit_type=unit
|
||||||
|
)
|
||||||
|
self.requests_serviced = parse_value(
|
||||||
|
value=stats_dict[f"Serviced requests {unit}"], unit_type=unit
|
||||||
|
)
|
||||||
|
self.requests_total = parse_value(
|
||||||
|
value=stats_dict[f"Total requests {unit}"], unit_type=unit
|
||||||
)
|
)
|
||||||
self.pass_through_reads = pass_through_reads
|
|
||||||
self.pass_through_writes = pass_through_writes
|
|
||||||
self.requests_serviced = requests_serviced
|
|
||||||
self.requests_total = requests_total
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -535,11 +518,16 @@ class RequestStats:
|
|||||||
|
|
||||||
|
|
||||||
class RequestStatsChunk:
|
class RequestStatsChunk:
|
||||||
def __init__(self, hits, part_misses, full_misses, total):
|
def __init__(self, stats_dict, percentage_val: bool, operation: OperationType):
|
||||||
self.hits = hits
|
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||||
self.part_misses = part_misses
|
self.hits = parse_value(value=stats_dict[f"{operation} hits {unit}"], unit_type=unit)
|
||||||
self.full_misses = full_misses
|
self.part_misses = parse_value(
|
||||||
self.total = total
|
value=stats_dict[f"{operation} partial misses {unit}"], unit_type=unit
|
||||||
|
)
|
||||||
|
self.full_misses = parse_value(
|
||||||
|
value=stats_dict[f"{operation} full misses {unit}"], unit_type=unit
|
||||||
|
)
|
||||||
|
self.total = parse_value(value=stats_dict[f"{operation} total {unit}"], unit_type=unit)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -561,21 +549,18 @@ class RequestStatsChunk:
|
|||||||
|
|
||||||
|
|
||||||
class BlockStats:
|
class BlockStats:
|
||||||
def __init__(
|
def __init__(self, stats_dict, percentage_val):
|
||||||
self,
|
self.core = BasicStatsChunk(
|
||||||
core_reads,
|
stats_dict=stats_dict, percentage_val=percentage_val, device="core"
|
||||||
core_writes,
|
)
|
||||||
core_total,
|
self.cache = BasicStatsChunk(
|
||||||
cache_reads,
|
stats_dict=stats_dict, percentage_val=percentage_val, device="cache"
|
||||||
cache_writes,
|
)
|
||||||
cache_total,
|
self.exp_obj = BasicStatsChunk(
|
||||||
exp_obj_reads,
|
stats_dict=stats_dict,
|
||||||
exp_obj_writes,
|
percentage_val=percentage_val,
|
||||||
exp_obj_total,
|
device="exported object",
|
||||||
):
|
)
|
||||||
self.core = BasicStatsChunk(core_reads, core_writes, core_total)
|
|
||||||
self.cache = BasicStatsChunk(cache_reads, cache_writes, cache_total)
|
|
||||||
self.exp_obj = BasicStatsChunk(exp_obj_reads, exp_obj_writes, exp_obj_total)
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -589,30 +574,20 @@ class BlockStats:
|
|||||||
if not other:
|
if not other:
|
||||||
return False
|
return False
|
||||||
return (
|
return (
|
||||||
self.core == other.core
|
self.core == other.core and self.cache == other.cache and self.exp_obj == other.exp_obj
|
||||||
and self.cache == other.cache
|
|
||||||
and self.exp_obj == other.exp_obj
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class ErrorStats:
|
class ErrorStats:
|
||||||
def __init__(
|
def __init__(self, stats_dict, percentage_val):
|
||||||
self,
|
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||||
cache_read_errors,
|
self.cache = BasicStatsChunkError(
|
||||||
cache_write_errors,
|
stats_dict=stats_dict, percentage_val=percentage_val, device="Cache"
|
||||||
cache_total_errors,
|
|
||||||
core_read_errors,
|
|
||||||
core_write_errors,
|
|
||||||
core_total_errors,
|
|
||||||
total_errors,
|
|
||||||
):
|
|
||||||
self.cache = BasicStatsChunk(
|
|
||||||
cache_read_errors, cache_write_errors, cache_total_errors
|
|
||||||
)
|
)
|
||||||
self.core = BasicStatsChunk(
|
self.core = BasicStatsChunkError(
|
||||||
core_read_errors, core_write_errors, core_total_errors
|
stats_dict=stats_dict, percentage_val=percentage_val, device="Core"
|
||||||
)
|
)
|
||||||
self.total_errors = total_errors
|
self.total_errors = parse_value(value=stats_dict[f"Total errors {unit}"], unit_type=unit)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return (
|
return (
|
||||||
@ -633,10 +608,11 @@ class ErrorStats:
|
|||||||
|
|
||||||
|
|
||||||
class BasicStatsChunk:
|
class BasicStatsChunk:
|
||||||
def __init__(self, reads, writes, total):
|
def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
|
||||||
self.reads = reads
|
unit = UnitType.percentage if percentage_val else UnitType.block_4k
|
||||||
self.writes = writes
|
self.reads = parse_value(value=stats_dict[f"Reads from {device} {unit}"], unit_type=unit)
|
||||||
self.total = total
|
self.writes = parse_value(value=stats_dict[f"Writes to {device} {unit}"], unit_type=unit)
|
||||||
|
self.total = parse_value(value=stats_dict[f"Total to/from {device} {unit}"], unit_type=unit)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
|
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
|
||||||
@ -645,7 +621,44 @@ class BasicStatsChunk:
|
|||||||
if not other:
|
if not other:
|
||||||
return False
|
return False
|
||||||
return (
|
return (
|
||||||
self.reads == other.reads
|
self.reads == other.reads and self.writes == other.writes and self.total == other.total
|
||||||
and self.writes == other.writes
|
|
||||||
and self.total == other.total
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class BasicStatsChunkError:
|
||||||
|
def __init__(self, stats_dict: dict, percentage_val: bool, device: str):
|
||||||
|
unit = UnitType.percentage if percentage_val else UnitType.requests
|
||||||
|
self.reads = parse_value(value=stats_dict[f"{device} read errors {unit}"], unit_type=unit)
|
||||||
|
self.writes = parse_value(value=stats_dict[f"{device} write errors {unit}"], unit_type=unit)
|
||||||
|
self.total = parse_value(value=stats_dict[f"{device} total errors {unit}"], unit_type=unit)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return f"Reads: {self.reads}\nWrites: {self.writes}\nTotal: {self.total}\n"
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if not other:
|
||||||
|
return False
|
||||||
|
return (
|
||||||
|
self.reads == other.reads and self.writes == other.writes and self.total == other.total
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_value(value: str, unit_type: UnitType) -> int | float | Size | timedelta | str:
|
||||||
|
match unit_type:
|
||||||
|
case UnitType.requests:
|
||||||
|
stat_unit = int(value)
|
||||||
|
case UnitType.percentage:
|
||||||
|
stat_unit = float(value)
|
||||||
|
case UnitType.block_4k:
|
||||||
|
stat_unit = Size(float(value), Unit.Blocks4096)
|
||||||
|
case UnitType.mebibyte:
|
||||||
|
stat_unit = Size(float(value), Unit.MebiByte)
|
||||||
|
case UnitType.kibibyte:
|
||||||
|
stat_unit = Size(float(value), Unit.KibiByte)
|
||||||
|
case UnitType.gibibyte:
|
||||||
|
stat_unit = Size(float(value), Unit.GibiByte)
|
||||||
|
case UnitType.seconds:
|
||||||
|
stat_unit = timedelta(seconds=float(value))
|
||||||
|
case _:
|
||||||
|
stat_unit = value
|
||||||
|
return stat_unit
|
||||||
|
@ -1,11 +1,12 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from api.cas import git
|
from test_utils import git
|
||||||
from core.test_run import TestRun
|
from core.test_run import TestRun
|
||||||
from test_utils.output import CmdException
|
from test_utils.output import CmdException
|
||||||
|
|
||||||
@ -20,23 +21,25 @@ class CasVersion:
|
|||||||
self.base = f"{self.main}.{self.major}.{self.minor}"
|
self.base = f"{self.main}.{self.major}.{self.minor}"
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"{self.main}.{self.major}.{self.minor}.{self.pr}" \
|
return (
|
||||||
|
f"{self.main}.{self.major}.{self.minor}.{self.pr}"
|
||||||
f"{'.' + self.type if self.type is not None else ''}"
|
f"{'.' + self.type if self.type is not None else ''}"
|
||||||
|
)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return str(self)
|
return str(self)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_git_tag(cls, version_tag):
|
def from_git_tag(cls, version_tag):
|
||||||
m = re.fullmatch(r'v([0-9]+)\.([0-9]+)\.?([0-9]?)', "v20.3")
|
m = re.fullmatch(r"v([0-9]+)\.([0-9]+)\.?([0-9]?)", "v20.3")
|
||||||
main, major, minor = m.groups()
|
main, major, minor = m.groups()
|
||||||
if not minor:
|
if not minor:
|
||||||
minor = '0'
|
minor = "0"
|
||||||
return cls(main, major, minor, 0, "master")
|
return cls(main, major, minor, 0, "master")
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_version_string(cls, version_string):
|
def from_version_string(cls, version_string):
|
||||||
return cls(*version_string.split('.'))
|
return cls(*version_string.split("."))
|
||||||
|
|
||||||
|
|
||||||
def get_available_cas_versions():
|
def get_available_cas_versions():
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
pytest>=4.4.0,<=6.2.5
|
pytest>=7.0,<=7.4.4
|
||||||
multimethod>=1.1
|
multimethod>=1.1
|
||||||
paramiko>=2.7.2
|
paramiko>=2.7.2
|
||||||
IPy>=1.00
|
IPy>=1.00
|
||||||
@ -12,8 +12,7 @@ attotime>=0.2.0
|
|||||||
gitpython>=3.1.7
|
gitpython>=3.1.7
|
||||||
cryptography>=3.4.6
|
cryptography>=3.4.6
|
||||||
psutil>=5.8.0
|
psutil>=5.8.0
|
||||||
py==1.10.0
|
py==1.11.0
|
||||||
portalocker>=2.3.1
|
portalocker>=2.3.1
|
||||||
pytest-asyncio>=0.14.0
|
pytest-asyncio>=0.14.0
|
||||||
recordclass>=0.8.4
|
|
||||||
schema==0.7.2
|
schema==0.7.2
|
||||||
|
@ -1,9 +1,11 @@
|
|||||||
#
|
#
|
||||||
# Copyright(c) 2019-2022 Intel Corporation
|
# Copyright(c) 2019-2022 Intel Corporation
|
||||||
|
# Copyright(c) 2023-2024 Huawei Technologies Co., Ltd.
|
||||||
# SPDX-License-Identifier: BSD-3-Clause
|
# SPDX-License-Identifier: BSD-3-Clause
|
||||||
#
|
#
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
import posixpath
|
||||||
import sys
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
@ -14,10 +16,11 @@ import yaml
|
|||||||
|
|
||||||
sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
|
sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
|
||||||
|
|
||||||
|
from core.test_run import Blocked
|
||||||
from core.test_run_utils import TestRun
|
from core.test_run_utils import TestRun
|
||||||
from api.cas import installer
|
from api.cas import installer
|
||||||
from api.cas import casadm
|
from api.cas import casadm
|
||||||
from api.cas import git
|
from test_utils import git
|
||||||
from api.cas.cas_service import opencas_drop_in_directory
|
from api.cas.cas_service import opencas_drop_in_directory
|
||||||
from storage_devices.raid import Raid
|
from storage_devices.raid import Raid
|
||||||
from storage_devices.ramdisk import RamDisk
|
from storage_devices.ramdisk import RamDisk
|
||||||
@ -91,7 +94,7 @@ def pytest_runtest_setup(item):
|
|||||||
TestRun.presetup()
|
TestRun.presetup()
|
||||||
try:
|
try:
|
||||||
TestRun.executor.wait_for_connection(timedelta(seconds=20))
|
TestRun.executor.wait_for_connection(timedelta(seconds=20))
|
||||||
except paramiko.AuthenticationException:
|
except (paramiko.AuthenticationException, Blocked):
|
||||||
raise
|
raise
|
||||||
except Exception:
|
except Exception:
|
||||||
try:
|
try:
|
||||||
@ -167,7 +170,9 @@ def pytest_runtest_teardown():
|
|||||||
for dut in TestRun.duts:
|
for dut in TestRun.duts:
|
||||||
with TestRun.use_dut(dut):
|
with TestRun.use_dut(dut):
|
||||||
if TestRun.executor:
|
if TestRun.executor:
|
||||||
os.makedirs(os.path.join(TestRun.LOGGER.base_dir, "dut_info", dut.ip),
|
os.makedirs(os.path.join(TestRun.LOGGER.base_dir, "dut_info",
|
||||||
|
dut.ip if dut.ip is not None
|
||||||
|
else dut.config.get("host")),
|
||||||
exist_ok=True)
|
exist_ok=True)
|
||||||
TestRun.LOGGER.get_additional_logs()
|
TestRun.LOGGER.get_additional_logs()
|
||||||
Log.destroy()
|
Log.destroy()
|
||||||
@ -187,7 +192,6 @@ def pytest_addoption(parser):
|
|||||||
parser.addoption("--dut-config", action="append", type=str)
|
parser.addoption("--dut-config", action="append", type=str)
|
||||||
parser.addoption("--log-path", action="store",
|
parser.addoption("--log-path", action="store",
|
||||||
default=f"{os.path.join(os.path.dirname(__file__), '../results')}")
|
default=f"{os.path.join(os.path.dirname(__file__), '../results')}")
|
||||||
parser.addoption("--force-reinstall", action="store_true", default=False)
|
|
||||||
parser.addoption("--fuzzy-iter-count", action="store")
|
parser.addoption("--fuzzy-iter-count", action="store")
|
||||||
|
|
||||||
|
|
||||||
@ -213,10 +217,6 @@ def unmount_cas_devices():
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_force_param(item):
|
|
||||||
return item.config.getoption("--force-reinstall")
|
|
||||||
|
|
||||||
|
|
||||||
def __drbd_cleanup():
|
def __drbd_cleanup():
|
||||||
from storage_devices.drbd import Drbd
|
from storage_devices.drbd import Drbd
|
||||||
Drbd.down_all()
|
Drbd.down_all()
|
||||||
@ -266,33 +266,24 @@ def base_prepare(item):
|
|||||||
raid.remove_partitions()
|
raid.remove_partitions()
|
||||||
raid.stop()
|
raid.stop()
|
||||||
for device in raid.array_devices:
|
for device in raid.array_devices:
|
||||||
Mdadm.zero_superblock(os.path.join('/dev', device.get_device_id()))
|
Mdadm.zero_superblock(posixpath.join('/dev', device.get_device_id()))
|
||||||
Udev.settle()
|
Udev.settle()
|
||||||
|
|
||||||
RamDisk.remove_all()
|
RamDisk.remove_all()
|
||||||
|
|
||||||
for disk in TestRun.dut.disks:
|
for disk in TestRun.dut.disks:
|
||||||
disk_serial = get_disk_serial_number(disk.path)
|
disk_serial = get_disk_serial_number(disk.path)
|
||||||
if disk.serial_number != disk_serial:
|
if disk.serial_number and disk.serial_number != disk_serial:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"Serial for {disk.path} doesn't match the one from the config."
|
f"Serial for {disk.path} doesn't match the one from the config."
|
||||||
f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
|
f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
|
||||||
)
|
)
|
||||||
|
|
||||||
disk.umount_all_partitions()
|
disk.umount_all_partitions()
|
||||||
Mdadm.zero_superblock(os.path.join('/dev', disk.get_device_id()))
|
Mdadm.zero_superblock(posixpath.join('/dev', disk.get_device_id()))
|
||||||
TestRun.executor.run_expect_success("udevadm settle")
|
TestRun.executor.run_expect_success("udevadm settle")
|
||||||
disk.remove_partitions()
|
disk.remove_partitions()
|
||||||
create_partition_table(disk, PartitionTable.gpt)
|
create_partition_table(disk, PartitionTable.gpt)
|
||||||
|
|
||||||
cas_version = TestRun.config.get("cas_version") or git.get_current_commit_hash()
|
|
||||||
if get_force_param(item) and not TestRun.usr.already_updated:
|
|
||||||
installer.rsync_opencas_sources()
|
|
||||||
installer.reinstall_opencas(cas_version)
|
|
||||||
elif not installer.check_if_installed(cas_version):
|
|
||||||
installer.rsync_opencas_sources()
|
|
||||||
installer.set_up_opencas(cas_version)
|
|
||||||
|
|
||||||
TestRun.usr.already_updated = True
|
TestRun.usr.already_updated = True
|
||||||
TestRun.LOGGER.add_build_info(f'Commit hash:')
|
TestRun.LOGGER.add_build_info(f'Commit hash:')
|
||||||
TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
|
TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
|
||||||
|
Loading…
Reference in New Issue
Block a user