commit
82ce9342a2
@@ -194,7 +194,7 @@ class Cache:
     def set_params_nhit(self, promotion_params_nhit: PromotionParametersNhit) -> Output:
         return casadm.set_param_promotion_nhit(
             self.cache_id,
-            threshold=promotion_params_nhit.threshold.get_value(),
+            threshold=promotion_params_nhit.threshold,
             trigger=promotion_params_nhit.trigger
         )
@@ -72,9 +72,9 @@ class CacheMode(Enum):


 class SeqCutOffPolicy(Enum):
-    full = 0
-    always = 1
-    never = 2
+    full = "full"
+    always = "always"
+    never = "never"
     DEFAULT = full

     @classmethod
@@ -85,6 +85,9 @@ class SeqCutOffPolicy(Enum):

         raise ValueError(f"{name} is not a valid sequential cut off name")

+    def __str__(self):
+        return self.value
+

 class MetadataMode(Enum):
     normal = "normal"
@@ -240,7 +243,7 @@ class SeqCutOffParameters:


 class PromotionParametersNhit:
-    def __init__(self, threshold: Size = None, trigger: int = None):
+    def __init__(self, threshold: int = None, trigger: int = None):
         self.threshold = threshold
         self.trigger = trigger
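Illustrative sketch only (not part of the diff; the enum body is abbreviated and the CLI flag name below is made up): with string-valued members plus the new __str__, a SeqCutOffPolicy member interpolates directly into a command string, with no int-to-name translation step.

```python
from enum import Enum


class SeqCutOffPolicy(Enum):
    full = "full"
    always = "always"
    never = "never"
    DEFAULT = full  # alias of `full` (same value)

    def __str__(self):
        return self.value


policy = SeqCutOffPolicy.DEFAULT
print(f"--seq-cutoff-policy {policy}")  # --seq-cutoff-policy full
```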
@@ -68,6 +68,36 @@ def get_cores(cache_id: int) -> list:
     ]


+def get_inactive_cores(cache_id: int) -> list:
+    from api.cas.core import Core, CoreStatus
+
+    cores_dict = get_cas_devices_dict()["cores"].values()
+
+    def is_inactive(core):
+        return CoreStatus[core["status"].lower()] == CoreStatus.inactive
+
+    return [
+        Core(core["device_path"], core["cache_id"])
+        for core in cores_dict
+        if is_inactive(core) and core["cache_id"] == cache_id
+    ]
+
+
+def get_detached_cores(cache_id: int) -> list:
+    from api.cas.core import Core, CoreStatus
+
+    cores_dict = get_cas_devices_dict()["cores"].values()
+
+    def is_detached(core):
+        return CoreStatus[core["status"].lower()] == CoreStatus.detached
+
+    return [
+        Core(core["device_path"], core["cache_id"])
+        for core in cores_dict
+        if is_detached(core) and core["cache_id"] == cache_id
+    ]
+
+
 def get_cas_devices_dict() -> dict:
     device_list = list(csv.DictReader(casadm.list_caches(OutputFormat.csv).stdout.split("\n")))
     devices = {"caches": {}, "cores": {}, "core_pool": {}}
@@ -92,9 +122,7 @@ def get_cas_devices_dict() -> dict:
             ]
             if core_pool:
                 params.append(("core_pool", device))
-                devices["core_pool"][device["disk"]] = dict(
-                    [(key, value) for key, value in params]
-                )
+                devices["core_pool"][device["disk"]] = dict([(key, value) for key, value in params])
             else:
                 devices["cores"][(cache_id, int(device["id"]))] = dict(
                     [(key, value) for key, value in params]
@@ -205,11 +233,14 @@ def get_io_class_list(cache_id: int) -> list:
     return ret


-def get_core_info_by_path(core_disk_path) -> dict | None:
+def get_core_info_for_cache_by_path(core_disk_path: str, target_cache_id: int) -> dict | None:
     output = casadm.list_caches(OutputFormat.csv, by_id_path=True)
     reader = csv.DictReader(io.StringIO(output.stdout))
+    cache_id = -1
     for row in reader:
-        if row["type"] == "core" and row["disk"] == core_disk_path:
+        if row["type"] == "cache":
+            cache_id = int(row["id"])
+        if row["type"] == "core" and row["disk"] == core_disk_path and target_cache_id == cache_id:
             return {
                 "core_id": row["id"],
                 "core_device": row["disk"],
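For context, a runnable sketch of the lookup the renamed helper performs (the sample CSV below is made up and simplified; real `casadm --list-caches` output has more columns): a "cache" row updates the currently tracked cache id, and a "core" row only matches when both its disk path and that tracked id agree with the arguments, so the same core device path can be disambiguated between caches.

```python
import csv
import io

# Simplified stand-in for the casadm CSV listing: core rows follow the cache row they belong to.
sample = io.StringIO(
    "type,id,disk,status\n"
    "cache,1,/dev/nvme0n1,Running\n"
    "core,1,/dev/sdb,Active\n"
    "cache,2,/dev/nvme1n1,Running\n"
    "core,1,/dev/sdb,Active\n"
)


def find_core_row(reader, core_disk_path, target_cache_id):
    cache_id = -1
    for row in reader:
        if row["type"] == "cache":
            cache_id = int(row["id"])
        if row["type"] == "core" and row["disk"] == core_disk_path and cache_id == target_cache_id:
            return row
    return None


# The same /dev/sdb path appears under two caches; the tracked cache id picks the right row.
print(find_core_row(csv.DictReader(sample), "/dev/sdb", target_cache_id=2))
```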
@@ -11,7 +11,7 @@ from enum import Enum
 from api.cas import casadm
 from api.cas.cache_config import SeqCutOffParameters, SeqCutOffPolicy
 from api.cas.casadm_params import StatsFilter
-from api.cas.casadm_parser import get_seq_cut_off_parameters, get_core_info_by_path
+from api.cas.casadm_parser import get_seq_cut_off_parameters, get_core_info_for_cache_by_path
 from api.cas.statistics import CoreStats, CoreIoClassStats
 from core.test_run_utils import TestRun
 from storage_devices.device import Device
@@ -35,18 +35,19 @@ class Core(Device):
     def __init__(self, core_device: str, cache_id: int):
         self.core_device = Device(core_device)
         self.path = None
+        self.cache_id = cache_id
         core_info = self.__get_core_info()
         # "-" is special case for cores in core pool
         if core_info["core_id"] != "-":
             self.core_id = int(core_info["core_id"])
         if core_info["exp_obj"] != "-":
             Device.__init__(self, core_info["exp_obj"])
-        self.cache_id = cache_id
         self.partitions = []
         self.block_size = None

     def __get_core_info(self):
-        return get_core_info_by_path(self.core_device.path)
+        return get_core_info_for_cache_by_path(core_disk_path=self.core_device.path,
+                                               target_cache_id=self.cache_id)

     def create_filesystem(self, fs_type: disk_utils.Filesystem, force=True, blocksize=None):
         super().create_filesystem(fs_type, force, blocksize)
@@ -1,6 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
-# Copyright(c) 2024 Huawei Technologies Co., Ltd.
+# Copyright(c) 2024 Huawei Technologies
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -11,13 +11,13 @@ from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait
 from api.cas.casadm_parser import wait_for_flushing
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
-from test_tools.disk_utils import Filesystem
+from test_tools import fs_utils
 from test_tools.dd import Dd
+from test_tools.disk_utils import Filesystem
 from test_utils import os_utils
 from test_utils.os_utils import Udev, DropCachesMode
 from test_utils.size import Size, Unit
 from tests.lazy_writes.recovery.recovery_tests_methods import compare_files
-from test_tools import fs_utils

 mount_point = "/mnt/cas"
 test_file_path = f"{mount_point}/test_file"
@@ -197,17 +197,17 @@ def test_interrupt_cache_flush(cache_mode, filesystem):
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_interrupt_core_remove(cache_mode, filesystem):
     """
-    title: Test if OpenCAS works correctly after core's removing interruption.
+    title: Core removal interruption.
     description: |
-        Negative test of the ability of OpenCAS to handle core's removing interruption.
+        Test for proper handling of 'core remove' operation interruption.
     pass_criteria:
       - No system crash.
       - Core would not be removed from cache after interruption.
       - Flushing would be stopped after interruption.
-      - Md5sum are correct during all test steps.
+      - Checksums are correct during all test steps.
       - Dirty blocks quantity after interruption is lower but non-zero.
     """
-    with TestRun.step("Prepare cache and core."):
+    with TestRun.step("Prepare cache and core devices"):
         cache_dev = TestRun.disks["cache"]
         cache_dev.create_partitions([cache_size])
         cache_part = cache_dev.partitions[0]
@@ -216,37 +216,36 @@ def test_interrupt_core_remove(cache_mode, filesystem):
         core_part = core_dev.partitions[0]

     for _ in TestRun.iteration(
-        range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times."
+        range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times"
     ):

-        with TestRun.step("Start cache."):
+        with TestRun.step("Start cache"):
             cache = casadm.start_cache(cache_part, cache_mode, force=True)

-        with TestRun.step("Set cleaning policy to NOP."):
+        with TestRun.step("Set cleaning policy to NOP"):
             cache.set_cleaning_policy(CleaningPolicy.nop)

-        with TestRun.step(f"Add core device with {filesystem} filesystem and mount it."):
+        with TestRun.step(f"Add core device with {filesystem} filesystem and mount it"):
             core_part.create_filesystem(filesystem)
             core = cache.add_core(core_part)
             core.mount(mount_point)

-        with TestRun.step(f"Create test file in mount point of exported object."):
+        with TestRun.step("Create test file in mount point of exported object"):
             test_file = create_test_file()

-        with TestRun.step("Check md5 sum of test file."):
-            test_file_md5sum_before = test_file.md5sum()
+        with TestRun.step("Calculate checksum of test file"):
+            test_file_crc32sum_before = test_file.crc32sum()

         with TestRun.step(
-            "Get number of dirty data on exported object before core removal interruption."
+            "Get number of dirty data on exported object before core removal interruption"
         ):
             os_utils.sync()
             os_utils.drop_caches(DropCachesMode.ALL)
             cache_dirty_blocks_before = cache.get_dirty_blocks()

-        with TestRun.step("Unmount core."):
+        with TestRun.step("Unmount core"):
             core.unmount()

-        with TestRun.step("Start removing core device."):
+        with TestRun.step("Start removing core"):
             flush_pid = TestRun.executor.run_in_background(
                 cli.remove_core_cmd(str(cache.cache_id), str(core.core_id))
             )
@@ -258,42 +257,39 @@ def test_interrupt_core_remove(cache_mode, filesystem):
             percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id)
             TestRun.executor.run(f"kill -s SIGINT {flush_pid}")

-        with TestRun.step("Check md5 sum of test file after interruption."):
-            cache.set_cache_mode(CacheMode.WO)
-            test_file_md5sum_interrupt = test_file.md5sum()
-            cache.set_cache_mode(cache_mode)
-
         with TestRun.step(
-            "Check number of dirty data on exported object after core removal interruption."
+            "Check number of dirty data on exported object after core removal interruption"
         ):
             cache_dirty_blocks_after = cache.get_dirty_blocks()
             if cache_dirty_blocks_after >= cache_dirty_blocks_before:
                 TestRun.LOGGER.error(
-                    "Quantity of dirty lines after core removal interruption " "should be lower."
+                    "Quantity of dirty lines after core removal interruption should be lower."
                 )
             if int(cache_dirty_blocks_after) == 0:
                 TestRun.LOGGER.error(
-                    "Quantity of dirty lines after core removal interruption " "should not be zero."
+                    "Quantity of dirty lines after core removal interruption should not be zero."
                 )

-        with TestRun.step("Remove core from cache."):
-            core.remove_core()
+        with TestRun.step("Mount core and verify test file checksum after interruption"):
+            core.mount(mount_point)

-        with TestRun.step("Stop cache."):
+            if test_file.crc32sum() != test_file_crc32sum_before:
+                TestRun.LOGGER.error("Checksum after interrupting core removal is different.")
+
+        with TestRun.step("Unmount core"):
+            core.unmount()
+
+        with TestRun.step("Stop cache"):
             cache.stop()

-        with TestRun.step("Mount core device."):
+        with TestRun.step("Mount core device"):
             core_part.mount(mount_point)

-        with TestRun.step("Check md5 sum of test file again."):
-            if test_file_md5sum_before != test_file.md5sum():
-                TestRun.LOGGER.error("Md5 sum before interrupting core removal is different.")
+        with TestRun.step("Verify checksum of test file again"):
+            if test_file.crc32sum() != test_file_crc32sum_before:
+                TestRun.LOGGER.error("Checksum after core removal is different.")

-            is_sum_diff_after_interrupt = test_file_md5sum_interrupt != test_file.md5sum()
-            if is_sum_diff_after_interrupt:
-                TestRun.LOGGER.error("Md5 sum after interrupting core removal is different.")
-
-        with TestRun.step("Unmount core device."):
+        with TestRun.step("Unmount core device"):
             core_part.unmount()


@@ -315,8 +311,19 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
       - Md5sum are correct during all test steps.
       - Dirty blocks quantity after interruption is lower but non-zero.
     """
+
+    test_file_size = Size(1, Unit.GibiByte)
+    test_file_path = "/mnt/cas/test_file"
+
     with TestRun.step("Prepare cache and core."):
-        cache_part, core_part = prepare()
+        cache_dev = TestRun.disks["cache"]
+        core_dev = TestRun.disks["core"]
+
+        cache_dev.create_partitions([cache_size])
+        core_dev.create_partitions([cache_size * 2])
+
+        cache_part = cache_dev.partitions[0]
+        core_part = core_dev.partitions[0]

     with TestRun.step("Disable udev"):
         Udev.disable()
@@ -325,23 +332,22 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
         range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times."
     ):

-        with TestRun.step("Start cache."):
+        with TestRun.step("Start cache"):
             cache = casadm.start_cache(cache_part, cache_mode, force=True)

-        with TestRun.step("Set cleaning policy to NOP."):
+        with TestRun.step("Set cleaning policy to NOP"):
             cache.set_cleaning_policy(CleaningPolicy.nop)

-        with TestRun.step(f"Add core device."):
+        with TestRun.step(f"Add core device"):
             core = cache.add_core(core_part)

-        with TestRun.step(f"Create test file in mount point of exported object."):
-            test_file_size = Size(4, Unit.GibiByte)
+        with TestRun.step(f"Create test file in mount point of exported object"):
             test_file = fs_utils.create_random_test_file(test_file_path, test_file_size)

-        with TestRun.step("Check md5 sum of test file."):
+        with TestRun.step("Calculate md5sum of test file"):
             test_file_md5_before = test_file.md5sum()

-        with TestRun.step("Export file to CAS"):
+        with TestRun.step("Copy test data to core"):
             dd = (
                 Dd()
                 .block_size(test_file_size)
@@ -351,49 +357,58 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
             )
             dd.run()

-        with TestRun.step("Get number of dirty data on exported object before interruption."):
+        with TestRun.step("Get number of dirty data on exported object before interruption"):
             os_utils.sync()
             os_utils.drop_caches(DropCachesMode.ALL)
             cache_dirty_blocks_before = cache.get_dirty_blocks()

-        with TestRun.step("Start switching cache mode."):
+        with TestRun.step("Start switching cache mode"):
             flush_pid = TestRun.executor.run_in_background(
                 cli.set_cache_mode_cmd(
-                    str(CacheMode.DEFAULT.name.lower()), str(cache.cache_id), "yes"
+                    cache_mode=str(CacheMode.DEFAULT.name.lower()),
+                    cache_id=str(cache.cache_id),
+                    flush_cache="yes",
                 )
             )

-        with TestRun.step("Send interruption signal."):
+        with TestRun.step("Kill flush process during cache flush operation"):
             wait_for_flushing(cache, core)
             percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id)
             while percentage < stop_percentage:
                 percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id)
-            TestRun.executor.run(f"kill -s SIGINT {flush_pid}")
+            TestRun.executor.kill_process(flush_pid)

-        with TestRun.step("Check number of dirty data on exported object after interruption."):
+        with TestRun.step("Check number of dirty data on exported object after interruption"):
             cache_dirty_blocks_after = cache.get_dirty_blocks()
             if cache_dirty_blocks_after >= cache_dirty_blocks_before:
                 TestRun.LOGGER.error(
                     "Quantity of dirty lines after cache mode switching "
                     "interruption should be lower."
                 )
-            if int(cache_dirty_blocks_after) == 0:
+            if cache_dirty_blocks_after == Size.zero():
                 TestRun.LOGGER.error(
                     "Quantity of dirty lines after cache mode switching "
                     "interruption should not be zero."
                 )

-        with TestRun.step("Check cache mode."):
+        with TestRun.step("Check cache mode"):
             if cache.get_cache_mode() != cache_mode:
                 TestRun.LOGGER.error("Cache mode should remain the same.")

-        with TestRun.step("Stop cache."):
+        with TestRun.step("Stop cache"):
             cache.stop()

-        with TestRun.step("Check md5 sum of test file again."):
-            Dd().block_size(test_file_size).input(core.path).output(test_file.full_path).oflag(
-                "direct"
-            ).run()
+        with TestRun.step("Copy test data from the exported object to a file"):
+            dd = (
+                Dd()
+                .block_size(test_file_size)
+                .input(core.path)
+                .output(test_file.full_path)
+                .oflag("direct")
+            )
+            dd.run()
+
+        with TestRun.step("Compare md5 sum of test files"):
             target_file_md5 = test_file.md5sum()
             compare_files(test_file_md5_before, target_file_md5)

@@ -491,7 +506,7 @@ def create_test_file():
     bs = Size(512, Unit.KibiByte)
     cnt = int(cache_size.value / bs.value)
     test_file = File.create_file(test_file_path)
-    dd = Dd().output(test_file_path).input("/dev/zero").block_size(bs).count(cnt)
+    dd = Dd().output(test_file_path).input("/dev/zero").block_size(bs).count(cnt).oflag("direct")
     dd.run()
     test_file.refresh_item()
     return test_file
@@ -12,7 +12,7 @@ from core.test_run import TestRun
 from test_utils.size import Size, Unit
 from api.cas.cache_config import CacheLineSize, CacheMode, CacheStatus
 from api.cas.casadm_params import StatsFilter
-from api.cas.casadm_parser import get_core_info_by_path
+from api.cas.casadm_parser import get_core_info_for_cache_by_path
 from api.cas.core import CoreStatus, Core
 from test_tools.dd import Dd
 from api.cas.cli import standby_activate_cmd
@@ -173,7 +173,11 @@ def test_activate_incomplete_cache():
             TestRun.fail(f"Expected one inactive core. Got {inactive_core_count}")

     with TestRun.step("Check if core is in an appropriate state"):
-        core_status = CoreStatus[get_core_info_by_path(core_dev_path)["status"].lower()]
+        core_status = CoreStatus[
+            get_core_info_for_cache_by_path(
+                core_disk_path=core_dev_path, target_cache_id=cache.cache_id
+            )["status"].lower()
+        ]
         if core_status != CoreStatus.inactive:
             TestRun.fail(
                 "The core is in an invalid state. "
@@ -1,11 +1,13 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

 import math
+import posixpath
 import pytest
 import os

 from api.cas import casadm, cli_messages
 from api.cas.cache_config import CacheLineSize
 from core.test_run import TestRun
@@ -32,7 +34,7 @@ def test_device_capabilities():
     """

     core_device = TestRun.disks['core']
-    max_io_size_path = os.path.join(disk_utils.get_sysfs_path(core_device.get_device_id()),
+    max_io_size_path = posixpath.join(disk_utils.get_sysfs_path(core_device.get_device_id()),
                                     'queue/max_sectors_kb')
     default_max_io_size = fs_utils.read_file(max_io_size_path)

@@ -148,7 +150,7 @@ def measure_capabilities(dev):
     dev_id = dev.parent_device.get_device_id() if isinstance(dev, Partition) \
         else dev.get_device_id()
     for c in capabilities:
-        path = os.path.join(disk_utils.get_sysfs_path(dev_id), 'queue', c)
+        path = posixpath.join(disk_utils.get_sysfs_path(dev_id), 'queue', c)
         command = f"cat {path}"
         output = TestRun.executor.run(command)
         if output.exit_code == 0:
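A short aside on the os.path to posixpath swap (rationale inferred, not stated in the diff): os.path follows the operating system of the host running the test framework, while the sysfs paths being joined always refer to a Linux target, so posixpath keeps the separators correct regardless of the host. A minimal demonstration:

```python
import ntpath      # the os.path implementation used on Windows hosts
import posixpath   # the os.path implementation used on POSIX hosts

# Joining remote sysfs components with the host's os.path can corrupt the path
# if the host is not Linux; posixpath always produces forward slashes.
print(ntpath.join("/sys/block/sdb", "queue", "max_sectors_kb"))     # /sys/block/sdb\queue\max_sectors_kb
print(posixpath.join("/sys/block/sdb", "queue", "max_sectors_kb"))  # /sys/block/sdb/queue/max_sectors_kb
```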