diff --git a/test/functional/api/cas/cache.py b/test/functional/api/cas/cache.py
index bc224ae..eb0ab27 100644
--- a/test/functional/api/cas/cache.py
+++ b/test/functional/api/cas/cache.py
@@ -14,8 +14,8 @@ from api.cas.statistics import CacheStats, IoClassStats
 
 
 class Cache:
-    def __init__(self, device_system_path):
-        self.cache_device = Device(device_system_path)
+    def __init__(self, device: Device):
+        self.cache_device = device
         self.cache_id = int(self.__get_cache_id())
         self.__cache_line_size = None
         self.__metadata_mode = None
@@ -36,7 +36,7 @@ class Cache:
         if self.__cache_line_size is None:
             stats = self.get_statistics()
             stats_line_size = stats.config_stats.cache_line_size
-            self.__cache_line_size = CacheLineSize(stats_line_size.get_value(Unit.Byte))
+            self.__cache_line_size = CacheLineSize(stats_line_size)
         return self.__cache_line_size
 
     def get_cleaning_policy(self):
@@ -69,6 +69,10 @@ class Cache:
         status = self.get_statistics().config_stats.status.replace(' ', '_').lower()
         return CacheStatus[status]
 
+    @property
+    def size(self):
+        return self.get_statistics().config_stats.cache_size
+
     def get_cache_mode(self):
         return CacheMode[self.get_statistics().config_stats.write_policy.upper()]
 
diff --git a/test/functional/api/cas/cache_config.py b/test/functional/api/cas/cache_config.py
index a7aed8f..5aa3965 100644
--- a/test/functional/api/cas/cache_config.py
+++ b/test/functional/api/cas/cache_config.py
@@ -9,7 +9,7 @@ from test_utils.size import Size, Unit
 from datetime import timedelta
 
 
-class CacheLineSize(IntEnum):
+class CacheLineSize(Enum):
     LINE_4KiB = Size(4, Unit.KibiByte)
     LINE_8KiB = Size(8, Unit.KibiByte)
     LINE_16KiB = Size(16, Unit.KibiByte)
@@ -17,6 +17,9 @@
     LINE_64KiB = Size(64, Unit.KibiByte)
     DEFAULT = LINE_4KiB
 
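+    # Helper so int(cache_line_size) yields the line size in bytes.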
+    def __int__(self):
+        return int(self.value.get_value())
+
 
 class CacheMode(Enum):
     WT = "Write-Through"
diff --git a/test/functional/api/cas/casadm.py b/test/functional/api/cas/casadm.py
index ae943b5..b7d1bae 100644
--- a/test/functional/api/cas/casadm.py
+++ b/test/functional/api/cas/casadm.py
@@ -27,7 +27,7 @@ def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
                 cache_line_size: CacheLineSize = None, cache_id: int = None,
                 force: bool = False, load: bool = False, shortcut: bool = False):
     _cache_line_size = None if cache_line_size is None else str(
-        CacheLineSize.get_value(Unit.KibiByte))
+        int(cache_line_size.value.get_value(Unit.KibiByte)))
     _cache_id = None if cache_id is None else str(cache_id)
     _cache_mode = None if cache_mode is None else cache_mode.name.lower()
     output = TestRun.executor.run(start_cmd(
@@ -35,7 +35,8 @@ def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
         cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to start cache.", output)
-    return Cache(cache_dev.system_path)
+
+    return Cache(cache_dev)
 
 
 def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
@@ -97,7 +98,8 @@ def load_cache(device: Device, shortcut: bool = False):
         load_cmd(cache_dev=device.system_path, shortcut=shortcut))
     if output.exit_code != 0:
         raise CmdException("Failed to load cache.", output)
-    return Cache(device.system_path)
+
+    return Cache(device)
 
 
 def list_caches(output_format: OutputFormat = None, shortcut: bool = False):
diff --git a/test/functional/api/cas/casadm_parser.py b/test/functional/api/cas/casadm_parser.py
index 65c0cd2..c608e95 100644
--- a/test/functional/api/cas/casadm_parser.py
+++ b/test/functional/api/cas/casadm_parser.py
@@ -6,6 +6,11 @@ import csv
 import json
 import re
 
+from api.cas import casadm
+from test_utils.size import parse_unit
+from storage_devices.device import Device
+from api.cas.cache_config import *
+from api.cas.casadm_params import *
 from datetime import timedelta
 from typing import List
 
@@ -151,7 +156,7 @@ def get_caches():  # This method does not return inactive or detached CAS device
     for line in lines:
         args = line.split(',')
         if args[0] == "cache":
-            current_cache = Cache(args[2])
+            current_cache = Cache(Device(args[2]))
             caches_list.append(current_cache)
     return caches_list
diff --git a/test/functional/tests/cli/test_cli_start_stop.py b/test/functional/tests/cli/test_cli_start_stop.py
index 626a3be..f5aef69 100644
--- a/test/functional/tests/cli/test_cli_start_stop.py
+++ b/test/functional/tests/cli/test_cli_start_stop.py
@@ -50,25 +50,33 @@ def test_cli_start_stop_default_value(shortcut):
 @pytest.mark.parametrize("shortcut", [True, False])
 def test_cli_add_remove_default_value(shortcut):
     cache_device = TestRun.disks['cache']
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
+    cache_device.create_partitions([Size(50, Unit.MebiByte)])
     cache_device = cache_device.partitions[0]
     cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
     core_device = TestRun.disks['core']
+
     casadm.add_core(cache, core_device, shortcut=shortcut)
     caches = casadm_parser.get_caches()
-    assert len(caches[0].get_core_devices()) == 1
-    assert caches[0].get_core_devices()[0].core_device.system_path == core_device.system_path
+    if len(caches[0].get_core_devices()) != 1:
+        TestRun.fail("One core should be present in cache")
+    if caches[0].get_core_devices()[0].core_device.system_path != core_device.system_path:
+        TestRun.fail("Core path should be equal to the path of the added core")
+
     casadm.remove_core(cache.cache_id, 1, shortcut=shortcut)
     caches = casadm_parser.get_caches()
-    assert len(caches) == 1
-    assert len(caches[0].get_core_devices()) == 0
+    if len(caches) != 1:
+        TestRun.fail("One cache should still be present after removing the core")
+    if len(caches[0].get_core_devices()) != 0:
+        TestRun.fail("No core devices should be present after removing the core")
     casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)
     output = casadm.list_caches(shortcut=shortcut)
     caches = casadm_parser.get_caches()
-    assert len(caches) == 0
-    assert output.stdout == "No caches running"
+    if len(caches) != 0:
+        TestRun.fail("No cache should be present after stopping the cache")
+    if output.stdout != "No caches running":
+        TestRun.fail(f"Invalid message, expected 'No caches running', got {output.stdout}")
diff --git a/test/functional/tests/conftest.py b/test/functional/tests/conftest.py
index 7e6f29e..66e8025 100644
--- a/test/functional/tests/conftest.py
+++ b/test/functional/tests/conftest.py
@@ -19,6 +19,7 @@ from api.cas import casadm
 from api.cas import git
 from test_utils.os_utils import Udev, kill_all_io
 from test_tools.disk_utils import PartitionTable, create_partition_table
+from test_tools.device_mapper import DeviceMapper
 from log.logger import create_log, Log
 from test_utils.singleton import Singleton
 
@@ -122,6 +123,7 @@ def pytest_runtest_teardown():
                 casadm.stop_all_caches()
                 from api.cas import init_config
                 init_config.create_default_init_config()
+                DeviceMapper.remove_all()
         except Exception as ex:
             TestRun.LOGGER.warning(f"Exception occured during platform cleanup.\n"
                                    f"{str(ex)}\n{traceback.format_exc()}")
diff --git a/test/functional/tests/fault/test_cache_insert_error.py b/test/functional/tests/fault/test_cache_insert_error.py
new file mode 100644
index 0000000..36f06dd
--- /dev/null
+++ b/test/functional/tests/fault/test_cache_insert_error.py
@@ -0,0 +1,187 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+
+from test_tools.fio.fio import Fio
+from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMethod
+from test_utils.filesystem.file import File
+from api.cas import casadm
+from api.cas.cache_config import (
+    CacheMode,
+    CacheLineSize,
+    SeqCutOffPolicy,
+    CleaningPolicy,
+    CacheStatus,
+)
+from test_tools.device_mapper import ErrorDevice, DmTable
+from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
+from core.test_run import TestRun
+from test_utils.size import Size, Unit
+from storage_devices.device import Device
+from test_utils.os_utils import Udev
+
+
+@pytest.mark.parametrize("cache_line_size", CacheLineSize)
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+def test_cache_insert_error(cache_mode, cache_line_size):
+    """
+    title: Cache insert test with error device
+    description: |
+        Validate CAS ability to handle write errors while it tries to insert
+        cache lines. For lazy write cache modes (WO, WB) issue only reads.
+    pass_criteria:
+      - No I/O errors returned to the user
+      - Cache write error statistics are counted properly
+      - No cache line gets inserted into cache
+    """
+    with TestRun.step("Prepare core and cache"):
+        cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)
+
+    fio_cmd = (
+        Fio()
+        .create_command()
+        .io_engine(IoEngine.libaio)
+        .size(core.size)
+        .block_size(cache_line_size)
+        .target(core)
+        .direct()
+    )
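+    # For lazy write modes (WB, WO) issue only reads: a failed write insert in these modes
+    # would be returned to the user as an I/O error (that case is covered by the test below).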
+    if cache_mode in [CacheMode.WB, CacheMode.WO]:
+        fio_cmd = fio_cmd.read_write(ReadWrite.randread)
+    else:
+        fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)
+
+    with TestRun.step("Run fio and verify no errors present"):
+        fio_errors = fio_cmd.run()[0].total_errors()
+        if fio_errors != 0:
+            TestRun.fail(f"Some I/O ended with errors {fio_errors}")
+
+    with TestRun.step("Check error statistics on cache"):
+        stats = cache.get_statistics()
+
+        occupancy = cache.get_occupancy().get_value()
+        if occupancy != 0:
+            TestRun.fail(f"Occupancy is not zero, but {occupancy}")
+
+        cache_writes = (stats.block_stats.cache.writes / cache_line_size.value).get_value()
+        cache_errors = stats.error_stats.cache.total
+        if cache_writes != cache_errors:
+            TestRun.fail(
+                f"Cache errors ({cache_errors}) should be equal to the number of"
+                f" requests to cache ({cache_writes})"
+            )
+
+    if cache_mode not in [CacheMode.WB, CacheMode.WO]:
+        with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
+            cache.stop()
+
+            fio_cmd.target(core_device).verify_only().run()
+
+
+@pytest.mark.parametrize("cache_line_size", CacheLineSize)
+@pytest.mark.parametrize("cache_mode", [CacheMode.WB, CacheMode.WO])
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
+    """
+    title: Cache insert test with error device for writes on lazy writes cache mode
+    description: |
+        Validate CAS ability to handle write errors while it tries to insert
+        cache lines. This test is exclusively for lazy write cache modes.
+    pass_criteria:
+      - I/O errors returned to user
+      - Cache automatically stops after encountering errors
+      - No cache line gets inserted into cache
+    """
+    with TestRun.step("Prepare core and cache"):
+        cache, core, _ = prepare_configuration(cache_mode, cache_line_size)
+
+    with TestRun.step("Run fio and verify errors are present"):
+        fio_errors = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .size(core.size)
+            .block_size(cache_line_size)
+            .read_write(ReadWrite.randwrite)
+            .target(core)
+            .continue_on_error(ErrorFilter.io)
+            .direct()
+            .run()[0]
+            .total_errors()
+        )
+        if fio_errors == 0:
+            TestRun.fail("No I/O ended with an error")
+
+    with TestRun.step("Check error statistics and state on cache"):
+        stats = cache.get_statistics()
+
+        occupancy = cache.get_occupancy().get_value()
+        if occupancy != 0:
+            TestRun.fail(f"Occupancy is not zero, but {occupancy}")
+
+        cache_writes = (stats.block_stats.cache.writes / cache_line_size.value).get_value()
+        cache_errors = stats.error_stats.cache.total
+
+        if cache_writes != 1:
+            TestRun.fail("There should be only one cache write attempt before the cache stops")
+        if cache_writes != cache_errors:
+            TestRun.fail(
+                f"Cache errors ({cache_errors}) should be equal to the number of requests to"
+                f" cache ({cache_writes})"
+            )
+
+        state = cache.get_status()
+        if state != CacheStatus.not_running:
+            TestRun.fail(f"Cache should be in 'Not running' state, but it is {state}")
+
+
+def prepare_configuration(cache_mode, cache_line_size):
+    cache_device = TestRun.disks["cache"]
+    core_device = TestRun.disks["core"]
+
+    with TestRun.step("Creating cache partition"):
+        cache_device.create_partitions([Size(25, Unit.MebiByte)])
+
+    with TestRun.step("Creating cache error device"):
+        error_device = ErrorDevice("error", cache_device.partitions[0])
+
+    with TestRun.step("Starting cache to check metadata offset"):
+        cache = casadm.start_cache(error_device, cache_line_size=cache_line_size, force=True)
+        cache_size = cache.size
+        cache.stop()
+
+    with TestRun.step("Setting errors on non-metadata area"):
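+        # Metadata occupies the beginning of the cache device, so errors are mapped only
+        # onto the trailing area equal to the usable cache size obtained above.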
+        error_device.change_table(
+            DmTable.error_table(
+                offset=(cache_device.partitions[0].size - cache_size).get_value(Unit.Blocks512),
+                size=cache_size,
+            ).fill_gaps(cache_device.partitions[0])
+        )
+
+    with TestRun.step("Create core partition with size of usable cache space"):
+        core_device.create_partitions([cache_size])
+
+    with TestRun.step("Starting and configuring cache"):
+        cache = casadm.start_cache(
+            error_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
+        )
+        result = cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
+        if result.exit_code:
+            TestRun.LOGGER.exception("Couldn't set seq cutoff policy")
+        result = cache.set_cleaning_policy(CleaningPolicy.nop)
+        if result.exit_code:
+            TestRun.LOGGER.exception("Couldn't set cleaning policy")
+
+    with TestRun.step("Stopping udev"):
+        Udev.disable()
+
+    with TestRun.step("Adding core device"):
+        core = cache.add_core(core_dev=core_device.partitions[0])
+
+    return cache, core, core_device.partitions[0]