Merge pull request #513 from Ostrokrzew/fixes

Minor refactorings and updates
Robert Baldyga 2020-11-26 13:23:01 +01:00 committed by GitHub
commit a9d33eb6ac
10 changed files with 211 additions and 38 deletions

View File

@@ -176,7 +176,7 @@ class Cache:
if alru_params.staleness_time is not None else None,
alru_params.flush_max_buffers
if alru_params.flush_max_buffers is not None else None,
- alru_params.activity_threshold.total_milliseconds()
+ int(alru_params.activity_threshold.total_milliseconds())
if alru_params.activity_threshold is not None else None)
def get_cache_config(self):

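The functional change in this hunk is the int() cast: Time.total_milliseconds() (relocated in the next file) is built on attotime's high-precision arithmetic, so the cast guarantees casadm receives a plain integer millisecond count. A minimal sketch of the pattern, assuming attotimedelta accepts timedelta-style keyword arguments; the value 500 is arbitrary:

from test_utils.time import Time

threshold = Time(milliseconds=500)
ms = (int(threshold.total_milliseconds())
      if threshold is not None else None)  # guarded forwarding, as in the hunk above
assert ms == 500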
View File

@@ -4,10 +4,10 @@
#
from aenum import Enum, IntFlag
- from attotime import attotimedelta
from test_utils.os_utils import get_kernel_module_parameter
from test_utils.size import Size, Unit
+ from test_utils.time import Time
class CacheLineSize(Enum):
@@ -130,11 +130,6 @@ class CacheStatus(Enum):
return self.value
- class Time(attotimedelta):
- def total_milliseconds(self):
- return int(self.total_seconds() * 1000)
class FlushParametersAlru:
def __init__(
self,

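The Time helper removed here now lives in test_utils.time (see the new import above), so tests no longer need datetime.timedelta for cleaning-policy parameters. A short usage sketch; the FlushParametersAlru import path is taken from a later hunk in this diff:

from test_utils.time import Time
from api.cas.core import FlushParametersAlru

# Same call pattern the tests below switch to (timedelta -> Time):
params = FlushParametersAlru(
    staleness_time=Time(seconds=10),
    wake_up_time=Time(seconds=1),
    activity_threshold=Time(milliseconds=500))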
View File

@@ -12,7 +12,6 @@ load_inactive_core_missing = [
r"WARNING: Can not resolve path to core \d+ from cache \d+\. By-id path will be shown for that "
r"core\.",
r"WARNING: Cache is in incomplete state - at least one core is inactive",
r"Successfully added cache instance \d+"
]
start_cache_with_existing_metadata = [

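Each entry in these lists is a regex matched against casadm's stderr, so the removed pattern is no longer required for the check to pass. The consuming call, as it appears verbatim later in this diff:

from api.cas import cli, cli_messages

output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)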
View File

@@ -92,5 +92,6 @@ def run_workload(target):
fio_job = fio_run.add_job()
fio_job.stonewall()
fio_job.block_size(block_size)
+ fio_run.verify_backlog(block_size)
fio_run.run()

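fio's verify_backlog=N makes fio verify data after every N blocks written instead of deferring all verification to the end of the job, which bounds the verify state held in memory. A sketch of the resulting job, assuming the test-framework wrapper maps directly onto the fio option (create_command() is assumed from that API; the other calls appear in the hunk above):

fio_run = Fio().create_command()
fio_job = fio_run.add_job()
fio_job.stonewall()                 # start this job only after previous jobs finish
fio_job.block_size(block_size)
fio_run.verify_backlog(block_size)  # verify periodically rather than only at job end
fio_run.run()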
View File

@@ -2,6 +2,7 @@
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas import casadm
@@ -36,24 +37,32 @@ def test_attach_core_pool():
core_disk.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
second_core_dev = core_disk.partitions[1]
with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, force=True)
with TestRun.step("Add core device."):
cache.add_core(core_dev)
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Add previously used core device to core pool using --try-add flag."):
first_core = casadm.try_add(core_dev, cache.cache_id)
with TestRun.step("Add different core device to core pool using --try-add flag."):
second_core = casadm.try_add(second_core_dev, cache.cache_id)
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check each core status."):
if first_core.get_status() is not CoreStatus.active:
TestRun.fail(f"First core status should be active but is {first_core.get_status()}.")
if second_core.get_status() is not CoreStatus.detached:
TestRun.fail(
f"Second core status should be detached but is {second_core.get_status()}.")
with TestRun.step("Stop cache and remove core from core pool."):
casadm.remove_all_detached_cores()
cache.stop()
@@ -77,12 +86,15 @@ def test_core_pool_exclusive_open():
core_disk.create_partitions([Size(1, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
core_dev.create_filesystem(Filesystem.ext4)
with TestRun.step("Add core device to core device pool using --try-add flag."):
core = casadm.try_add(core_dev, 1)
with TestRun.step("Check if core status of added core in core pool is detached."):
status = core.get_status()
if status is not CoreStatus.detached:
TestRun.fail(f"Core status should be detached but is {status}.")
with TestRun.step("Check if it is impossible to add core device from core pool to "
"running cache."):
TestRun.disks["cache"].create_partitions([Size(2, Unit.GibiByte)])
@@ -94,6 +106,7 @@ def test_core_pool_exclusive_open():
except CmdException:
TestRun.LOGGER.info("Adding core from core pool to cache is blocked as expected.")
cache.stop()
with TestRun.step("Check if it is impossible to start cache with casadm start command on the "
"core device from core pool."):
try:
@@ -103,6 +116,7 @@ def test_core_pool_exclusive_open():
"this is unexpected behaviour.")
except CmdException:
TestRun.LOGGER.info("Using core device from core pool as cache is blocked as expected.")
with TestRun.step("Check if it is impossible to make filesystem on the core device "
"from core pool."):
try:
@@ -112,11 +126,13 @@ def test_core_pool_exclusive_open():
except Exception:
TestRun.LOGGER.info("Creating filesystem on core device from core pool is "
"blocked as expected.")
with TestRun.step("Check if it is impossible to mount the core device from core pool."):
try:
core_dev.mount("/mnt")
TestRun.fail("Successfully mounted core pool device, this is unexpected behaviour.")
except Exception:
TestRun.LOGGER.info("Mounting core device form core pool is blocked as expected.")
with TestRun.step("Remove core from core pool."):
casadm.remove_all_detached_cores()

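Condensed, the core-pool lifecycle these tests exercise (all calls taken from this diff): try-add parks a device in the pool as detached, a cache load re-attaches the cores that belong to it, and the rest stay detached until removed.

first_core = casadm.try_add(core_dev, cache.cache_id)  # parked in pool, detached
assert first_core.get_status() is CoreStatus.detached
cache = casadm.load_cache(cache_dev)                   # re-attaches matching cores
assert first_core.get_status() is CoreStatus.active
casadm.remove_all_detached_cores()                     # empties the pool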
View File

@@ -4,13 +4,11 @@
#
import time
- from datetime import timedelta
- from random import shuffle
import pytest
from api.cas import casadm, cli, cli_messages
- from api.cas.cache_config import CacheStatus, SeqCutOffPolicy
+ from api.cas.cache_config import CacheStatus, SeqCutOffPolicy, CacheModeTrait
from api.cas.core import CoreStatus, CacheMode, CleaningPolicy, FlushParametersAlru, File
from api.cas.init_config import InitConfig
from api.cas.statistics import CacheStats
@@ -21,8 +19,10 @@ from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils import os_utils
+ from test_utils.os_utils import Udev
from test_utils.output import CmdException
from test_utils.size import Size, Unit
+ from test_utils.time import Time
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -45,32 +45,44 @@ def test_attach_core_to_incomplete_cache_volume():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)
with TestRun.step("Check if there is CAS device in /dev and core is in active status."):
core.check_if_is_present_in_os()
core_status = core.get_status()
if core_status != CoreStatus.active:
TestRun.fail(f"Core should be in active state. (Actual: {core_status})")
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check if there is no CAS device in /dev and core is in inactive status."):
core.check_if_is_present_in_os(False)
if core.get_status() != CoreStatus.inactive:
TestRun.fail("Core should be in inactive state.")
with TestRun.step("Plug core device."):
plug_device.plug()
time.sleep(1)
with TestRun.step("Check if core status changed to active and CAS device is visible in OS."):
core.wait_for_status_change(CoreStatus.active)
core.check_if_is_present_in_os()
@@ -97,32 +109,41 @@ def test_flush_inactive_devices():
first_core_dev = devices["core1"].partitions[0]
second_core_dev = devices["core2"].partitions[0]
plug_device = devices["core1"]
with TestRun.step("Start cache in WB mode and set alru cleaning policy."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cache.set_cleaning_policy(CleaningPolicy.alru)
cache.set_params_alru(FlushParametersAlru(
- staleness_time=timedelta(seconds=10),
- wake_up_time=timedelta(seconds=1),
- activity_threshold=timedelta(milliseconds=500)))
+ staleness_time=Time(seconds=10),
+ wake_up_time=Time(seconds=1),
+ activity_threshold=Time(milliseconds=500)))
with TestRun.step("Add two cores."):
first_core = cache.add_core(first_core_dev)
second_core = cache.add_core(second_core_dev)
with TestRun.step("Create init config file using running CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to CAS device."):
run_fio([first_core.system_path, second_core.system_path])
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug one core disk."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Wait longer than required for alru cleaning thread to start and verify "
"that dirty data is flushed only from active device."):
dirty_lines_before = {first_core: first_core.get_dirty_blocks(),
second_core: second_core.get_dirty_blocks()}
time.sleep(30)
check_amount_of_dirty_data(dirty_lines_before)
with TestRun.step("Try to call 'flush cache' command."):
dirty_lines_before = {first_core: first_core.get_dirty_blocks(),
second_core: second_core.get_dirty_blocks()}
@@ -133,6 +154,7 @@ def test_flush_inactive_devices():
except Exception as e:
TestRun.LOGGER.info(f"Flush cache operation is blocked as expected.\n{str(e)}")
check_amount_of_dirty_data(dirty_lines_before)
with TestRun.step("Try to call 'flush core' command for inactive core."):
dirty_lines_before = {first_core: first_core.get_dirty_blocks(),
second_core: second_core.get_dirty_blocks()}
@@ -143,13 +165,16 @@ def test_flush_inactive_devices():
except Exception as e:
TestRun.LOGGER.info(f"Flush core operation is blocked as expected.\n{str(e)}")
check_amount_of_dirty_data(dirty_lines_before)
with TestRun.step("Plug core disk and verify that this change is reflected on the cache list."):
plug_device.plug()
time.sleep(1)
first_core.wait_for_status_change(CoreStatus.active)
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
TestRun.fail(f"Cache did not change status to 'running' after plugging core device. "
f"Actual state: {cache_status}.")
with TestRun.step("Stop cache."):
cache.stop()
@@ -170,9 +195,11 @@ def test_list_cache_and_cache_volumes():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Check if list caches command shows proper output (cache should have status "
"Running and cache volume should be Active)."):
core_status = core.get_status()
@@ -181,14 +208,19 @@ def test_list_cache_and_cache_volumes():
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
TestRun.fail(f"Cache should be in running state. Actual state: {cache_status}")
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check if list cache command shows proper output (cache should have status "
"Incomplete and cache volume should be Inactive)."):
core_status = core.get_status()
@@ -197,8 +229,10 @@ def test_list_cache_and_cache_volumes():
cache_status = cache.get_status()
if cache_status != CacheStatus.incomplete:
TestRun.fail(f"Cache should be in incomplete state. Actual state: {cache_status}.")
with TestRun.step("Plug missing device and stop cache."):
plug_device.plug()
time.sleep(1)
core.wait_for_status_change(CoreStatus.active)
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
@@ -224,20 +258,27 @@ def test_load_cache_with_inactive_core():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)
with TestRun.step("Plug missing device and stop cache."):
plug_device.plug()
time.sleep(1)
core.wait_for_status_change(CoreStatus.active)
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
@@ -263,16 +304,20 @@ def test_preserve_data_for_inactive_device():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.set_cleaning_policy(CleaningPolicy.nop)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Create filesystem on CAS device and mount it."):
core.create_filesystem(Filesystem.ext3)
core.mount(mount_dir)
with TestRun.step("Create a test file with random writes on mount point and count it's md5."):
file_path = f"{mount_dir}/test_file"
test_file = File.create_file(file_path)
@@ -285,35 +330,43 @@ def test_preserve_data_for_inactive_device():
md5_after_create = test_file.md5sum()
cache_stats_before_stop = cache.get_statistics()
core_stats_before_stop = core.get_statistics()
with TestRun.step("Unmount CAS device."):
core.unmount()
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
cache_stats_after_load = cache.get_statistics()
core_stats_after_load = core.get_statistics()
- if cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean or\
- cache_stats_before_stop.usage_stats.dirty != \
- cache_stats_after_load.usage_stats.dirty or\
- core_stats_before_stop.usage_stats.clean != \
- core_stats_after_load.usage_stats.clean or\
- core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty:
+ if (
+ cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean
+ or cache_stats_before_stop.usage_stats.dirty != cache_stats_after_load.usage_stats.dirty
+ or core_stats_before_stop.usage_stats.clean != core_stats_after_load.usage_stats.clean
+ or core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty
+ ):
TestRun.fail(f"Statistics after counting md5 are different than after cache load.\n"
f"Cache stats before: {cache_stats_before_stop}\n"
f"Cache stats after: {cache_stats_after_load}\n"
f"Core stats before: {core_stats_before_stop}\n"
f"Core stats after: {core_stats_after_load}")
with TestRun.step("Plug core disk using sysfs and verify this change is reflected "
"on the cache list."):
plug_device.plug()
time.sleep(1)
if cache.get_status() != CacheStatus.running or core.get_status() != CoreStatus.active:
TestRun.fail(f"Expected cache status is running (actual - {cache.get_status()}).\n"
f"Expected core status is active (actual - {core.get_status()}).")
with TestRun.step("Mount CAS device"):
core.mount(mount_dir)
with TestRun.step("Count md5 checksum for test file and compare it with previous value."):
cache_read_hits_before_md5 = cache.get_statistics().request_stats.read.hits
md5_after_cache_load = test_file.md5sum()
@@ -323,6 +376,7 @@ def test_preserve_data_for_inactive_device():
else:
TestRun.LOGGER.info("Md5 checksum is identical before and after cache load operation "
"with inactive CAS device.")
with TestRun.step("Verify that cache read hits increased after counting md5 checksum."):
cache_read_hits_after_md5 = cache.get_statistics().request_stats.read.hits
if cache_read_hits_after_md5 - cache_read_hits_before_md5 <= 0:
@@ -331,6 +385,7 @@ def test_preserve_data_for_inactive_device():
f"After: {cache_read_hits_after_md5}.")
else:
TestRun.LOGGER.info("Cache read hits increased as expected.")
with TestRun.step("Unmount CAS device and stop cache."):
core.unmount()
cache.stop()
@@ -358,50 +413,79 @@ def test_print_statistics_inactive(cache_mode):
second_core_dev = devices["core2"].partitions[0]
first_plug_device = devices["core1"]
second_plug_device = devices["core2"]
+ Udev.disable()  # disabling udev for a while prevents creating clean data on cores
with TestRun.step("Start cache and add cores."):
cache = casadm.start_cache(cache_dev, cache_mode=cache_mode, force=True)
first_core = cache.add_core(first_core_dev)
second_core = cache.add_core(second_core_dev)
+ cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode())
with TestRun.step("Disable cleaning and sequential cutoff policies."):
cache.set_cleaning_policy(CleaningPolicy.nop)
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run IO."):
run_fio([first_core.system_path, second_core.system_path])
with TestRun.step("Print statistics and check if there is no inactive usage section."):
active_stats = cache.get_statistics()
check_if_inactive_section_exists(active_stats, False)
with TestRun.step("Stop cache."):
- cache.stop()
+ if CacheModeTrait.LazyWrites in cache_mode_traits:
+ cache.stop(no_data_flush=True)
+ else:
+ cache.stop()
with TestRun.step("Remove both core devices from OS."):
+ Udev.enable()  # enable udev back because it's necessary now
first_plug_device.unplug()
second_plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check if inactive devices section appeared and contains appropriate "
"information."):
inactive_stats_before = cache.get_statistics()
check_if_inactive_section_exists(inactive_stats_before)
check_number_of_inactive_devices(inactive_stats_before, 2)
with TestRun.step("Attach one of detached core devices and add it to cache."):
first_plug_device.plug()
time.sleep(1)
first_core_status = first_core.get_status()
if first_core_status != CoreStatus.active:
TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. "
f"Actual state: {first_core_status}.")
with TestRun.step("Check cache statistics section of inactive devices."):
inactive_stats_after = cache.get_statistics()
check_if_inactive_section_exists(inactive_stats_after)
check_number_of_inactive_devices(inactive_stats_after, 1)
+ # criteria for checks below
+ insert_write_traits = CacheModeTrait.InsertWrite in cache_mode_traits
+ lazy_write_traits = CacheModeTrait.LazyWrites in cache_mode_traits
+ lazy_writes_or_no_insert_write_traits = (not insert_write_traits
+ or lazy_write_traits)
check_inactive_usage_stats(inactive_stats_before.inactive_usage_stats.inactive_occupancy,
inactive_stats_after.inactive_usage_stats.inactive_occupancy,
"inactive occupancy", cache.get_cache_mode() == CacheMode.PT)
"inactive occupancy",
not insert_write_traits)
check_inactive_usage_stats(inactive_stats_before.inactive_usage_stats.inactive_clean,
inactive_stats_after.inactive_usage_stats.inactive_clean,
"inactive clean",
- cache.get_cache_mode() in [CacheMode.PT, CacheMode.WB])
+ lazy_writes_or_no_insert_write_traits)
check_inactive_usage_stats(inactive_stats_before.inactive_usage_stats.inactive_dirty,
inactive_stats_after.inactive_usage_stats.inactive_dirty,
"inactive dirty",
- cache.get_cache_mode() != CacheMode.WB)
+ not lazy_write_traits)
with TestRun.step("Check statistics per inactive core."):
inactive_core_stats = second_core.get_statistics()
if inactive_stats_after.inactive_usage_stats.inactive_occupancy == \
@@ -412,19 +496,23 @@ def test_print_statistics_inactive(cache_mode):
TestRun.fail(f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) "
f"should be the same as cache inactive occupancy "
f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy}).")
with TestRun.step("Remove inactive core from cache and check if cache is in running state."):
cache.remove_core(second_core.core_id, force=True)
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
TestRun.fail(f"Cache did not change status to 'running' after plugging core device. "
f"Actual status: {cache_status}.")
with TestRun.step("Check if there is no inactive devices statistics section and if cache has "
"Running status."):
cache_stats = cache.get_statistics()
check_if_inactive_section_exists(cache_stats, False)
check_number_of_inactive_devices(cache_stats, 0)
with TestRun.step("Plug missing disk and stop cache."):
second_plug_device.plug()
time.sleep(1)
cache.stop()
@@ -444,15 +532,19 @@ def test_remove_detached_cores():
cache_dev = devices["cache"].partitions[0]
core_devs = devices["core"].partitions
plug_device = devices["core"]
with TestRun.step("Start cache and add four cores."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cores = []
for d in core_devs:
cores.append(cache.add_core(d))
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
"dirty data."):
for core in cores:
@@ -462,20 +554,26 @@ def test_remove_detached_cores():
TestRun.fail("Failed to flush CAS device.")
elif core.get_dirty_blocks() == Size.zero():
TestRun.fail("There should be dirty data on CAS device.")
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core device from system and plug it back."):
plug_device.unplug()
time.sleep(2)
plug_device.plug()
time.sleep(1)
with TestRun.step("Verify that all cores from plugged core device are listed with "
"proper status."):
for core in cores:
if core.get_status() != CoreStatus.detached:
TestRun.fail(f"Each core should be in detached state. "
f"Actual states: {casadm.list_caches().stdout}")
with TestRun.step("Remove CAS devices from core pool."):
casadm.remove_all_detached_cores()
with TestRun.step("Verify that cores are no longer listed."):
output = casadm.list_caches().stdout
for dev in core_devs:
@@ -503,15 +601,19 @@ def test_remove_inactive_devices():
cache_dev = devices["cache"].partitions[0]
core_devs = devices["core"].partitions
plug_device = devices["core"]
with TestRun.step("Start cache and add four cores."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cores = []
for d in core_devs:
cores.append(cache.add_core(d))
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
"contain dirty data."):
for core in cores:
@@ -521,18 +623,23 @@ def test_remove_inactive_devices():
TestRun.fail("Failed to flush CAS device.")
elif core.get_dirty_blocks() == Size.zero():
TestRun.fail("There should be dirty data on CAS device.")
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core disk."):
plug_device.unplug()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)
with TestRun.step("Verify that all previously created CAS devices are listed with "
"proper status."):
for core in cores:
if core.get_status() != CoreStatus.inactive:
TestRun.fail(f"Each core should be in inactive state. "
f"Actual states:\n{casadm.list_caches().stdout}")
with TestRun.step("Try removing CAS device without force option. Verify that for "
"dirty CAS devices operation is blocked, proper message is displayed "
"and device is still listed."):
@@ -546,9 +653,6 @@ def test_remove_inactive_devices():
"command executed without any error.")
TestRun.LOGGER.info("Removing core with force option skipped for clean CAS device.")
except CmdException as e:
- if dirty_blocks == Size.zero():
- TestRun.fail("Removing clean CAS device should be possible but remove "
- "command returned an error.")
TestRun.LOGGER.info("Remove operation without force option is blocked for "
"dirty CAS device as expected.")
cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_core)
@@ -557,8 +661,10 @@ def test_remove_inactive_devices():
TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
f"\n{output}")
core.remove_core(force=True)
with TestRun.step("Plug missing disk and stop cache."):
plug_device.plug()
time.sleep(1)
casadm.stop_all_caches()
@@ -580,46 +686,63 @@ def test_stop_cache_with_inactive_devices():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes and verify that CAS device contains dirty data."):
run_fio([core.system_path])
if core.get_dirty_blocks() == Size.zero():
TestRun.fail("There is no dirty data on core device.")
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core disk."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Verify that previously created CAS device is listed with proper status."):
core_status = core.get_status()
if core_status != CoreStatus.inactive:
TestRun.fail(f"CAS device should be in inactive state. Actual status: {core_status}.")
with TestRun.step("Try stopping cache without no data flush option, verify that operation "
"was blocked and proper message is displayed."):
try_stop_incomplete_cache(cache)
with TestRun.step("Stop cache with force option."):
cache.stop(no_data_flush=True)
with TestRun.step("Plug missing core device."):
plug_device.plug()
time.sleep(1)
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Stop cache with flushing dirty data."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache and verify core status is inactive."):
cache = casadm.load_cache(cache_dev)
core_status = core.get_status()
if core_status != CoreStatus.inactive:
TestRun.fail(f"CAS device should be in inactive state. Actual state: {core_status}.")
with TestRun.step("Try stopping cache without no data flush option, verify that "
"operation was blocked and proper message is displayed."):
try_stop_incomplete_cache(cache)
with TestRun.step("Stop cache with 'no data flush' option and plug missing core device."):
cache.stop(no_data_flush=True)
plug_device.plug()
@@ -640,8 +763,8 @@ def check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_z
elif not should_be_zero and stats_after < stats_before:
TestRun.LOGGER.info(f"{stat_name} is lower than before as expected.")
else:
TestRun.fail(f"{stat_name} ({stats_after}) is not lower than before "
f"({stats_before}).")
TestRun.LOGGER.error(f"{stat_name} ({stats_after}) is not lower than before "
f"({stats_before}).")
def check_number_of_inactive_devices(stats: CacheStats, expected_num):

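The trait-based rewrite in this file replaces hard-coded cache-mode lists with CacheModeTrait checks, so the expectations follow from mode semantics rather than an enumeration of modes. Restated as a standalone sketch (logic copied from the hunks above; cache is the test's cache object):

traits = CacheMode.get_traits(cache.get_cache_mode())
insert_write = CacheModeTrait.InsertWrite in traits
lazy_writes = CacheModeTrait.LazyWrites in traits

occupancy_should_be_zero = not insert_write             # nothing was ever inserted
clean_should_be_zero = not insert_write or lazy_writes  # no inserts, or all lines held dirty
dirty_should_be_zero = not lazy_writes                  # only lazy-write modes keep dirty data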
View File

@@ -2,8 +2,8 @@
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
import pytest
from api.cas import casadm, casadm_parser
@@ -31,6 +31,7 @@ def test_udev_core_partition():
- Created partitions are added to core pool after attaching core drive.
"""
cores_count = 4
with TestRun.step("Create four partitions on core device and one on cache device."):
cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
@@ -38,18 +39,25 @@ def test_udev_core_partition():
core_disk = TestRun.disks["core"]
core_disk.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
core_devices = core_disk.partitions
with TestRun.step("Start cache and add created partitions as cores."):
cache = casadm.start_cache(cache_dev, force=True)
for dev in core_devices:
cache.add_core(dev)
with TestRun.step("Create init config from running CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Detach core disk."):
core_disk.unplug()
with TestRun.step("Plug missing core disk."):
core_disk.plug()
time.sleep(1)
with TestRun.step("List cache devices and check that created partitions are present "
"in core pool."):
for dev in core_devices:
@@ -80,20 +88,29 @@ def test_udev_core():
core_dev = core_disk.partitions[0]
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config from running CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core disk."):
core_disk.unplug()
with TestRun.step("Plug core disk."):
core_disk.plug()
time.sleep(1)
with TestRun.step("Check if core device is listed in core pool."):
check_if_dev_in_core_pool(core_dev)
with TestRun.step("Unplug cache disk."):
cache_disk.unplug()
with TestRun.step("Plug cache disk."):
cache_disk.plug()
with TestRun.step("Check if core device is active and not in the core pool."):
check_if_dev_in_core_pool(core_dev, False)
if core.get_status() != CoreStatus.active:
@@ -164,14 +181,20 @@ def test_udev_cache_load(cache_mode):
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
cache_dev = cache_disk.partitions[0]
cache = casadm.start_cache(cache_dev, cache_mode=cache_mode)
with TestRun.step("Create init config from running configuration"):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug cache disk."):
cache_disk.unplug()
with TestRun.step("Plug cache disk."):
cache_disk.plug()
time.sleep(1)
with TestRun.step("List caches and check if cache is loaded."):
caches = casadm_parser.get_caches()
if len(caches) < 1:
@@ -208,6 +231,7 @@ def test_neg_udev_cache_load():
"""
caches_count = 2
cores_count = 4
with TestRun.step("Create init config file with two caches and two cores per each cache."):
cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)] * caches_count)
@@ -221,15 +245,20 @@ def test_neg_udev_cache_load():
init_conf.add_core(1 if j in first_cache_core_numbers else 2,
j + 1, core_disk.partitions[j])
init_conf.save_config_file()
with TestRun.step("Start one cache and add two cores as defined in init config."):
cache = casadm.start_cache(cache_disk.partitions[0])
for i in first_cache_core_numbers:
cache.add_core(core_disk.partitions[i])
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug and plug cache disk."):
cache_disk.unplug()
cache_disk.plug()
time.sleep(1)
with TestRun.step("Check if CAS is loaded correctly."):
cas_devices = casadm_parser.get_cas_devices_dict()
if len(cas_devices["core_pool"]) != 0:
@@ -244,6 +273,7 @@ def test_neg_udev_cache_load():
if len(cas_devices["cores"]) != 2:
TestRun.LOGGER.error(f"There is wrong number of cores. Expected: 2, actual: "
f"{len(cas_devices['caches'])}")
correct_core_devices = []
for i in first_cache_core_numbers:
correct_core_devices.append(core_disk.partitions[i].system_path)
@@ -252,9 +282,12 @@ def test_neg_udev_cache_load():
CoreStatus[core["status"].lower()] != CoreStatus.active or \
core["cache_id"] != 1:
TestRun.LOGGER.error(f"Core did not load correctly: {core}.")
with TestRun.step("Unplug and plug core disk."):
core_disk.unplug()
core_disk.plug()
time.sleep(1)
with TestRun.step("Check if two cores assigned to not loaded cache are inserted to core pool."):
cas_devices = casadm_parser.get_cas_devices_dict()
if len(cas_devices["core_pool"]) != 2:

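For reference, the dictionary shape these assertions assume from casadm_parser.get_cas_devices_dict(); keys are inferred from this diff, not from documented API, so treat this as an assumption:

cas_devices = casadm_parser.get_cas_devices_dict()
assert len(cas_devices["core_pool"]) == 0       # nothing orphaned after load
assert len(cas_devices["caches"]) == 1
for core in cas_devices["cores"]:               # assumed: iterable of per-core dicts
    assert CoreStatus[core["status"].lower()] is CoreStatus.active
    assert core["cache_id"] == 1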
View File

@@ -5,6 +5,7 @@
import os
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode
from core.test_run import TestRun
@@ -12,6 +13,7 @@ from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
+ from test_utils.os_utils import drop_caches, DropCachesMode, sync
from test_utils.size import Size, Unit
@@ -36,8 +38,9 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
cache_disk = TestRun.disks['cache']
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
cache_dev = cache_disk.partitions[0]
+ cache_dev_link = cache_dev.get_device_link("/dev/disk/by-id")
core_dev = TestRun.disks['core']
- cache = casadm.start_cache(cache_dev, cache_mode)
+ cache = casadm.start_cache(cache_dev, cache_mode, force=True)
core = cache.add_core(core_dev)
core.create_filesystem(filesystem, blocksize=int(Size(1, Unit.Blocks4096)))
core.mount(mount_point)
@@ -52,6 +55,8 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
.run()
test_file.refresh_item()
test_file_md5 = test_file.md5sum()
+ sync()
+ drop_caches(DropCachesMode.ALL)
with TestRun.step("Reset platform."):
if reboot_type == "soft":
@@ -59,9 +64,10 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
else:
power_control = TestRun.plugin_manager.get_plugin('power_control')
power_control.power_cycle()
+ cache_dev.system_path = cache_dev_link.get_target()
with TestRun.step("Load cache."):
- cache = casadm.load_cache(cache_dev)
+ casadm.load_cache(cache_dev)
core.mount(mount_point)
with TestRun.step("Check file md5sum."):

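The link-based re-resolution added here is the point of the change: /dev/sdX names may be assigned differently after a power cycle, while /dev/disk/by-id symlinks are stable, so the test records the link before the reset and re-reads its target afterwards. The pattern, using only calls from this diff:

cache_dev_link = cache_dev.get_device_link("/dev/disk/by-id")  # before reset
# ... platform reset ...
cache_dev.system_path = cache_dev_link.get_target()            # after reset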
View File

@@ -88,8 +88,8 @@ def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy, file
core.unmount()
TestRun.LOGGER.info(f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}")
power_cycle_dut()
- cache_device.full_path = cache_device_link.get_target()
- core_device.full_path = core_device_link.get_target()
+ cache_device.system_path = cache_device_link.get_target()
+ core_device.system_path = core_device_link.get_target()
with TestRun.step("Try to start cache without load and force option."):
try:

View File

@@ -70,8 +70,8 @@ def test_recovery_flush_reset_raw(cache_mode):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(wait_for_flush_begin=True, core_device=core_device)
- cache_device.full_path = cache_device_link.get_target()
- core_device.full_path = core_device_link.get_target()
+ cache_device.system_path = cache_device_link.get_target()
+ core_device.system_path = core_device_link.get_target()
with TestRun.step("Copy file from core and check if current md5sum is different than "
"before restart."):
@@ -155,8 +155,8 @@ def test_recovery_flush_reset_fs(cache_mode, fs):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(True, core_device)
- cache_device.full_path = cache_device_link.get_target()
- core_device.full_path = core_device_link.get_target()
+ cache_device.system_path = cache_device_link.get_target()
+ core_device.system_path = core_device_link.get_target()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_device)