From deb34f62d5222d8e35e95684cdbed5e3153ae88d Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Mon, 31 Aug 2020 10:24:14 +0200 Subject: [PATCH 01/10] Use existing wrapper Signed-off-by: Slawomir Jankowski --- test/functional/api/cas/cache_config.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/test/functional/api/cas/cache_config.py b/test/functional/api/cas/cache_config.py index de97969..5936914 100644 --- a/test/functional/api/cas/cache_config.py +++ b/test/functional/api/cas/cache_config.py @@ -4,10 +4,10 @@ # from aenum import Enum, IntFlag -from attotime import attotimedelta from test_utils.os_utils import get_kernel_module_parameter from test_utils.size import Size, Unit +from test_utils.time import Time class CacheLineSize(Enum): @@ -130,11 +130,6 @@ class CacheStatus(Enum): return self.value -class Time(attotimedelta): - def total_milliseconds(self): - return int(self.total_seconds() * 1000) - - class FlushParametersAlru: def __init__( self,
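For reference, the existing wrapper this patch switches to presumably lives in test_utils/time.py and mirrors the class removed above. A minimal sketch, assuming it still derives from attotime's attotimedelta exactly as the deleted in-file copy did:

from attotime import attotimedelta

class Time(attotimedelta):
    def total_milliseconds(self):
        # attotimedelta keeps sub-microsecond precision; truncate to whole
        # milliseconds, matching the copy removed from cache_config.py
        return int(self.total_seconds() * 1000)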
From 1ce946239d994d48b70395888e0138565d103158 Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Mon, 31 Aug 2020 10:24:37 +0200 Subject: [PATCH 02/10] Remove line which isn't in stderr output. Signed-off-by: Slawomir Jankowski --- test/functional/api/cas/cli_messages.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/functional/api/cas/cli_messages.py b/test/functional/api/cas/cli_messages.py index 55fac85..170b6f6 100644 --- a/test/functional/api/cas/cli_messages.py +++ b/test/functional/api/cas/cli_messages.py @@ -12,7 +12,6 @@ load_inactive_core_missing = [ r"WARNING: Can not resolve path to core \d+ from cache \d+\. By-id path will be shown for that " r"core\.", r"WARNING: Cache is in incomplete state - at least one core is inactive", - r"Successfully added cache instance \d+" ] start_cache_with_existing_metadata = [ From 607ad5d16c49a4790a2caa79aa6aebab930a4222 Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Mon, 31 Aug 2020 10:34:32 +0200 Subject: [PATCH 03/10] Use `Time` wrapper instead of `timedelta` because `set_params_alru` uses the `total_milliseconds()` method Signed-off-by: Slawomir Jankowski --- .../tests/incremental_load/test_incremental_load.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/functional/tests/incremental_load/test_incremental_load.py b/test/functional/tests/incremental_load/test_incremental_load.py index 114adcc..14d547a 100644 --- a/test/functional/tests/incremental_load/test_incremental_load.py +++ b/test/functional/tests/incremental_load/test_incremental_load.py @@ -4,9 +4,7 @@ # import time -from datetime import timedelta from random import shuffle - import pytest from api.cas import casadm, cli, cli_messages @@ -23,6 +21,7 @@ from test_tools.fio.fio_param import IoEngine, ReadWrite from test_utils import os_utils from test_utils.output import CmdException from test_utils.size import Size, Unit +from test_utils.time import Time @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @@ -101,9 +100,10 @@ def test_flush_inactive_devices(): cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True) cache.set_cleaning_policy(CleaningPolicy.alru) cache.set_params_alru(FlushParametersAlru( - staleness_time=timedelta(seconds=10), - wake_up_time=timedelta(seconds=1), - activity_threshold=timedelta(milliseconds=500))) + staleness_time=Time(seconds=10), + wake_up_time=Time(seconds=1), + activity_threshold=Time(milliseconds=500))) + with TestRun.step("Add two cores."): first_core = cache.add_core(first_core_dev) second_core = cache.add_core(second_core_dev)
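Why the wrapper matters here: set_params_alru converts the activity threshold to milliseconds, and datetime.timedelta exposes only total_seconds(). A short usage sketch (values are illustrative; the explicit cast anticipates patch 04 below, which suggests the shared wrapper may return a non-integer type):

from test_utils.time import Time

threshold = Time(milliseconds=500)  # accepts the same keyword arguments as timedelta
milliseconds = int(threshold.total_milliseconds())
assert milliseconds == 500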
Actual state: {cache_status}.") with TestRun.step("Plug missing device and stop cache."): plug_device.plug() + time.sleep(1) core.wait_for_status_change(CoreStatus.active) cache_status = cache.get_status() if cache_status != CacheStatus.running: @@ -238,6 +241,7 @@ def test_load_cache_with_inactive_core(): cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing) with TestRun.step("Plug missing device and stop cache."): plug_device.plug() + time.sleep(1) core.wait_for_status_change(CoreStatus.active) cache_status = cache.get_status() if cache_status != CacheStatus.running: @@ -309,6 +313,7 @@ def test_preserve_data_for_inactive_device(): with TestRun.step("Plug core disk using sysfs and verify this change is reflected " "on the cache list."): plug_device.plug() + time.sleep(1) if cache.get_status() != CacheStatus.running or core.get_status() != CoreStatus.active: TestRun.fail(f"Expected cache status is running (actual - {cache.get_status()}).\n" f"Expected core status is active (actual - {core.get_status()}).") @@ -383,6 +388,7 @@ def test_print_statistics_inactive(cache_mode): check_number_of_inactive_devices(inactive_stats_before, 2) with TestRun.step("Attach one of detached core devices and add it to cache."): first_plug_device.plug() + time.sleep(1) first_core_status = first_core.get_status() if first_core_status != CoreStatus.active: TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. " @@ -425,6 +431,7 @@ def test_print_statistics_inactive(cache_mode): check_number_of_inactive_devices(cache_stats, 0) with TestRun.step("Plug missing disk and stop cache."): second_plug_device.plug() + time.sleep(1) cache.stop() @@ -468,6 +475,7 @@ def test_remove_detached_cores(): plug_device.unplug() time.sleep(2) plug_device.plug() + time.sleep(1) with TestRun.step("Verify that all cores from plugged core device are listed with " "proper status."): for core in cores: @@ -559,6 +567,7 @@ def test_remove_inactive_devices(): core.remove_core(force=True) with TestRun.step("Plug missing disk and stop cache."): plug_device.plug() + time.sleep(1) casadm.stop_all_caches() @@ -606,6 +615,7 @@ def test_stop_cache_with_inactive_devices(): cache.stop(no_data_flush=True) with TestRun.step("Plug missing core device."): plug_device.plug() + time.sleep(1) with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) with TestRun.step("Stop cache with flushing dirty data."): diff --git a/test/functional/tests/incremental_load/test_udev.py b/test/functional/tests/incremental_load/test_udev.py index 45082d5..3332865 100644 --- a/test/functional/tests/incremental_load/test_udev.py +++ b/test/functional/tests/incremental_load/test_udev.py @@ -49,6 +49,7 @@ def test_udev_core_partition(): core_disk.unplug() with TestRun.step("Plug missing core disk."): core_disk.plug() + time.sleep(1) with TestRun.step("List cache devices and check that created partitions are present " "in core pool."): for dev in core_devices: @@ -87,6 +88,7 @@ def test_udev_core(): core_disk.unplug() with TestRun.step("Plug core disk."): core_disk.plug() + time.sleep(1) with TestRun.step("Check if core device is listed in core pool."): check_if_dev_in_core_pool(core_dev) with TestRun.step("Unplug cache disk."): @@ -126,6 +128,7 @@ def test_udev_cache_load(cache_mode): cache_disk.unplug() with TestRun.step("Plug cache disk."): cache_disk.plug() + time.sleep(1) with TestRun.step("List caches and check if cache is loaded."): caches = casadm_parser.get_caches() if len(caches) < 1: @@ -184,6 
+187,7 @@ def test_neg_udev_cache_load(): with TestRun.step("Unplug and plug cache disk."): cache_disk.unplug() cache_disk.plug() + time.sleep(1) with TestRun.step("Check if CAS is loaded correctly."): cas_devices = casadm_parser.get_cas_devices_dict() if len(cas_devices["core_pool"]) != 0: @@ -209,6 +213,7 @@ def test_neg_udev_cache_load(): with TestRun.step("Unplug and plug core disk."): core_disk.unplug() core_disk.plug() + time.sleep(1) with TestRun.step("Check if two cores assigned to not loaded cache are inserted to core pool."): cas_devices = casadm_parser.get_cas_devices_dict() if len(cas_devices["core_pool"]) != 2: From f6ec36e49ee64cacd841d77fcad64bd7ef646511 Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Mon, 31 Aug 2020 10:59:50 +0200 Subject: [PATCH 06/10] Increase readability Signed-off-by: Slawomir Jankowski --- .../tests/incremental_load/test_core_pool.py | 16 +++ .../incremental_load/test_incremental_load.py | 109 +++++++++++++++++- .../tests/incremental_load/test_udev.py | 30 ++++- 3 files changed, 148 insertions(+), 7 deletions(-) diff --git a/test/functional/tests/incremental_load/test_core_pool.py b/test/functional/tests/incremental_load/test_core_pool.py index 089a62e..e9eaf58 100644 --- a/test/functional/tests/incremental_load/test_core_pool.py +++ b/test/functional/tests/incremental_load/test_core_pool.py @@ -2,6 +2,7 @@ # Copyright(c) 2019-2020 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # + import pytest from api.cas import casadm @@ -36,24 +37,32 @@ def test_attach_core_pool(): core_disk.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)]) core_dev = core_disk.partitions[0] second_core_dev = core_disk.partitions[1] + with TestRun.step("Start cache."): cache = casadm.start_cache(cache_dev, force=True) + with TestRun.step("Add core device."): cache.add_core(core_dev) + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Add previously used core device to core pool using --try-add flag."): first_core = casadm.try_add(core_dev, cache.cache_id) + with TestRun.step("Add different core device to core pool using --try-add flag."): second_core = casadm.try_add(second_core_dev, cache.cache_id) + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Check each core status."): if first_core.get_status() is not CoreStatus.active: TestRun.fail(f"First core status should be active but is {first_core.get_status()}.") if second_core.get_status() is not CoreStatus.detached: TestRun.fail( f"Second core status should be detached but is {second_core.get_status()}.") + with TestRun.step("Stop cache and remove core from core pool."): casadm.remove_all_detached_cores() cache.stop() @@ -77,12 +86,15 @@ def test_core_pool_exclusive_open(): core_disk.create_partitions([Size(1, Unit.GibiByte)]) core_dev = core_disk.partitions[0] core_dev.create_filesystem(Filesystem.ext4) + with TestRun.step("Add core device to core device pool using --try-add flag."): core = casadm.try_add(core_dev, 1) + with TestRun.step("Check if core status of added core in core pool is detached."): status = core.get_status() if status is not CoreStatus.detached: TestRun.fail(f"Core status should be detached but is {status}.") + with TestRun.step("Check if it is impossible to add core device from core pool to " "running cache."): TestRun.disks["cache"].create_partitions([Size(2, Unit.GibiByte)]) @@ -94,6 +106,7 @@ def test_core_pool_exclusive_open(): except CmdException: TestRun.LOGGER.info("Adding core from core pool to 
cache is blocked as expected.") cache.stop() + with TestRun.step("Check if it is impossible to start cache with casadm start command on the " "core device from core pool."): try: @@ -103,6 +116,7 @@ def test_core_pool_exclusive_open(): "this is unexpected behaviour.") except CmdException: TestRun.LOGGER.info("Using core device from core pool as cache is blocked as expected.") + with TestRun.step("Check if it is impossible to make filesystem on the core device " "from core pool."): try: @@ -112,11 +126,13 @@ def test_core_pool_exclusive_open(): except Exception: TestRun.LOGGER.info("Creating filesystem on core device from core pool is " "blocked as expected.") + with TestRun.step("Check if it is impossible to mount the core device from core pool."): try: core_dev.mount("/mnt") TestRun.fail("Successfully mounted core pool device, this is unexpected behaviour.") except Exception: TestRun.LOGGER.info("Mounting core device form core pool is blocked as expected.") + with TestRun.step("Remove core from core pool."): casadm.remove_all_detached_cores() diff --git a/test/functional/tests/incremental_load/test_incremental_load.py b/test/functional/tests/incremental_load/test_incremental_load.py index abb77d9..f3ddf00 100644 --- a/test/functional/tests/incremental_load/test_incremental_load.py +++ b/test/functional/tests/incremental_load/test_incremental_load.py @@ -44,33 +44,44 @@ def test_attach_core_to_incomplete_cache_volume(): cache_dev = devices["cache"].partitions[0] core_dev = devices["core"].partitions[0] plug_device = devices["core"] + with TestRun.step("Start cache and add core."): cache = casadm.start_cache(cache_dev, force=True) core = cache.add_core(core_dev) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Load cache."): casadm.load_cache(cache_dev) + with TestRun.step("Check if there is CAS device in /dev and core is in active status."): core.check_if_is_present_in_os() core_status = core.get_status() if core_status != CoreStatus.active: TestRun.fail(f"Core should be in active state. 
(Actual: {core_status})") + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Unplug core device."): plug_device.unplug() + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Check if there is no CAS device in /dev and core is in inactive status."): core.check_if_is_present_in_os(False) if core.get_status() != CoreStatus.inactive: TestRun.fail("Core should be in inactive state.") + with TestRun.step("Plug core device."): plug_device.plug() time.sleep(1) + with TestRun.step("Check if core status changed to active and CAS device is visible in OS."): core.wait_for_status_change(CoreStatus.active) core.check_if_is_present_in_os() @@ -97,6 +108,7 @@ def test_flush_inactive_devices(): first_core_dev = devices["core1"].partitions[0] second_core_dev = devices["core2"].partitions[0] plug_device = devices["core1"] + with TestRun.step("Start cache in WB mode and set alru cleaning policy."): cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True) cache.set_cleaning_policy(CleaningPolicy.alru) @@ -108,22 +120,29 @@ def test_flush_inactive_devices(): with TestRun.step("Add two cores."): first_core = cache.add_core(first_core_dev) second_core = cache.add_core(second_core_dev) + with TestRun.step("Create init config file using running CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Run random writes to CAS device."): run_fio([first_core.system_path, second_core.system_path]) + with TestRun.step("Stop cache without flushing dirty data."): cache.stop(no_data_flush=True) + with TestRun.step("Unplug one core disk."): plug_device.unplug() + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Wait longer than required for alru cleaning thread to start and verify " "that dirty data is flushed only from active device."): dirty_lines_before = {first_core: first_core.get_dirty_blocks(), second_core: second_core.get_dirty_blocks()} time.sleep(30) check_amount_of_dirty_data(dirty_lines_before) + with TestRun.step("Try to call 'flush cache' command."): dirty_lines_before = {first_core: first_core.get_dirty_blocks(), second_core: second_core.get_dirty_blocks()} @@ -134,6 +153,7 @@ def test_flush_inactive_devices(): except Exception as e: TestRun.LOGGER.info(f"Flush cache operation is blocked as expected.\n{str(e)}") check_amount_of_dirty_data(dirty_lines_before) + with TestRun.step("Try to call 'flush core' command for inactive core."): dirty_lines_before = {first_core: first_core.get_dirty_blocks(), second_core: second_core.get_dirty_blocks()} @@ -144,6 +164,7 @@ def test_flush_inactive_devices(): except Exception as e: TestRun.LOGGER.info(f"Flush core operation is blocked as expected.\n{str(e)}") check_amount_of_dirty_data(dirty_lines_before) + with TestRun.step("Plug core disk and verify that this change is reflected on the cache list."): plug_device.plug() time.sleep(1) @@ -152,6 +173,7 @@ def test_flush_inactive_devices(): if cache_status != CacheStatus.running: TestRun.fail(f"Cache did not change status to 'running' after plugging core device. 
" f"Actual state: {cache_status}.") + with TestRun.step("Stop cache."): cache.stop() @@ -172,9 +194,11 @@ def test_list_cache_and_cache_volumes(): cache_dev = devices["cache"].partitions[0] core_dev = devices["core"].partitions[0] plug_device = devices["core"] + with TestRun.step("Start cache and add core."): cache = casadm.start_cache(cache_dev, force=True) core = cache.add_core(core_dev) + with TestRun.step("Check if list caches command shows proper output (cache should have status " "Running and cache volume should be Active)."): core_status = core.get_status() @@ -183,14 +207,19 @@ def test_list_cache_and_cache_volumes(): cache_status = cache.get_status() if cache_status != CacheStatus.running: TestRun.fail(f"Cache should be in running state. Actual state: {cache_status}") + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Unplug core device."): plug_device.unplug() + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Check if list cache command shows proper output (cache should have status " "Incomplete and cache volume should be Inactive)."): core_status = core.get_status() @@ -199,6 +228,7 @@ def test_list_cache_and_cache_volumes(): cache_status = cache.get_status() if cache_status != CacheStatus.incomplete: TestRun.fail(f"Cache should be in incomplete state. Actual state: {cache_status}.") + with TestRun.step("Plug missing device and stop cache."): plug_device.plug() time.sleep(1) @@ -227,18 +257,24 @@ def test_load_cache_with_inactive_core(): cache_dev = devices["cache"].partitions[0] core_dev = devices["core"].partitions[0] plug_device = devices["core"] + with TestRun.step("Start cache and add core."): cache = casadm.start_cache(cache_dev, force=True) core = cache.add_core(core_dev) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Unplug core device."): plug_device.unplug() + with TestRun.step("Load cache."): output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path)) cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing) + with TestRun.step("Plug missing device and stop cache."): plug_device.plug() time.sleep(1) @@ -267,16 +303,20 @@ def test_preserve_data_for_inactive_device(): cache_dev = devices["cache"].partitions[0] core_dev = devices["core"].partitions[0] plug_device = devices["core"] + with TestRun.step("Start cache and add core."): cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True) cache.set_seq_cutoff_policy(SeqCutOffPolicy.never) cache.set_cleaning_policy(CleaningPolicy.nop) core = cache.add_core(core_dev) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Create filesystem on CAS device and mount it."): core.create_filesystem(Filesystem.ext3) core.mount(mount_dir) + with TestRun.step("Create a test file with random writes on mount point and count it's md5."): file_path = f"{mount_dir}/test_file" test_file = File.create_file(file_path) @@ -289,27 +329,32 @@ def test_preserve_data_for_inactive_device(): md5_after_create = test_file.md5sum() cache_stats_before_stop = cache.get_statistics() core_stats_before_stop = core.get_statistics() + with 
TestRun.step("Unmount CAS device."): core.unmount() + with TestRun.step("Stop cache without flushing dirty data."): cache.stop(no_data_flush=True) + with TestRun.step("Unplug core device."): plug_device.unplug() + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) cache_stats_after_load = cache.get_statistics() core_stats_after_load = core.get_statistics() - if cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean or\ - cache_stats_before_stop.usage_stats.dirty != \ - cache_stats_after_load.usage_stats.dirty or\ - core_stats_before_stop.usage_stats.clean != \ - core_stats_after_load.usage_stats.clean or\ - core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty: + if ( + cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean + or cache_stats_before_stop.usage_stats.dirty != cache_stats_after_load.usage_stats.dirty + or core_stats_before_stop.usage_stats.clean != core_stats_after_load.usage_stats.clean + or core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty + ): TestRun.fail(f"Statistics after counting md5 are different than after cache load.\n" f"Cache stats before: {cache_stats_before_stop}\n" f"Cache stats after: {cache_stats_after_load}\n" f"Core stats before: {core_stats_before_stop}\n" f"Core stats after: {core_stats_after_load}") + with TestRun.step("Plug core disk using sysfs and verify this change is reflected " "on the cache list."): plug_device.plug() @@ -317,8 +362,10 @@ def test_preserve_data_for_inactive_device(): if cache.get_status() != CacheStatus.running or core.get_status() != CoreStatus.active: TestRun.fail(f"Expected cache status is running (actual - {cache.get_status()}).\n" f"Expected core status is active (actual - {core.get_status()}).") + with TestRun.step("Mount CAS device"): core.mount(mount_dir) + with TestRun.step("Count md5 checksum for test file and compare it with previous value."): cache_read_hits_before_md5 = cache.get_statistics().request_stats.read.hits md5_after_cache_load = test_file.md5sum() @@ -328,6 +375,7 @@ def test_preserve_data_for_inactive_device(): else: TestRun.LOGGER.info("Md5 checksum is identical before and after cache load operation " "with inactive CAS device.") + with TestRun.step("Verify that cache read hits increased after counting md5 checksum."): cache_read_hits_after_md5 = cache.get_statistics().request_stats.read.hits if cache_read_hits_after_md5 - cache_read_hits_before_md5 < 0: @@ -336,6 +384,7 @@ def test_preserve_data_for_inactive_device(): f"After: {cache_read_hits_after_md5}.") else: TestRun.LOGGER.info("Cache read hits increased as expected.") + with TestRun.step("Unmount CAS device and stop cache."): core.unmount() cache.stop() @@ -363,29 +412,37 @@ def test_print_statistics_inactive(cache_mode): second_core_dev = devices["core2"].partitions[0] first_plug_device = devices["core1"] second_plug_device = devices["core2"] + with TestRun.step("Start cache and add cores."): cache = casadm.start_cache(cache_dev, cache_mode=cache_mode, force=True) first_core = cache.add_core(first_core_dev) second_core = cache.add_core(second_core_dev) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Run IO."): run_fio([first_core.system_path, second_core.system_path]) + with TestRun.step("Print statistics and check if there is no inactive usage section."): active_stats = cache.get_statistics() 
check_if_inactive_section_exists(active_stats, False) + with TestRun.step("Stop cache."): cache.stop() with TestRun.step("Remove both core devices from OS."): first_plug_device.unplug() second_plug_device.unplug() + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Check if inactive devices section appeared and contains appropriate " "information."): inactive_stats_before = cache.get_statistics() check_if_inactive_section_exists(inactive_stats_before) check_number_of_inactive_devices(inactive_stats_before, 2) + with TestRun.step("Attach one of detached core devices and add it to cache."): first_plug_device.plug() time.sleep(1) @@ -393,6 +450,7 @@ def test_print_statistics_inactive(cache_mode): if first_core_status != CoreStatus.active: TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. " f"Actual state: {first_core_status}.") + with TestRun.step("Check cache statistics section of inactive devices."): inactive_stats_after = cache.get_statistics() check_if_inactive_section_exists(inactive_stats_after) @@ -408,6 +466,7 @@ def test_print_statistics_inactive(cache_mode): inactive_stats_after.inactive_usage_stats.inactive_dirty, "inactive dirty", cache.get_cache_mode() != CacheMode.WB) + with TestRun.step("Check statistics per inactive core."): inactive_core_stats = second_core.get_statistics() if inactive_stats_after.inactive_usage_stats.inactive_occupancy == \ @@ -418,17 +477,20 @@ def test_print_statistics_inactive(cache_mode): TestRun.fail(f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) " f"should be the same as cache inactive occupancy " f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy}).") + with TestRun.step("Remove inactive core from cache and check if cache is in running state."): cache.remove_core(second_core.core_id, force=True) cache_status = cache.get_status() if cache_status != CacheStatus.running: TestRun.fail(f"Cache did not change status to 'running' after plugging core device. 
" f"Actual status: {cache_status}.") + with TestRun.step("Check if there is no inactive devices statistics section and if cache has " "Running status."): cache_stats = cache.get_statistics() check_if_inactive_section_exists(cache_stats, False) check_number_of_inactive_devices(cache_stats, 0) + with TestRun.step("Plug missing disk and stop cache."): second_plug_device.plug() time.sleep(1) @@ -451,15 +513,19 @@ def test_remove_detached_cores(): cache_dev = devices["cache"].partitions[0] core_devs = devices["core"].partitions plug_device = devices["core"] + with TestRun.step("Start cache and add four cores."): cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True) cores = [] for d in core_devs: cores.append(cache.add_core(d)) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Run random writes to all CAS devices."): run_fio([c.system_path for c in cores]) + with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain " "dirty data."): for core in cores: @@ -469,21 +535,26 @@ def test_remove_detached_cores(): TestRun.fail("Failed to flush CAS device.") elif core.get_dirty_blocks() == Size.zero(): TestRun.fail("There should be dirty data on CAS device.") + with TestRun.step("Stop cache without flushing dirty data."): cache.stop(no_data_flush=True) + with TestRun.step("Unplug core device from system and plug it back."): plug_device.unplug() time.sleep(2) plug_device.plug() time.sleep(1) + with TestRun.step("Verify that all cores from plugged core device are listed with " "proper status."): for core in cores: if core.get_status() != CoreStatus.detached: TestRun.fail(f"Each core should be in detached state. " f"Actual states: {casadm.list_caches().stdout}") + with TestRun.step("Remove CAS devices from core pool."): casadm.remove_all_detached_cores() + with TestRun.step("Verify that cores are no longer listed."): output = casadm.list_caches().stdout for dev in core_devs: @@ -511,15 +582,19 @@ def test_remove_inactive_devices(): cache_dev = devices["cache"].partitions[0] core_devs = devices["core"].partitions plug_device = devices["core"] + with TestRun.step("Start cache and add four cores."): cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True) cores = [] for d in core_devs: cores.append(cache.add_core(d)) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Run random writes to all CAS devices."): run_fio([c.system_path for c in cores]) + with TestRun.step("Flush dirty data from two CAS devices and verify than other two " "contain dirty data."): for core in cores: @@ -529,18 +604,23 @@ def test_remove_inactive_devices(): TestRun.fail("Failed to flush CAS device.") elif core.get_dirty_blocks() == Size.zero(): TestRun.fail("There should be dirty data on CAS device.") + with TestRun.step("Stop cache without flushing dirty data."): cache.stop(no_data_flush=True) + with TestRun.step("Unplug core disk."): plug_device.unplug() + with TestRun.step("Load cache."): casadm.load_cache(cache_dev) + with TestRun.step("Verify that all previously created CAS devices are listed with " "proper status."): for core in cores: if core.get_status() != CoreStatus.inactive: TestRun.fail(f"Each core should be in inactive state. 
" f"Actual states:\n{casadm.list_caches().stdout}") + with TestRun.step("Try removing CAS device without ‘force’ option. Verify that for " "dirty CAS devices operation is blocked, proper message is displayed " "and device is still listed."): @@ -565,6 +645,7 @@ def test_remove_inactive_devices(): TestRun.fail(f"CAS device is not listed in casadm list output but it should be." f"\n{output}") core.remove_core(force=True) + with TestRun.step("Plug missing disk and stop cache."): plug_device.plug() time.sleep(1) @@ -589,47 +670,63 @@ def test_stop_cache_with_inactive_devices(): cache_dev = devices["cache"].partitions[0] core_dev = devices["core"].partitions[0] plug_device = devices["core"] + with TestRun.step("Start cache and add core."): cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True) core = cache.add_core(core_dev) + with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Run random writes and verify that CAS device contains dirty data."): run_fio([core.system_path]) if core.get_dirty_blocks() == Size.zero(): TestRun.fail("There is no dirty data on core device.") + with TestRun.step("Stop cache without flushing dirty data."): cache.stop(no_data_flush=True) + with TestRun.step("Unplug core disk."): plug_device.unplug() + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Verify that previously created CAS device is listed with proper status."): core_status = core.get_status() if core_status != CoreStatus.inactive: TestRun.fail(f"CAS device should be in inactive state. Actual status: {core_status}.") + with TestRun.step("Try stopping cache without ‘no data flush’ option, verify that operation " "was blocked and proper message is displayed."): try_stop_incomplete_cache(cache) + with TestRun.step("Stop cache with force option."): cache.stop(no_data_flush=True) + with TestRun.step("Plug missing core device."): plug_device.plug() time.sleep(1) + with TestRun.step("Load cache."): cache = casadm.load_cache(cache_dev) + with TestRun.step("Stop cache with flushing dirty data."): cache.stop() + with TestRun.step("Unplug core device."): plug_device.unplug() + with TestRun.step("Load cache and verify core status is inactive."): cache = casadm.load_cache(cache_dev) core_status = core.get_status() if core_status != CoreStatus.inactive: TestRun.fail(f"CAS device should be in inactive state. Actual state: {core_status}.") + with TestRun.step("Try stopping cache without ‘no data flush’ option, verify that " "operation was blocked and proper message is displayed."): try_stop_incomplete_cache(cache) + with TestRun.step("Stop cache with 'no data flush' option and plug missing core device."): cache.stop(no_data_flush=True) plug_device.plug() diff --git a/test/functional/tests/incremental_load/test_udev.py b/test/functional/tests/incremental_load/test_udev.py index 3332865..6a5b431 100644 --- a/test/functional/tests/incremental_load/test_udev.py +++ b/test/functional/tests/incremental_load/test_udev.py @@ -2,8 +2,8 @@ # Copyright(c) 2020 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # -import random +import random import pytest from api.cas import casadm, casadm_parser @@ -30,6 +30,7 @@ def test_udev_core_partition(): - Created partitions are added to core pool after attaching core drive. 
""" cores_count = 4 + with TestRun.step("Create four partitions on core device and one on cache device."): cache_disk = TestRun.disks["cache"] cache_disk.create_partitions([Size(1, Unit.GibiByte)]) @@ -37,19 +38,25 @@ def test_udev_core_partition(): core_disk = TestRun.disks["core"] core_disk.create_partitions([Size(2, Unit.GibiByte)] * cores_count) core_devices = core_disk.partitions + with TestRun.step("Start cache and add created partitions as cores."): cache = casadm.start_cache(cache_dev, force=True) for dev in core_devices: cache.add_core(dev) + with TestRun.step("Create init config from running CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Detach core disk."): core_disk.unplug() + with TestRun.step("Plug missing core disk."): core_disk.plug() time.sleep(1) + with TestRun.step("List cache devices and check that created partitions are present " "in core pool."): for dev in core_devices: @@ -80,21 +87,29 @@ def test_udev_core(): core_dev = core_disk.partitions[0] cache = casadm.start_cache(cache_dev, force=True) core = cache.add_core(core_dev) + with TestRun.step("Create init config from running CAS configuration."): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Unplug core disk."): core_disk.unplug() + with TestRun.step("Plug core disk."): core_disk.plug() time.sleep(1) + with TestRun.step("Check if core device is listed in core pool."): check_if_dev_in_core_pool(core_dev) + with TestRun.step("Unplug cache disk."): cache_disk.unplug() + with TestRun.step("Plug cache disk."): cache_disk.plug() + with TestRun.step("Check if core device is active and not in the core pool."): check_if_dev_in_core_pool(core_dev, False) if core.get_status() != CoreStatus.active: @@ -120,15 +135,20 @@ def test_udev_cache_load(cache_mode): cache_disk.create_partitions([Size(1, Unit.GibiByte)]) cache_dev = cache_disk.partitions[0] cache = casadm.start_cache(cache_dev, cache_mode=cache_mode) + with TestRun.step("Create init config from running configuration"): InitConfig.create_init_config_from_running_configuration() + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Unplug cache disk."): cache_disk.unplug() + with TestRun.step("Plug cache disk."): cache_disk.plug() time.sleep(1) + with TestRun.step("List caches and check if cache is loaded."): caches = casadm_parser.get_caches() if len(caches) < 1: @@ -165,6 +185,7 @@ def test_neg_udev_cache_load(): """ caches_count = 2 cores_count = 4 + with TestRun.step("Create init config file with two caches and two cores per each cache."): cache_disk = TestRun.disks["cache"] cache_disk.create_partitions([Size(1, Unit.GibiByte)] * caches_count) @@ -178,16 +199,20 @@ def test_neg_udev_cache_load(): init_conf.add_core(1 if j in first_cache_core_numbers else 2, j + 1, core_disk.partitions[j]) init_conf.save_config_file() + with TestRun.step("Start one cache and add two cores as defined in init config."): cache = casadm.start_cache(cache_disk.partitions[0]) for i in first_cache_core_numbers: cache.add_core(core_disk.partitions[i]) + with TestRun.step("Stop cache."): cache.stop() + with TestRun.step("Unplug and plug cache disk."): cache_disk.unplug() cache_disk.plug() time.sleep(1) + with TestRun.step("Check if CAS is loaded correctly."): cas_devices = casadm_parser.get_cas_devices_dict() if len(cas_devices["core_pool"]) != 0: @@ -202,6 +227,7 @@ def 
test_neg_udev_cache_load(): if len(cas_devices["cores"]) != 2: TestRun.LOGGER.error(f"There is wrong number of cores. Expected: 2, actual: " f"{len(cas_devices['caches'])}") + correct_core_devices = [] for i in first_cache_core_numbers: correct_core_devices.append(core_disk.partitions[i].system_path) @@ -210,10 +236,12 @@ def test_neg_udev_cache_load(): CoreStatus[core["status"].lower()] != CoreStatus.active or \ core["cache_id"] != 1: TestRun.LOGGER.error(f"Core did not load correctly: {core}.") + with TestRun.step("Unplug and plug core disk."): core_disk.unplug() core_disk.plug() time.sleep(1) + with TestRun.step("Check if two cores assigned to not loaded cache are inserted to core pool."): cas_devices = casadm_parser.get_cas_devices_dict() if len(cas_devices["core_pool"]) != 2:
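A side note on the time.sleep(1) calls this series adds after each plug(): a fixed delay is a simple cure, but where plug latency varies, a bounded poll tends to be more robust. A hypothetical alternative; the helper below is illustrative only and not part of the test framework:

import os
import time

def wait_for_device(path, timeout_s=10, interval_s=0.5):
    # Poll for the device node instead of sleeping a fixed interval
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if os.path.exists(path):
            return True
        time.sleep(interval_s)
    return False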
From cae2dd2ef5206b98e92585499b88750faf69c6e2 Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Mon, 31 Aug 2020 11:01:11 +0200 Subject: [PATCH 07/10] Fix incremental load tests Inactive statistics for particular modes: WT - only dirty should be zero; WA, PT - all should be zero; WB, WO - only clean should be zero. Signed-off-by: Slawomir Jankowski --- .../incremental_load/test_incremental_load.py | 36 +++++++++++++------ 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/test/functional/tests/incremental_load/test_incremental_load.py b/test/functional/tests/incremental_load/test_incremental_load.py index f3ddf00..dd5c296 100644 --- a/test/functional/tests/incremental_load/test_incremental_load.py +++ b/test/functional/tests/incremental_load/test_incremental_load.py @@ -8,7 +8,7 @@ from random import shuffle import pytest from api.cas import casadm, cli, cli_messages -from api.cas.cache_config import CacheStatus, SeqCutOffPolicy +from api.cas.cache_config import CacheStatus, SeqCutOffPolicy, CacheModeTrait from api.cas.core import CoreStatus, CacheMode, CleaningPolicy, FlushParametersAlru, File from api.cas.init_config import InitConfig from api.cas.statistics import CacheStats @@ -19,6 +19,7 @@ from test_tools.disk_utils import Filesystem from test_tools.fio.fio import Fio from test_tools.fio.fio_param import IoEngine, ReadWrite from test_utils import os_utils +from test_utils.os_utils import Udev from test_utils.output import CmdException from test_utils.size import Size, Unit from test_utils.time import Time @@ -412,11 +413,17 @@ def test_print_statistics_inactive(cache_mode): second_core_dev = devices["core2"].partitions[0] first_plug_device = devices["core1"] second_plug_device = devices["core2"] + Udev.disable() # disabling udev for a while prevents creating clean data on cores with TestRun.step("Start cache and add cores."): cache = casadm.start_cache(cache_dev, cache_mode=cache_mode, force=True) first_core = cache.add_core(first_core_dev) second_core = cache.add_core(second_core_dev) + cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode()) + + with TestRun.step("Disable cleaning and sequential cutoff policies."): + cache.set_cleaning_policy(CleaningPolicy.nop) + cache.set_seq_cutoff_policy(SeqCutOffPolicy.never) with TestRun.step("Create init config file using current CAS configuration."): InitConfig.create_init_config_from_running_configuration() @@ -429,8 +436,13 @@ def test_print_statistics_inactive(cache_mode): check_if_inactive_section_exists(active_stats, False) with TestRun.step("Stop cache."): - cache.stop() + if CacheModeTrait.LazyWrites in cache_mode_traits: + cache.stop(no_data_flush=True) + else: + cache.stop() + with TestRun.step("Remove both core devices from OS."): Udev.enable() # enable udev back because it's necessary now first_plug_device.unplug() second_plug_device.unplug() @@ -455,17 +467,24 @@ def test_print_statistics_inactive(cache_mode): inactive_stats_after = cache.get_statistics() check_if_inactive_section_exists(inactive_stats_after) check_number_of_inactive_devices(inactive_stats_after, 1) + # criteria for checks below + insert_write_traits = CacheModeTrait.InsertWrite in cache_mode_traits + lazy_write_traits = CacheModeTrait.LazyWrites in cache_mode_traits + lazy_writes_or_no_insert_write_traits = (not insert_write_traits + or lazy_write_traits) + check_inactive_usage_stats(inactive_stats_before.inactive_usage_stats.inactive_occupancy, inactive_stats_after.inactive_usage_stats.inactive_occupancy, - "inactive occupancy", cache.get_cache_mode() == CacheMode.PT) + "inactive occupancy", + not insert_write_traits) check_inactive_usage_stats(inactive_stats_before.inactive_usage_stats.inactive_clean, inactive_stats_after.inactive_usage_stats.inactive_clean, "inactive clean", - cache.get_cache_mode() in [CacheMode.PT, CacheMode.WB]) + lazy_writes_or_no_insert_write_traits) check_inactive_usage_stats(inactive_stats_before.inactive_usage_stats.inactive_dirty, inactive_stats_after.inactive_usage_stats.inactive_dirty, "inactive dirty", - cache.get_cache_mode() != CacheMode.WB) + not lazy_write_traits) with TestRun.step("Check statistics per inactive core."): inactive_core_stats = second_core.get_statistics() @@ -634,9 +653,6 @@ def test_remove_inactive_devices(): "command executed without any error.") TestRun.LOGGER.info("Removing core with force option skipped for clean CAS device.") except CmdException as e: - if dirty_blocks == Size.zero(): - TestRun.fail("Removing clean CAS device should be possible but remove " - "command returned an error.") TestRun.LOGGER.info("Remove operation without force option is blocked for " "dirty CAS device as expected.") cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_core) @@ -747,8 +763,8 @@ def check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_z elif not should_be_zero and stats_after < stats_before: TestRun.LOGGER.info(f"{stat_name} is lower than before as expected.") else: - TestRun.fail(f"{stat_name} ({stats_after}) is not lower than before " - f"({stats_before}).") + TestRun.LOGGER.error(f"{stat_name} ({stats_after}) is not lower than before " + f"({stats_before}).") def check_number_of_inactive_devices(stats: CacheStats, expected_num):
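The trait-based conditions above can be read as a small truth table. A sketch using only names that appear in the diff; the per-mode expectations follow the commit message rather than independent verification:

from api.cas.cache_config import CacheMode, CacheModeTrait

traits = CacheMode.get_traits(CacheMode.WB)
insert_write = CacheModeTrait.InsertWrite in traits  # writes populate the cache
lazy_writes = CacheModeTrait.LazyWrites in traits    # dirty data may exist

# Dirty stats should stay zero unless writes are lazy (WB, WO);
# clean stats should stay zero when writes are lazy or never inserted;
# occupancy should stay zero when writes are not inserted at all (WA, PT).
expect_dirty_zero = not lazy_writes
expect_clean_zero = not insert_write or lazy_writes
expect_occupancy_zero = not insert_write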
From e5c83243985c119cdc1c3452727bf0b8a845f5d1 Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Wed, 9 Sep 2020 12:42:04 +0200 Subject: [PATCH 08/10] Fix recovery tests Methods receiving a device as an argument use its system_path field, not full_path. Signed-off-by: Slawomir Jankowski --- .../lazy_writes/recovery/test_recovery_all_options.py | 4 ++-- .../lazy_writes/recovery/test_recovery_flush_reset.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/functional/tests/lazy_writes/recovery/test_recovery_all_options.py b/test/functional/tests/lazy_writes/recovery/test_recovery_all_options.py index 6293fc8..7a61510 100644 --- a/test/functional/tests/lazy_writes/recovery/test_recovery_all_options.py +++ b/test/functional/tests/lazy_writes/recovery/test_recovery_all_options.py @@ -88,8 +88,8 @@ def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy, file core.unmount() TestRun.LOGGER.info(f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}") power_cycle_dut() - cache_device.full_path = cache_device_link.get_target() - core_device.full_path = core_device_link.get_target() + cache_device.system_path = cache_device_link.get_target() + core_device.system_path = core_device_link.get_target() with TestRun.step("Try to start cache without load and force option."): try: diff --git a/test/functional/tests/lazy_writes/recovery/test_recovery_flush_reset.py b/test/functional/tests/lazy_writes/recovery/test_recovery_flush_reset.py index 95e84b3..dba6fad 100644 --- a/test/functional/tests/lazy_writes/recovery/test_recovery_flush_reset.py +++ b/test/functional/tests/lazy_writes/recovery/test_recovery_flush_reset.py @@ -70,8 +70,8 @@ def test_recovery_flush_reset_raw(cache_mode): with TestRun.step("Hard reset DUT during data flushing."): power_cycle_dut(wait_for_flush_begin=True, core_device=core_device) - cache_device.full_path = cache_device_link.get_target() - core_device.full_path = core_device_link.get_target() + cache_device.system_path = cache_device_link.get_target() + core_device.system_path = core_device_link.get_target() with TestRun.step("Copy file from core and check if current md5sum is different than " "before restart."): @@ -155,8 +155,8 @@ def test_recovery_flush_reset_fs(cache_mode, fs): with TestRun.step("Hard reset DUT during data flushing."): power_cycle_dut(True, core_device) - cache_device.full_path = cache_device_link.get_target() - core_device.full_path = core_device_link.get_target() + cache_device.system_path = cache_device_link.get_target() + core_device.system_path = core_device_link.get_target() with TestRun.step("Load cache."): cache = casadm.load_cache(cache_device)
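The recurring idiom behind these recovery fixes: /dev/sdX names can change across a hard reset, so the tests capture a persistent /dev/disk/by-id link beforehand and re-resolve it afterwards into the field the framework actually reads. Condensed from the surrounding diffs, using the same names:

# Before the hard reset: remember a stable by-id link
cache_device_link = cache_device.get_device_link("/dev/disk/by-id")
power_cycle_dut()
# After reboot the kernel may enumerate disks differently, so resolve
# the stable link again and store it where the framework looks
cache_device.system_path = cache_device_link.get_target()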
From a36c1be8a04b2914fcd2f5367c07b79cb97ce9ba Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Wed, 16 Sep 2020 12:42:10 +0200 Subject: [PATCH 09/10] In DI12h test, verify each job separately, right after the job is done. Verifying the whole workload at once generates failures in some DUT configurations. Signed-off-by: Slawomir Jankowski --- test/functional/tests/data_integrity/test_data_integrity_12h.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/functional/tests/data_integrity/test_data_integrity_12h.py b/test/functional/tests/data_integrity/test_data_integrity_12h.py index ddaef70..343ea70 100644 --- a/test/functional/tests/data_integrity/test_data_integrity_12h.py +++ b/test/functional/tests/data_integrity/test_data_integrity_12h.py @@ -92,5 +92,6 @@ def run_workload(target): fio_job = fio_run.add_job() fio_job.stonewall() fio_job.block_size(block_size) + fio_run.verify_backlog(block_size) fio_run.run() From 7d1f8cfceee8142dc38e2ad907020ae26528aaeb Mon Sep 17 00:00:00 2001 From: Slawomir Jankowski Date: Wed, 16 Sep 2020 19:31:26 +0200 Subject: [PATCH 10/10] Fix clean reboot test Signed-off-by: Slawomir Jankowski --- test/functional/tests/initialize/test_clean_reboot.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/test/functional/tests/initialize/test_clean_reboot.py b/test/functional/tests/initialize/test_clean_reboot.py index 2783456..8e4925d 100644 --- a/test/functional/tests/initialize/test_clean_reboot.py +++ b/test/functional/tests/initialize/test_clean_reboot.py @@ -5,6 +5,7 @@ import os import pytest + from api.cas import casadm from api.cas.cache_config import CacheMode from core.test_run import TestRun @@ -12,6 +13,7 @@ from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan from test_tools.dd import Dd from test_tools.disk_utils import Filesystem from test_utils.filesystem.file import File +from test_utils.os_utils import drop_caches, DropCachesMode, sync from test_utils.size import Size, Unit @@ -36,8 +38,9 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem): cache_disk = TestRun.disks['cache'] cache_disk.create_partitions([Size(1, Unit.GibiByte)]) cache_dev = cache_disk.partitions[0] + cache_dev_link = cache_dev.get_device_link("/dev/disk/by-id") core_dev = TestRun.disks['core'] - cache = casadm.start_cache(cache_dev, cache_mode) + cache = casadm.start_cache(cache_dev, cache_mode, force=True) core = cache.add_core(core_dev) core.create_filesystem(filesystem, blocksize=int(Size(1, Unit.Blocks4096))) core.mount(mount_point) @@ -52,6 +55,8 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem): .run() test_file.refresh_item() test_file_md5 = test_file.md5sum() + sync() + drop_caches(DropCachesMode.ALL) with TestRun.step("Reset platform."): if reboot_type == "soft": @@ -59,9 +64,10 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem): else: power_control = TestRun.plugin_manager.get_plugin('power_control') power_control.power_cycle() + cache_dev.system_path = cache_dev_link.get_target() with TestRun.step("Load cache."): - cache = casadm.load_cache(cache_dev) + casadm.load_cache(cache_dev) core.mount(mount_point) with TestRun.step("Check file md5sum."):