Increase readability

Signed-off-by: Slawomir Jankowski <slawomir.jankowski@intel.com>
Slawomir Jankowski 2020-08-31 10:59:50 +02:00
parent a99da7fa85
commit f6ec36e49e
3 changed files with 148 additions and 7 deletions
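The commit applies two readability patterns across all three test files: a blank line is inserted before each with TestRun.step(...) block, and long conditions joined with backslash continuations are rewritten as a single parenthesized expression (visible in test_preserve_data_for_inactive_device below). A minimal before/after sketch of both patterns, for orientation only: the helper function names and the stats_before/stats_after arguments are placeholders rather than code from these tests, and the TestRun import path is an assumption about the test framework.

from api.cas import casadm                # import used by these test files
from core.test_run import TestRun         # assumed framework import; actual path may differ


# Before: no blank line between consecutive steps, condition split with backslashes.
def load_and_stop_old_style(cache_dev, stats_before, stats_after):
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
        if stats_before.usage_stats.dirty != stats_after.usage_stats.dirty or \
                stats_before.usage_stats.clean != stats_after.usage_stats.clean:
            TestRun.fail("Statistics differ after cache load.")
    with TestRun.step("Stop cache."):
        cache.stop()


# After: a blank line separates consecutive steps and the condition is parenthesized.
def load_and_stop_new_style(cache_dev, stats_before, stats_after):
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
        if (
            stats_before.usage_stats.dirty != stats_after.usage_stats.dirty
            or stats_before.usage_stats.clean != stats_after.usage_stats.clean
        ):
            TestRun.fail("Statistics differ after cache load.")

    with TestRun.step("Stop cache."):
        cache.stop()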

View File

@@ -2,6 +2,7 @@
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas import casadm
@@ -36,24 +37,32 @@ def test_attach_core_pool():
core_disk.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
second_core_dev = core_disk.partitions[1]
with TestRun.step("Start cache."):
cache = casadm.start_cache(cache_dev, force=True)
with TestRun.step("Add core device."):
cache.add_core(core_dev)
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Add previously used core device to core pool using --try-add flag."):
first_core = casadm.try_add(core_dev, cache.cache_id)
with TestRun.step("Add different core device to core pool using --try-add flag."):
second_core = casadm.try_add(second_core_dev, cache.cache_id)
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check each core status."):
if first_core.get_status() is not CoreStatus.active:
TestRun.fail(f"First core status should be active but is {first_core.get_status()}.")
if second_core.get_status() is not CoreStatus.detached:
TestRun.fail(
f"Second core status should be detached but is {second_core.get_status()}.")
with TestRun.step("Stop cache and remove core from core pool."):
casadm.remove_all_detached_cores()
cache.stop()
@@ -77,12 +86,15 @@ def test_core_pool_exclusive_open():
core_disk.create_partitions([Size(1, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
core_dev.create_filesystem(Filesystem.ext4)
with TestRun.step("Add core device to core device pool using --try-add flag."):
core = casadm.try_add(core_dev, 1)
with TestRun.step("Check if core status of added core in core pool is detached."):
status = core.get_status()
if status is not CoreStatus.detached:
TestRun.fail(f"Core status should be detached but is {status}.")
with TestRun.step("Check if it is impossible to add core device from core pool to "
"running cache."):
TestRun.disks["cache"].create_partitions([Size(2, Unit.GibiByte)])
@@ -94,6 +106,7 @@ def test_core_pool_exclusive_open():
except CmdException:
TestRun.LOGGER.info("Adding core from core pool to cache is blocked as expected.")
cache.stop()
with TestRun.step("Check if it is impossible to start cache with casadm start command on the "
"core device from core pool."):
try:
@@ -103,6 +116,7 @@ def test_core_pool_exclusive_open():
"this is unexpected behaviour.")
except CmdException:
TestRun.LOGGER.info("Using core device from core pool as cache is blocked as expected.")
with TestRun.step("Check if it is impossible to make filesystem on the core device "
"from core pool."):
try:
@@ -112,11 +126,13 @@ def test_core_pool_exclusive_open():
except Exception:
TestRun.LOGGER.info("Creating filesystem on core device from core pool is "
"blocked as expected.")
with TestRun.step("Check if it is impossible to mount the core device from core pool."):
try:
core_dev.mount("/mnt")
TestRun.fail("Successfully mounted core pool device, this is unexpected behaviour.")
except Exception:
TestRun.LOGGER.info("Mounting core device form core pool is blocked as expected.")
with TestRun.step("Remove core from core pool."):
casadm.remove_all_detached_cores()

View File

@@ -44,33 +44,44 @@ def test_attach_core_to_incomplete_cache_volume():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)
with TestRun.step("Check if there is CAS device in /dev and core is in active status."):
core.check_if_is_present_in_os()
core_status = core.get_status()
if core_status != CoreStatus.active:
TestRun.fail(f"Core should be in active state. (Actual: {core_status})")
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check if there is no CAS device in /dev and core is in inactive status."):
core.check_if_is_present_in_os(False)
if core.get_status() != CoreStatus.inactive:
TestRun.fail("Core should be in inactive state.")
with TestRun.step("Plug core device."):
plug_device.plug()
time.sleep(1)
with TestRun.step("Check if core status changed to active and CAS device is visible in OS."):
core.wait_for_status_change(CoreStatus.active)
core.check_if_is_present_in_os()
@@ -97,6 +108,7 @@ def test_flush_inactive_devices():
first_core_dev = devices["core1"].partitions[0]
second_core_dev = devices["core2"].partitions[0]
plug_device = devices["core1"]
with TestRun.step("Start cache in WB mode and set alru cleaning policy."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cache.set_cleaning_policy(CleaningPolicy.alru)
@@ -108,22 +120,29 @@ def test_flush_inactive_devices():
with TestRun.step("Add two cores."):
first_core = cache.add_core(first_core_dev)
second_core = cache.add_core(second_core_dev)
with TestRun.step("Create init config file using running CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to CAS device."):
run_fio([first_core.system_path, second_core.system_path])
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug one core disk."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Wait longer than required for alru cleaning thread to start and verify "
"that dirty data is flushed only from active device."):
dirty_lines_before = {first_core: first_core.get_dirty_blocks(),
second_core: second_core.get_dirty_blocks()}
time.sleep(30)
check_amount_of_dirty_data(dirty_lines_before)
with TestRun.step("Try to call 'flush cache' command."):
dirty_lines_before = {first_core: first_core.get_dirty_blocks(),
second_core: second_core.get_dirty_blocks()}
@@ -134,6 +153,7 @@ def test_flush_inactive_devices():
except Exception as e:
TestRun.LOGGER.info(f"Flush cache operation is blocked as expected.\n{str(e)}")
check_amount_of_dirty_data(dirty_lines_before)
with TestRun.step("Try to call 'flush core' command for inactive core."):
dirty_lines_before = {first_core: first_core.get_dirty_blocks(),
second_core: second_core.get_dirty_blocks()}
@@ -144,6 +164,7 @@ def test_flush_inactive_devices():
except Exception as e:
TestRun.LOGGER.info(f"Flush core operation is blocked as expected.\n{str(e)}")
check_amount_of_dirty_data(dirty_lines_before)
with TestRun.step("Plug core disk and verify that this change is reflected on the cache list."):
plug_device.plug()
time.sleep(1)
@@ -152,6 +173,7 @@ def test_flush_inactive_devices():
if cache_status != CacheStatus.running:
TestRun.fail(f"Cache did not change status to 'running' after plugging core device. "
f"Actual state: {cache_status}.")
with TestRun.step("Stop cache."):
cache.stop()
@@ -172,9 +194,11 @@ def test_list_cache_and_cache_volumes():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Check if list caches command shows proper output (cache should have status "
"Running and cache volume should be Active)."):
core_status = core.get_status()
@@ -183,14 +207,19 @@ def test_list_cache_and_cache_volumes():
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
TestRun.fail(f"Cache should be in running state. Actual state: {cache_status}")
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check if list cache command shows proper output (cache should have status "
"Incomplete and cache volume should be Inactive)."):
core_status = core.get_status()
@@ -199,6 +228,7 @@ def test_list_cache_and_cache_volumes():
cache_status = cache.get_status()
if cache_status != CacheStatus.incomplete:
TestRun.fail(f"Cache should be in incomplete state. Actual state: {cache_status}.")
with TestRun.step("Plug missing device and stop cache."):
plug_device.plug()
time.sleep(1)
@@ -227,18 +257,24 @@ def test_load_cache_with_inactive_core():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)
with TestRun.step("Plug missing device and stop cache."):
plug_device.plug()
time.sleep(1)
@@ -267,16 +303,20 @@ def test_preserve_data_for_inactive_device():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.set_cleaning_policy(CleaningPolicy.nop)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Create filesystem on CAS device and mount it."):
core.create_filesystem(Filesystem.ext3)
core.mount(mount_dir)
with TestRun.step("Create a test file with random writes on mount point and count it's md5."):
file_path = f"{mount_dir}/test_file"
test_file = File.create_file(file_path)
@@ -289,27 +329,32 @@ def test_preserve_data_for_inactive_device():
md5_after_create = test_file.md5sum()
cache_stats_before_stop = cache.get_statistics()
core_stats_before_stop = core.get_statistics()
with TestRun.step("Unmount CAS device."):
core.unmount()
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
cache_stats_after_load = cache.get_statistics()
core_stats_after_load = core.get_statistics()
if cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean or\
cache_stats_before_stop.usage_stats.dirty != \
cache_stats_after_load.usage_stats.dirty or\
core_stats_before_stop.usage_stats.clean != \
core_stats_after_load.usage_stats.clean or\
core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty:
if (
cache_stats_before_stop.usage_stats.clean != cache_stats_after_load.usage_stats.clean
or cache_stats_before_stop.usage_stats.dirty != cache_stats_after_load.usage_stats.dirty
or core_stats_before_stop.usage_stats.clean != core_stats_after_load.usage_stats.clean
or core_stats_before_stop.usage_stats.dirty != core_stats_after_load.usage_stats.dirty
):
TestRun.fail(f"Statistics after counting md5 are different than after cache load.\n"
f"Cache stats before: {cache_stats_before_stop}\n"
f"Cache stats after: {cache_stats_after_load}\n"
f"Core stats before: {core_stats_before_stop}\n"
f"Core stats after: {core_stats_after_load}")
with TestRun.step("Plug core disk using sysfs and verify this change is reflected "
"on the cache list."):
plug_device.plug()
@@ -317,8 +362,10 @@ def test_preserve_data_for_inactive_device():
if cache.get_status() != CacheStatus.running or core.get_status() != CoreStatus.active:
TestRun.fail(f"Expected cache status is running (actual - {cache.get_status()}).\n"
f"Expected core status is active (actual - {core.get_status()}).")
with TestRun.step("Mount CAS device"):
core.mount(mount_dir)
with TestRun.step("Count md5 checksum for test file and compare it with previous value."):
cache_read_hits_before_md5 = cache.get_statistics().request_stats.read.hits
md5_after_cache_load = test_file.md5sum()
@@ -328,6 +375,7 @@ def test_preserve_data_for_inactive_device():
else:
TestRun.LOGGER.info("Md5 checksum is identical before and after cache load operation "
"with inactive CAS device.")
with TestRun.step("Verify that cache read hits increased after counting md5 checksum."):
cache_read_hits_after_md5 = cache.get_statistics().request_stats.read.hits
if cache_read_hits_after_md5 - cache_read_hits_before_md5 <= 0:
@@ -336,6 +384,7 @@ def test_preserve_data_for_inactive_device():
f"After: {cache_read_hits_after_md5}.")
else:
TestRun.LOGGER.info("Cache read hits increased as expected.")
with TestRun.step("Unmount CAS device and stop cache."):
core.unmount()
cache.stop()
@@ -363,29 +412,37 @@ def test_print_statistics_inactive(cache_mode):
second_core_dev = devices["core2"].partitions[0]
first_plug_device = devices["core1"]
second_plug_device = devices["core2"]
with TestRun.step("Start cache and add cores."):
cache = casadm.start_cache(cache_dev, cache_mode=cache_mode, force=True)
first_core = cache.add_core(first_core_dev)
second_core = cache.add_core(second_core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run IO."):
run_fio([first_core.system_path, second_core.system_path])
with TestRun.step("Print statistics and check if there is no inactive usage section."):
active_stats = cache.get_statistics()
check_if_inactive_section_exists(active_stats, False)
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Remove both core devices from OS."):
first_plug_device.unplug()
second_plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Check if inactive devices section appeared and contains appropriate "
"information."):
inactive_stats_before = cache.get_statistics()
check_if_inactive_section_exists(inactive_stats_before)
check_number_of_inactive_devices(inactive_stats_before, 2)
with TestRun.step("Attach one of detached core devices and add it to cache."):
first_plug_device.plug()
time.sleep(1)
@@ -393,6 +450,7 @@ def test_print_statistics_inactive(cache_mode):
if first_core_status != CoreStatus.active:
TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. "
f"Actual state: {first_core_status}.")
with TestRun.step("Check cache statistics section of inactive devices."):
inactive_stats_after = cache.get_statistics()
check_if_inactive_section_exists(inactive_stats_after)
@@ -408,6 +466,7 @@ def test_print_statistics_inactive(cache_mode):
inactive_stats_after.inactive_usage_stats.inactive_dirty,
"inactive dirty",
cache.get_cache_mode() != CacheMode.WB)
with TestRun.step("Check statistics per inactive core."):
inactive_core_stats = second_core.get_statistics()
if inactive_stats_after.inactive_usage_stats.inactive_occupancy == \
@@ -418,17 +477,20 @@ def test_print_statistics_inactive(cache_mode):
TestRun.fail(f"Inactive core occupancy ({inactive_core_stats.usage_stats.occupancy}) "
f"should be the same as cache inactive occupancy "
f"({inactive_stats_after.inactive_usage_stats.inactive_occupancy}).")
with TestRun.step("Remove inactive core from cache and check if cache is in running state."):
cache.remove_core(second_core.core_id, force=True)
cache_status = cache.get_status()
if cache_status != CacheStatus.running:
TestRun.fail(f"Cache did not change status to 'running' after plugging core device. "
f"Actual status: {cache_status}.")
with TestRun.step("Check if there is no inactive devices statistics section and if cache has "
"Running status."):
cache_stats = cache.get_statistics()
check_if_inactive_section_exists(cache_stats, False)
check_number_of_inactive_devices(cache_stats, 0)
with TestRun.step("Plug missing disk and stop cache."):
second_plug_device.plug()
time.sleep(1)
@@ -451,15 +513,19 @@ def test_remove_detached_cores():
cache_dev = devices["cache"].partitions[0]
core_devs = devices["core"].partitions
plug_device = devices["core"]
with TestRun.step("Start cache and add four cores."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cores = []
for d in core_devs:
cores.append(cache.add_core(d))
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
"dirty data."):
for core in cores:
@@ -469,21 +535,26 @@ def test_remove_detached_cores():
TestRun.fail("Failed to flush CAS device.")
elif core.get_dirty_blocks() == Size.zero():
TestRun.fail("There should be dirty data on CAS device.")
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core device from system and plug it back."):
plug_device.unplug()
time.sleep(2)
plug_device.plug()
time.sleep(1)
with TestRun.step("Verify that all cores from plugged core device are listed with "
"proper status."):
for core in cores:
if core.get_status() != CoreStatus.detached:
TestRun.fail(f"Each core should be in detached state. "
f"Actual states: {casadm.list_caches().stdout}")
with TestRun.step("Remove CAS devices from core pool."):
casadm.remove_all_detached_cores()
with TestRun.step("Verify that cores are no longer listed."):
output = casadm.list_caches().stdout
for dev in core_devs:
@@ -511,15 +582,19 @@ def test_remove_inactive_devices():
cache_dev = devices["cache"].partitions[0]
core_devs = devices["core"].partitions
plug_device = devices["core"]
with TestRun.step("Start cache and add four cores."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
cores = []
for d in core_devs:
cores.append(cache.add_core(d))
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
"contain dirty data."):
for core in cores:
@@ -529,18 +604,23 @@ def test_remove_inactive_devices():
TestRun.fail("Failed to flush CAS device.")
elif core.get_dirty_blocks() == Size.zero():
TestRun.fail("There should be dirty data on CAS device.")
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core disk."):
plug_device.unplug()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)
with TestRun.step("Verify that all previously created CAS devices are listed with "
"proper status."):
for core in cores:
if core.get_status() != CoreStatus.inactive:
TestRun.fail(f"Each core should be in inactive state. "
f"Actual states:\n{casadm.list_caches().stdout}")
with TestRun.step("Try removing CAS device without force option. Verify that for "
"dirty CAS devices operation is blocked, proper message is displayed "
"and device is still listed."):
@@ -565,6 +645,7 @@ def test_remove_inactive_devices():
TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
f"\n{output}")
core.remove_core(force=True)
with TestRun.step("Plug missing disk and stop cache."):
plug_device.plug()
time.sleep(1)
@@ -589,47 +670,63 @@ def test_stop_cache_with_inactive_devices():
cache_dev = devices["cache"].partitions[0]
core_dev = devices["core"].partitions[0]
plug_device = devices["core"]
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config file using current CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes and verify that CAS device contains dirty data."):
run_fio([core.system_path])
if core.get_dirty_blocks() == Size.zero():
TestRun.fail("There is no dirty data on core device.")
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
with TestRun.step("Unplug core disk."):
plug_device.unplug()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Verify that previously created CAS device is listed with proper status."):
core_status = core.get_status()
if core_status != CoreStatus.inactive:
TestRun.fail(f"CAS device should be in inactive state. Actual status: {core_status}.")
with TestRun.step("Try stopping cache without no data flush option, verify that operation "
"was blocked and proper message is displayed."):
try_stop_incomplete_cache(cache)
with TestRun.step("Stop cache with force option."):
cache.stop(no_data_flush=True)
with TestRun.step("Plug missing core device."):
plug_device.plug()
time.sleep(1)
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_dev)
with TestRun.step("Stop cache with flushing dirty data."):
cache.stop()
with TestRun.step("Unplug core device."):
plug_device.unplug()
with TestRun.step("Load cache and verify core status is inactive."):
cache = casadm.load_cache(cache_dev)
core_status = core.get_status()
if core_status != CoreStatus.inactive:
TestRun.fail(f"CAS device should be in inactive state. Actual state: {core_status}.")
with TestRun.step("Try stopping cache without no data flush option, verify that "
"operation was blocked and proper message is displayed."):
try_stop_incomplete_cache(cache)
with TestRun.step("Stop cache with 'no data flush' option and plug missing core device."):
cache.stop(no_data_flush=True)
plug_device.plug()

View File

@@ -2,8 +2,8 @@
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
import random
import pytest
from api.cas import casadm, casadm_parser
@@ -30,6 +30,7 @@ def test_udev_core_partition():
- Created partitions are added to core pool after attaching core drive.
"""
cores_count = 4
with TestRun.step("Create four partitions on core device and one on cache device."):
cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
@@ -37,19 +38,25 @@ def test_udev_core_partition():
core_disk = TestRun.disks["core"]
core_disk.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
core_devices = core_disk.partitions
with TestRun.step("Start cache and add created partitions as cores."):
cache = casadm.start_cache(cache_dev, force=True)
for dev in core_devices:
cache.add_core(dev)
with TestRun.step("Create init config from running CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Detach core disk."):
core_disk.unplug()
with TestRun.step("Plug missing core disk."):
core_disk.plug()
time.sleep(1)
with TestRun.step("List cache devices and check that created partitions are present "
"in core pool."):
for dev in core_devices:
@@ -80,21 +87,29 @@ def test_udev_core():
core_dev = core_disk.partitions[0]
cache = casadm.start_cache(cache_dev, force=True)
core = cache.add_core(core_dev)
with TestRun.step("Create init config from running CAS configuration."):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug core disk."):
core_disk.unplug()
with TestRun.step("Plug core disk."):
core_disk.plug()
time.sleep(1)
with TestRun.step("Check if core device is listed in core pool."):
check_if_dev_in_core_pool(core_dev)
with TestRun.step("Unplug cache disk."):
cache_disk.unplug()
with TestRun.step("Plug cache disk."):
cache_disk.plug()
with TestRun.step("Check if core device is active and not in the core pool."):
check_if_dev_in_core_pool(core_dev, False)
if core.get_status() != CoreStatus.active:
@@ -120,15 +135,20 @@ def test_udev_cache_load(cache_mode):
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
cache_dev = cache_disk.partitions[0]
cache = casadm.start_cache(cache_dev, cache_mode=cache_mode)
with TestRun.step("Create init config from running configuration"):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug cache disk."):
cache_disk.unplug()
with TestRun.step("Plug cache disk."):
cache_disk.plug()
time.sleep(1)
with TestRun.step("List caches and check if cache is loaded."):
caches = casadm_parser.get_caches()
if len(caches) < 1:
@@ -165,6 +185,7 @@ def test_neg_udev_cache_load():
"""
caches_count = 2
cores_count = 4
with TestRun.step("Create init config file with two caches and two cores per each cache."):
cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)] * caches_count)
@@ -178,16 +199,20 @@ def test_neg_udev_cache_load():
init_conf.add_core(1 if j in first_cache_core_numbers else 2,
j + 1, core_disk.partitions[j])
init_conf.save_config_file()
with TestRun.step("Start one cache and add two cores as defined in init config."):
cache = casadm.start_cache(cache_disk.partitions[0])
for i in first_cache_core_numbers:
cache.add_core(core_disk.partitions[i])
with TestRun.step("Stop cache."):
cache.stop()
with TestRun.step("Unplug and plug cache disk."):
cache_disk.unplug()
cache_disk.plug()
time.sleep(1)
with TestRun.step("Check if CAS is loaded correctly."):
cas_devices = casadm_parser.get_cas_devices_dict()
if len(cas_devices["core_pool"]) != 0:
@@ -202,6 +227,7 @@ def test_neg_udev_cache_load():
if len(cas_devices["cores"]) != 2:
TestRun.LOGGER.error(f"There is wrong number of cores. Expected: 2, actual: "
f"{len(cas_devices['caches'])}")
correct_core_devices = []
for i in first_cache_core_numbers:
correct_core_devices.append(core_disk.partitions[i].system_path)
@@ -210,10 +236,12 @@ def test_neg_udev_cache_load():
CoreStatus[core["status"].lower()] != CoreStatus.active or \
core["cache_id"] != 1:
TestRun.LOGGER.error(f"Core did not load correctly: {core}.")
with TestRun.step("Unplug and plug core disk."):
core_disk.unplug()
core_disk.plug()
time.sleep(1)
with TestRun.step("Check if two cores assigned to not loaded cache are inserted to core pool."):
cas_devices = casadm_parser.get_cas_devices_dict()
if len(cas_devices["core_pool"]) != 2: