diff --git a/test/functional/tests/fault_injection/test_multilevel_cache.py b/test/functional/tests/cache_ops/test_multilevel_cache.py similarity index 98% rename from test/functional/tests/fault_injection/test_multilevel_cache.py rename to test/functional/tests/cache_ops/test_multilevel_cache.py index c2d3f66..183f1af 100644 --- a/test/functional/tests/fault_injection/test_multilevel_cache.py +++ b/test/functional/tests/cache_ops/test_multilevel_cache.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2020-2021 Intel Corporation +# Copyright(c) 2020-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # diff --git a/test/functional/tests/fault_injection/test_cache_insert_error.py b/test/functional/tests/fault_injection/test_cache_insert_error.py index f715928..87c0c4d 100644 --- a/test/functional/tests/fault_injection/test_cache_insert_error.py +++ b/test/functional/tests/fault_injection/test_cache_insert_error.py @@ -1,10 +1,9 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # import pytest - from api.cas import casadm from api.cas.cache_config import ( CacheMode, @@ -12,7 +11,9 @@ from api.cas.cache_config import ( SeqCutOffPolicy, CleaningPolicy, CacheStatus, + CacheModeTrait, ) + from core.test_run import TestRun from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan from test_tools.device_mapper import ErrorDevice, DmTable @@ -21,22 +22,28 @@ from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMet from test_utils.os_utils import Udev from test_utils.size import Size, Unit +start_size = Size(512, Unit.Byte) +stop_size = Size(128, Unit.KibiByte) + -@pytest.mark.parametrizex("cache_line_size", CacheLineSize) @pytest.mark.parametrizex("cache_mode", CacheMode) +@pytest.mark.parametrizex( + "block_size", [start_size, Size(1024, Unit.Byte), Size(4, Unit.KibiByte), stop_size] +) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_cache_insert_error(cache_mode, cache_line_size): +def test_cache_insert_error(cache_mode, block_size): """ - title: Cache insert test with error device - description: | - Validate CAS ability to handle write errors while it tries to insert - cache lines. For lazy writes cache modes (WO, WB) issue only reads. - pass_criteria: - - No I/O errors returned to the user - - Cache write error statistics are counted properly - - No cache line gets inserted into cache + title: Cache insert test with error device + description: | + Validate CAS ability to handle write errors while it tries to insert + cache lines. For lazy writes cache modes (WO, WB) issue only reads. 
+    pass_criteria:
+      - No I/O errors returned to the user
+      - Cache write error statistics are counted properly
+      - No cache line gets inserted into cache
     """
+    cache_line_size = CacheLineSize.DEFAULT
     with TestRun.step("Prepare core and cache"):
         cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)

@@ -45,17 +52,18 @@ def test_cache_insert_error(cache_mode, cache_line_size):
             .create_command()
             .io_engine(IoEngine.libaio)
             .size(core.size)
-            .block_size(cache_line_size)
+            .block_size(block_size)
             .target(core)
             .direct()
         )
-        if cache_mode in [CacheMode.WB, CacheMode.WO]:
+        if cache_mode in CacheMode.with_traits(CacheModeTrait.LazyWrites):
             fio_cmd = fio_cmd.read_write(ReadWrite.randread)
         else:
-            fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)
+            fio_cmd = fio_cmd.read_write(ReadWrite.randrw)

     with TestRun.step("Run fio and verify no errors present"):
         fio_errors = fio_cmd.run()[0].total_errors()
+
         if fio_errors != 0:
             TestRun.fail(f"Some I/O ended with errors {fio_errors}")

@@ -66,36 +74,91 @@ def test_cache_insert_error(cache_mode, cache_line_size):
         if occupancy != 0:
             TestRun.fail(f"Occupancy is not zero, but {occupancy}")

-        cache_writes = stats.block_stats.cache.writes / cache_line_size.value
+        # Convert cache writes from bytes to I/O count, assuming cache I/O is sent
+        # with cacheline granularity.
+        cache_writes_per_block = max(block_size.get_value() // int(cache_line_size), 1)
+        cache_writes = stats.block_stats.cache.writes / block_size * cache_writes_per_block
+
         cache_errors = stats.error_stats.cache.total
-        if cache_writes != cache_errors:
+
+        # Cache error count is accurate; however, cache writes are rounded up to 4K in OCF.
+        # Need to take this into account and round up cache errors accordingly for the
+        # comparison.
+        cache_writes_accuracy = max(Size(4, Unit.KibiByte) / block_size, 1)
+        rounded_cache_errors = (
+            (cache_errors + cache_writes_accuracy - 1)
+            // cache_writes_accuracy
+            * cache_writes_accuracy
+        )
+        if cache_writes != rounded_cache_errors:
             TestRun.fail(
-                f"Cache errors ({cache_errors}) should equal to number of"
+                f"Cache errors ({rounded_cache_errors}) should equal the number of"
                 f" requests to cache ({cache_writes})"
             )

-        if cache_mode not in [CacheMode.WB, CacheMode.WO]:
+        if cache_mode not in CacheMode.with_traits(CacheModeTrait.LazyWrites):
             with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
                 cache.stop()

                 fio_cmd.target(core_device).verify_only().run()


-@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
-@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WO])
+@pytest.mark.parametrizex("cache_mode", CacheMode.without_traits(CacheModeTrait.LazyWrites))
+@pytest.mark.parametrizex("block_size", [start_size, Size(4, Unit.KibiByte), stop_size])
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
+def test_error_cache_verify_core(cache_mode, block_size):
     """
-        title: Cache insert test with error device for writes on lazy writes cache mode
-        description: |
-          Validate CAS ability to handle write errors while it tries to insert
-          cache lines. This test is exclusively for lazy writes cache modes.
-        pass_criteria:
-          - I/O errors returned to user
-          - Cache automatically stops after encountering errors
-          - No cache line gets inserted into cache
+    title: Write data to broken cache in non-lazy cache mode
+    description: |
+      Verify contents of primary storage after writes to cache with underlying error
+      device in non-lazy cache mode and check that all I/O requests succeeded
+    pass_criteria:
+      - No I/O errors returned to the user
+      - The primary storage contents match the actual written data
     """
+    cache_line_size = CacheLineSize.DEFAULT
+    with TestRun.step("Prepare core and cache"):
+        cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)
+
+        fio_cmd = (
+            Fio()
+            .create_command()
+            .io_engine(IoEngine.libaio)
+            .size(core.size)
+            .block_size(block_size)
+            .target(core)
+            .direct()
+            .read_write(ReadWrite.randrw)
+            .verify_pattern()
+            .verify(VerifyMethod.pattern)
+        )
+
+    with TestRun.step("Run fio and verify no errors present"):
+        fio_errors = fio_cmd.run()[0].total_errors()
+        if fio_errors != 0:
+            TestRun.fail(f"Some I/O ended with errors {fio_errors}")
+
+    with TestRun.step("Verify core device contents"):
+        cache.stop()
+        fio_cmd.target(core_device).verify_only().run()
+
+
+@pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites))
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+def test_cache_write_lazy_insert_error(cache_mode):
+    """
+    title: Cache insert test with error device for writes on lazy writes cache mode
+    description: |
+      Validate CAS ability to handle write errors while it tries to insert
+      cache lines. This test is exclusively for lazy writes cache modes.
+    pass_criteria:
+      - I/O errors returned to user
+      - Cache automatically stops after encountering errors
+      - No cache line gets inserted into cache
+    """
+    cache_line_size = CacheLineSize.DEFAULT
     with TestRun.step("Prepare core and cache"):
         cache, core, _ = prepare_configuration(cache_mode, cache_line_size)

@@ -105,7 +168,7 @@ def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
             .create_command()
             .io_engine(IoEngine.libaio)
             .size(core.size)
-            .block_size(cache_line_size)
+            .blocksize_range([(start_size.get_value(), stop_size.get_value())])
             .read_write(ReadWrite.randwrite)
             .target(core)
             .continue_on_error(ErrorFilter.io)
@@ -126,8 +189,6 @@ def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
         cache_writes = stats.block_stats.cache.writes / cache_line_size.value
         cache_errors = stats.error_stats.cache.total

-        if cache_writes != 1:
-            TestRun.fail(f"There only should be one cache write attempt before cache stop")
         if cache_writes != cache_errors:
             TestRun.fail(
                 f"Cache errors ({cache_errors}) should equal to number of requests to"
diff --git a/test/functional/tests/fault_injection/test_fault_injection_interrupts.py b/test/functional/tests/fault_injection/test_fault_injection_interrupts.py
index c45fb20..216be8b 100644
--- a/test/functional/tests/fault_injection/test_fault_injection_interrupts.py
+++ b/test/functional/tests/fault_injection/test_fault_injection_interrupts.py
@@ -11,9 +11,12 @@ from api.cas.casadm_parser import wait_for_flushing
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.disk_utils import Filesystem
+from test_tools.dd import Dd
 from test_utils import os_utils
 from test_utils.os_utils import Udev, DropCachesMode
 from test_utils.size import Size, Unit
+from tests.lazy_writes.recovery.recovery_tests_methods import compare_files +from test_tools import fs_utils mount_point = "/mnt/cas" test_file_path = f"{mount_point}/test_file" @@ -27,20 +30,21 @@ cache_size = Size(16, Unit.GibiByte) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_interrupt_core_flush(cache_mode, filesystem): """ - title: Test if OpenCAS works correctly after core's flushing interruption. - description: | - Negative test of the ability of OpenCAS to handle core flushing interruption. - pass_criteria: - - No system crash. - - Flushing would be stopped after interruption. - - Md5sum are correct during all test steps. - - Dirty blocks quantity after interruption is equal or lower but non-zero. + title: Test if OpenCAS works correctly after core's flushing interruption. + description: | + Negative test of the ability of OpenCAS to handle core flushing interruption. + pass_criteria: + - No system crash. + - Flushing would be stopped after interruption. + - Md5sum are correct during all test steps. + - Dirty blocks quantity after interruption is equal or lower but non-zero. """ with TestRun.step("Prepare cache and core."): cache_part, core_part = prepare() - for _ in TestRun.iteration(range(iterations_per_config), - f"Reload cache configuration {iterations_per_config} times."): + for _ in TestRun.iteration( + range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times." + ): with TestRun.step("Start cache."): cache = casadm.start_cache(cache_part, cache_mode, force=True) @@ -66,7 +70,8 @@ def test_interrupt_core_flush(cache_mode, filesystem): with TestRun.step("Start flushing core device."): flush_pid = TestRun.executor.run_in_background( - cli.flush_core_cmd(str(cache.cache_id), str(core.core_id))) + cli.flush_core_cmd(str(cache.cache_id), str(core.core_id)) + ) with TestRun.step("Interrupt core flushing."): wait_for_flushing(cache, core) @@ -78,11 +83,13 @@ def test_interrupt_core_flush(cache_mode, filesystem): with TestRun.step("Check number of dirty data on exported object after interruption."): core_dirty_blocks_after = core.get_dirty_blocks() if core_dirty_blocks_after >= core_dirty_blocks_before: - TestRun.LOGGER.error("Quantity of dirty lines after core flush interruption " - "should be lower.") + TestRun.LOGGER.error( + "Quantity of dirty lines after core flush interruption " "should be lower." + ) if int(core_dirty_blocks_after) == 0: - TestRun.LOGGER.error("Quantity of dirty lines after core flush interruption " - "should not be zero.") + TestRun.LOGGER.error( + "Quantity of dirty lines after core flush interruption " "should not be zero." + ) with TestRun.step("Unmount core and stop cache."): core.unmount() @@ -94,7 +101,8 @@ def test_interrupt_core_flush(cache_mode, filesystem): with TestRun.step("Check md5 sum of test file again."): if test_file_md5sum_before != test_file.md5sum(): TestRun.LOGGER.error( - "Md5 sums before and after interrupting core flush are different.") + "Md5 sums before and after interrupting core flush are different." + ) with TestRun.step("Unmount core device."): core_part.unmount() @@ -106,20 +114,21 @@ def test_interrupt_core_flush(cache_mode, filesystem): @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_interrupt_cache_flush(cache_mode, filesystem): """ - title: Test if OpenCAS works correctly after cache's flushing interruption. - description: | - Negative test of the ability of OpenCAS to handle cache flushing interruption. - pass_criteria: - - No system crash. 
- - Flushing would be stopped after interruption. - - Md5sum are correct during all test steps. - - Dirty blocks quantity after interruption is equal or lower but non-zero. + title: Test if OpenCAS works correctly after cache's flushing interruption. + description: | + Negative test of the ability of OpenCAS to handle cache flushing interruption. + pass_criteria: + - No system crash. + - Flushing would be stopped after interruption. + - Md5sum are correct during all test steps. + - Dirty blocks quantity after interruption is equal or lower but non-zero. """ with TestRun.step("Prepare cache and core."): cache_part, core_part = prepare() - for _ in TestRun.iteration(range(iterations_per_config), - f"Reload cache configuration {iterations_per_config} times."): + for _ in TestRun.iteration( + range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times." + ): with TestRun.step("Start cache."): cache = casadm.start_cache(cache_part, cache_mode, force=True) @@ -144,8 +153,7 @@ def test_interrupt_cache_flush(cache_mode, filesystem): cache_dirty_blocks_before = cache.get_dirty_blocks() with TestRun.step("Start flushing cache."): - flush_pid = TestRun.executor.run_in_background( - cli.flush_cache_cmd(str(cache.cache_id))) + flush_pid = TestRun.executor.run_in_background(cli.flush_cache_cmd(str(cache.cache_id))) with TestRun.step("Interrupt cache flushing"): wait_for_flushing(cache, core) @@ -157,11 +165,13 @@ def test_interrupt_cache_flush(cache_mode, filesystem): with TestRun.step("Check number of dirty data on exported object after interruption."): cache_dirty_blocks_after = cache.get_dirty_blocks() if cache_dirty_blocks_after >= cache_dirty_blocks_before: - TestRun.LOGGER.error("Quantity of dirty lines after cache flush interruption " - "should be lower.") + TestRun.LOGGER.error( + "Quantity of dirty lines after cache flush interruption " "should be lower." + ) if int(cache_dirty_blocks_after) == 0: - TestRun.LOGGER.error("Quantity of dirty lines after cache flush interruption " - "should not be zero.") + TestRun.LOGGER.error( + "Quantity of dirty lines after cache flush interruption " "should not be zero." + ) with TestRun.step("Unmount core and stop cache."): core.unmount() @@ -173,7 +183,8 @@ def test_interrupt_cache_flush(cache_mode, filesystem): with TestRun.step("Check md5 sum of test file again."): if test_file_md5sum_before != test_file.md5sum(): TestRun.LOGGER.error( - "Md5 sums before and after interrupting cache flush are different.") + "Md5 sums before and after interrupting cache flush are different." + ) with TestRun.step("Unmount core device."): core_part.unmount() @@ -185,26 +196,27 @@ def test_interrupt_cache_flush(cache_mode, filesystem): @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_interrupt_core_remove(cache_mode, filesystem): """ - title: Test if OpenCAS works correctly after core's removing interruption. - description: | - Negative test of the ability of OpenCAS to handle core's removing interruption. - pass_criteria: - - No system crash. - - Core would not be removed from cache after interruption. - - Flushing would be stopped after interruption. - - Md5sum are correct during all test steps. - - Dirty blocks quantity after interruption is lower but non-zero. + title: Test if OpenCAS works correctly after core's removing interruption. + description: | + Negative test of the ability of OpenCAS to handle core's removing interruption. + pass_criteria: + - No system crash. 
+ - Core would not be removed from cache after interruption. + - Flushing would be stopped after interruption. + - Md5sum are correct during all test steps. + - Dirty blocks quantity after interruption is lower but non-zero. """ with TestRun.step("Prepare cache and core."): - cache_dev = TestRun.disks['cache'] + cache_dev = TestRun.disks["cache"] cache_dev.create_partitions([cache_size]) cache_part = cache_dev.partitions[0] - core_dev = TestRun.disks['core'] + core_dev = TestRun.disks["core"] core_dev.create_partitions([cache_size * 2]) core_part = core_dev.partitions[0] - for _ in TestRun.iteration(range(iterations_per_config), - f"Reload cache configuration {iterations_per_config} times."): + for _ in TestRun.iteration( + range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times." + ): with TestRun.step("Start cache."): cache = casadm.start_cache(cache_part, cache_mode, force=True) @@ -224,7 +236,8 @@ def test_interrupt_core_remove(cache_mode, filesystem): test_file_md5sum_before = test_file.md5sum() with TestRun.step( - "Get number of dirty data on exported object before core removal interruption."): + "Get number of dirty data on exported object before core removal interruption." + ): os_utils.sync() os_utils.drop_caches(DropCachesMode.ALL) cache_dirty_blocks_before = cache.get_dirty_blocks() @@ -234,7 +247,8 @@ def test_interrupt_core_remove(cache_mode, filesystem): with TestRun.step("Start removing core device."): flush_pid = TestRun.executor.run_in_background( - cli.remove_core_cmd(str(cache.cache_id), str(core.core_id))) + cli.remove_core_cmd(str(cache.cache_id), str(core.core_id)) + ) with TestRun.step("Interrupt core removing"): wait_for_flushing(cache, core) @@ -243,15 +257,23 @@ def test_interrupt_core_remove(cache_mode, filesystem): percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id) TestRun.executor.run(f"kill -s SIGINT {flush_pid}") + with TestRun.step("Check md5 sum of test file after interruption."): + cache.set_cache_mode(CacheMode.WO) + test_file_md5sum_interrupt = test_file.md5sum() + cache.set_cache_mode(cache_mode) + with TestRun.step( - "Check number of dirty data on exported object after core removal interruption."): + "Check number of dirty data on exported object after core removal interruption." + ): cache_dirty_blocks_after = cache.get_dirty_blocks() if cache_dirty_blocks_after >= cache_dirty_blocks_before: - TestRun.LOGGER.error("Quantity of dirty lines after core removal interruption " - "should be lower.") + TestRun.LOGGER.error( + "Quantity of dirty lines after core removal interruption " "should be lower." + ) if int(cache_dirty_blocks_after) == 0: - TestRun.LOGGER.error("Quantity of dirty lines after core removal interruption " - "should not be zero.") + TestRun.LOGGER.error( + "Quantity of dirty lines after core removal interruption " "should not be zero." 
+ ) with TestRun.step("Remove core from cache."): core.remove_core() @@ -264,35 +286,40 @@ def test_interrupt_core_remove(cache_mode, filesystem): with TestRun.step("Check md5 sum of test file again."): if test_file_md5sum_before != test_file.md5sum(): - TestRun.LOGGER.error( - "Md5 sums before and after interrupting core removal are different.") + TestRun.LOGGER.error("Md5 sum before interrupting core removal is different.") + + is_sum_diff_after_interrupt = test_file_md5sum_interrupt != test_file.md5sum() + if is_sum_diff_after_interrupt: + TestRun.LOGGER.error("Md5 sum after interrupting core removal is different.") with TestRun.step("Unmount core device."): core_part.unmount() -@pytest.mark.parametrizex("filesystem", Filesystem) +@pytest.mark.parametrize("stop_percentage", [0, 50]) @pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_interrupt_cache_mode_switch_immediately(cache_mode, filesystem): +def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage): """ - title: Test if OpenCAS works correctly after cache mode switching immediate interruption. - description: | - Negative test of the ability of OpenCAS to handle cache mode switching - immediate interruption. - pass_criteria: - - No system crash. - - Cache mode will not be switched after interruption. - - Flushing would be stopped after interruption. - - Md5sum are correct during all test steps. - - Dirty blocks quantity after interruption is lower but non-zero. + title: Test if OpenCAS works correctly after cache mode switching + immediate or delayed interruption. + description: | + Negative test of the ability of OpenCAS to handle cache mode switching + immediate or delayed interruption. + pass_criteria: + - No system crash. + - Cache mode will not be switched after interruption. + - Flushing would be stopped after interruption. + - Md5sum are correct during all test steps. + - Dirty blocks quantity after interruption is lower but non-zero. """ with TestRun.step("Prepare cache and core."): cache_part, core_part = prepare() - for _ in TestRun.iteration(range(iterations_per_config), - f"Reload cache configuration {iterations_per_config} times."): + for _ in TestRun.iteration( + range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times." 
+ ): with TestRun.step("Start cache."): cache = casadm.start_cache(cache_part, cache_mode, force=True) @@ -300,16 +327,20 @@ def test_interrupt_cache_mode_switch_immediately(cache_mode, filesystem): with TestRun.step("Set cleaning policy to NOP."): cache.set_cleaning_policy(CleaningPolicy.nop) - with TestRun.step(f"Add core device with {filesystem} filesystem and mount it."): - core_part.create_filesystem(filesystem) + with TestRun.step(f"Add core device."): core = cache.add_core(core_part) - core.mount(mount_point) with TestRun.step(f"Create test file in mount point of exported object."): - test_file = create_test_file() + test_file_size = Size(1024, Unit.MebiByte) + test_file = fs_utils.create_random_test_file(test_file_path, test_file_size) with TestRun.step("Check md5 sum of test file."): - test_file_md5sum_before = test_file.md5sum() + test_file_md5_before = test_file.md5sum() + + with TestRun.step("Export file to CAS"): + Dd().block_size(test_file_size).input(test_file.full_path).output(core.path).oflag( + "direct" + ).run() with TestRun.step("Get number of dirty data on exported object before interruption."): os_utils.sync() @@ -317,128 +348,45 @@ def test_interrupt_cache_mode_switch_immediately(cache_mode, filesystem): cache_dirty_blocks_before = cache.get_dirty_blocks() with TestRun.step("Start switching cache mode."): - flush_pid = TestRun.executor.run_in_background(cli.set_cache_mode_cmd( - str(CacheMode.DEFAULT.name.lower()), str(cache.cache_id), "yes")) - wait_for_flushing(cache, core) + flush_pid = TestRun.executor.run_in_background( + cli.set_cache_mode_cmd( + str(CacheMode.DEFAULT.name.lower()), str(cache.cache_id), "yes" + ) + ) with TestRun.step("Send interruption signal."): + wait_for_flushing(cache, core) + percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id) + while percentage < stop_percentage: + percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id) TestRun.executor.run(f"kill -s SIGINT {flush_pid}") with TestRun.step("Check number of dirty data on exported object after interruption."): cache_dirty_blocks_after = cache.get_dirty_blocks() if cache_dirty_blocks_after >= cache_dirty_blocks_before: - TestRun.LOGGER.error("Quantity of dirty lines after cache mode switching " - "interruption should be lower.") + TestRun.LOGGER.error( + "Quantity of dirty lines after cache mode switching " + "interruption should be lower." + ) if int(cache_dirty_blocks_after) == 0: - TestRun.LOGGER.error("Quantity of dirty lines after cache mode switching " - "interruption should not be zero.") + TestRun.LOGGER.error( + "Quantity of dirty lines after cache mode switching " + "interruption should not be zero." 
+ ) with TestRun.step("Check cache mode."): if cache.get_cache_mode() != cache_mode: TestRun.LOGGER.error("Cache mode should remain the same.") with TestRun.step("Unmount core and stop cache."): - core.unmount() cache.stop() - with TestRun.step("Mount core device."): - core_part.mount(mount_point) - with TestRun.step("Check md5 sum of test file again."): - if test_file_md5sum_before != test_file.md5sum(): - TestRun.LOGGER.error( - "Md5 sums before and after interrupting mode switching are different.") - - with TestRun.step("Unmount core device."): - core_part.unmount() - - -@pytest.mark.parametrizex("filesystem", Filesystem) -@pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) -@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) -@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_interrupt_cache_mode_switch_delayed(cache_mode, filesystem): - """ - title: Test if OpenCAS works correctly after cache mode switching delayed interruption. - description: | - Negative test of the ability of OpenCAS to handle cache mode switching - interruption with delay. - pass_criteria: - - No system crash. - - Cache mode will not be switched after interruption. - - Flushing would be stopped after interruption. - - Md5sum are correct during all test steps. - - Dirty blocks quantity after interruption is lower but non-zero. - """ - with TestRun.step("Prepare cache and core."): - cache_part, core_part = prepare() - - for _ in TestRun.iteration(range(iterations_per_config), - f"Reload cache configuration {iterations_per_config} times."): - - with TestRun.step("Start cache."): - cache = casadm.start_cache(cache_part, cache_mode, force=True) - - with TestRun.step("Set cleaning policy to NOP."): - cache.set_cleaning_policy(CleaningPolicy.nop) - - with TestRun.step(f"Add core device with {filesystem} filesystem and mount it."): - core_part.create_filesystem(filesystem) - core = cache.add_core(core_part) - core.mount(mount_point) - - with TestRun.step(f"Create test file in mount point of exported object."): - test_file = create_test_file() - - with TestRun.step("Check md5 sum of test file."): - test_file_md5sum_before = test_file.md5sum() - - with TestRun.step("Get number of dirty data on exported object before " - "switching cache mode interruption."): - os_utils.sync() - os_utils.drop_caches(DropCachesMode.ALL) - cache_dirty_blocks_before = cache.get_dirty_blocks() - - with TestRun.step("Start switching cache mode."): - flush_pid = TestRun.executor.run_in_background(cli.set_cache_mode_cmd( - str(CacheMode.DEFAULT.name.lower()), str(cache.cache_id), "yes")) - - with TestRun.step("Send interruption signal."): - wait_for_flushing(cache, core) - percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id) - while percentage < 50: - percentage = casadm_parser.get_flushing_progress(cache.cache_id, core.core_id) - TestRun.executor.run(f"kill -s SIGINT {flush_pid}") - - with TestRun.step( - "Get quantity of dirty data on exported object after sending interruption " - "signal to cas to stop mode switching."): - cache_dirty_blocks_after = cache.get_dirty_blocks() - if cache_dirty_blocks_after >= cache_dirty_blocks_before: - TestRun.LOGGER.error("Quantity of dirty lines after cache mode switching " - "interruption should be lower.") - if int(cache_dirty_blocks_after) == 0: - TestRun.LOGGER.error("Quantity of dirty lines after cache mode switching " - "interruption should not be zero.") - - with TestRun.step("Check cache 
mode."): - if cache.get_cache_mode() != cache_mode: - TestRun.LOGGER.error("Cache mode should remain the same.") - - with TestRun.step("Unmount core and stop cache."): - core.unmount() - cache.stop() - - with TestRun.step("Mount core device."): - core_part.mount(mount_point) - - with TestRun.step("Check md5 sum of test file again."): - if test_file_md5sum_before != test_file.md5sum(): - TestRun.LOGGER.error( - "Md5 sums before and after interrupting mode switching are different.") - - with TestRun.step("Unmount core device."): - core_part.unmount() + Dd().block_size(test_file_size).input(core.path).output(test_file.full_path).oflag( + "direct" + ).run() + target_file_md5 = test_file.md5sum() + compare_files(test_file_md5_before, target_file_md5) @pytest.mark.parametrizex("filesystem", Filesystem) @@ -447,20 +395,21 @@ def test_interrupt_cache_mode_switch_delayed(cache_mode, filesystem): @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_interrupt_cache_stop(cache_mode, filesystem): """ - title: Test if OpenCAS works correctly after cache stopping interruption. - description: | - Negative test of the ability of OpenCAS to handle cache's stop interruption. - pass_criteria: - - No system crash. - - Flushing would be stopped after interruption. - - Md5sum are correct during all test steps. - - Dirty blocks quantity after interruption is lower but non-zero. + title: Test if OpenCAS works correctly after cache stopping interruption. + description: | + Negative test of the ability of OpenCAS to handle cache's stop interruption. + pass_criteria: + - No system crash. + - Flushing would be stopped after interruption. + - Md5sum are correct during all test steps. + - Dirty blocks quantity after interruption is lower but non-zero. """ with TestRun.step("Prepare cache and core."): cache_part, core_part = prepare() - for _ in TestRun.iteration(range(iterations_per_config), - f"Reload cache configuration {iterations_per_config} times."): + for _ in TestRun.iteration( + range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times." + ): with TestRun.step("Start cache."): cache = casadm.start_cache(cache_part, cache_mode, force=True) @@ -476,9 +425,6 @@ def test_interrupt_cache_stop(cache_mode, filesystem): with TestRun.step(f"Create test file in mount point of exported object."): test_file = create_test_file() - with TestRun.step("Check md5 sum of test file."): - test_file_md5sum_before = test_file.md5sum() - with TestRun.step("Get number of dirty data on exported object before interruption."): os_utils.sync() os_utils.drop_caches(DropCachesMode.ALL) @@ -500,11 +446,13 @@ def test_interrupt_cache_stop(cache_mode, filesystem): with TestRun.step("Check number of dirty data on exported object after interruption."): cache_dirty_blocks_after = cache.get_dirty_blocks() if cache_dirty_blocks_after >= cache_dirty_blocks_before: - TestRun.LOGGER.error("Quantity of dirty lines after cache stop interruption " - "should be lower.") + TestRun.LOGGER.error( + "Quantity of dirty lines after cache stop interruption " "should be lower." + ) if int(cache_dirty_blocks_after) == 0: - TestRun.LOGGER.error("Quantity of dirty lines after cache stop interruption " - "should not be zero.") + TestRun.LOGGER.error( + "Quantity of dirty lines after cache stop interruption " "should not be zero." 
+ ) with TestRun.step("Stop cache."): cache.stop() @@ -512,20 +460,15 @@ def test_interrupt_cache_stop(cache_mode, filesystem): with TestRun.step("Mount core device."): core_part.mount(mount_point) - with TestRun.step("Check md5 sum of test file again."): - if test_file_md5sum_before != test_file.md5sum(): - TestRun.LOGGER.error("Md5 sums before and after interrupting" - " cache stop are different.") - with TestRun.step("Unmount core device."): core_part.unmount() def prepare(): - cache_dev = TestRun.disks['cache'] + cache_dev = TestRun.disks["cache"] cache_dev.create_partitions([cache_size]) cache_part = cache_dev.partitions[0] - core_dev = TestRun.disks['core'] + core_dev = TestRun.disks["core"] core_dev.create_partitions([cache_size * 2]) core_part = core_dev.partitions[0] Udev.disable() @@ -535,13 +478,11 @@ def prepare(): def create_test_file(): from test_utils.filesystem.file import File from test_tools.dd import Dd + bs = Size(512, Unit.KibiByte) cnt = int(cache_size.value / bs.value) test_file = File.create_file(test_file_path) - dd = Dd().output(test_file_path) \ - .input("/dev/zero") \ - .block_size(bs) \ - .count(cnt) + dd = Dd().output(test_file_path).input("/dev/zero").block_size(bs).count(cnt) dd.run() test_file.refresh_item() return test_file diff --git a/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py b/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py index 8a354be..74edb32 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py +++ b/test/functional/tests/fault_injection/test_fault_injection_many_to_one.py @@ -17,12 +17,9 @@ from test_utils.size import Size, Unit block_size = Size(1, Unit.Blocks4096) -@pytest.mark.parametrize( - "cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite) -) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_one_core_remove(cache_mode): +def test_one_core_remove(): """ title: Test if OpenCAS correctly handles removal of one of multiple core devices. 
description: | @@ -46,6 +43,7 @@ def test_one_core_remove(cache_mode): Udev.disable() with TestRun.step("Start cache"): + cache_mode = CacheMode.WT cache = casadm.start_cache(cache_dev, cache_mode, force=True) caches_count = len(casadm_parser.get_caches()) if caches_count != 1: @@ -89,9 +87,7 @@ def test_one_core_remove(cache_mode): casadm.stop_all_caches() -@pytest.mark.parametrize( - "cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite) -) +@pytest.mark.parametrizex("cache_mode", [CacheMode.WT]) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_one_core_release(cache_mode): @@ -155,6 +151,7 @@ def test_one_core_release(cache_mode): casadm.stop_all_caches() +@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB]) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core1", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core2", DiskTypeLowerThan("cache")) diff --git a/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py b/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py index b482f14..273c5a4 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py +++ b/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2020-2021 Intel Corporation +# Copyright(c) 2020-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -7,6 +7,7 @@ import pytest from api.cas import casadm, casadm_parser, cli, cli_messages from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait +from tests.lazy_writes.recovery.recovery_tests_methods import copy_file, compare_files from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools import fs_utils @@ -19,16 +20,15 @@ mount_point = "/mnt/cas" test_file_path = f"{mount_point}/test_file" -@pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_stop_no_flush_load_cache(cache_mode, filesystem): +def test_stop_no_flush_load_cache(cache_mode): """ title: Test to check that 'stop --no-data-flush' command works correctly. description: | - Negative test of the ability of CAS to load unflushed cache on core device - with filesystem. Test uses lazy flush cache modes. + Negative test of the ability of CAS to load unflushed cache on core device. + Test uses lazy flush cache modes. pass_criteria: - No system crash while load cache. - Starting cache without loading metadata fails. 
@@ -43,14 +43,15 @@ def test_stop_no_flush_load_cache(cache_mode, filesystem): with TestRun.step("Change cleaning policy to NOP."): cache.set_cleaning_policy(CleaningPolicy.nop) - with TestRun.step(f"Add core with {filesystem.name} filesystem to cache and mount it."): - core_part.create_filesystem(filesystem) + with TestRun.step("Add core to cache."): core = cache.add_core(core_part) - core.mount(mount_point) with TestRun.step(f"Create test file in mount point of exported object and check its md5 sum."): - test_file = fs_utils.create_random_test_file(test_file_path, Size(48, Unit.MebiByte)) + test_file_size = Size(48, Unit.MebiByte) + test_file = fs_utils.create_random_test_file(test_file_path, test_file_size) test_file_md5_before = test_file.md5sum() + copy_file(source=test_file.full_path, target=core.path, size=test_file_size, + direct="oflag") with TestRun.step("Unmount exported object."): core.unmount() @@ -83,82 +84,11 @@ def test_stop_no_flush_load_cache(cache_mode, filesystem): if dirty_blocks_before != core.get_dirty_blocks(): TestRun.LOGGER.error("Dirty blocks number is different than before loading cache.") - with TestRun.step("Mount exported object."): - core.mount(mount_point) - with TestRun.step("Compare md5 sum of test file before and after loading cache."): - if test_file_md5_before != test_file.md5sum(): - TestRun.LOGGER.error("Test file's md5 sum is different than before loading cache.") - - with TestRun.step("Unmount exported object."): - core.unmount() - - with TestRun.step("Stop cache."): - casadm.stop_all_caches() - - -@pytest.mark.parametrize("cache_mode", CacheMode.with_traits(CacheModeTrait.LazyWrites)) -@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) -@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_stop_no_flush_load_cache_no_fs(cache_mode): - """ - title: Test to check that 'stop --no-data-flush' command works correctly. - description: | - Negative test of the ability of CAS to load unflushed cache on core device - without filesystem. Test uses lazy flush cache modes. - pass_criteria: - - No system crash while load cache. - - Starting cache without loading metadata fails. - - Starting cache with loading metadata finishes with success. 
- """ - with TestRun.step("Prepare cache and core devices."): - cache_part, core_part = prepare() - - with TestRun.step("Start cache with --force option."): - cache = casadm.start_cache(cache_part, cache_mode, force=True) - - with TestRun.step("Change cleaning policy to NOP."): - cache.set_cleaning_policy(CleaningPolicy.nop) - - with TestRun.step("Add core device without filesystem."): - core_part.wipe_filesystem() - core = cache.add_core(core_part) - - with TestRun.step("Fill exported object with data."): - dd = (Dd() - .input("/dev/zero") - .output(core.path) - .block_size(Size(1, Unit.Blocks4096)) - .oflag("direct")) - dd.run() - - with TestRun.step("Count dirty blocks on exported object."): - dirty_blocks_before = core.get_dirty_blocks() - - with TestRun.step("Stop cache with option '--no-data-flush'."): - cache.stop(no_data_flush=True) - caches_count = len(casadm_parser.get_caches()) - if caches_count != 0: - TestRun.fail(f"Expected caches count: 0; Actual caches count: {caches_count}.") - - with TestRun.step("Try to start cache without loading metadata."): - output = TestRun.executor.run_expect_fail(cli.start_cmd( - cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()), - force=False, load=False)) - cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata) - - with TestRun.step("Load cache."): - cache = casadm.load_cache(cache.cache_device) - caches_count = len(casadm_parser.get_caches()) - if caches_count != 1: - TestRun.fail(f"Expected caches count: 1 Actual caches count: {caches_count}.") - cores_count = len(casadm_parser.get_cores(cache.cache_id)) - if cores_count != 1: - TestRun.fail(f"Expected cores count: 1; Actual cores count: {cores_count}.") - - with TestRun.step("Compare dirty blocks number before and after loading cache."): - if dirty_blocks_before != core.get_dirty_blocks(): - TestRun.LOGGER.error("Dirty blocks number is different than before loading cache.") + copy_file(source=core.path, target=test_file.full_path, + size=test_file_size, direct="iflag") + target_file_md5 = test_file.md5sum() + compare_files(test_file_md5_before, target_file_md5) with TestRun.step("Stop cache."): casadm.stop_all_caches() diff --git a/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py b/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py index 10ba384..80987e7 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py +++ b/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py @@ -1,12 +1,11 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # import pytest from api.cas import casadm, casadm_parser, cli, cli_messages -from api.cas.cache_config import CacheMode from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools import fs_utils @@ -17,10 +16,10 @@ mount_point = "/mnt/cas" test_file_path = f"{mount_point}/test_file" -@pytest.mark.parametrizex("cache_mode", CacheMode) + @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_load_cache_with_mounted_core(cache_mode): +def test_load_cache_with_mounted_core(): """ title: Fault injection test for adding mounted core on cache load. 
description: | @@ -37,7 +36,7 @@ def test_load_cache_with_mounted_core(cache_mode): core_dev = TestRun.disks['core'] core_dev.create_partitions([Size(4, Unit.GibiByte)]) core_part = core_dev.partitions[0] - cache = casadm.start_cache(cache_part, cache_mode, force=True) + cache = casadm.start_cache(cache_part, force=True) with TestRun.step("Add core device with xfs filesystem to cache and mount it."): core_part.create_filesystem(Filesystem.xfs) @@ -78,10 +77,9 @@ def test_load_cache_with_mounted_core(cache_mode): casadm.stop_all_caches() -@pytest.mark.parametrizex("cache_mode", CacheMode) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_stop_cache_with_mounted_partition(cache_mode): +def test_stop_cache_with_mounted_partition(): """ title: Fault injection test for removing core and stopping cache with mounted core. description: | @@ -99,7 +97,7 @@ def test_stop_cache_with_mounted_partition(cache_mode): core_dev = TestRun.disks['core'] core_dev.create_partitions([Size(4, Unit.GibiByte)]) core_part = core_dev.partitions[0] - cache = casadm.start_cache(cache_part, cache_mode, force=True) + cache = casadm.start_cache(cache_part, force=True) with TestRun.step("Add core device with xfs filesystem and mount it."): core_part.create_filesystem(Filesystem.xfs) @@ -121,49 +119,3 @@ def test_stop_cache_with_mounted_partition(cache_mode): with TestRun.step("Stop cache."): casadm.stop_all_caches() - -@pytest.mark.parametrizex("cache_mode", CacheMode) -@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) -@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_add_cached_core(cache_mode): - """ - title: Fault injection test for adding already used core to a cache. - description: | - Negative test of the ability to add the core to the cache twice to the same cache - and while the core is already used by the another cache instance. - pass_criteria: - - No system crash. - - Adding already used core to another cache instance fails. - - The same core device cannot be used twice in one cache instance. 
- """ - with TestRun.step("Prepare two caches and one core device."): - cache_dev = TestRun.disks['cache'] - cache_dev.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)]) - cache_part1 = cache_dev.partitions[0] - cache_part2 = cache_dev.partitions[1] - core_dev = TestRun.disks['core'] - core_dev.create_partitions([Size(4, Unit.GibiByte)]) - core_part = core_dev.partitions[0] - - with TestRun.step("Start the first cache instance"): - cache1 = casadm.start_cache(cache_part1, cache_mode, force=True) - - with TestRun.step("Add core device to first cache instance."): - core = cache1.add_core(core_part) - - with TestRun.step("Start the second cache instance"): - cache2 = casadm.start_cache(cache_part2, cache_mode, force=True) - - with TestRun.step("Try adding the same core device to the second cache instance."): - output = TestRun.executor.run_expect_fail( - cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.path), - core_id=str(core.core_id))) - cli_messages.check_stderr_msg(output, cli_messages.add_cached_core) - - with TestRun.step("Try adding the same core device to the same cache for the second time."): - output = TestRun.executor.run_expect_fail( - cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.path))) - cli_messages.check_stderr_msg(output, cli_messages.already_cached_core) - - with TestRun.step("Stop caches."): - casadm.stop_all_caches() diff --git a/test/functional/tests/fault_injection/test_fault_power_hit_init.py b/test/functional/tests/fault_injection/test_fault_power_hit_init.py deleted file mode 100644 index 5ca181b..0000000 --- a/test/functional/tests/fault_injection/test_fault_power_hit_init.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright(c) 2020-2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# - -import pytest - -from api.cas import casadm, cli -from api.cas.cache_config import CacheMode -from core.test_run import TestRun -from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan -from test_utils.size import Size, Unit - - -@pytest.mark.parametrizex("cache_mode", CacheMode) -@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) -@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -@pytest.mark.require_plugin("power_control") -def test_fault_power_hit_init(cache_mode): - """ - title: Test with power hit and verification of metadata initialization after it. - description: | - Test if there will be metadata initialization after wake up - - when starting cache with initialization. - pass_criteria: - - Start cache with initialization works correctly after power hit. 
- """ - with TestRun.step("Prepare CAS device."): - cache_disk = TestRun.disks['cache'] - core_disk = TestRun.disks['core'] - cache_disk.create_partitions([Size(2, Unit.GibiByte)]) - core_disk.create_partitions([Size(2, Unit.GibiByte)]) - cache_dev = cache_disk.partitions[0] - core_dev = core_disk.partitions[0] - - cache = casadm.start_cache(cache_dev, cache_mode, force=True) - core = cache.add_core(core_dev) - - with TestRun.step("Hard reset."): - power_control = TestRun.plugin_manager.get_plugin('power_control') - power_control.power_cycle() - - with TestRun.step("Start cache with re-initialization."): - TestRun.executor.run_expect_success(cli.start_cmd( - cache_dev=str(cache_dev.path), - cache_mode=str(cache_mode.name.lower()), - force=True, - load=False)) - TestRun.LOGGER.info(f"Successful assembly cache device with initialization") diff --git a/test/functional/tests/fault_injection/test_fault_power_hit.py b/test/functional/tests/fault_injection/test_fault_power_reboot.py similarity index 88% rename from test/functional/tests/fault_injection/test_fault_power_hit.py rename to test/functional/tests/fault_injection/test_fault_power_reboot.py index 0efafca..22237e7 100644 --- a/test/functional/tests/fault_injection/test_fault_power_hit.py +++ b/test/functional/tests/fault_injection/test_fault_power_reboot.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2020 Intel Corporation +# Copyright(c) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -8,7 +8,6 @@ import time import pytest from api.cas import casadm, cli, cli_messages -from api.cas.cache_config import CacheMode from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_utils.output import CmdException @@ -19,11 +18,10 @@ wait_long_time = 180 wait_short_time = 15 -@pytest.mark.parametrizex("cache_mode", CacheMode) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_plugin("power_control") -def test_fault_power_hit(cache_mode): +def test_fault_power_reboot(): """ title: Test with power hit. description: | @@ -42,7 +40,7 @@ def test_fault_power_hit(cache_mode): cache_dev = cache_disk.partitions[0] core_dev = core_disk.partitions[0] - cache = casadm.start_cache(cache_dev, cache_mode, force=True) + cache = casadm.start_cache(cache_dev, force=True) core = cache.add_core(core_dev) with TestRun.step("Mark log lines for later validation of new entries."): @@ -58,7 +56,6 @@ def test_fault_power_hit(cache_mode): with TestRun.step("Start cache without re-initialization."): output = TestRun.executor.run_expect_fail(cli.start_cmd( cache_dev=str(cache_dev.path), - cache_mode=str(cache_mode.name.lower()), force=False, load=False)) if cli_messages.check_stderr_msg(output, cli_messages.error_inserting_cache) and \ cli_messages.check_stderr_msg(output, @@ -71,7 +68,7 @@ def test_fault_power_hit(cache_mode): cache = casadm.load_cache(cache_dev) TestRun.LOGGER.info(f"Cache device loaded correctly (as expected).") except CmdException as e: - TestRun.LOGGER.fail(f"Failed to load cache device. Exception: {e.output}") + TestRun.LOGGER.error(f"Failed to load cache device. 
Exception: {e.output}") time.sleep(wait_short_time) message_found = check_log(last_read_line, cli_messages.reinitialize_with_force_or_recovery) @@ -81,7 +78,7 @@ def test_fault_power_hit(cache_mode): time.sleep(wait_long_time) result = check_log(last_read_line, cli_messages.reinitialize_with_force_or_recovery) if not result: - TestRun.LOGGER.fail(f"Haven't found expected message in the log.") + TestRun.LOGGER.error(f"Haven't found expected message in the log.") def check_log(last_read_line, expected_message): diff --git a/test/functional/tests/fault_injection/test_primary_device_error.py b/test/functional/tests/fault_injection/test_primary_device_error.py index df75439..069e15d 100644 --- a/test/functional/tests/fault_injection/test_primary_device_error.py +++ b/test/functional/tests/fault_injection/test_primary_device_error.py @@ -22,12 +22,11 @@ from test_utils.os_utils import Udev from test_utils.size import Size, Unit -@pytest.mark.parametrizex("cache_line_size", CacheLineSize) @pytest.mark.parametrizex("cache_mode", CacheMode.without_traits(CacheModeTrait.LazyWrites)) @pytest.mark.parametrizex("io_dir", [ReadWrite.randread, ReadWrite.randwrite]) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_core_device_error(io_dir, cache_mode, cache_line_size): +def test_core_device_error(io_dir, cache_mode): """ title: Check if CAS behaves correctly when encountering errors on core device description: | @@ -39,6 +38,7 @@ def test_core_device_error(io_dir, cache_mode, cache_line_size): - I/O error count in FIO and in cache statistics match - Positively passed fio verify on both core devices """ + cache_line_size = CacheLineSize.DEFAULT with TestRun.step("Prepare error device and setup cache and cores"): cache, error_core, good_core = prepare_configuration(cache_mode, cache_line_size) diff --git a/test/functional/tests/fault_injection/test_soft_hot_plug_device.py b/test/functional/tests/fault_injection/test_soft_hot_plug_device.py index ddd3812..1acaf69 100644 --- a/test/functional/tests/fault_injection/test_soft_hot_plug_device.py +++ b/test/functional/tests/fault_injection/test_soft_hot_plug_device.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -22,7 +22,7 @@ from test_utils.size import Size, Unit @pytest.mark.parametrizex("cache_mode", CacheMode) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_soft_hot_plug_cache(cache_mode): +def test_soft_hot_unplug_cache(cache_mode): """ title: Test for soft hot plug of cache device. description: | @@ -99,7 +99,7 @@ def test_soft_hot_plug_cache(cache_mode): @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core1", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core2", DiskTypeLowerThan("cache")) -def test_soft_hot_plug_core(cache_mode): +def test_soft_hot_unplug_core(cache_mode): """ title: Test for soft hot plug of one core device. 
description: | diff --git a/test/functional/tests/initialize/test_negative_load.py b/test/functional/tests/initialize/test_negative_load.py index f261d5a..973e3e6 100644 --- a/test/functional/tests/initialize/test_negative_load.py +++ b/test/functional/tests/initialize/test_negative_load.py @@ -1,11 +1,11 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # import pytest -from api.cas import casadm, casadm_parser +from api.cas import casadm, casadm_parser, cli, cli_messages from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_utils.size import Size, Unit @@ -15,16 +15,16 @@ from test_utils.size import Size, Unit @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_load_occupied_id(): """ - title: Negative test for loading cache with occupied ID. - description: | - Verify that loading cache with occupied ID is not permitted. - pass_criteria: - - Loading cache with occupied ID should fail. + title: Negative test for loading cache with occupied ID. + description: | + Verify that loading cache with occupied ID is not permitted. + pass_criteria: + - Loading cache with occupied ID should fail. """ with TestRun.step("Create partitions for test."): - cache_device = TestRun.disks['cache'] - core_device = TestRun.disks['core'] + cache_device = TestRun.disks["cache"] + core_device = TestRun.disks["core"] cache_device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)]) core_device.create_partitions([Size(1, Unit.GibiByte)]) cache_device_1 = cache_device.partitions[0] @@ -59,3 +59,55 @@ def test_load_occupied_id(): cores = caches[0].get_core_devices() if len(cores) != 0: TestRun.LOGGER.error("Inappropriate number of cores after load!") + + +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) +def test_add_cached_core(): + """ + title: Negative test for adding already used core to a cache. + description: | + Verify if adding core to cache instance fails while it is already + added to another instance and verify if it fails when trying to add core + again to cache where its added already. + pass_criteria: + - No system crash. + - Adding already used core to another cache instance fails. + - The same core device cannot be used twice in one cache instance. 
+ """ + with TestRun.step("Prepare two caches and one core device."): + cache_dev = TestRun.disks["cache"] + cache_dev.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)]) + cache_part1 = cache_dev.partitions[0] + cache_part2 = cache_dev.partitions[1] + core_dev = TestRun.disks["core"] + core_dev.create_partitions([Size(4, Unit.GibiByte)]) + core_part = core_dev.partitions[0] + + with TestRun.step("Start the first cache instance"): + cache1 = casadm.start_cache(cache_part1, force=True) + + with TestRun.step("Add core device to first cache instance."): + core = cache1.add_core(core_part) + + with TestRun.step("Start the second cache instance"): + cache2 = casadm.start_cache(cache_part2, force=True) + + with TestRun.step("Try adding the same core device to the second cache instance."): + output = TestRun.executor.run_expect_fail( + cli.add_core_cmd( + cache_id=str(cache2.cache_id), + core_dev=str(core_part.path), + core_id=str(core.core_id), + ) + ) + cli_messages.check_stderr_msg(output, cli_messages.add_cached_core) + + with TestRun.step("Try adding the same core device to the same cache for the second time."): + output = TestRun.executor.run_expect_fail( + cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.path)) + ) + cli_messages.check_stderr_msg(output, cli_messages.already_cached_core) + + with TestRun.step("Stop caches."): + casadm.stop_all_caches() diff --git a/test/functional/tests/fault_injection/test_remove_device_during_io.py b/test/functional/tests/io/test_remove_device_during_io.py similarity index 98% rename from test/functional/tests/fault_injection/test_remove_device_during_io.py rename to test/functional/tests/io/test_remove_device_during_io.py index 2f1895e..b7170af 100644 --- a/test/functional/tests/fault_injection/test_remove_device_during_io.py +++ b/test/functional/tests/io/test_remove_device_during_io.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause #