diff --git a/test/functional/api/cas/cli_help_messages.py b/test/functional/api/cas/cli_help_messages.py
index 56c094f..2e4ed67 100644
--- a/test/functional/api/cas/cli_help_messages.py
+++ b/test/functional/api/cas/cli_help_messages.py
@@ -285,7 +285,7 @@ standby_help = [
 ]
 
 zero_metadata_help = [
-    r"Usage: casadm --zero-metadata --device \ \[option\.\.\.\]]",
+    r"Usage: casadm --zero-metadata --device \ \[option\.\.\.\]",
     r"Clear metadata from caching device",
     r"Options that are valid with --zero-metadata are:",
     r"-d --device \ Path to device on which metadata would be cleared",
diff --git a/test/functional/api/cas/statistics.py b/test/functional/api/cas/statistics.py
index a2ea510..1f2a9f4 100644
--- a/test/functional/api/cas/statistics.py
+++ b/test/functional/api/cas/statistics.py
@@ -173,8 +173,11 @@ class CacheConfigStats:
         self.cache_line_size = parse_value(
             value=stats_dict["Cache line size [KiB]"], unit_type=UnitType.kibibyte
         )
+        footprint_prefix = "Metadata Memory Footprint "
+        footprint_key = next(k for k in stats_dict if k.startswith(footprint_prefix))
         self.metadata_memory_footprint = parse_value(
-            value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type=UnitType.mebibyte
+            value=stats_dict[footprint_key],
+            unit_type=UnitType(footprint_key[len(footprint_prefix) :]),
         )
         self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type=UnitType.seconds)
         self.status = stats_dict["Status"]
diff --git a/test/functional/tests/fault_injection/test_fault_injection_interrupts.py b/test/functional/tests/fault_injection/test_fault_injection_interrupts.py
index 216be8b..2a7fa21 100644
--- a/test/functional/tests/fault_injection/test_fault_injection_interrupts.py
+++ b/test/functional/tests/fault_injection/test_fault_injection_interrupts.py
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -317,6 +318,9 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
     with TestRun.step("Prepare cache and core."):
         cache_part, core_part = prepare()
 
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
     for _ in TestRun.iteration(
         range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times."
     ):
@@ -331,16 +335,21 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
             core = cache.add_core(core_part)
 
         with TestRun.step(f"Create test file in mount point of exported object."):
-            test_file_size = Size(1024, Unit.MebiByte)
+            test_file_size = Size(4, Unit.GibiByte)
             test_file = fs_utils.create_random_test_file(test_file_path, test_file_size)
 
         with TestRun.step("Check md5 sum of test file."):
             test_file_md5_before = test_file.md5sum()
 
         with TestRun.step("Export file to CAS"):
-            Dd().block_size(test_file_size).input(test_file.full_path).output(core.path).oflag(
-                "direct"
-            ).run()
+            dd = (
+                Dd()
+                .block_size(test_file_size)
+                .input(test_file.full_path)
+                .output(core.path)
+                .oflag("direct")
+            )
+            dd.run()
 
         with TestRun.step("Get number of dirty data on exported object before interruption."):
             os_utils.sync()
@@ -378,7 +387,7 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
             if cache.get_cache_mode() != cache_mode:
                 TestRun.LOGGER.error("Cache mode should remain the same.")
 
-        with TestRun.step("Unmount core and stop cache."):
+        with TestRun.step("Stop cache."):
             cache.stop()
 
         with TestRun.step("Check md5 sum of test file again."):
diff --git a/test/functional/tests/incremental_load/test_inactive_cores.py b/test/functional/tests/incremental_load/test_inactive_cores.py
index 5dc6af1..57ef360 100644
--- a/test/functional/tests/incremental_load/test_inactive_cores.py
+++ b/test/functional/tests/incremental_load/test_inactive_cores.py
@@ -1,13 +1,15 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
-
 import pytest
 
 from api.cas import casadm
 from api.cas.cache_config import CacheMode
+from api.cas.casadm_parser import get_cas_devices_dict
+from api.cas.core import Core, CoreStatus
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
@@ -16,152 +18,238 @@ from test_utils.size import Size, Unit
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_core_inactive():
+def test_core_inactive_stats_conf():
     """
-        1. Start cache with 3 cores.
-        2. Stop cache.
-        3. Remove one of core devices.
-        4. Load cache.
-        5. Check if cache has appropriate number of valid and inactive core devices.
+    title: Test for inactive core configuration statistics.
+    description: |
+        Test the cache inactive core configuration statistics after removing one of the core
+        devices and loading the cache.
+    pass_criteria:
+      - Cache can be loaded with inactive core device.
+      - CAS correctly reports inactive core statistics in cache configuration statistics after
+        loading cache.
""" - cache, core_device = prepare() + core_number = 3 - cache_device = cache.cache_device - stats = cache.get_statistics() + with TestRun.step("Prepare cache and core devices"): + cache_device = TestRun.disks["cache"] + core_device = TestRun.disks["core"] - assert stats.config_stats.core_dev == 3 - assert stats.config_stats.inactive_core_dev == 0 + cache_device.create_partitions([Size(500, Unit.MebiByte)]) + core_device.create_partitions([Size(1, Unit.GibiByte)] * core_number) - TestRun.LOGGER.info("Stopping cache") - cache.stop() + cache_device = cache_device.partitions[0] + core_device_partitions = core_device.partitions - TestRun.LOGGER.info("Removing one of core devices") - core_device.remove_partitions() - core_device.create_partitions([Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]) + with TestRun.step("Start cache"): + cache = casadm.start_cache(cache_device, force=True) - TestRun.LOGGER.info("Loading cache with missing core device") - cache = casadm.start_cache(cache_device, load=True) - stats = cache.get_statistics() + with TestRun.step("Add cores to the cache"): + for core_device_part in core_device_partitions: + cache.add_core(core_dev=core_device_part) - assert stats.config_stats.core_dev == 3 - assert stats.config_stats.inactive_core_dev == 1 + with TestRun.step("Check if correct number of inactive cores is displayed in cache statistics"): + stats = cache.get_statistics() + if stats.config_stats.inactive_core_devices != 0: + TestRun.fail("Inactive core in statistics after starting cache") + + with TestRun.step("Stop cache"): + cache.stop() + + with TestRun.step("Remove last core device"): + core_device.remove_partition(part=core_device_partitions[-1]) + + with TestRun.step("Load cache with missing core device"): + cache = casadm.start_cache(cache_device, load=True) + + with TestRun.step( + "Check if correct number of cores and inactive cores is displayed in cache statistics" + ): + stats = cache.get_statistics() + if stats.config_stats.core_dev != core_number: + TestRun.fail( + "Wrong number of cores after loading the cache\n" + f"Actual number of cores: {stats.config_stats.core_dev}\n" + f"Expected number of cores: {core_number}" + ) + if stats.config_stats.inactive_core_devices != 1: + TestRun.fail( + "Wrong number of inactive cores after loading the cache\n" + f"Actual number of inactive cores: {stats.config_stats.inactive_core_devices}\n" + "Expected number of inactive cores: 1" + ) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -def test_core_inactive_stats(): +def test_core_inactive_stats_usage(): """ - 1. Start cache with 3 cores. - 2. Switch cache into WB mode. - 3. Issue IO to each core. - 4. Stop cache without flush. - 5. Remove two core devices. - 6. Load cache. - 7. Check if cache stats are equal to sum of valid and inactive cores stats. - 8. Check if percentage values are calculated properly. + title: Test for inactive core usage statistics. + description: | + Test the cache inactive core usage statistics after removing one of core devices and loading + cache. + pass_criteria: + - Cache can be loaded with inactive core device. + - CAS correctly reports inactive core statistics in cache usage statistics after loading + cache. 
""" - cache, core_device = prepare() - cache_device = cache.cache_device + core_number = 3 - TestRun.LOGGER.info("Switching cache mode to WB") - cache.set_cache_mode(cache_mode=CacheMode.WB) - cores = cache.get_core_devices() - TestRun.LOGGER.info("Issue IO to each core") - for core in cores: - dd = ( - Dd() - .input("/dev/zero") - .output(core.path) - .count(1000) - .block_size(Size(4, Unit.KibiByte)) - ).run() + with TestRun.step("Prepare cache and core devices"): + cache_device = TestRun.disks["cache"] + core_device = TestRun.disks["core"] - TestRun.LOGGER.info("Stopping cache with dirty data") - cores[2].flush_core() - cache.stop(no_data_flush=True) + cache_device.create_partitions([Size(500, Unit.MebiByte)]) + core_device.create_partitions([Size(1, Unit.GibiByte)] * core_number) - TestRun.LOGGER.info("Removing two of core devices") - core_device.remove_partitions() - core_device.create_partitions([Size(1, Unit.GibiByte)]) + cache_device = cache_device.partitions[0] + core_device_partitions = core_device.partitions - TestRun.LOGGER.info("Loading cache with missing core device") - cache = casadm.start_cache(cache_device, load=True) + with TestRun.step("Start cache"): + cache = casadm.start_cache(cache_device, force=True, cache_mode=CacheMode.WB) - # Accumulate valid cores stats - cores_occupancy = 0 - cores_clean = 0 - cores_dirty = 0 - cores = cache.get_core_devices() - for core in cores: - core_stats = core.get_statistics() - cores_occupancy += core_stats.usage_stats.occupancy.value - cores_clean += core_stats.usage_stats.clean.value - cores_dirty += core_stats.usage_stats.dirty.value + with TestRun.step("Add cores to the cache"): + core_list = [ + cache.add_core(core_dev=core_device_part) for core_device_part in core_device_partitions + ] - cache_stats = cache.get_statistics() - # Add inactive core stats - cores_occupancy += cache_stats.inactive_usage_stats.inactive_occupancy.value - cores_clean += cache_stats.inactive_usage_stats.inactive_clean.value - cores_dirty += cache_stats.inactive_usage_stats.inactive_dirty.value + with TestRun.step("Run I/O to each core"): + for core in core_list: + dd = ( + Dd() + .input("/dev/zero") + .output(core.path) + .count(1000) + .block_size(Size(4, Unit.KibiByte)) + ) + dd.run() - assert cache_stats.usage_stats.occupancy.value == cores_occupancy - assert cache_stats.usage_stats.dirty.value == cores_dirty - assert cache_stats.usage_stats.clean.value == cores_clean + with TestRun.step("Flush last core"): + core_list[-1].flush_core() - cache_stats_percentage = cache.get_statistics(percentage_val=True) - # Calculate expected percentage value of inactive core stats - inactive_occupancy_perc = ( - cache_stats.inactive_usage_stats.inactive_occupancy.value - / cache_stats.config_stats.cache_size.value - ) - inactive_clean_perc = ( - cache_stats.inactive_usage_stats.inactive_clean.value - / cache_stats.usage_stats.occupancy.value - ) - inactive_dirty_perc = ( - cache_stats.inactive_usage_stats.inactive_dirty.value - / cache_stats.usage_stats.occupancy.value - ) + with TestRun.step("Stop cache with dirty data"): + cache.stop(no_data_flush=True) - inactive_occupancy_perc = round(100 * inactive_occupancy_perc, 1) - inactive_clean_perc = round(100 * inactive_clean_perc, 1) - inactive_dirty_perc = round(100 * inactive_dirty_perc, 1) + with TestRun.step("Removing two of core devices"): + core_device.remove_partition(part=core_device_partitions[0]) + core_device.remove_partition(part=core_device_partitions[1]) - TestRun.LOGGER.info(str(cache_stats_percentage)) - 
-    assert (
-        inactive_occupancy_perc
-        == cache_stats_percentage.inactive_usage_stats.inactive_occupancy
-    )
-    assert (
-        inactive_clean_perc
-        == cache_stats_percentage.inactive_usage_stats.inactive_clean
-    )
-    assert (
-        inactive_dirty_perc
-        == cache_stats_percentage.inactive_usage_stats.inactive_dirty
-    )
+    with TestRun.step("Load cache with missing core devices"):
+        cache = casadm.start_cache(cache_device, load=True)
+
+    with TestRun.step("Check cores statistics"):
+        active_cores_occupancy_stats = 0
+        active_cores_clean_stats = 0
+        active_cores_dirty_stats = 0
+
+        active_cores = cache.get_core_devices()
+        for core in active_cores:
+            core_stats = core.get_statistics()
+            active_cores_occupancy_stats += core_stats.usage_stats.occupancy
+            active_cores_clean_stats += core_stats.usage_stats.clean
+            active_cores_dirty_stats += core_stats.usage_stats.dirty
+
+        inactive_cores_occupancy_stats = 0
+        inactive_cores_clean_stats = 0
+        inactive_cores_dirty_stats = 0
+
+        inactive_cores = get_inactive_cores(cache_id=cache.cache_id)
+        for core in inactive_cores:
+            core_stats = core.get_statistics()
+            inactive_cores_occupancy_stats += core_stats.usage_stats.occupancy
+            inactive_cores_clean_stats += core_stats.usage_stats.clean
+            inactive_cores_dirty_stats += core_stats.usage_stats.dirty
+
+        cache_stats = cache.get_statistics()
+        cache_usage_stats = cache_stats.usage_stats
+
+        total_cores_occupancy_stats = active_cores_occupancy_stats + inactive_cores_occupancy_stats
+        total_cores_dirty_stats = active_cores_dirty_stats + inactive_cores_dirty_stats
+        total_cores_clean_stats = active_cores_clean_stats + inactive_cores_clean_stats
+
+        if cache_usage_stats.occupancy != total_cores_occupancy_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of occupancy blocks in cache usage stats\n"
+                f"Actual number of occupancy blocks: {cache_usage_stats.occupancy}\n"
+                f"Expected number of occupancy blocks: {total_cores_occupancy_stats}"
+            )
+        if cache_usage_stats.dirty != total_cores_dirty_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of dirty blocks in cache usage stats\n"
+                f"Actual number of dirty blocks: {cache_usage_stats.dirty}\n"
+                f"Expected number of dirty blocks: {total_cores_dirty_stats}"
+            )
+        if cache_usage_stats.clean != total_cores_clean_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of clean blocks in cache usage stats\n"
+                f"Actual number of clean blocks: {cache_usage_stats.clean}\n"
+                f"Expected number of clean blocks: {total_cores_clean_stats}"
+            )
+        if cache_usage_stats.inactive_occupancy != inactive_cores_occupancy_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of occupancy blocks in inactive cache usage stats\n"
+                f"Actual number of occupancy blocks: {cache_usage_stats.inactive_occupancy}\n"
+                f"Expected number of occupancy blocks: {inactive_cores_occupancy_stats}"
+            )
+        if cache_usage_stats.inactive_dirty != inactive_cores_dirty_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of dirty blocks in cache inactive usage stats\n"
+                f"Actual number of dirty blocks: {cache_usage_stats.inactive_dirty}\n"
+                f"Expected number of dirty blocks: {inactive_cores_dirty_stats}"
+            )
+        if cache_usage_stats.inactive_clean != inactive_cores_clean_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of clean blocks in cache inactive usage stats\n"
+                f"Actual number of clean blocks: {cache_usage_stats.inactive_clean}\n"
+                f"Expected number of clean blocks: {inactive_cores_clean_stats}"
+            )
+
+        cache_usage_stats_percentage = cache.get_statistics(percentage_val=True).usage_stats
+
+        # Calculate expected percentage value of inactive core stats
+        inactive_occupancy_perc = round(
+            100 * (cache_usage_stats.inactive_occupancy / cache_stats.config_stats.cache_size), 1
+        )
+        inactive_dirty_perc = round(
+            100 * (cache_usage_stats.inactive_dirty / cache_stats.usage_stats.occupancy), 1
+        )
+        inactive_clean_perc = round(
+            100 * (cache_usage_stats.inactive_clean / cache_stats.usage_stats.occupancy), 1
+        )
+
+        if cache_usage_stats_percentage.inactive_occupancy != inactive_occupancy_perc:
+            TestRun.LOGGER.error(
+                "Wrong occupancy blocks percentage in usage stats\n"
+                f"Actual number of occupancy blocks percentage:"
+                f" {cache_usage_stats_percentage.inactive_occupancy}\n"
+                f"Expected number of occupancy blocks percentage: {inactive_occupancy_perc}"
+            )
+        if cache_usage_stats_percentage.inactive_dirty != inactive_dirty_perc:
+            TestRun.LOGGER.error(
+                "Wrong dirty blocks percentage in usage stats\n"
+                "Actual number of dirty blocks percentage: "
+                f"{cache_usage_stats_percentage.inactive_dirty}\n"
+                f"Expected number of dirty blocks percentage: {inactive_dirty_perc}"
+            )
+        if cache_usage_stats_percentage.inactive_clean != inactive_clean_perc:
+            TestRun.LOGGER.error(
+                "Wrong clean blocks percentage in usage stats\n"
+                "Actual number of clean blocks percentage: "
+                f"{cache_usage_stats_percentage.inactive_clean}\n"
+                f"Expected number of clean blocks percentage: {inactive_clean_perc}"
+            )
 
 
-def prepare():
-    cache_device = TestRun.disks["cache"]
-    core_device = TestRun.disks["core"]
+def get_inactive_cores(cache_id: int) -> list:
+    cores_dict = get_cas_devices_dict()["cores"].values()
 
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
-    core_device.create_partitions(
-        [Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
-    )
+    def is_inactive(core):
 
-    cache_device = cache_device.partitions[0]
-    core_device_1 = core_device.partitions[0]
-    core_device_2 = core_device.partitions[1]
-    core_device_3 = core_device.partitions[2]
+        return CoreStatus[core["status"].lower()] == CoreStatus.inactive
 
-    TestRun.LOGGER.info("Staring cache")
-    cache = casadm.start_cache(cache_device, force=True)
-    TestRun.LOGGER.info("Adding core device")
-    core_1 = cache.add_core(core_dev=core_device_1)
-    core_2 = cache.add_core(core_dev=core_device_2)
-    core_3 = cache.add_core(core_dev=core_device_3)
-
-    return cache, core_device
+    return [
+        Core(core["device_path"], core["cache_id"])
+        for core in cores_dict
+        if is_inactive(core) and core["cache_id"] == cache_id
+    ]
diff --git a/test/functional/tests/initialize/test_recovery.py b/test/functional/tests/initialize/test_recovery.py
index 84ec893..71e5881 100644
--- a/test/functional/tests/initialize/test_recovery.py
+++ b/test/functional/tests/initialize/test_recovery.py
@@ -87,7 +87,7 @@ def test_recover_cache_verify_core_device(filesystem):
             TestRun.fail(f"Wrong error message during cache stop")
 
     with TestRun.step("Plug cache device"):
-        cache_device.plug()
+        cache_device.plug_all()
 
     with TestRun.step("Load cache"):
         cache = casadm.load_cache(cache_dev)
@@ -172,7 +172,7 @@ def test_recover_cache_verify_exp_obj(filesystem):
            TestRun.fail(f"Wrong error message during cache stop")
 
     with TestRun.step("Plug cache device"):
-        cache_device.plug()
+        cache_device.plug_all()
 
     with TestRun.step("Load cache"):
         casadm.load_cache(cache_dev)
diff --git a/test/functional/tests/initialize/test_simulation_startup.py b/test/functional/tests/initialize/test_simulation_startup.py
index 01b28c2..5dc647e 100644
--- a/test/functional/tests/initialize/test_simulation_startup.py
+++ b/test/functional/tests/initialize/test_simulation_startup.py
@@ -54,7 +54,10 @@ def test_simulation_startup_from_config():
         TestRun.executor.run_expect_success(ctl_init())
 
     with TestRun.step("Verify if cache is working"):
-        cache = casadm_parser.get_caches()[0]
+        caches = casadm_parser.get_caches()
+        if not caches:
+            TestRun.fail("Cache is not working")
+        cache = caches[0]
         if cache.get_status() is not CacheStatus.running:
             TestRun.fail(
                 f"Cache {cache.cache_id} should be running but is in {cache.get_status()} "
@@ -75,7 +78,10 @@ def test_simulation_startup_from_config():
         TestRun.executor.run_expect_success(f"udevadm trigger")
 
     with TestRun.step("Verify if cache is working"):
-        cache = casadm_parser.get_caches()[0]
+        caches = casadm_parser.get_caches()
+        if not caches:
+            TestRun.fail("Cache is not working")
+        cache = caches[0]
         if cache.get_status() is not CacheStatus.running:
             TestRun.fail(
                 f"Cache {cache.cache_id} should be running but is in {cache.get_status()} "
@@ -83,7 +89,10 @@ def test_simulation_startup_from_config():
             )
 
     with TestRun.step("Verify if core is working"):
-        core = cache.get_core_devices()[0]
+        cores = cache.get_core_devices()
+        if not cores:
+            TestRun.fail("Core is not working")
+        core = cores[0]
         if core.get_status() is not CoreStatus.active:
             TestRun.fail(
                 f"Core {core.core_id} should be active but is in {core.get_status()} " f"state."
diff --git a/test/functional/tests/initialize/test_startup_init_config.py b/test/functional/tests/initialize/test_startup_init_config.py
index 0eebf34..34c1801 100644
--- a/test/functional/tests/initialize/test_startup_init_config.py
+++ b/test/functional/tests/initialize/test_startup_init_config.py
@@ -215,11 +215,11 @@ def test_cas_startup_lazy():
         power_control.power_cycle()
 
     with TestRun.step("Verify if all the devices are initialized properly"):
-        core_pool_list = get_cas_devices_dict()["core_pool"]
+        core_pool_list = get_cas_devices_dict()["core_pool"].values()
         caches_list = get_cas_devices_dict()["caches"].values()
         cores_list = get_cas_devices_dict()["cores"].values()
 
-        core_pool_paths = {c["device"] for c in core_pool_list}
+        core_pool_paths = {c["device_path"] for c in core_pool_list}
         if core_pool_paths != expected_core_pool_paths:
             TestRun.error(
                 f"Expected the following devices in core pool "
@@ -228,7 +228,7 @@ def test_cas_startup_lazy():
         else:
             TestRun.LOGGER.info("Core pool is ok")
 
-        caches_paths = {c["device"] for c in caches_list}
+        caches_paths = {c["device_path"] for c in caches_list}
         if caches_paths != expected_caches_paths:
             TestRun.error(
                 f"Expected the following devices as caches "
@@ -237,7 +237,7 @@ def test_cas_startup_lazy():
         else:
             TestRun.LOGGER.info("Caches are ok")
 
-        cores_paths = {c["device"] for c in cores_list}
+        cores_paths = {c["device_path"] for c in cores_list}
         if cores_paths != expected_cores_paths:
             TestRun.error(
                 f"Expected the following devices as cores "
@@ -246,8 +246,7 @@ def test_cas_startup_lazy():
         else:
             TestRun.LOGGER.info("Core devices are ok")
 
-        cores_paths = {c["device"] for c in cores_list}
-        cores_states = {c["device"]: c["status"] for c in cores_list}
+        cores_states = {c["device_path"]: c["status"] for c in cores_list}
         if cores_states[active_core_path] != "Active":
             TestRun.LOGGER.error(
                 f"Core {active_core_path} should be Active "
diff --git a/test/functional/tests/io/trim/test_trim.py b/test/functional/tests/io/trim/test_trim.py
index b0fdee7..27052e6 100644
--- a/test/functional/tests/io/trim/test_trim.py
+++ b/test/functional/tests/io/trim/test_trim.py
@@ -4,6 +4,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 import os
+import posixpath
 import time
 
 import pytest
@@ -59,9 +60,8 @@ def test_trim_start_discard():
 
     with TestRun.step("Starting cache"):
         cache = casadm.start_cache(cas_part, force=True)
 
-        dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
-        metadata_size = get_metadata_size_on_device(dmesg_out)
+        metadata_size = get_metadata_size_on_device(cache_id=cache.cache_id)
 
     with TestRun.step("Stop blktrace and check if discard requests were issued"):
         cache_reqs = blktrace.stop_monitoring()
@@ -236,7 +236,7 @@ def test_trim_device_discard_support(
 
     with TestRun.step("Create random file."):
         test_file = fs_utils.create_random_test_file(
-            os.path.join(mount_point, "test_file"), core_dev.size * 0.2
+            posixpath.join(mount_point, "test_file"), core_dev.size * 0.2
         )
         occupancy_before = core.get_occupancy()
         TestRun.LOGGER.info(str(core.get_statistics()))
diff --git a/test/functional/tests/io_class/test_io_class_occupancy.py b/test/functional/tests/io_class/test_io_class_occupancy.py
index b7ea53b..6ed20bc 100644
--- a/test/functional/tests/io_class/test_io_class_occupancy.py
+++ b/test/functional/tests/io_class/test_io_class_occupancy.py
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -353,23 +354,38 @@ def test_ioclass_occupancy_sum_cache():
         cache.purge_cache()
 
     with TestRun.step("Verify stats before IO"):
-        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
-        for i in io_classes:
-            usage_stats_sum += get_io_class_usage(cache, i.id)
-        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)
+        usage_stats_occupancy_sum = Size.zero()
+        usage_stats_clean_sum = Size.zero()
+        usage_stats_dirty_sum = Size.zero()
 
-        cache_stats = cache.get_statistics().usage_stats
-        cache_stats.free = Size(0)
+        all_io_class_usage_stats = []
+        for i in io_classes:
+            io_class_usage_stats = get_io_class_usage(cache, i.id)
+            usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+            usage_stats_clean_sum += io_class_usage_stats.clean
+            usage_stats_dirty_sum += io_class_usage_stats.dirty
+            all_io_class_usage_stats.append(io_class_usage_stats)
+
+        io_class_usage_stats = get_io_class_usage(cache, default_ioclass_id)
+        usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+        usage_stats_clean_sum += io_class_usage_stats.clean
+        usage_stats_dirty_sum += io_class_usage_stats.dirty
+
+        cache_usage_stats = cache.get_statistics().usage_stats
+        cache_usage_stats.free = Size.zero()
 
         if (
-            cache_stats.occupancy != usage_stats_sum.occupancy
-            or cache_stats.clean != usage_stats_sum.clean
-            or cache_stats.dirty != usage_stats_sum.dirty
+            cache_usage_stats.occupancy != usage_stats_occupancy_sum
+            or cache_usage_stats.clean != usage_stats_clean_sum
+            or cache_usage_stats.dirty != usage_stats_dirty_sum
        ):
             TestRun.LOGGER.error(
-                "Initial cache usage stats doesn't match sum of ioclasses stats\n"
-                f"cache stats: {cache_stats}, sumed up stats {usage_stats_sum}\n"
-                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
+                "Initial cache usage stats doesn't match sum of io classes stats\n"
+                f"Cache usage stats: {cache_usage_stats}\n"
+                f"Usage stats occupancy sum: {usage_stats_occupancy_sum}\n"
+                f"Usage stats clean sum: {usage_stats_clean_sum}\n"
+                f"Usage stats dirty sum: {usage_stats_dirty_sum}\n"
+                f"Particular stats {all_io_class_usage_stats}"
             )
 
     with TestRun.step(f"Trigger IO to each directory"):
@@ -380,23 +396,38 @@ def test_ioclass_occupancy_sum_cache():
                 )
 
     with TestRun.step("Verify stats after IO"):
-        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
-        for i in io_classes:
-            usage_stats_sum += get_io_class_usage(cache, i.id)
-        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)
+        usage_stats_occupancy_sum = Size.zero()
+        usage_stats_clean_sum = Size.zero()
+        usage_stats_dirty_sum = Size.zero()
 
-        cache_stats = cache.get_statistics().usage_stats
-        cache_stats.free = Size(0)
+        all_io_class_usage_stats = []
+        for i in io_classes:
+            io_class_usage_stats = get_io_class_usage(cache, i.id)
+            usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+            usage_stats_clean_sum += io_class_usage_stats.clean
+            usage_stats_dirty_sum += io_class_usage_stats.dirty
+            all_io_class_usage_stats.append(io_class_usage_stats)
+
+        io_class_usage_stats = get_io_class_usage(cache, default_ioclass_id)
+        usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+        usage_stats_clean_sum += io_class_usage_stats.clean
+        usage_stats_dirty_sum += io_class_usage_stats.dirty
+
+        cache_usage_stats = cache.get_statistics().usage_stats
+        cache_usage_stats.free = Size.zero()
 
         if (
-            cache_stats.occupancy != usage_stats_sum.occupancy
-            or cache_stats.clean != usage_stats_sum.clean
-            or cache_stats.dirty != usage_stats_sum.dirty
+            cache_usage_stats.occupancy != usage_stats_occupancy_sum
+            or cache_usage_stats.clean != usage_stats_clean_sum
+            or cache_usage_stats.dirty != usage_stats_dirty_sum
         ):
             TestRun.LOGGER.error(
-                "Cache usage stats doesn't match sum of ioclasses stats\n"
-                f"cache stats: {cache_stats}, sumed up stats {usage_stats_sum}\n"
-                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
+                "Cache usage stats doesn't match sum of io classes stats\n"
+                f"Cache usage stats: {cache_usage_stats}\n"
+                f"Usage stats occupancy sum: {usage_stats_occupancy_sum}\n"
+                f"Usage stats clean sum: {usage_stats_clean_sum}\n"
+                f"Usage stats dirty sum: {usage_stats_dirty_sum}\n"
+                f"Particular stats {all_io_class_usage_stats}"
             )
 
 
diff --git a/test/functional/tests/io_class/test_io_class_purge.py b/test/functional/tests/io_class/test_io_class_purge.py
index 55c1b09..86d3385 100644
--- a/test/functional/tests/io_class/test_io_class_purge.py
+++ b/test/functional/tests/io_class/test_io_class_purge.py
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -103,18 +104,32 @@ def get_io_class_usage(cache, io_class_id):
 
 
 def verify_ioclass_usage_stats(cache, ioclasses_ids):
     cache_size = cache.get_statistics().config_stats.cache_size
-    usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
+    usage_stats_occupancy_sum = Size.zero()
+    usage_stats_clean_sum = Size.zero()
+    usage_stats_dirty_sum = Size.zero()
+
+    all_io_class_usage_stats = []
     for i in ioclasses_ids:
-        usage_stats_sum += get_io_class_usage(cache, i)
+        io_class_usage_stats = get_io_class_usage(cache, i)
+        usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+        usage_stats_clean_sum += io_class_usage_stats.clean
+        usage_stats_dirty_sum += io_class_usage_stats.dirty
+        all_io_class_usage_stats.append(io_class_usage_stats)
 
     cache_usage_stats = cache.get_statistics().usage_stats
-    if usage_stats_sum != cache_usage_stats:
+    if (
+        cache_usage_stats.occupancy != usage_stats_occupancy_sum
+        or cache_usage_stats.clean != usage_stats_clean_sum
+        or cache_usage_stats.dirty != usage_stats_dirty_sum
+    ):
         TestRun.LOGGER.error(
-            "Sum of io classes usage stats doesn't match cache usage stats!"
-            f" Cache stats:\n{cache_usage_stats} io classes sum:\n{usage_stats_sum}"
-            f" Stats of particular io classes:\n"
-            f"{[get_io_class_usage(cache, i) for i in ioclasses_ids]}"
+            "Sum of io classes usage stats doesn't match cache usage stats!\n"
+            f"Cache usage stats: {cache_usage_stats}\n"
+            f"Usage stats occupancy sum: {usage_stats_occupancy_sum}\n"
+            f"Usage stats clean sum: {usage_stats_clean_sum}\n"
+            f"Usage stats dirty sum: {usage_stats_dirty_sum}\n"
+            f"{all_io_class_usage_stats}"
         )
 
     if cache_usage_stats.occupancy + cache_usage_stats.free > cache_size:
diff --git a/test/functional/tests/lazy_writes/cleaning_policy/test_acp.py b/test/functional/tests/lazy_writes/cleaning_policy/test_acp.py
index 351499d..63478c2 100644
--- a/test/functional/tests/lazy_writes/cleaning_policy/test_acp.py
+++ b/test/functional/tests/lazy_writes/cleaning_policy/test_acp.py
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -233,6 +234,9 @@ def test_acp_param_flush_max_buffers(cache_line_size, cache_mode):
         fio = get_fio_cmd(core, core_size)
         fio.run_in_background()
         time.sleep(10)
+        cache_stats = cache.get_statistics()
+        if cache_stats.usage_stats.dirty == Size.zero():
+            TestRun.fail("There is no dirty data on cache")
 
     with TestRun.step("Set cleaning policy to ACP."):
         cache.set_cleaning_policy(CleaningPolicy.acp)
diff --git a/test/functional/tests/memory/test_memory_metadata_consumption.py b/test/functional/tests/memory/test_memory_metadata_consumption.py
index aa7349d..0686155 100644
--- a/test/functional/tests/memory/test_memory_metadata_consumption.py
+++ b/test/functional/tests/memory/test_memory_metadata_consumption.py
@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -7,6 +8,7 @@ import pytest
 
 from api.cas import casadm
 from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy
+from api.cas.dmesg import get_metadata_size_on_device
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.fio.fio import Fio
@@ -178,8 +180,7 @@ def validate_memory_consumption(cache_device, expected_maximum, actual_size):
     """Checks memory consumption"""
     stats = cache_device.get_statistics()
     TestRun.LOGGER.info(f"Statistics: {stats}")
-
-    stat_footprint = Size(int(cache_device.get_metadata_size()))
+    stat_footprint = get_metadata_size_on_device(cache_device.cache_id)
 
     TestRun.LOGGER.info(f"Allowed limit for current configuration is: {expected_maximum}")
     is_memory_within_limit("Reported Metadata Memory Footprint", expected_maximum, stat_footprint)
diff --git a/test/functional/tests/misc/test_files_permissions.py b/test/functional/tests/misc/test_files_permissions.py
index 19d7e62..b96cf86 100644
--- a/test/functional/tests/misc/test_files_permissions.py
+++ b/test/functional/tests/misc/test_files_permissions.py
@@ -70,6 +70,7 @@ repo_files_perms_exceptions = {
     "ocf/tests/unit/framework/prepare_sources_for_testing.py": 755,
     "ocf/tests/unit/framework/run_unit_tests.py": 755,
     "ocf/tests/unit/tests/add_new_test_file.py": 755,
+    "test/functional/test-framework/test_tools/checksec.sh": 744,
 }
 
 build_files_perms_exceptions = {