From 6ac104c3b8f17bb4fbc9f6bc6afbad2890eba4aa Mon Sep 17 00:00:00 2001 From: Katarzyna Lapinska Date: Mon, 17 Aug 2020 09:32:44 +0200 Subject: [PATCH] Rewrite IO class tests to use latest Test Framework API Signed-off-by: Katarzyna Lapinska --- .../tests/io_class/test_io_class_core_id.py | 6 +- .../tests/io_class/test_io_class_directory.py | 713 +++++++------- .../tests/io_class/test_io_class_file.py | 676 +++++++------ .../tests/io_class/test_io_class_process.py | 178 ++-- .../tests/io_class/test_io_classification.py | 923 ++++++++++-------- 5 files changed, 1323 insertions(+), 1173 deletions(-) diff --git a/test/functional/tests/io_class/test_io_class_core_id.py b/test/functional/tests/io_class/test_io_class_core_id.py index e1e9d43..9d93a54 100644 --- a/test/functional/tests/io_class/test_io_class_core_id.py +++ b/test/functional/tests/io_class/test_io_class_core_id.py @@ -3,8 +3,6 @@ # SPDX-License-Identifier: BSD-3-Clause-Clear # -import time - import pytest from test_tools.disk_utils import Filesystem @@ -12,7 +10,7 @@ from api.cas import ioclass_config, casadm from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools.dd import Dd -from test_utils.os_utils import sync, Udev, drop_caches, DropCachesMode +from test_utils.os_utils import sync, Udev, drop_caches from test_utils.size import Unit, Size from core.test_run import TestRun @@ -29,7 +27,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached" def test_ioclass_core_id(filesystem): """ title: Test for `core_id` classification rule - dsecription: | + description: | Test if IO to core with selective allocation enabled is cached and IO to core with selective allocation disabled is redirected to pass-through mode pass_criteria: diff --git a/test/functional/tests/io_class/test_io_class_directory.py b/test/functional/tests/io_class/test_io_class_directory.py index b917255..11b8102 100644 --- a/test/functional/tests/io_class/test_io_class_directory.py +++ b/test/functional/tests/io_class/test_io_class_directory.py @@ -4,10 +4,10 @@ # import random -from datetime import datetime - import pytest - +from datetime import datetime +from api.cas import ioclass_config, casadm +from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools import fs_utils from test_tools.dd import Dd @@ -15,7 +15,8 @@ from test_tools.disk_utils import Filesystem from test_utils.filesystem.directory import Directory from test_utils.filesystem.file import File from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev -from .io_class_common import * +from test_utils.size import Size, Unit +from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path @pytest.mark.os_dependent @@ -24,263 +25,99 @@ from .io_class_common import * @pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_directory_depth(filesystem): """ - Test if directory classification works properly for deeply nested directories for read and - write operations. + title: Test IO classification by directory. + description: | + Test if directory classification works properly for deeply nested directories for read and + write operations. + pass_criteria: + - No kernel bug. + - Read and write operations to directories are classified properly. 
""" - cache, core = prepare() - Udev.disable() - - TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " - f"and mounting {core.system_path} at {mountpoint}") - core.create_filesystem(filesystem) - core.mount(mountpoint) - sync() - base_dir_path = f"{mountpoint}/base_dir" - TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}") - fs_utils.create_directory(base_dir_path) - nested_dir_path = base_dir_path - random_depth = random.randint(40, 80) - for i in range(random_depth): - nested_dir_path += f"/dir_{i}" - TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}") - fs_utils.create_directory(path=nested_dir_path, parents=True) + with TestRun.step("Prepare cache and core."): + cache, core = prepare() + Udev.disable() + + with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} " + f"at {mountpoint}."): + core.create_filesystem(filesystem) + core.mount(mountpoint) + sync() + + with TestRun.step(f"Create the base directory: {base_dir_path}."): + fs_utils.create_directory(base_dir_path) + + with TestRun.step(f"Create a nested directory."): + nested_dir_path = base_dir_path + random_depth = random.randint(40, 80) + for i in range(random_depth): + nested_dir_path += f"/dir_{i}" + fs_utils.create_directory(path=nested_dir_path, parents=True) # Test classification in nested dir by reading a previously unclassified file - TestRun.LOGGER.info("Creating the first file in the nested directory") - test_file_1 = File(f"{nested_dir_path}/test_file_1") - dd = ( - Dd() - .input("/dev/urandom") - .output(test_file_1.full_path) - .count(random.randint(1, 200)) - .block_size(Size(1, Unit.MebiByte)) - ) - dd.run() - sync() - drop_caches(DropCachesMode.ALL) - test_file_1.refresh_item() + with TestRun.step("Create the first file in the nested directory."): + test_file_1 = File(f"{nested_dir_path}/test_file_1") + dd = ( + Dd().input("/dev/urandom") + .output(test_file_1.full_path) + .count(random.randint(1, 200)) + .block_size(Size(1, Unit.MebiByte)) + ) + dd.run() + sync() + drop_caches(DropCachesMode.ALL) + test_file_1.refresh_item() - ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID) - # directory IO class - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule=f"directory:{base_dir_path}", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + with TestRun.step("Load IO class config."): + ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID) + # directory IO class + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule=f"directory:{base_dir_path}", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - TestRun.LOGGER.info("Reading the file in the nested directory") - dd = ( - Dd() - .input(test_file_1.full_path) - .output("/dev/null") - .block_size(Size(1, Unit.MebiByte)) - ) - dd.run() + with TestRun.step("Read the file in the nested directory"): + base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + dd = ( + Dd().input(test_file_1.full_path) + .output("/dev/null") + .block_size(Size(1, Unit.MebiByte)) + ) + dd.run() - new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - assert new_occupancy == base_occupancy + 
test_file_1.size, \
-        "Wrong occupancy after reading file!\n" \
-        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
+    with TestRun.step("Check occupancy after reading the file."):
+        new_occupancy = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        if new_occupancy != base_occupancy + test_file_1.size:
+            TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
+                                 f"Expected: {base_occupancy + test_file_1.size}, "
+                                 f"actual: {new_occupancy}")
 
     # Test classification in nested dir by creating a file
-    base_occupancy = new_occupancy
-    TestRun.LOGGER.info("Creating the second file in the nested directory")
-    test_file_2 = File(f"{nested_dir_path}/test_file_2")
-    dd = (
-        Dd()
-        .input("/dev/urandom")
-        .output(test_file_2.full_path)
-        .count(random.randint(1, 200))
-        .block_size(Size(1, Unit.MebiByte))
-    )
-    dd.run()
-    sync()
-    drop_caches(DropCachesMode.ALL)
-    test_file_2.refresh_item()
+    with TestRun.step("Create the second file in the nested directory."):
+        base_occupancy = new_occupancy
+        test_file_2 = File(f"{nested_dir_path}/test_file_2")
+        dd = (
+            Dd().input("/dev/urandom")
+            .output(test_file_2.full_path)
+            .count(random.randint(1, 200))
+            .block_size(Size(1, Unit.MebiByte))
+        )
+        dd.run()
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        test_file_2.refresh_item()
 
-    new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    assert new_occupancy == base_occupancy + test_file_2.size, \
-        "Wrong occupancy after creating file!\n" \
-        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
-
-
-@pytest.mark.os_dependent
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("filesystem", Filesystem)
-def test_ioclass_directory_dir_operations(filesystem):
-    """
-    Test if directory classification works properly after directory operations like move or rename.
-    The operations themselves should not cause reclassification but IO after those operations
-    should be reclassified to proper IO class.
-    Directory classification may work with a delay after loading IO class configuration or
-    move/rename operations. Test checks if maximum delay is not exceeded.
- """ - def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int): - start_time = datetime.now() - occupancy_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).usage_stats.occupancy - dd_blocks = 10 - dd_size = Size(dd_blocks, Unit.Blocks4096) - file_counter = 0 - unclassified_files = [] - time_from_start = datetime.now() - start_time - while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY: - occupancy_before = occupancy_after - file_path = f"{directory.full_path}/test_file_{file_counter}" - file_counter += 1 - time_from_start = datetime.now() - start_time - (Dd().input("/dev/zero").output(file_path).oflag("sync") - .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run()) - occupancy_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).usage_stats.occupancy - if occupancy_after - occupancy_before < dd_size: - unclassified_files.append(file_path) - - if len(unclassified_files) == file_counter: - pytest.xfail("No files were properly classified within max delay time!") - - if len(unclassified_files): - TestRun.LOGGER.info("Rewriting unclassified test files...") - for file_path in unclassified_files: - (Dd().input("/dev/zero").output(file_path).oflag("sync") - .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run()) - - def read_files_with_reclassification_check( - target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool): - start_time = datetime.now() - target_occupancy_after = cache.get_io_class_statistics( - io_class_id=target_ioclass_id).usage_stats.occupancy - source_occupancy_after = cache.get_io_class_statistics( - io_class_id=source_ioclass_id).usage_stats.occupancy - unclassified_files = [] - - for file in [item for item in directory.ls() if isinstance(item, File)]: - target_occupancy_before = target_occupancy_after - source_occupancy_before = source_occupancy_after - time_from_start = datetime.now() - start_time - (Dd().input(file.full_path).output("/dev/null") - .block_size(Size(1, Unit.Blocks4096)).run()) - target_occupancy_after = cache.get_io_class_statistics( - io_class_id=target_ioclass_id).usage_stats.occupancy - source_occupancy_after = cache.get_io_class_statistics( - io_class_id=source_ioclass_id).usage_stats.occupancy - if target_occupancy_after < target_occupancy_before: - pytest.xfail("Target IO class occupancy lowered!") - elif target_occupancy_after - target_occupancy_before < file.size: - unclassified_files.append(file) - if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY: - continue - pytest.xfail("Target IO class occupancy not changed properly!") - if source_occupancy_after >= source_occupancy_before: - if file not in unclassified_files: - unclassified_files.append(file) - if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY: - continue - pytest.xfail("Source IO class occupancy not changed properly!") - - if len(unclassified_files): - TestRun.LOGGER.info("Rereading unclassified test files...") - sync() - drop_caches(DropCachesMode.ALL) - for file in unclassified_files: - (Dd().input(file.full_path).output("/dev/null") - .block_size(Size(1, Unit.Blocks4096)).run()) - - cache, core = prepare() - Udev.disable() - - proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2) - ioclass_id_1 = proper_ids[0] - classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}" - ioclass_id_2 = proper_ids[1] - classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}" - # directory IO classes - 
ioclass_config.add_ioclass( - ioclass_id=ioclass_id_1, - eviction_priority=1, - allocation=True, - rule=f"directory:{classified_dir_path_1}", - ioclass_config_path=ioclass_config_path, - ) - ioclass_config.add_ioclass( - ioclass_id=ioclass_id_2, - eviction_priority=1, - allocation=True, - rule=f"directory:{classified_dir_path_2}", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - - TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " - f"and mounting {core.system_path} at {mountpoint}") - core.create_filesystem(fs_type=filesystem) - core.mount(mount_point=mountpoint) - sync() - - non_classified_dir_path = f"{mountpoint}/non_classified" - TestRun.LOGGER.info( - f"Creating a non-classified directory: {non_classified_dir_path}") - dir_1 = Directory.create_directory(path=non_classified_dir_path) - - TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}") - dir_1.move(destination=classified_dir_path_1) - - TestRun.LOGGER.info("Creating files with delay check") - create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1) - - TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir") - dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True) - - TestRun.LOGGER.info("Creating files with delay check") - create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2) - sync() - drop_caches(DropCachesMode.ALL) - - TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}") - dir_2.move(destination=classified_dir_path_1) - - TestRun.LOGGER.info("Reading files with reclassification check") - read_files_with_reclassification_check( - target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2, - directory=dir_2, with_delay=False) - sync() - drop_caches(DropCachesMode.ALL) - - TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}") - dir_2.move(destination=mountpoint) - - TestRun.LOGGER.info("Reading files with reclassification check") - read_files_with_reclassification_check( - target_ioclass_id=0, source_ioclass_id=ioclass_id_1, - directory=dir_2, with_delay=False) - - TestRun.LOGGER.info(f"Removing {classified_dir_path_2}") - fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True) - sync() - drop_caches(DropCachesMode.ALL) - - TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}") - dir_1.move(destination=classified_dir_path_2) - - TestRun.LOGGER.info("Reading files with reclassification check") - read_files_with_reclassification_check( - target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1, - directory=dir_1, with_delay=True) - - TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}") - dir_1.move(destination=non_classified_dir_path) - - TestRun.LOGGER.info("Reading files with reclassification check") - read_files_with_reclassification_check( - target_ioclass_id=0, source_ioclass_id=ioclass_id_2, - directory=dir_1, with_delay=True) + with TestRun.step("Check occupancy after creating the second file."): + new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + if new_occupancy != base_occupancy + test_file_2.size: + TestRun.LOGGER.error("Wrong occupancy after creating file!\n" + f"Expected: {base_occupancy + test_file_2.size}, " + f"actual: {new_occupancy}") @pytest.mark.os_dependent @@ -289,109 +126,309 @@ def 
test_ioclass_directory_dir_operations(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_file_operations(filesystem):
     """
-    Test if directory classification works properly after file operations like move or rename.
-    The operations themselves should not cause reclassification but IO after those operations
-    should be reclassified to proper IO class.
+    title: Test IO classification by file operations.
+    description: |
+        Test if directory classification works properly after file operations like move or rename.
+    pass_criteria:
+      - No kernel bug.
+      - The operations themselves should not cause reclassification but IO after those
+        operations should be reclassified to proper IO class.
     """
-    def check_occupancy(expected: Size, actual: Size):
-        if expected != actual:
-            pytest.xfail("Occupancy check failed!\n"
-                         f"Expected: {expected}, actual: {actual}")
-    cache, core = prepare()
-    Udev.disable()
 
     test_dir_path = f"{mountpoint}/test_dir"
     nested_dir_path = f"{test_dir_path}/nested_dir"
     dd_blocks = random.randint(5, 50)
 
-    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
-    # directory IO class
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{test_dir_path}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+        Udev.disable()
 
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(fs_type=filesystem)
-    core.mount(mount_point=mountpoint)
-    sync()
+    with TestRun.step("Create and load IO class config file."):
+        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
+        # directory IO class
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{test_dir_path}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
-    Directory.create_directory(path=nested_dir_path, parents=True)
-    sync()
-    drop_caches(DropCachesMode.ALL)
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mount {core.system_path} at {mountpoint}."):
+        core.create_filesystem(fs_type=filesystem)
+        core.mount(mount_point=mountpoint)
+        sync()
 
-    TestRun.LOGGER.info("Creating test file")
-    classified_before = cache.get_io_class_statistics(
+    with TestRun.step(f"Create directory {nested_dir_path}."):
+        Directory.create_directory(path=nested_dir_path, parents=True)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step("Create test file."):
+        classified_before = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        file_path = f"{test_dir_path}/test_file"
+        (Dd().input("/dev/urandom").output(file_path).oflag("sync")
+         .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        test_file = File(file_path).refresh_item()
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before + test_file.size, classified_after)
+
+    with TestRun.step("Move test file out of classified directory."):
+        classified_before = classified_after
+        non_classified_before = 
cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy + test_file.move(destination=mountpoint) + sync() + drop_caches(DropCachesMode.ALL) + + with TestRun.step("Check classified occupancy."): + classified_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + check_occupancy(classified_before, classified_after) + TestRun.LOGGER.info("Checking non-classified occupancy") + non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy + check_occupancy(non_classified_before, non_classified_after) + + with TestRun.step("Read test file."): + classified_before = classified_after + non_classified_before = non_classified_after + (Dd().input(test_file.full_path).output("/dev/null") + .block_size(Size(1, Unit.MebiByte)).run()) + + with TestRun.step("Check classified occupancy."): + classified_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + check_occupancy(classified_before - test_file.size, classified_after) + TestRun.LOGGER.info("Checking non-classified occupancy") + non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy + check_occupancy(non_classified_before + test_file.size, non_classified_after) + + with TestRun.step(f"Move test file to {nested_dir_path}."): + classified_before = classified_after + non_classified_before = non_classified_after + test_file.move(destination=nested_dir_path) + sync() + drop_caches(DropCachesMode.ALL) + + with TestRun.step("Check classified occupancy."): + classified_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + check_occupancy(classified_before, classified_after) + TestRun.LOGGER.info("Checking non-classified occupancy") + non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy + check_occupancy(non_classified_before, non_classified_after) + + with TestRun.step("Read test file."): + classified_before = classified_after + non_classified_before = non_classified_after + (Dd().input(test_file.full_path).output("/dev/null") + .block_size(Size(1, Unit.MebiByte)).run()) + + with TestRun.step("Check classified occupancy."): + classified_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + check_occupancy(classified_before + test_file.size, classified_after) + + with TestRun.step("Check non-classified occupancy."): + non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy + check_occupancy(non_classified_before - test_file.size, non_classified_after) + + +@pytest.mark.os_dependent +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) +@pytest.mark.parametrizex("filesystem", Filesystem) +def test_ioclass_directory_dir_operations(filesystem): + """ + title: Test IO classification by directory operations. + description: | + Test if directory classification works properly after directory operations like move or + rename. + pass_criteria: + - No kernel bug. + - The operations themselves should not cause reclassification but IO after those + operations should be reclassified to proper IO class. + - Directory classification may work with a delay after loading IO class configuration or + move/rename operations. Test checks if maximum delay is not exceeded. 
+ """ + + non_classified_dir_path = f"{mountpoint}/non_classified" + + with TestRun.step("Prepare cache and core."): + cache, core = prepare() + Udev.disable() + + with TestRun.step("Create and load IO class config file."): + proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2) + ioclass_id_1 = proper_ids[0] + classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}" + ioclass_id_2 = proper_ids[1] + classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}" + # directory IO classes + ioclass_config.add_ioclass( + ioclass_id=ioclass_id_1, + eviction_priority=1, + allocation=True, + rule=f"directory:{classified_dir_path_1}", + ioclass_config_path=ioclass_config_path, + ) + ioclass_config.add_ioclass( + ioclass_id=ioclass_id_2, + eviction_priority=1, + allocation=True, + rule=f"directory:{classified_dir_path_2}", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + + with TestRun.step(f"Prepare {filesystem.name} filesystem " + f"and mount {core.system_path} at {mountpoint}."): + core.create_filesystem(fs_type=filesystem) + core.mount(mount_point=mountpoint) + sync() + + with TestRun.step(f"Create a non-classified directory: {non_classified_dir_path}."): + dir_1 = Directory.create_directory(path=non_classified_dir_path) + + with TestRun.step(f"Rename {non_classified_dir_path} to {classified_dir_path_1}."): + dir_1.move(destination=classified_dir_path_1) + + with TestRun.step("Create files with delay check."): + create_files_with_classification_delay_check( + cache, directory=dir_1, ioclass_id=ioclass_id_1) + + with TestRun.step(f"Create {classified_dir_path_2}/subdir."): + dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True) + + with TestRun.step("Create files with delay check."): + create_files_with_classification_delay_check(cache, directory=dir_2, + ioclass_id=ioclass_id_2) + sync() + drop_caches(DropCachesMode.ALL) + + with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."): + dir_2.move(destination=classified_dir_path_1) + + with TestRun.step("Read files with reclassification check."): + read_files_with_reclassification_check(cache, + target_ioclass_id=ioclass_id_1, + source_ioclass_id=ioclass_id_2, + directory=dir_2, with_delay=False) + sync() + drop_caches(DropCachesMode.ALL) + + with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."): + dir_2.move(destination=mountpoint) + + with TestRun.step("Read files with reclassification check."): + read_files_with_reclassification_check(cache, + target_ioclass_id=0, source_ioclass_id=ioclass_id_1, + directory=dir_2, with_delay=False) + + with TestRun.step(f"Remove {classified_dir_path_2}."): + fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True) + sync() + drop_caches(DropCachesMode.ALL) + + with TestRun.step(f"Rename {classified_dir_path_1} to {classified_dir_path_2}."): + dir_1.move(destination=classified_dir_path_2) + + with TestRun.step("Read files with reclassification check."): + read_files_with_reclassification_check(cache, + target_ioclass_id=ioclass_id_2, + source_ioclass_id=ioclass_id_1, + directory=dir_1, with_delay=True) + + with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."): + dir_1.move(destination=non_classified_dir_path) + + with TestRun.step("Read files with reclassification check."): + read_files_with_reclassification_check(cache, + target_ioclass_id=0, source_ioclass_id=ioclass_id_2, + directory=dir_1, with_delay=True) + + 
+def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int): + start_time = datetime.now() + occupancy_after = cache.get_io_class_statistics( io_class_id=ioclass_id).usage_stats.occupancy - file_path = f"{test_dir_path}/test_file" - (Dd().input("/dev/urandom").output(file_path).oflag("sync") - .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run()) - sync() - drop_caches(DropCachesMode.ALL) - test_file = File(file_path).refresh_item() + dd_blocks = 10 + dd_size = Size(dd_blocks, Unit.Blocks4096) + file_counter = 0 + unclassified_files = [] + time_from_start = datetime.now() - start_time + while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY: + occupancy_before = occupancy_after + file_path = f"{directory.full_path}/test_file_{file_counter}" + file_counter += 1 + time_from_start = datetime.now() - start_time + (Dd().input("/dev/zero").output(file_path).oflag("sync") + .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run()) + occupancy_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + if occupancy_after - occupancy_before < dd_size: + unclassified_files.append(file_path) - TestRun.LOGGER.info("Checking classified occupancy") - classified_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).usage_stats.occupancy - check_occupancy(classified_before + test_file.size, classified_after) + if len(unclassified_files) == file_counter: + pytest.xfail("No files were properly classified within max delay time!") - TestRun.LOGGER.info("Moving test file out of classified directory") - classified_before = classified_after - non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy - test_file.move(destination=mountpoint) - sync() - drop_caches(DropCachesMode.ALL) + if len(unclassified_files): + TestRun.LOGGER.info("Rewriting unclassified test files...") + for file_path in unclassified_files: + (Dd().input("/dev/zero").output(file_path).oflag("sync") + .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run()) - TestRun.LOGGER.info("Checking classified occupancy") - classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - check_occupancy(classified_before, classified_after) - TestRun.LOGGER.info("Checking non-classified occupancy") - non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy - check_occupancy(non_classified_before, non_classified_after) - TestRun.LOGGER.info("Reading test file") - classified_before = classified_after - non_classified_before = non_classified_after - (Dd().input(test_file.full_path).output("/dev/null") - .block_size(Size(1, Unit.MebiByte)).run()) +def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int, + directory: Directory, with_delay: bool): + start_time = datetime.now() + target_occupancy_after = cache.get_io_class_statistics( + io_class_id=target_ioclass_id).usage_stats.occupancy + source_occupancy_after = cache.get_io_class_statistics( + io_class_id=source_ioclass_id).usage_stats.occupancy + unclassified_files = [] - TestRun.LOGGER.info("Checking classified occupancy") - classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - check_occupancy(classified_before - test_file.size, classified_after) - TestRun.LOGGER.info("Checking non-classified occupancy") - non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy - 
check_occupancy(non_classified_before + test_file.size, non_classified_after) + for file in [item for item in directory.ls() if isinstance(item, File)]: + target_occupancy_before = target_occupancy_after + source_occupancy_before = source_occupancy_after + time_from_start = datetime.now() - start_time + (Dd().input(file.full_path).output("/dev/null") + .block_size(Size(1, Unit.Blocks4096)).run()) + target_occupancy_after = cache.get_io_class_statistics( + io_class_id=target_ioclass_id).usage_stats.occupancy + source_occupancy_after = cache.get_io_class_statistics( + io_class_id=source_ioclass_id).usage_stats.occupancy + if target_occupancy_after < target_occupancy_before: + pytest.xfail("Target IO class occupancy lowered!") + elif target_occupancy_after - target_occupancy_before < file.size: + unclassified_files.append(file) + if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY: + continue + pytest.xfail("Target IO class occupancy not changed properly!") + if source_occupancy_after >= source_occupancy_before: + if file not in unclassified_files: + unclassified_files.append(file) + if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY: + continue + pytest.xfail("Source IO class occupancy not changed properly!") - TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}") - classified_before = classified_after - non_classified_before = non_classified_after - test_file.move(destination=nested_dir_path) - sync() - drop_caches(DropCachesMode.ALL) + if len(unclassified_files): + TestRun.LOGGER.info("Rereading unclassified test files...") + sync() + drop_caches(DropCachesMode.ALL) + for file in unclassified_files: + (Dd().input(file.full_path).output("/dev/null") + .block_size(Size(1, Unit.Blocks4096)).run()) - TestRun.LOGGER.info("Checking classified occupancy") - classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - check_occupancy(classified_before, classified_after) - TestRun.LOGGER.info("Checking non-classified occupancy") - non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy - check_occupancy(non_classified_before, non_classified_after) - TestRun.LOGGER.info("Reading test file") - classified_before = classified_after - non_classified_before = non_classified_after - (Dd().input(test_file.full_path).output("/dev/null") - .block_size(Size(1, Unit.MebiByte)).run()) - - TestRun.LOGGER.info("Checking classified occupancy") - classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - check_occupancy(classified_before + test_file.size, classified_after) - TestRun.LOGGER.info("Checking non-classified occupancy") - non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy - check_occupancy(non_classified_before - test_file.size, non_classified_after) +def check_occupancy(expected: Size, actual: Size): + if expected != actual: + pytest.xfail("Occupancy check failed!\n" + f"Expected: {expected}, actual: {actual}") diff --git a/test/functional/tests/io_class/test_io_class_file.py b/test/functional/tests/io_class/test_io_class_file.py index 75e6b73..96b7955 100644 --- a/test/functional/tests/io_class/test_io_class_file.py +++ b/test/functional/tests/io_class/test_io_class_file.py @@ -4,236 +4,267 @@ # import random - import pytest +from api.cas import ioclass_config, casadm +from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools.dd import 
Dd from test_tools.disk_utils import Filesystem from test_utils.filesystem.file import File -from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches -from .io_class_common import * +from test_utils.os_utils import sync, DropCachesMode, drop_caches +from test_utils.size import Size, Unit +from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_ioclass_file_extension(): - cache, core = prepare() + """ + title: Test IO classification by file extension. + description: Test if file extension classification works properly. + pass_criteria: + - No kernel bug. + - IO is classified properly based on IO class rule with file extension. + """ iterations = 50 ioclass_id = 1 tested_extension = "tmp" wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"] dd_size = Size(4, Unit.KibiByte) dd_count = 10 - - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule=f"extension:{tested_extension}&done", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - - TestRun.LOGGER.info( - f"Preparing filesystem and mounting {core.system_path} at {mountpoint}" - ) - - core.create_filesystem(Filesystem.ext3) - core.mount(mountpoint) - - cache.flush_cache() - - # Check if file with proper extension is cached dd = ( - Dd() - .input("/dev/zero") - .output(f"{mountpoint}/test_file.{tested_extension}") - .count(dd_count) - .block_size(dd_size) - ) - TestRun.LOGGER.info(f"Writing to file with cached extension.") - for i in range(iterations): - dd.run() - sync() - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count: - TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).") - - cache.flush_cache() - - # Check if file with improper extension is not cached - TestRun.LOGGER.info(f"Writing to file with no cached extension.") - for ext in wrong_extensions: - dd = ( - Dd() - .input("/dev/zero") - .output(f"{mountpoint}/test_file.{ext}") + Dd().input("/dev/zero") + .output(f"{mountpoint}/test_file.{tested_extension}") .count(dd_count) .block_size(dd_size) + ) + + with TestRun.step("Prepare cache and core."): + cache, core = prepare() + + with TestRun.step("Create and load IO class config."): + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule=f"extension:{tested_extension}&done", + ioclass_config_path=ioclass_config_path, ) - dd.run() - sync() - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != 0: - TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).") + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + + with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."): + core.create_filesystem(Filesystem.ext3) + core.mount(mountpoint) + + with TestRun.step("Flush cache."): + cache.flush_cache() + + with TestRun.step(f"Write to file with cached extension and check if it is properly cached."): + for i in range(iterations): + dd.run() + sync() + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count: + TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).") 
+
+    with TestRun.step("Flush cache."):
+        cache.flush_cache()
+
+    with TestRun.step("Write to files with non-cached extensions "
+                      "and check if they are not cached."):
+        for ext in wrong_extensions:
+            dd = (
+                Dd().input("/dev/zero")
+                .output(f"{mountpoint}/test_file.{ext}")
+                .count(dd_count)
+                .block_size(dd_size)
+            )
+            dd.run()
+            sync()
+            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
+            if dirty.get_value(Unit.Blocks4096) != 0:
+                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_name_prefix():
-    cache, core = prepare()
+    """
+    title: Test IO classification by file name prefix.
+    description: Test if file name prefix classification works properly.
+    pass_criteria:
+      - No kernel bug.
+      - IO is classified properly based on IO class rule with file name prefix.
+    """
 
+    ioclass_id = 1
     cached_files = ["test", "test.txt", "test1", "test1.txt"]
     not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 10
 
-    ioclass_config.remove_ioclass_config()
-    ioclass_config.create_ioclass_config(False)
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
 
-    # Avoid caching anything else than files with specified prefix
-    ioclass_config.add_ioclass(
-        ioclass_id=0,
-        eviction_priority=255,
-        allocation=False,
-        rule=f"unclassified",
-        ioclass_config_path=ioclass_config_path,
-    )
-    # Enables file with specified prefix to be cached
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"file_name_prefix:test&done",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    with TestRun.step("Create and load IO class config."):
+        ioclass_config.remove_ioclass_config()
+        ioclass_config.create_ioclass_config(False)
 
-    TestRun.LOGGER.info(
-        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
-    )
+        # Avoid caching anything else than files with specified prefix
+        ioclass_config.add_ioclass(
+            ioclass_id=0,
+            eviction_priority=255,
+            allocation=False,
+            rule="unclassified",
+            ioclass_config_path=ioclass_config_path,
+        )
+        # Enables file with specified prefix to be cached
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule="file_name_prefix:test&done",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    previous_occupancy = cache.get_occupancy()
+    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
+        previous_occupancy = cache.get_occupancy()
 
-    core.create_filesystem(Filesystem.ext3)
-    core.mount(mountpoint)
+        core.create_filesystem(Filesystem.ext3)
+        core.mount(mountpoint)
 
-    current_occupancy = cache.get_occupancy()
-    if previous_occupancy.get_value() > current_occupancy.get_value():
-        TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
-                     f"than before ({str(previous_occupancy)}).")
+        current_occupancy = cache.get_occupancy()
+        if previous_occupancy.get_value() > current_occupancy.get_value():
+            TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
+                         f"than before ({str(previous_occupancy)}).")
 
-    # Filesystem creation caused metadata IO which is not supposed
-    # to be cached
+        # Filesystem creation caused metadata IO which 
is not supposed + # to be cached # Check if files with proper prefix are cached - TestRun.LOGGER.info(f"Writing files which are supposed to be cached.") - for f in cached_files: - dd = ( - Dd() - .input("/dev/zero") - .output(f"{mountpoint}/{f}") - .count(dd_count) - .block_size(dd_size) - ) - dd.run() - sync() - current_occupancy = cache.get_occupancy() - expected_occupancy = previous_occupancy + (dd_size * dd_count) - if current_occupancy != expected_occupancy: - TestRun.fail(f"Current occupancy value is not valid. " - f"(Expected: {str(expected_occupancy)}, actual: {str(current_occupancy)})") - previous_occupancy = current_occupancy + with TestRun.step(f"Write files which are supposed to be cached and check " + f"if they are cached."): + for f in cached_files: + dd = ( + Dd().input("/dev/zero") + .output(f"{mountpoint}/{f}") + .count(dd_count) + .block_size(dd_size) + ) + dd.run() + sync() + current_occupancy = cache.get_occupancy() + expected_occupancy = previous_occupancy + (dd_size * dd_count) + if current_occupancy != expected_occupancy: + TestRun.fail(f"Current occupancy value is not valid. " + f"(Expected: {str(expected_occupancy)}, " + f"actual: {str(current_occupancy)})") + previous_occupancy = current_occupancy - cache.flush_cache() + with TestRun.step("Flush cache."): + cache.flush_cache() # Check if file with improper extension is not cached - TestRun.LOGGER.info(f"Writing files which are not supposed to be cached.") - for f in not_cached_files: - dd = ( - Dd() - .input("/dev/zero") - .output(f"{mountpoint}/{f}") - .count(dd_count) - .block_size(dd_size) - ) - dd.run() - sync() - current_occupancy = cache.get_occupancy() - if current_occupancy != previous_occupancy: - TestRun.fail(f"Current occupancy value is not valid. " - f"(Expected: {str(previous_occupancy)}, actual: {str(current_occupancy)})") + with TestRun.step(f"Write files which are not supposed to be cached and check if " + f"they are not cached."): + for f in not_cached_files: + dd = ( + Dd().input("/dev/zero") + .output(f"{mountpoint}/{f}") + .count(dd_count) + .block_size(dd_size) + ) + dd.run() + sync() + current_occupancy = cache.get_occupancy() + if current_occupancy != previous_occupancy: + TestRun.fail(f"Current occupancy value is not valid. " + f"(Expected: {str(previous_occupancy)}, " + f"actual: {str(current_occupancy)})") @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_ioclass_file_extension_preexisting_filesystem(): - """Create files on filesystem, add device with filesystem as a core, - write data to files and check if they are cached properly""" - cache, core = prepare() + """ + title: Test IO classification by file extension with preexisting filesystem on core device. + description: | + Test if file extension classification works properly when there is an existing + filesystem on core device. + pass_criteria: + - No kernel bug. + - IO is classified properly based on IO class rule with file extension + after mounting core device. 
+    """
 
     ioclass_id = 1
     extensions = ["tmp", "tm", "out", "txt", "log", "123"]
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 10
 
-    TestRun.LOGGER.info(f"Preparing files on raw block device")
-    casadm.remove_core(cache.cache_id, core_id=core.core_id)
-    core.core_device.create_filesystem(Filesystem.ext3)
-    core.core_device.mount(mountpoint)
+    with TestRun.step("Prepare cache and core devices."):
+        cache, core = prepare()
 
-    # Prepare files
-    for ext in extensions:
-        dd = (
-            Dd()
-            .input("/dev/zero")
-            .output(f"{mountpoint}/test_file.{ext}")
-            .count(dd_count)
-            .block_size(dd_size)
+    with TestRun.step("Prepare files on raw block device."):
+        casadm.remove_core(cache.cache_id, core_id=core.core_id)
+        core.core_device.create_filesystem(Filesystem.ext3)
+        core.core_device.mount(mountpoint)
+
+        for ext in extensions:
+            dd = (
+                Dd().input("/dev/zero")
+                .output(f"{mountpoint}/test_file.{ext}")
+                .count(dd_count)
+                .block_size(dd_size)
+            )
+            dd.run()
+        core.core_device.unmount()
+
+    with TestRun.step("Create IO class config."):
+        rule = "|".join([f"extension:{ext}" for ext in extensions])
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"{rule}&done",
+            ioclass_config_path=ioclass_config_path,
         )
-        dd.run()
-    core.core_device.unmount()
 
-    # Prepare ioclass config
-    rule = "|".join([f"extension:{ext}" for ext in extensions])
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"{rule}&done",
-        ioclass_config_path=ioclass_config_path,
-    )
+    with TestRun.step("Add device with preexisting data as a core."):
+        core = casadm.add_core(cache, core_dev=core.core_device)
 
-    # Prepare cache for test
-    TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
-    core = casadm.add_core(cache, core_dev=core.core_device)
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    with TestRun.step("Load IO class config."):
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    core.mount(mountpoint)
-    cache.flush_cache()
+    with TestRun.step("Mount core and flush cache."):
+        core.mount(mountpoint)
+        cache.flush_cache()
 
-    # Check if files with proper extensions are cached
-    TestRun.LOGGER.info(f"Writing to file with cached extension.")
-    for ext in extensions:
-        dd = (
-            Dd()
-            .input("/dev/zero")
-            .output(f"{mountpoint}/test_file.{ext}")
-            .count(dd_count)
-            .block_size(dd_size)
-        )
-        dd.run()
-        sync()
-        dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
-        if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
-            TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
+    with TestRun.step("Write to files with cached extensions and check if they are cached."):
+        for ext in extensions:
+            dd = (
+                Dd().input("/dev/zero")
+                .output(f"{mountpoint}/test_file.{ext}")
+                .count(dd_count)
+                .block_size(dd_size)
+            )
+            dd.run()
+            sync()
+            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
+            if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
+                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_offset():
-    cache, core = prepare()
-
+    """
+    title: Test IO classification by file offset.
+    description: Test if file offset classification works properly.
+    pass_criteria:
+      - No kernel bug.
+ - IO is classified properly based on IO class rule with file offset. + """ ioclass_id = 1 iterations = 100 dd_size = Size(4, Unit.KibiByte) @@ -241,66 +272,70 @@ def test_ioclass_file_offset(): min_cached_offset = 16384 max_cached_offset = 65536 - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + with TestRun.step("Prepare cache and core."): + cache, core = prepare() - TestRun.LOGGER.info( - f"Preparing filesystem and mounting {core.system_path} at {mountpoint}" - ) - core.create_filesystem(Filesystem.ext3) - core.mount(mountpoint) - - cache.flush_cache() - - # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first - # nor last sector - min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value) - max_seek = int( - (max_cached_offset - min_cached_offset - Unit.Blocks4096.value) - / Unit.Blocks4096.value - ) - TestRun.LOGGER.info(f"Writing to file within cached offset range") - for i in range(iterations): - file_offset = random.choice(range(min_seek, max_seek)) - dd = ( - Dd() - .input("/dev/zero") - .output(f"{mountpoint}/tmp_file") - .count(dd_count) - .block_size(dd_size) - .seek(file_offset) + with TestRun.step("Create and load IO class config file."): + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done", + ioclass_config_path=ioclass_config_path, ) - dd.run() - sync() - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != 1: - TestRun.LOGGER.error(f"Offset not cached: {file_offset}") + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + + with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."): + core.create_filesystem(Filesystem.ext3) + core.mount(mountpoint) + + with TestRun.step("Flush cache."): cache.flush_cache() - min_seek = 0 - max_seek = int(min_cached_offset / Unit.Blocks4096.value) - TestRun.LOGGER.info(f"Writing to file outside of cached offset range") - for i in range(iterations): - file_offset = random.choice(range(min_seek, max_seek)) - dd = ( - Dd() - .input("/dev/zero") - .output(f"{mountpoint}/tmp_file") - .count(dd_count) - .block_size(dd_size) - .seek(file_offset) + with TestRun.step("Write to file within cached offset range and check if it is cached."): + # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first + # nor last sector + min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value) + max_seek = int( + (max_cached_offset - min_cached_offset - Unit.Blocks4096.value) + / Unit.Blocks4096.value ) - dd.run() - sync() - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != 0: - TestRun.LOGGER.error(f"Inappropriately cached offset: {file_offset}") + + for i in range(iterations): + file_offset = random.choice(range(min_seek, max_seek)) + dd = ( + Dd().input("/dev/zero") + .output(f"{mountpoint}/tmp_file") + .count(dd_count) + .block_size(dd_size) + .seek(file_offset) + ) + dd.run() + sync() + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if 
dirty.get_value(Unit.Blocks4096) != 1: + TestRun.LOGGER.error(f"Offset not cached: {file_offset}") + cache.flush_cache() + + with TestRun.step( + "Write to file outside of cached offset range and check if it is not cached."): + min_seek = 0 + max_seek = int(min_cached_offset / Unit.Blocks4096.value) + TestRun.LOGGER.info(f"Writing to file outside of cached offset range") + for i in range(iterations): + file_offset = random.choice(range(min_seek, max_seek)) + dd = ( + Dd().input("/dev/zero") + .output(f"{mountpoint}/tmp_file") + .count(dd_count) + .block_size(dd_size) + .seek(file_offset) + ) + dd.run() + sync() + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if dirty.get_value(Unit.Blocks4096) != 0: + TestRun.LOGGER.error(f"Inappropriately cached offset: {file_offset}") @pytest.mark.os_dependent @@ -309,53 +344,44 @@ def test_ioclass_file_offset(): @pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_file_size(filesystem): """ - File size IO class rules are configured in a way that each tested file size is unambiguously - classified. - Firstly write operations are tested (creation of files), secondly read operations. + title: Test IO classification by file size. + description: Test if file size classification works properly. + pass_criteria: + - No kernel bug. + - IO is classified properly based on IO class rule with file size. """ - def load_file_size_io_classes(): - # IO class order intentional, do not change - base_size_bytes = int(base_size.get_value(Unit.Byte)) - ioclass_config.add_ioclass( - ioclass_id=1, - eviction_priority=1, - allocation=True, - rule=f"file_size:eq:{base_size_bytes}", - ioclass_config_path=ioclass_config_path, - ) - ioclass_config.add_ioclass( - ioclass_id=2, - eviction_priority=1, - allocation=True, - rule=f"file_size:lt:{base_size_bytes}", - ioclass_config_path=ioclass_config_path, - ) - ioclass_config.add_ioclass( - ioclass_id=3, - eviction_priority=1, - allocation=True, - rule=f"file_size:gt:{base_size_bytes}", - ioclass_config_path=ioclass_config_path, - ) - ioclass_config.add_ioclass( - ioclass_id=4, - eviction_priority=1, - allocation=True, - rule=f"file_size:le:{int(base_size_bytes / 2)}", - ioclass_config_path=ioclass_config_path, - ) - ioclass_config.add_ioclass( - ioclass_id=5, - eviction_priority=1, - allocation=True, - rule=f"file_size:ge:{2 * base_size_bytes}", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - def create_files_and_check_classification(): - TestRun.LOGGER.info("Creating files belonging to different IO classes " - "(classification by writes).") + # File size IO class rules are configured in a way that each tested file size is unambiguously + # classified. + # Firstly write operations are tested (creation of files), secondly read operations. 
+ + base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096) + size_to_class = { + base_size: 1, + base_size - Unit.Blocks4096: 2, + base_size + Unit.Blocks4096: 3, + base_size / 2: 4, + base_size / 2 - Unit.Blocks4096: 4, + base_size / 2 + Unit.Blocks4096: 2, + base_size * 2: 5, + base_size * 2 - Unit.Blocks4096: 3, + base_size * 2 + Unit.Blocks4096: 5, + } + + with TestRun.step("Prepare cache and core."): + cache, core = prepare() + + with TestRun.step("Prepare and load IO class config."): + load_file_size_io_classes(cache, base_size) + + with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} " + f"at {mountpoint}."): + core.create_filesystem(filesystem) + core.mount(mountpoint) + sync() + + with TestRun.step("Create files belonging to different IO classes (classification by writes)."): + test_files = [] for size, ioclass_id in size_to_class.items(): occupancy_before = cache.get_io_class_statistics( io_class_id=ioclass_id).usage_stats.occupancy @@ -371,27 +397,7 @@ def test_ioclass_file_size(filesystem): sync() drop_caches(DropCachesMode.ALL) - def reclassify_files(): - TestRun.LOGGER.info("Reading files belonging to different IO classes " - "(classification by reads).") - for file in test_files: - ioclass_id = size_to_class[file.size] - occupancy_before = cache.get_io_class_statistics( - io_class_id=ioclass_id).usage_stats.occupancy - Dd().input(file.full_path).output("/dev/null").block_size(file.size).run() - occupancy_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).usage_stats.occupancy - actual_blocks = occupancy_after.get_value(Unit.Blocks4096) - expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096) - if actual_blocks != expected_blocks: - TestRun.fail("File not reclassified properly!\n" - f"Expected {occupancy_before + file.size}\n" - f"Actual {occupancy_after}") - sync() - drop_caches(DropCachesMode.ALL) - - def remove_files_classification(): - TestRun.LOGGER.info("Moving all files to 'unclassified' IO class") + with TestRun.step("Move all files to 'unclassified' IO class."): ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path) ioclass_config.create_ioclass_config( add_default_rule=False, ioclass_config_path=ioclass_config_path @@ -416,8 +422,7 @@ def test_ioclass_file_size(filesystem): sync() drop_caches(DropCachesMode.ALL) - def restore_classification_config(): - TestRun.LOGGER.info("Restoring IO class configuration") + with TestRun.step("Restore IO class configuration."): ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path) ioclass_config.create_ioclass_config( add_default_rule=False, ioclass_config_path=ioclass_config_path @@ -429,39 +434,66 @@ def test_ioclass_file_size(filesystem): rule="unclassified", ioclass_config_path=ioclass_config_path, ) - load_file_size_io_classes() + load_file_size_io_classes(cache, base_size) - cache, core = prepare() - base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096) - size_to_class = { - base_size: 1, - base_size - Unit.Blocks4096: 2, - base_size + Unit.Blocks4096: 3, - base_size / 2: 4, - base_size / 2 - Unit.Blocks4096: 4, - base_size / 2 + Unit.Blocks4096: 2, - base_size * 2: 5, - base_size * 2 - Unit.Blocks4096: 3, - base_size * 2 + Unit.Blocks4096: 5, - } + with TestRun.step("Read files belonging to different IO classes (classification by reads)."): + # CAS device should be unmounted and mounted because data can be sometimes still cached by + # OS cache so occupancy statistics will not match + 
core.unmount() + core.mount(mountpoint) + for file in test_files: + ioclass_id = size_to_class[file.size] + occupancy_before = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + Dd().input(file.full_path).output("/dev/null").block_size(file.size).run() + occupancy_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).usage_stats.occupancy + actual_blocks = occupancy_after.get_value(Unit.Blocks4096) + expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096) + if actual_blocks != expected_blocks: + TestRun.fail("File not reclassified properly!\n" + f"Expected {occupancy_before + file.size}\n" + f"Actual {occupancy_after}") + sync() + drop_caches(DropCachesMode.ALL) - load_file_size_io_classes() - TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " - f"and mounting {core.system_path} at {mountpoint}") - core.create_filesystem(filesystem) - core.mount(mountpoint) - sync() - - test_files = [] - create_files_and_check_classification() - - remove_files_classification() - - restore_classification_config() - - # CAS device should be unmounted and mounted because data can be sometimes still cached by - # OS cache so occupancy statistics will not match - core.unmount() - core.mount(mountpoint) - reclassify_files() +def load_file_size_io_classes(cache, base_size): + # IO class order intentional, do not change + base_size_bytes = int(base_size.get_value(Unit.Byte)) + ioclass_config.add_ioclass( + ioclass_id=1, + eviction_priority=1, + allocation=True, + rule=f"file_size:eq:{base_size_bytes}", + ioclass_config_path=ioclass_config_path, + ) + ioclass_config.add_ioclass( + ioclass_id=2, + eviction_priority=1, + allocation=True, + rule=f"file_size:lt:{base_size_bytes}", + ioclass_config_path=ioclass_config_path, + ) + ioclass_config.add_ioclass( + ioclass_id=3, + eviction_priority=1, + allocation=True, + rule=f"file_size:gt:{base_size_bytes}", + ioclass_config_path=ioclass_config_path, + ) + ioclass_config.add_ioclass( + ioclass_id=4, + eviction_priority=1, + allocation=True, + rule=f"file_size:le:{int(base_size_bytes / 2)}", + ioclass_config_path=ioclass_config_path, + ) + ioclass_config.add_ioclass( + ioclass_id=5, + eviction_priority=1, + allocation=True, + rule=f"file_size:ge:{2 * base_size_bytes}", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) diff --git a/test/functional/tests/io_class/test_io_class_process.py b/test/functional/tests/io_class/test_io_class_process.py index 0fc3955..073cfc2 100644 --- a/test/functional/tests/io_class/test_io_class_process.py +++ b/test/functional/tests/io_class/test_io_class_process.py @@ -7,110 +7,132 @@ import time import pytest +from api.cas import ioclass_config, casadm +from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_tools.dd import Dd from test_utils.os_utils import sync, Udev -from .io_class_common import * +from test_utils.size import Size, Unit +from tests.io_class.io_class_common import prepare, ioclass_config_path @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_ioclass_process_name(): - """Check if data generated by process with particular name is cached""" - cache, core = prepare() - + """ + title: Test IO classification by process name. + description: Check if data generated by process with particular name is cached. 
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on the name of the process generating the IO.
+    """
     ioclass_id = 1
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 1
     iterations = 100
 
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"process_name:dd&done",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
 
-    cache.flush_cache()
-
-    Udev.disable()
-
-    TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.")
-    for i in range(iterations):
-        dd = (
-            Dd()
-            .input("/dev/zero")
-            .output(core.system_path)
-            .count(dd_count)
-            .block_size(dd_size)
-            .seek(i)
+    with TestRun.step("Create and load IO class config file."):
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule="process_name:dd&done",
+            ioclass_config_path=ioclass_config_path,
         )
-        dd.run()
-        sync()
-        time.sleep(0.1)
-        dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
-        if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
-            TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+
+    with TestRun.step("Flush cache and disable udev."):
+        cache.flush_cache()
+        Udev.disable()
+
+    with TestRun.step("Check if all data generated by dd process is cached."):
+        for i in range(iterations):
+            dd = (
+                Dd()
+                .input("/dev/zero")
+                .output(core.system_path)
+                .count(dd_count)
+                .block_size(dd_size)
+                .seek(i)
+            )
+            dd.run()
+            sync()
+            time.sleep(0.1)
+            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
+            if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
+                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_pid():
-    cache, core = prepare()
-
+    """
+        title: Test IO classification by process id.
+        description: Check if data generated by process with particular id is cached.
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on the id of the process generating the IO.
+    """
     ioclass_id = 1
     iterations = 20
     dd_count = 100
     dd_size = Size(4, Unit.KibiByte)
 
-    Udev.disable()
+    with TestRun.step("Prepare cache, core and disable udev."):
+        cache, core = prepare()
+        Udev.disable()
 
-    # Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
-    # 'dd' command is created and is appended to 'echo' command instead of running it
-    dd_command = str(
-        Dd()
-        .input("/dev/zero")
-        .output(core.system_path)
-        .count(dd_count)
-        .block_size(dd_size)
-    )
-
-    for i in range(iterations):
-        cache.flush_cache()
-
-        output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
-        if output.exit_code != 0:
-            raise Exception(
-                f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
-            )
-
-        # Few pids might be used by system during test preparation
-        pid = int(output.stdout) + 50
-
-        ioclass_config.add_ioclass(
-            ioclass_id=ioclass_id,
-            eviction_priority=1,
-            allocation=True,
-            rule=f"pid:eq:{pid}&done",
-            ioclass_config_path=ioclass_config_path,
+    with TestRun.step("Prepare dd command."):
+        # Since 'dd' has to be executed right after writing the pid to 'ns_last_pid',
+        # the 'dd' command is created and appended to an 'echo' command instead of being run here
+        dd_command = str(
+            Dd()
+            .input("/dev/zero")
+            .output(core.system_path)
+            .count(dd_count)
+            .block_size(dd_size)
         )
-        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-        TestRun.LOGGER.info(f"Running dd with pid {pid}")
-        # pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
-        dd_and_pid_command = (
-            f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
-        )
-        output = TestRun.executor.run(dd_and_pid_command)
-        if output.exit_code != 0:
-            raise Exception(
-                f"Failed to run dd with target pid. "
-                f"stdout: {output.stdout} \n stderr :{output.stderr}"
+    for _ in TestRun.iteration(range(iterations)):
+        with TestRun.step("Flush cache."):
+            cache.flush_cache()
+
+        with TestRun.step("Prepare and load IO class config."):
+            output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
+            if output.exit_code != 0:
+                raise Exception(
+                    f"Failed to retrieve pid. stdout: {output.stdout}\nstderr: {output.stderr}"
+                )
+
+            # A few pids might be used by the system during test preparation
+            pid = int(output.stdout) + 50
+
+            ioclass_config.add_ioclass(
+                ioclass_id=ioclass_id,
+                eviction_priority=1,
+                allocation=True,
+                rule=f"pid:eq:{pid}&done",
+                ioclass_config_path=ioclass_config_path,
             )
-        sync()
-        dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
-        if dirty.get_value(Unit.Blocks4096) != dd_count:
-            TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
-        ioclass_config.remove_ioclass(ioclass_id)
+            casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+
+        with TestRun.step(f"Run dd with pid {pid}."):
+            # the pid saved in 'ns_last_pid' has to be smaller by one than the target dd pid
+            dd_and_pid_command = (
+                f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
+            )
+            output = TestRun.executor.run(dd_and_pid_command)
+            if output.exit_code != 0:
+                raise Exception(
+                    f"Failed to run dd with target pid. "
+                    f"stdout: {output.stdout}\nstderr: {output.stderr}"
+                )
+            sync()
+
+        with TestRun.step("Check if data was cached properly."):
+            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
+            if dirty.get_value(Unit.Blocks4096) != dd_count:
+                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
+            ioclass_config.remove_ioclass(ioclass_id)
diff --git a/test/functional/tests/io_class/test_io_classification.py b/test/functional/tests/io_class/test_io_classification.py
index 134ca6b..02ec37b 100644
--- a/test/functional/tests/io_class/test_io_classification.py
+++ b/test/functional/tests/io_class/test_io_classification.py
@@ -8,7 +8,9 @@ from itertools import permutations
 
 import pytest
 
+from api.cas import ioclass_config, casadm
 from api.cas.ioclass_config import IoClass
+from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.dd import Dd
@@ -17,15 +19,23 @@ from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 from test_utils.filesystem.file import File
 from test_utils.os_utils import sync, Udev
-from .io_class_common import *
+from test_utils.size import Size, Unit
+from tests.io_class.io_class_common import prepare, ioclass_config_path, mountpoint
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_lba():
-    """Write data to random lba and check if it is cached according to range
-    defined in ioclass rule"""
-    cache, core = prepare()
+    """
+        title: Test IO classification by LBA.
+        description: |
+            Write data to a random LBA and check if it is cached according to the range
+            defined in the ioclass rule.
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on the LBA range defined in the config.
+ """ + ioclass_id = 1 min_cached_lba = 56 max_cached_lba = 200 @@ -33,130 +43,135 @@ def test_ioclass_lba(): dd_size = Size(1, Unit.Blocks512) dd_count = 1 - # Prepare ioclass config - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done", - ioclass_config_path=ioclass_config_path, - ) + with TestRun.step("Prepare cache and core."): + cache, core = prepare() - # Prepare cache for test - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - - cache.flush_cache() - - # Check if lbas from defined range are cached - dirty_count = 0 - # '8' step is set to prevent writing cache line more than once - TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.") - for lba in range(min_cached_lba, max_cached_lba, 8): - dd = ( - Dd() - .input("/dev/zero") - .output(f"{core.system_path}") - .count(dd_count) - .block_size(dd_size) - .seek(lba) + with TestRun.step("Prepare and load IO class config."): + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done", + ioclass_config_path=ioclass_config_path, ) - dd.run() - sync() - dirty_count += 1 + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != dirty_count: - TestRun.LOGGER.error(f"LBA {lba} not cached") + with TestRun.step("Flush cache."): + cache.flush_cache() - cache.flush_cache() + with TestRun.step("Run IO and check if lbas from defined range are cached."): + dirty_count = 0 + # '8' step is set to prevent writing cache line more than once + TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.") + for lba in range(min_cached_lba, max_cached_lba, 8): + dd = ( + Dd().input("/dev/zero") + .output(f"{core.system_path}") + .count(dd_count) + .block_size(dd_size) + .seek(lba) + ) + dd.run() + sync() + dirty_count += 1 - # Check if lba outside of defined range are not cached - TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.") - for i in range(iterations): - rand_lba = random.randrange(2000) - if min_cached_lba <= rand_lba <= max_cached_lba: - continue - dd = ( - Dd() - .input("/dev/zero") - .output(f"{core.system_path}") - .count(dd_count) - .block_size(dd_size) - .seek(rand_lba) - ) - dd.run() - sync() + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if dirty.get_value(Unit.Blocks4096) != dirty_count: + TestRun.LOGGER.error(f"LBA {lba} not cached") - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != 0: - TestRun.LOGGER.error(f"Inappropriately cached lba: {rand_lba}") + with TestRun.step("Flush cache."): + cache.flush_cache() + + with TestRun.step("Run IO and check if lba outside of defined range are not cached."): + TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.") + for i in range(iterations): + rand_lba = random.randrange(2000) + if min_cached_lba <= rand_lba <= max_cached_lba: + continue + dd = ( + Dd().input("/dev/zero") + .output(f"{core.system_path}") + .count(dd_count) + .block_size(dd_size) + .seek(rand_lba) + ) + dd.run() + sync() + + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if dirty.get_value(Unit.Blocks4096) != 0: + 
TestRun.LOGGER.error(f"Inappropriately cached lba: {rand_lba}") @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_ioclass_request_size(): - cache, core = prepare() + """ + title: Test IO classification by request size. + description: Check if requests with size within defined range are cached. + pass_criteria: + - No kernel bug. + - IO is classified properly based on request size range defined in config. + """ ioclass_id = 1 iterations = 100 - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule=f"request_size:ge:8192&request_size:le:16384&done", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + with TestRun.step("Prepare cache and core."): + cache, core = prepare() - Udev.disable() + with TestRun.step("Create and load IO class config."): + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule=f"request_size:ge:8192&request_size:le:16384&done", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - # Check if requests with appropriate size are cached - TestRun.LOGGER.info( - f"Check if requests with size within defined range are cached" - ) - cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)] - for i in range(iterations): + with TestRun.step("Disable udev."): + Udev.disable() + + with TestRun.step("Check if requests with size within defined range are cached."): + cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)] + for i in range(iterations): + cache.flush_cache() + req_size = random.choice(cached_req_sizes) + dd = ( + Dd().input("/dev/zero") + .output(core.system_path) + .count(1) + .block_size(req_size) + .oflag("direct") + ) + dd.run() + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value: + TestRun.fail("Incorrect number of dirty blocks!") + + with TestRun.step("Flush cache."): cache.flush_cache() - req_size = random.choice(cached_req_sizes) - dd = ( - Dd() - .input("/dev/zero") - .output(core.system_path) - .count(1) - .block_size(req_size) - .oflag("direct") - ) - dd.run() - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value: - TestRun.fail("Incorrect number of dirty blocks!") - cache.flush_cache() - - # Check if requests with inappropriate size are not cached - TestRun.LOGGER.info( - f"Check if requests with size outside defined range are not cached" - ) - not_cached_req_sizes = [ - Size(1, Unit.Blocks4096), - Size(8, Unit.Blocks4096), - Size(16, Unit.Blocks4096), - ] - for i in range(iterations): - req_size = random.choice(not_cached_req_sizes) - dd = ( - Dd() - .input("/dev/zero") - .output(core.system_path) - .count(1) - .block_size(req_size) - .oflag("direct") - ) - dd.run() - dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty - if dirty.get_value(Unit.Blocks4096) != 0: - TestRun.fail("Dirty data present!") + with TestRun.step("Check if requests with size outside of defined range are not cached"): + not_cached_req_sizes = [ + Size(1, Unit.Blocks4096), + Size(8, Unit.Blocks4096), + Size(16, Unit.Blocks4096), + ] + for i in range(iterations): + req_size = 
random.choice(not_cached_req_sizes) + dd = ( + Dd().input("/dev/zero") + .output(core.system_path) + .count(1) + .block_size(req_size) + .oflag("direct") + ) + dd.run() + dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty + if dirty.get_value(Unit.Blocks4096) != 0: + TestRun.fail("Dirty data present!") @pytest.mark.os_dependent @@ -165,84 +180,96 @@ def test_ioclass_request_size(): @pytest.mark.parametrizex("filesystem", list(Filesystem) + [False]) def test_ioclass_direct(filesystem): """ - Perform buffered/direct IO to/from files or raw block device. - Data from buffered IO should be cached. - Data from buffered IO should not be cached and if performed to/from already cached data - should cause reclassification to unclassified IO class. + title: Direct IO classification. + description: Check if direct requests are properly cached. + pass_criteria: + - No kernel bug. + - Data from direct IO should be cached. + - Data from buffered IO should not be cached and if performed to/from already cached data + should cause reclassification to unclassified IO class. """ - cache, core = prepare() - Udev.disable() ioclass_id = 1 io_size = Size(random.randint(1000, 2000), Unit.Blocks4096) - # direct IO class - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule="direct", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + with TestRun.step("Prepare cache and core. Disable udev."): + cache, core = prepare() + Udev.disable() - fio = ( - Fio().create_command() - .io_engine(IoEngine.libaio) - .size(io_size) - .offset(io_size) - .read_write(ReadWrite.write) - .target(f"{mountpoint}/tmp_file" if filesystem else core.system_path) - ) - - if filesystem: - TestRun.LOGGER.info( - f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at" - f" {mountpoint}" + with TestRun.step("Create and load IO class config."): + # direct IO class + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule="direct", + ioclass_config_path=ioclass_config_path, ) - core.create_filesystem(filesystem) - core.mount(mountpoint) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + + with TestRun.step("Prepare fio command."): + fio = Fio().create_command() \ + .io_engine(IoEngine.libaio) \ + .size(io_size).offset(io_size) \ + .read_write(ReadWrite.write) \ + .target(f"{mountpoint}/tmp_file" if filesystem else core.system_path) + + with TestRun.step("Prepare filesystem."): + if filesystem: + TestRun.LOGGER.info( + f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at" + f" {mountpoint}" + ) + core.create_filesystem(filesystem) + core.mount(mountpoint) + sync() + else: + TestRun.LOGGER.info("Testing on raw exported object.") + + with TestRun.step(f"Run buffered writes to {'file' if filesystem else 'device'}"): + base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + fio.run() sync() - else: - TestRun.LOGGER.info("Testing on raw exported object") - base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + with TestRun.step("Check if buffered writes are not cached."): + new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + if new_occupancy != base_occupancy: + TestRun.fail("Buffered writes were cached!\n" + f"Expected: {base_occupancy}, actual: {new_occupancy}") - 
TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}") - fio.run() - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - if new_occupancy != base_occupancy: - TestRun.fail("Buffered writes were cached!\n" - f"Expected: {base_occupancy}, actual: {new_occupancy}") + with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"): + fio.direct() + fio.run() + sync() - TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}") - fio.direct() - fio.run() - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - if new_occupancy != base_occupancy + io_size: - TestRun.fail("Wrong number of direct writes was cached!\n" - f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}") + with TestRun.step("Check if direct writes are cached."): + new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + if new_occupancy != base_occupancy + io_size: + TestRun.fail("Wrong number of direct writes was cached!\n" + f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}") - TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}") - fio.remove_param("readwrite").remove_param("direct") - fio.read_write(ReadWrite.read) - fio.run() - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - if new_occupancy != base_occupancy: - TestRun.fail("Buffered reads did not cause reclassification!" - f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}") + with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"): + fio.remove_param("readwrite").remove_param("direct") + fio.read_write(ReadWrite.read) + fio.run() + sync() - TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}") - fio.direct() - fio.run() - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy - if new_occupancy != base_occupancy + io_size: - TestRun.fail("Wrong number of direct reads was cached!\n" - f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}") + with TestRun.step("Check if buffered reads caused reclassification."): + new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + if new_occupancy != base_occupancy: + TestRun.fail("Buffered reads did not cause reclassification!" + f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}") + + with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"): + fio.direct() + fio.run() + sync() + + with TestRun.step("Check if direct reads are cached."): + new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy + if new_occupancy != base_occupancy + io_size: + TestRun.fail("Wrong number of direct reads was cached!\n" + f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}") @pytest.mark.os_dependent @@ -251,89 +278,96 @@ def test_ioclass_direct(filesystem): @pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_metadata(filesystem): """ - Perform operations on files that cause metadata update. - Determine if every such operation results in increased writes to cached metadata. - Exact values may not be tested as each file system has different metadata structure. + title: Metadata IO classification. 
+ description: | + Determine if every operation on files that cause metadata update results in increased + writes to cached metadata. + pass_criteria: + - No kernel bug. + - Metadata is classified properly. """ - cache, core = prepare() - Udev.disable() - - ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID) - # metadata IO class - ioclass_config.add_ioclass( - ioclass_id=ioclass_id, - eviction_priority=1, - allocation=True, - rule="metadata&done", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - - TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " - f"and mounting {core.system_path} at {mountpoint}") - core.create_filesystem(filesystem) - core.mount(mountpoint) - sync() - - requests_to_metadata_before = cache.get_io_class_statistics( - io_class_id=ioclass_id).request_stats.write - TestRun.LOGGER.info("Creating 20 test files") - files = [] - for i in range(1, 21): - file_path = f"{mountpoint}/test_file_{i}" - dd = ( - Dd() - .input("/dev/urandom") - .output(file_path) - .count(random.randint(5, 50)) - .block_size(Size(1, Unit.MebiByte)) - .oflag("sync") - ) - dd.run() - files.append(File(file_path)) - - TestRun.LOGGER.info("Checking requests to metadata") - requests_to_metadata_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).request_stats.write - if requests_to_metadata_after == requests_to_metadata_before: - TestRun.fail("No requests to metadata while creating files!") - - requests_to_metadata_before = requests_to_metadata_after - TestRun.LOGGER.info("Renaming all test files") - for file in files: - file.move(f"{file.full_path}_renamed") - sync() - - TestRun.LOGGER.info("Checking requests to metadata") - requests_to_metadata_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).request_stats.write - if requests_to_metadata_after == requests_to_metadata_before: - TestRun.fail("No requests to metadata while renaming files!") - - requests_to_metadata_before = requests_to_metadata_after + # Exact values may not be tested as each file system has different metadata structure. test_dir_path = f"{mountpoint}/test_dir" - TestRun.LOGGER.info(f"Creating directory {test_dir_path}") - fs_utils.create_directory(path=test_dir_path) - TestRun.LOGGER.info(f"Moving test files into {test_dir_path}") - for file in files: - file.move(test_dir_path) - sync() + with TestRun.step("Prepare cache and core. 
Disable udev."): + cache, core = prepare() + Udev.disable() - TestRun.LOGGER.info("Checking requests to metadata") - requests_to_metadata_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).request_stats.write - if requests_to_metadata_after == requests_to_metadata_before: - TestRun.fail("No requests to metadata while moving files!") + with TestRun.step("Prepare and load IO class config file."): + ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID) + # metadata IO class + ioclass_config.add_ioclass( + ioclass_id=ioclass_id, + eviction_priority=1, + allocation=True, + rule="metadata&done", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - TestRun.LOGGER.info(f"Removing {test_dir_path}") - fs_utils.remove(path=test_dir_path, force=True, recursive=True) + with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} " + f"at {mountpoint}."): + core.create_filesystem(filesystem) + core.mount(mountpoint) + sync() - TestRun.LOGGER.info("Checking requests to metadata") - requests_to_metadata_after = cache.get_io_class_statistics( - io_class_id=ioclass_id).request_stats.write - if requests_to_metadata_after == requests_to_metadata_before: - TestRun.fail("No requests to metadata while deleting directory with files!") + with TestRun.step("Create 20 test files."): + requests_to_metadata_before = cache.get_io_class_statistics( + io_class_id=ioclass_id).request_stats.write + files = [] + for i in range(1, 21): + file_path = f"{mountpoint}/test_file_{i}" + dd = ( + Dd().input("/dev/urandom") + .output(file_path) + .count(random.randint(5, 50)) + .block_size(Size(1, Unit.MebiByte)) + .oflag("sync") + ) + dd.run() + files.append(File(file_path)) + + with TestRun.step("Check requests to metadata."): + requests_to_metadata_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).request_stats.write + if requests_to_metadata_after == requests_to_metadata_before: + TestRun.fail("No requests to metadata while creating files!") + + with TestRun.step("Rename all test files."): + requests_to_metadata_before = requests_to_metadata_after + for file in files: + file.move(f"{file.full_path}_renamed") + sync() + + with TestRun.step("Check requests to metadata."): + requests_to_metadata_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).request_stats.write + if requests_to_metadata_after == requests_to_metadata_before: + TestRun.fail("No requests to metadata while renaming files!") + + with TestRun.step(f"Create directory {test_dir_path}."): + requests_to_metadata_before = requests_to_metadata_after + fs_utils.create_directory(path=test_dir_path) + + TestRun.LOGGER.info(f"Moving test files into {test_dir_path}") + for file in files: + file.move(test_dir_path) + sync() + + with TestRun.step("Check requests to metadata."): + requests_to_metadata_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).request_stats.write + if requests_to_metadata_after == requests_to_metadata_before: + TestRun.fail("No requests to metadata while moving files!") + + with TestRun.step(f"Remove {test_dir_path}."): + fs_utils.remove(path=test_dir_path, force=True, recursive=True) + + with TestRun.step("Check requests to metadata."): + requests_to_metadata_after = cache.get_io_class_statistics( + io_class_id=ioclass_id).request_stats.write + if requests_to_metadata_after == requests_to_metadata_before: + TestRun.fail("No requests to metadata while deleting directory with files!") 
@pytest.mark.os_dependent @@ -342,138 +376,150 @@ def test_ioclass_metadata(filesystem): @pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_id_as_condition(filesystem): """ - Load config in which IO class ids are used as conditions in other IO class definitions. - Check if performed IO is properly classified. + title: IO class as a condition. + description: | + Load config in which IO class ids are used as conditions in other IO class definitions. + pass_criteria: + - No kernel bug. + - IO is classified properly as described in IO class config. """ - cache, core = prepare() - Udev.disable() base_dir_path = f"{mountpoint}/base_dir" ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte) ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte)) - # directory condition - ioclass_config.add_ioclass( - ioclass_id=1, - eviction_priority=1, - allocation=True, - rule=f"directory:{base_dir_path}", - ioclass_config_path=ioclass_config_path, - ) - # file size condition - ioclass_config.add_ioclass( - ioclass_id=2, - eviction_priority=1, - allocation=True, - rule=f"file_size:eq:{ioclass_file_size_bytes}", - ioclass_config_path=ioclass_config_path, - ) - # direct condition - ioclass_config.add_ioclass( - ioclass_id=3, - eviction_priority=1, - allocation=True, - rule="direct", - ioclass_config_path=ioclass_config_path, - ) - # IO class 1 OR 2 condition - ioclass_config.add_ioclass( - ioclass_id=4, - eviction_priority=1, - allocation=True, - rule="io_class:1|io_class:2", - ioclass_config_path=ioclass_config_path, - ) - # IO class 4 AND file size condition (same as IO class 2) - ioclass_config.add_ioclass( - ioclass_id=5, - eviction_priority=1, - allocation=True, - rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}", - ioclass_config_path=ioclass_config_path, - ) - # IO class 3 condition - ioclass_config.add_ioclass( - ioclass_id=6, - eviction_priority=1, - allocation=True, - rule="io_class:3", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + with TestRun.step("Prepare cache and core. 
Disable udev."): + cache, core = prepare() + Udev.disable() - TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " - f"and mounting {core.system_path} at {mountpoint}") - core.create_filesystem(filesystem) - core.mount(mountpoint) - fs_utils.create_directory(base_dir_path) - sync() + with TestRun.step("Create and load IO class config file."): + # directory condition + ioclass_config.add_ioclass( + ioclass_id=1, + eviction_priority=1, + allocation=True, + rule=f"directory:{base_dir_path}", + ioclass_config_path=ioclass_config_path, + ) + # file size condition + ioclass_config.add_ioclass( + ioclass_id=2, + eviction_priority=1, + allocation=True, + rule=f"file_size:eq:{ioclass_file_size_bytes}", + ioclass_config_path=ioclass_config_path, + ) + # direct condition + ioclass_config.add_ioclass( + ioclass_id=3, + eviction_priority=1, + allocation=True, + rule="direct", + ioclass_config_path=ioclass_config_path, + ) + # IO class 1 OR 2 condition + ioclass_config.add_ioclass( + ioclass_id=4, + eviction_priority=1, + allocation=True, + rule="io_class:1|io_class:2", + ioclass_config_path=ioclass_config_path, + ) + # IO class 4 AND file size condition (same as IO class 2) + ioclass_config.add_ioclass( + ioclass_id=5, + eviction_priority=1, + allocation=True, + rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}", + ioclass_config_path=ioclass_config_path, + ) + # IO class 3 condition + ioclass_config.add_ioclass( + ioclass_id=6, + eviction_priority=1, + allocation=True, + rule="io_class:3", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - # IO fulfilling IO class 1 condition (and not IO class 2) - # Should be classified as IO class 4 - base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy - non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte) - (Fio().create_command() - .io_engine(IoEngine.libaio) - .size(non_ioclass_file_size) - .read_write(ReadWrite.write) - .target(f"{base_dir_path}/test_file_1") - .run()) - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy + with TestRun.step(f"Prepare {filesystem.name} filesystem " + f"and mount {core.system_path} at {mountpoint}."): + core.create_filesystem(filesystem) + core.mount(mountpoint) + fs_utils.create_directory(base_dir_path) + sync() - if new_occupancy != base_occupancy + non_ioclass_file_size: - TestRun.fail("Writes were not properly cached!\n" - f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}") + with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if " + "it is classified properly."): + # Should be classified as IO class 4 + base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy + non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte) + (Fio().create_command() + .io_engine(IoEngine.libaio) + .size(non_ioclass_file_size) + .read_write(ReadWrite.write) + .target(f"{base_dir_path}/test_file_1") + .run()) + sync() + new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy - # IO fulfilling IO class 2 condition (and not IO class 1) - # Should be classified as IO class 5 - base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy - (Fio().create_command() - .io_engine(IoEngine.libaio) - .size(ioclass_file_size) - .read_write(ReadWrite.write) - .target(f"{mountpoint}/test_file_2") - .run()) - sync() - new_occupancy = 
cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy + if new_occupancy != base_occupancy + non_ioclass_file_size: + TestRun.fail("Writes were not properly cached!\n" + f"Expected: {base_occupancy + non_ioclass_file_size}, " + f"actual: {new_occupancy}") - if new_occupancy != base_occupancy + ioclass_file_size: - TestRun.fail("Writes were not properly cached!\n" - f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") + with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if " + "it is classified properly."): + # Should be classified as IO class 5 + base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy + (Fio().create_command() + .io_engine(IoEngine.libaio) + .size(ioclass_file_size) + .read_write(ReadWrite.write) + .target(f"{mountpoint}/test_file_2") + .run()) + sync() + new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy - # IO fulfilling IO class 1 and 2 conditions - # Should be classified as IO class 5 - base_occupancy = new_occupancy - (Fio().create_command() - .io_engine(IoEngine.libaio) - .size(ioclass_file_size) - .read_write(ReadWrite.write) - .target(f"{base_dir_path}/test_file_3") - .run()) - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy + if new_occupancy != base_occupancy + ioclass_file_size: + TestRun.fail("Writes were not properly cached!\n" + f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") - if new_occupancy != base_occupancy + ioclass_file_size: - TestRun.fail("Writes were not properly cached!\n" - f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") + with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if " + "it is classified properly."): + # Should be classified as IO class 5 + base_occupancy = new_occupancy + (Fio().create_command() + .io_engine(IoEngine.libaio) + .size(ioclass_file_size) + .read_write(ReadWrite.write) + .target(f"{base_dir_path}/test_file_3") + .run()) + sync() + new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy - # Same IO but direct - # Should be classified as IO class 6 - base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy - (Fio().create_command() - .io_engine(IoEngine.libaio) - .size(ioclass_file_size) - .read_write(ReadWrite.write) - .target(f"{base_dir_path}/test_file_3") - .direct() - .run()) - sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy + if new_occupancy != base_occupancy + ioclass_file_size: + TestRun.fail("Writes were not properly cached!\n" + f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") - if new_occupancy != base_occupancy + ioclass_file_size: - TestRun.fail("Writes were not properly cached!\n" - f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") + with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if " + "it is classified properly."): + # Should be classified as IO class 6 + base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy + (Fio().create_command() + .io_engine(IoEngine.libaio) + .size(ioclass_file_size) + .read_write(ReadWrite.write) + .target(f"{base_dir_path}/test_file_3") + .direct() + .run()) + sync() + new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy + + if new_occupancy != base_occupancy + ioclass_file_size: + 
TestRun.fail("Writes were not properly cached!\n" + f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") @pytest.mark.os_dependent @@ -482,47 +528,54 @@ def test_ioclass_id_as_condition(filesystem): @pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_conditions_or(filesystem): """ - Load config with IO class combining 5 contradicting conditions connected by OR operator. - Check if every IO fulfilling one condition is classified properly. + title: IO class condition 'or'. + description: | + Load config with IO class combining 5 contradicting conditions connected by OR operator. + pass_criteria: + - No kernel bug. + - Every IO fulfilling one condition is classified properly. """ - cache, core = prepare() - Udev.disable() - # directories OR condition - ioclass_config.add_ioclass( - ioclass_id=1, - eviction_priority=1, - allocation=True, - rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:" - f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5", - ioclass_config_path=ioclass_config_path, - ) - casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) + with TestRun.step("Prepare cache and core. Disable udev."): + cache, core = prepare() + Udev.disable() - TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " - f"and mounting {core.system_path} at {mountpoint}") - core.create_filesystem(filesystem) - core.mount(mountpoint) - for i in range(1, 6): - fs_utils.create_directory(f"{mountpoint}/dir{i}") - sync() + with TestRun.step("Create and load IO class config file."): + # directories OR condition + ioclass_config.add_ioclass( + ioclass_id=1, + eviction_priority=1, + allocation=True, + rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:" + f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5", + ioclass_config_path=ioclass_config_path, + ) + casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) - # Perform IO fulfilling each condition and check if occupancy raises - for i in range(1, 6): - file_size = Size(random.randint(25, 50), Unit.MebiByte) - base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy - (Fio().create_command() - .io_engine(IoEngine.libaio) - .size(file_size) - .read_write(ReadWrite.write) - .target(f"{mountpoint}/dir{i}/test_file") - .run()) + with TestRun.step(f"Prepare {filesystem.name} filesystem " + f"and mount {core.system_path} at {mountpoint}."): + core.create_filesystem(filesystem) + core.mount(mountpoint) + for i in range(1, 6): + fs_utils.create_directory(f"{mountpoint}/dir{i}") sync() - new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy - if new_occupancy != base_occupancy + file_size: - TestRun.fail("Occupancy has not increased correctly!\n" - f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}") + with TestRun.step("Perform IO fulfilling each condition and check if occupancy raises."): + for i in range(1, 6): + file_size = Size(random.randint(25, 50), Unit.MebiByte) + base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy + (Fio().create_command() + .io_engine(IoEngine.libaio) + .size(file_size) + .read_write(ReadWrite.write) + .target(f"{mountpoint}/dir{i}/test_file") + .run()) + sync() + new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy + + if new_occupancy != base_occupancy + file_size: + TestRun.fail("Occupancy has not increased correctly!\n" + f"Expected: 
{base_occupancy + file_size}, actual: {new_occupancy}")
 
 
 @pytest.mark.os_dependent
@@ -531,26 +584,34 @@ def test_ioclass_conditions_or(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_conditions_and(filesystem):
     """
-    Load config with IO class combining 5 conditions contradicting at least one other condition
-    connected by AND operator.
-    Check if every IO fulfilling one of the conditions is not classified.
+        title: IO class condition 'and'.
+        description: |
+            Load config with IO class combining 5 conditions connected by the AND operator,
+            each contradicting at least one other condition.
+        pass_criteria:
+          - No kernel bug.
+          - Every IO fulfilling one of the conditions is not classified.
     """
-    cache, core = prepare()
-    Udev.disable()
+
     file_size = Size(random.randint(25, 50), Unit.MebiByte)
     file_size_bytes = int(file_size.get_value(Unit.Byte))
 
-    # directories OR condition
-    ioclass_config.add_ioclass(
-        ioclass_id=1,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
-             f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
-             f"file_size:eq:{file_size_bytes}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    with TestRun.step("Prepare cache and core. Disable udev."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config file."):
+        # five contradictory file size conditions connected by the AND operator
+        ioclass_config.add_ioclass(
+            ioclass_id=1,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
+                 f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
+                 f"file_size:eq:{file_size_bytes}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
     TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                         f"and mounting {core.system_path} at {mountpoint}")
@@ -562,11 +623,11 @@ def test_ioclass_conditions_and(filesystem):
     # Perform IO
     for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]:
         (Fio().create_command()
-         .io_engine(IoEngine.libaio)
-         .size(size)
-         .read_write(ReadWrite.write)
-         .target(f"{mountpoint}/test_file")
-         .run())
+            .io_engine(IoEngine.libaio)
+            .size(size)
+            .read_write(ReadWrite.write)
+            .target(f"{mountpoint}/test_file")
+            .run())
         sync()
         new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
@@ -581,13 +642,13 @@
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_effective_ioclass(filesystem):
     """
-    title: Effective IO class with multiple non-exclusive conditions
-    description: |
-        Test CAS ability to properly classify IO fulfilling multiple conditions based on
-        IO class ids and presence of '&done' annotation in IO class rules
-    pass_criteria:
-      - In every iteration first IO is classified to the last in order IO class
-      - In every iteration second IO is classified to the IO class with '&done' annotation
+        title: Effective IO class with multiple non-exclusive conditions
+        description: |
+            Test CAS ability to properly classify IO fulfilling multiple conditions based on
+            IO class ids and presence of '&done' annotation in IO class rules
+        pass_criteria:
+          - In every iteration first IO is classified to the last in order IO class
+          - In every iteration second IO is classified to the IO class with '&done' annotation
     """
     with TestRun.LOGGER.step(f"Test prepare"):
         cache, core = prepare()