Rewrite IO class tests to use latest Test Framework API

Signed-off-by: Katarzyna Lapinska <katarzyna.lapinska@intel.com>
Katarzyna Lapinska 2020-08-17 09:32:44 +02:00
parent f9f3ce100b
commit 6ac104c3b8
5 changed files with 1323 additions and 1173 deletions

View File

@@ -3,8 +3,6 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import time
import pytest
from test_tools.disk_utils import Filesystem
@@ -12,7 +10,7 @@ from api.cas import ioclass_config, casadm
from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev, drop_caches, DropCachesMode
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from core.test_run import TestRun
@@ -29,7 +27,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached"
def test_ioclass_core_id(filesystem):
"""
title: Test for `core_id` classification rule
dsecription: |
description: |
Test if IO to core with selective allocation enabled is cached and IO to core with
selective allocation disabled is redirected to pass-through mode
pass_criteria:
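The core_id rule this test loads is not visible in this hunk; a minimal sketch of per-core selective allocation, in the style of the other rules in this suite, might look as follows (the id, priority, config path, and the `cache`/`cached_core` objects are illustrative assumptions standing in for the test's prepare step):

from api.cas import ioclass_config, casadm

ioclass_config_path = "/tmp/ioclass_core_id.conf"  # illustrative path

# Hypothetical sketch: cache IO only for the selected core.
ioclass_config.add_ioclass(
    ioclass_id=1,                                   # illustrative id
    eviction_priority=1,
    allocation=True,                                # allocate cache lines for this class
    rule=f"core_id:eq:{cached_core.core_id}&done",  # assumed core_id rule syntax
    ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)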

View File

@@ -4,10 +4,10 @@
#
import random
from datetime import datetime
import pytest
from datetime import datetime
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.dd import Dd
@@ -15,7 +15,8 @@ from test_tools.disk_utils import Filesystem
from test_utils.filesystem.directory import Directory
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev
from .io_class_common import *
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
@pytest.mark.os_dependent
@@ -24,35 +25,41 @@ from .io_class_common import *
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_depth(filesystem):
"""
title: Test IO classification by directory.
description: |
Test if directory classification works properly for deeply nested directories for read and
write operations.
pass_criteria:
- No kernel bug.
- Read and write operations to directories are classified properly.
"""
base_dir_path = f"{mountpoint}/base_dir"
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
Udev.disable()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_dir_path = f"{mountpoint}/base_dir"
TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
with TestRun.step(f"Create the base directory: {base_dir_path}."):
fs_utils.create_directory(base_dir_path)
with TestRun.step(f"Create a nested directory."):
nested_dir_path = base_dir_path
random_depth = random.randint(40, 80)
for i in range(random_depth):
nested_dir_path += f"/dir_{i}"
TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
fs_utils.create_directory(path=nested_dir_path, parents=True)
# Test classification in nested dir by reading a previously unclassified file
TestRun.LOGGER.info("Creating the first file in the nested directory")
with TestRun.step("Create the first file in the nested directory."):
test_file_1 = File(f"{nested_dir_path}/test_file_1")
dd = (
Dd()
.input("/dev/urandom")
Dd().input("/dev/urandom")
.output(test_file_1.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
@@ -62,6 +69,7 @@ def test_ioclass_directory_depth(filesystem):
drop_caches(DropCachesMode.ALL)
test_file_1.refresh_item()
with TestRun.step("Load IO class config."):
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
@@ -73,28 +81,28 @@ def test_ioclass_directory_depth(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Read the file in the nested directory"):
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
TestRun.LOGGER.info("Reading the file in the nested directory")
dd = (
Dd()
.input(test_file_1.full_path)
Dd().input(test_file_1.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
with TestRun.step("Check occupancy after creating the file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
assert new_occupancy == base_occupancy + test_file_1.size, \
"Wrong occupancy after reading file!\n" \
f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
if new_occupancy != base_occupancy + test_file_1.size:
TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
"Expected: {base_occupancy + test_file_1.size}, "
f"actual: {new_occupancy}")
# Test classification in nested dir by creating a file
with TestRun.step("Create the second file in the nested directory"):
base_occupancy = new_occupancy
TestRun.LOGGER.info("Creating the second file in the nested directory")
test_file_2 = File(f"{nested_dir_path}/test_file_2")
dd = (
Dd()
.input("/dev/urandom")
Dd().input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
@@ -104,10 +112,133 @@ def test_ioclass_directory_depth(filesystem):
drop_caches(DropCachesMode.ALL)
test_file_2.refresh_item()
with TestRun.step("Check occupancy after creating the second file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
assert new_occupancy == base_occupancy + test_file_2.size, \
"Wrong occupancy after creating file!\n" \
f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
if new_occupancy != base_occupancy + test_file_2.size:
TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
f"Expected: {base_occupancy + test_file_2.size}, "
f"actual: {new_occupancy}")
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_file_operations(filesystem):
"""
title: Test IO classification by file operations.
description: |
Test if directory classification works properly after file operations like move or rename.
pass_criteria:
- No kernel bug.
- The operations themselves should not cause reclassification but IO after those
operations should be reclassified to proper IO class.
"""
test_dir_path = f"{mountpoint}/test_dir"
nested_dir_path = f"{test_dir_path}/nested_dir"
dd_blocks = random.randint(5, 50)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config file."):
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
with TestRun.step(f"Create directory {nested_dir_path}."):
Directory.create_directory(path=nested_dir_path, parents=True)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Create test file."):
classified_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
sync()
drop_caches(DropCachesMode.ALL)
test_file = File(file_path).refresh_item()
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
with TestRun.step("Move test file out of classified directory."):
classified_before = classified_after
non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
test_file.move(destination=mountpoint)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
with TestRun.step("Read test file."):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before + test_file.size, non_classified_after)
with TestRun.step(f"Move test file to {nested_dir_path}."):
classified_before = classified_after
non_classified_before = non_classified_after
test_file.move(destination=nested_dir_path)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
with TestRun.step("Read test file."):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
with TestRun.step("Check non-classified occupancy."):
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before - test_file.size, non_classified_after)
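The check_occupancy helper called throughout this test is defined outside the visible hunks; a minimal sketch consistent with the logging-based checks above (names per this file; the exact failure handling is an assumption) is:

def check_occupancy(expected: Size, actual: Size):
    # Report a mismatch without aborting the whole test, like the checks above.
    if expected != actual:
        TestRun.LOGGER.error("Occupancy check failed!\n"
                             f"Expected: {expected}, actual: {actual}")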
@pytest.mark.os_dependent
@@ -116,13 +247,115 @@ def test_ioclass_directory_depth(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_dir_operations(filesystem):
"""
Test if directory classification works properly after directory operations like move or rename.
The operations themselves should not cause reclassification but IO after those operations
should be reclassified to proper IO class.
Directory classification may work with a delay after loading IO class configuration or
title: Test IO classification by directory operations.
description: |
Test if directory classification works properly after directory operations like move or
rename.
pass_criteria:
- No kernel bug.
- The operations themselves should not cause reclassification but IO after those
operations should be reclassified to proper IO class.
- Directory classification may work with a delay after loading IO class configuration or
move/rename operations. Test checks if maximum delay is not exceeded.
"""
def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int):
non_classified_dir_path = f"{mountpoint}/non_classified"
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config file."):
proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
ioclass_id_1 = proper_ids[0]
classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
ioclass_id_2 = proper_ids[1]
classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
# directory IO classes
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
with TestRun.step(f"Create a non-classified directory: {non_classified_dir_path}."):
dir_1 = Directory.create_directory(path=non_classified_dir_path)
with TestRun.step(f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
dir_1.move(destination=classified_dir_path_1)
with TestRun.step("Create files with delay check."):
create_files_with_classification_delay_check(
cache, directory=dir_1, ioclass_id=ioclass_id_1)
with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
with TestRun.step("Create files with delay check."):
create_files_with_classification_delay_check(cache, directory=dir_2,
ioclass_id=ioclass_id_2)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
dir_2.move(destination=classified_dir_path_1)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=ioclass_id_1,
source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
dir_2.move(destination=mountpoint)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
with TestRun.step(f"Remove {classified_dir_path_2}."):
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
dir_1.move(destination=classified_dir_path_2)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=ioclass_id_2,
source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
dir_1.move(destination=non_classified_dir_path)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int):
start_time = datetime.now()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
@@ -152,8 +385,9 @@ def test_ioclass_directory_dir_operations(filesystem):
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
def read_files_with_reclassification_check(
target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
directory: Directory, with_delay: bool):
start_time = datetime.now()
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
@@ -193,205 +427,8 @@ def test_ioclass_directory_dir_operations(filesystem):
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
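Both helpers implement the same delay-tolerant pattern: after issuing IO, poll the IO class occupancy until it reaches the expected value or a maximum classification delay elapses. A condensed sketch of that loop (the helper bodies are mostly elided by this diff; the timeout value is an assumption):

import time
from datetime import datetime, timedelta

def wait_for_expected_occupancy(cache, ioclass_id: int, expected,
                                max_delay=timedelta(seconds=2)):
    # Poll class statistics until occupancy matches or the allowed delay passes.
    start_time = datetime.now()
    while datetime.now() - start_time < max_delay:
        occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if occupancy == expected:
            return True
        time.sleep(0.1)
    return False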
cache, core = prepare()
Udev.disable()
proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
ioclass_id_1 = proper_ids[0]
classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
ioclass_id_2 = proper_ids[1]
classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
# directory IO classes
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
non_classified_dir_path = f"{mountpoint}/non_classified"
TestRun.LOGGER.info(
f"Creating a non-classified directory: {non_classified_dir_path}")
dir_1 = Directory.create_directory(path=non_classified_dir_path)
TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
dir_1.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1)
TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
dir_2.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
dir_2.move(destination=mountpoint)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
dir_1.move(destination=classified_dir_path_2)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
dir_1.move(destination=non_classified_dir_path)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_file_operations(filesystem):
"""
Test if directory classification works properly after file operations like move or rename.
The operations themselves should not cause reclassification but IO after those operations
should be reclassified to proper IO class.
"""
def check_occupancy(expected: Size, actual: Size):
if expected != actual:
pytest.xfail("Occupancy check failed!\n"
f"Expected: {expected}, actual: {actual}")
cache, core = prepare()
Udev.disable()
test_dir_path = f"{mountpoint}/test_dir"
nested_dir_path = f"{test_dir_path}/nested_dir"
dd_blocks = random.randint(5, 50)
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
Directory.create_directory(path=nested_dir_path, parents=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Creating test file")
classified_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
sync()
drop_caches(DropCachesMode.ALL)
test_file = File(file_path).refresh_item()
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Moving test file out of classified directory")
classified_before = classified_after
non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
test_file.move(destination=mountpoint)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before + test_file.size, non_classified_after)
TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
classified_before = classified_after
non_classified_before = non_classified_after
test_file.move(destination=nested_dir_path)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before - test_file.size, non_classified_after)

View File

@@ -4,28 +4,46 @@
#
import random
import pytest
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches
from .io_class_common import *
from test_utils.os_utils import sync, DropCachesMode, drop_caches
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension():
cache, core = prepare()
"""
title: Test IO classification by file extension.
description: Test if file extension classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file extension.
"""
iterations = 50
ioclass_id = 1
tested_extension = "tmp"
wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
@@ -35,24 +53,14 @@ def test_ioclass_file_extension():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
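The rule line of this IO class is elided by the hunk; judging from the extension syntax used later in this file (the "|".join over extensions in the preexisting-filesystem test), it presumably reads:

rule=f"extension:{tested_extension}&done",  # assumed, by analogy with the rule built below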
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if file with proper extension is cached
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
TestRun.LOGGER.info(f"Writing to file with cached extension.")
with TestRun.step(f"Write to file with cached extension and check if it is properly cached."):
for i in range(iterations):
dd.run()
sync()
@@ -60,14 +68,13 @@ def test_ioclass_file_extension():
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing to file with no cached extension.")
with TestRun.step(f"Write to file with not cached extension and check if it is not cached."):
for ext in wrong_extensions:
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
@@ -82,13 +89,24 @@ def test_ioclass_file_extension():
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_name_prefix():
cache, core = prepare()
"""
title: Test IO classification by file name prefix.
description: Test if file name prefix classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file name prefix.
"""
ioclass_id = 1
cached_files = ["test", "test.txt", "test1", "test1.txt"]
not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config."):
ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False)
@@ -110,10 +128,7 @@ def test_ioclass_file_name_prefix():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
previous_occupancy = cache.get_occupancy()
core.create_filesystem(Filesystem.ext3)
@@ -128,11 +143,11 @@ def test_ioclass_file_name_prefix():
# to be cached
# Check if files with proper prefix are cached
TestRun.LOGGER.info(f"Writing files which are supposed to be cached.")
with TestRun.step(f"Write files which are supposed to be cached and check "
f"if they are cached."):
for f in cached_files:
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
@@ -143,17 +158,19 @@ def test_ioclass_file_name_prefix():
expected_occupancy = previous_occupancy + (dd_size * dd_count)
if current_occupancy != expected_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(expected_occupancy)}, actual: {str(current_occupancy)})")
f"(Expected: {str(expected_occupancy)}, "
f"actual: {str(current_occupancy)})")
previous_occupancy = current_occupancy
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing files which are not supposed to be cached.")
with TestRun.step(f"Write files which are not supposed to be cached and check if "
f"they are not cached."):
for f in not_cached_files:
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
@@ -163,30 +180,39 @@ def test_ioclass_file_name_prefix():
current_occupancy = cache.get_occupancy()
if current_occupancy != previous_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(previous_occupancy)}, actual: {str(current_occupancy)})")
f"(Expected: {str(previous_occupancy)}, "
f"actual: {str(current_occupancy)})")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension_preexisting_filesystem():
"""Create files on filesystem, add device with filesystem as a core,
write data to files and check if they are cached properly"""
cache, core = prepare()
"""
title: Test IO classification by file extension with preexisting filesystem on core device.
description: |
Test if file extension classification works properly when there is an existing
filesystem on core device.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file extension
after mounting core device.
"""
ioclass_id = 1
extensions = ["tmp", "tm", "out", "txt", "log", "123"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
TestRun.LOGGER.info(f"Preparing files on raw block device")
with TestRun.step("Prepare cache and core devices."):
cache, core = prepare()
with TestRun.step(f"Prepare files on raw block device."):
casadm.remove_core(cache.cache_id, core_id=core.core_id)
core.core_device.create_filesystem(Filesystem.ext3)
core.core_device.mount(mountpoint)
# Prepare files
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
@@ -194,7 +220,7 @@ def test_ioclass_file_extension_preexisting_filesystem():
dd.run()
core.core_device.unmount()
# Prepare ioclass config
with TestRun.step("Create IO class config."):
rule = "|".join([f"extension:{ext}" for ext in extensions])
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
@@ -204,20 +230,20 @@ def test_ioclass_file_extension_preexisting_filesystem():
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
with TestRun.step(f"Add device with preexisting data as a core."):
core = casadm.add_core(cache, core_dev=core.core_device)
with TestRun.step("Load IO class config."):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Mount core and flush cache."):
core.mount(mountpoint)
cache.flush_cache()
# Check if files with proper extensions are cached
TestRun.LOGGER.info(f"Writing to file with cached extension.")
with TestRun.step(f"Write to file with cached extension and check if they are cached."):
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
@@ -232,8 +258,13 @@ def test_ioclass_file_extension_preexisting_filesystem():
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_offset():
cache, core = prepare()
"""
title: Test IO classification by file offset.
description: Test if file offset classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file offset.
"""
ioclass_id = 1
iterations = 100
dd_size = Size(4, Unit.KibiByte)
@@ -241,6 +272,10 @@ def test_ioclass_file_offset():
min_cached_offset = 16384
max_cached_offset = 65536
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config file."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
@@ -250,14 +285,14 @@ def test_ioclass_file_offset():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
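The elided rule presumably bounds the cached range with the strict inequalities mentioned in the comment below, along the lines of:

rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",  # assumed syntax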
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step("Write to file within cached offset range and check if it is cached."):
# Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
# nor last sector
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
@@ -265,12 +300,11 @@ def test_ioclass_file_offset():
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
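# Worked example with this test's constants (an illustrative aside, not part of the diff):
# min_seek = (16384 + 4096) / 4096 = 5 and max_seek = (65536 - 16384 - 4096) / 4096 = 11,
# so the chosen 4 KiB writes land at byte offsets 20480..40960, strictly inside
# the (16384, 65536) range required by the strict-inequality rule.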
TestRun.LOGGER.info(f"Writing to file within cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
@@ -283,14 +317,15 @@ def test_ioclass_file_offset():
TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
cache.flush_cache()
with TestRun.step(
"Write to file outside of cached offset range and check if it is not cached."):
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
@@ -309,11 +344,121 @@ def test_ioclass_file_offset():
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_file_size(filesystem):
"""
File size IO class rules are configured in a way that each tested file size is unambiguously
classified.
Firstly write operations are tested (creation of files), secondly read operations.
title: Test IO classification by file size.
description: Test if file size classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file size.
"""
def load_file_size_io_classes():
# File size IO class rules are configured in a way that each tested file size is unambiguously
# classified.
# Firstly write operations are tested (creation of files), secondly read operations.
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Create files belonging to different IO classes (classification by writes)."):
test_files = []
for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
file_path = f"{mountpoint}/test_file_{size.get_value()}"
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
if occupancy_after != occupancy_before + size:
TestRun.fail("File not cached properly!\n"
f"Expected {occupancy_before + size}\n"
f"Actual {occupancy_after}")
test_files.append(File(file_path).refresh_item())
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Move all files to 'unclassified' IO class."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
occupancy_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
for file in test_files:
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
if occupancy_after != occupancy_before + file.size:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
occupancy_before = occupancy_after
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Restore IO class configuration."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
load_file_size_io_classes(cache, base_size)
with TestRun.step("Read files belonging to different IO classes (classification by reads)."):
# CAS device should be unmounted and mounted again because data can sometimes still be
# cached by the OS page cache, so occupancy statistics would not match
core.unmount()
core.mount(mountpoint)
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
def load_file_size_io_classes(cache, base_size):
# IO class order intentional, do not change
base_size_bytes = int(base_size.get_value(Unit.Byte))
ioclass_config.add_ioclass(
@@ -352,116 +497,3 @@ def test_ioclass_file_size(filesystem):
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
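For reference, each add_ioclass call above appends one CSV row to the config file that casadm.load_io_classes consumes; the generated file looks roughly like this (header per the Open CAS IO class config format; the rows are illustrative since the actual file_size rules are elided by this diff):

IO class id,IO class name,Eviction priority,Allocation
1,file_size:eq:4096000,1,1
2,file_size:lt:4096000,1,1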
def create_files_and_check_classification():
TestRun.LOGGER.info("Creating files belonging to different IO classes "
"(classification by writes).")
for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
file_path = f"{mountpoint}/test_file_{size.get_value()}"
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
if occupancy_after != occupancy_before + size:
TestRun.fail("File not cached properly!\n"
f"Expected {occupancy_before + size}\n"
f"Actual {occupancy_after}")
test_files.append(File(file_path).refresh_item())
sync()
drop_caches(DropCachesMode.ALL)
def reclassify_files():
TestRun.LOGGER.info("Reading files belonging to different IO classes "
"(classification by reads).")
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
def remove_files_classification():
TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
occupancy_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
for file in test_files:
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
if occupancy_after != occupancy_before + file.size:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
occupancy_before = occupancy_after
sync()
drop_caches(DropCachesMode.ALL)
def restore_classification_config():
TestRun.LOGGER.info("Restoring IO class configuration")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
load_file_size_io_classes()
cache, core = prepare()
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
load_file_size_io_classes()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
test_files = []
create_files_and_check_classification()
remove_files_classification()
restore_classification_config()
# CAS device should be unmounted and mounted because data can be sometimes still cached by
# OS cache so occupancy statistics will not match
core.unmount()
core.mount(mountpoint)
reclassify_files()

View File

@@ -7,23 +7,34 @@ import time
import pytest
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev
from .io_class_common import *
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import prepare, ioclass_config_path
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_process_name():
"""Check if data generated by process with particular name is cached"""
cache, core = prepare()
"""
title: Test IO classification by process name.
description: Check if data generated by process with particular name is cached.
pass_criteria:
- No kernel bug.
- IO is classified properly based on the name of the process generating IO.
"""
ioclass_id = 1
dd_size = Size(4, Unit.KibiByte)
dd_count = 1
iterations = 100
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config file."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
@@ -33,11 +44,11 @@ def test_ioclass_process_name():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Flush cache and disable udev."):
cache.flush_cache()
Udev.disable()
TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.")
with TestRun.step("Check if all data generated by dd process is cached."):
for i in range(iterations):
dd = (
Dd()
@@ -58,15 +69,23 @@ def test_ioclass_process_name():
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_pid():
cache, core = prepare()
"""
title: Test IO classification by process id.
description: Check if data generated by process with particular id is cached.
pass_criteria:
- No kernel bug.
- IO is classified properly based on the id of the process generating IO.
"""
ioclass_id = 1
iterations = 20
dd_count = 100
dd_size = Size(4, Unit.KibiByte)
with TestRun.step("Prepare cache, core and disable udev."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Prepare dd command."):
# Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
# 'dd' command is created and is appended to 'echo' command instead of running it
dd_command = str(
@@ -77,9 +96,11 @@ def test_ioclass_pid():
.block_size(dd_size)
)
for i in range(iterations):
for _ in TestRun.iteration(range(iterations)):
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step("Prepare and load IO class config."):
output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
if output.exit_code != 0:
raise Exception(
@@ -98,7 +119,7 @@ def test_ioclass_pid():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Running dd with pid {pid}")
with TestRun.step(f"Run dd with pid {pid}."):
# pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
dd_and_pid_command = (
f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
@@ -110,6 +131,7 @@ def test_ioclass_pid():
f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
sync()
with TestRun.step("Check if data was cached properly."):
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")

View File

@@ -8,7 +8,9 @@ from itertools import permutations
import pytest
from api.cas import ioclass_config, casadm
from api.cas.ioclass_config import IoClass
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.dd import Dd
@@ -17,15 +19,23 @@ from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev
from .io_class_common import *
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import prepare, ioclass_config_path, mountpoint
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_lba():
"""Write data to random lba and check if it is cached according to range
defined in ioclass rule"""
cache, core = prepare()
"""
title: Test IO classification by lba.
description: |
Write data to random lba and check if it is cached according to the range
defined in the ioclass rule.
pass_criteria:
- No kernel bug.
- IO is classified properly based on lba range defined in config.
"""
ioclass_id = 1
min_cached_lba = 56
max_cached_lba = 200
@@ -33,7 +43,10 @@ def test_ioclass_lba():
dd_size = Size(1, Unit.Blocks512)
dd_count = 1
# Prepare ioclass config
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Prepare and load IO class config."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
@@ -41,20 +54,18 @@ def test_ioclass_lba():
rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if lbas from defined range are cached
with TestRun.step("Run IO and check if lbas from defined range are cached."):
dirty_count = 0
# '8' step is set to prevent writing cache line more than once
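# (8 sectors x 512 B = 4 KiB, i.e. one cache line, assuming the default 4 KiB cache line size)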
TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
@@ -68,17 +79,17 @@ def test_ioclass_lba():
if dirty.get_value(Unit.Blocks4096) != dirty_count:
TestRun.LOGGER.error(f"LBA {lba} not cached")
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if lba outside of defined range are not cached
with TestRun.step("Run IO and check if lba outside of defined range are not cached."):
TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.")
for i in range(iterations):
rand_lba = random.randrange(2000)
if min_cached_lba <= rand_lba <= max_cached_lba:
continue
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
@@ -95,11 +106,21 @@ def test_ioclass_lba():
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_request_size():
cache, core = prepare()
"""
title: Test IO classification by request size.
description: Check if requests with size within defined range are cached.
pass_criteria:
- No kernel bug.
- IO is classified properly based on request size range defined in config.
"""
ioclass_id = 1
iterations = 100
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
@@ -109,19 +130,16 @@ def test_ioclass_request_size():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
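The elided rule presumably brackets the cached request sizes exercised below (2 and 4 x 4 KiB blocks), e.g.:

rule="request_size:ge:8192&request_size:le:16384&done",  # assumed syntax and bounds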
with TestRun.step("Disable udev."):
Udev.disable()
# Check if requests with appropriate size are cached
TestRun.LOGGER.info(
f"Check if requests with size within defined range are cached"
)
with TestRun.step("Check if requests with size within defined range are cached."):
cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
for i in range(iterations):
cache.flush_cache()
req_size = random.choice(cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
@@ -132,12 +150,10 @@ def test_ioclass_request_size():
if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
TestRun.fail("Incorrect number of dirty blocks!")
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if requests with inappropriate size are not cached
TestRun.LOGGER.info(
f"Check if requests with size outside defined range are not cached"
)
with TestRun.step("Check if requests with size outside of defined range are not cached"):
not_cached_req_sizes = [
Size(1, Unit.Blocks4096),
Size(8, Unit.Blocks4096),
@@ -146,8 +162,7 @@ def test_ioclass_request_size():
for i in range(iterations):
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
Dd().input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
@@ -165,17 +180,23 @@ def test_ioclass_request_size():
@pytest.mark.parametrizex("filesystem", list(Filesystem) + [False])
def test_ioclass_direct(filesystem):
"""
Perform buffered/direct IO to/from files or raw block device.
Data from buffered IO should be cached.
Data from buffered IO should not be cached and if performed to/from already cached data
title: Direct IO classification.
description: Check if direct requests are properly cached.
pass_criteria:
- No kernel bug.
- Data from direct IO should be cached.
- Data from buffered IO should not be cached and if performed to/from already cached data
should cause reclassification to unclassified IO class.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = 1
io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)
with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config."):
# direct IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
@@ -186,15 +207,14 @@ def test_ioclass_direct(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
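The elided rule for this class is presumably the bare direct condition (an assumption, by analogy with the other single-condition rules in these tests):

rule="direct&done",  # assumed direct-IO condition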
fio = (
Fio().create_command()
.io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
with TestRun.step("Prepare fio command."):
fio = Fio().create_command() \
.io_engine(IoEngine.libaio) \
.size(io_size).offset(io_size) \
.read_write(ReadWrite.write) \
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
)
with TestRun.step("Prepare filesystem."):
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
@@ -204,41 +224,48 @@ def test_ioclass_direct(filesystem):
core.mount(mountpoint)
sync()
else:
TestRun.LOGGER.info("Testing on raw exported object")
TestRun.LOGGER.info("Testing on raw exported object.")
with TestRun.step(f"Run buffered writes to {'file' if filesystem else 'device'}"):
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}")
fio.run()
sync()
with TestRun.step("Check if buffered writes are not cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered writes were cached!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"):
fio.direct()
fio.run()
sync()
with TestRun.step("Check if direct writes are cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct writes was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"):
fio.remove_param("readwrite").remove_param("direct")
fio.read_write(ReadWrite.read)
fio.run()
sync()
with TestRun.step("Check if buffered reads caused reclassification."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered reads did not cause reclassification!"
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"):
fio.direct()
fio.run()
sync()
with TestRun.step("Check if direct reads are cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct reads was cached!\n"
@ -251,13 +278,22 @@ def test_ioclass_direct(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_metadata(filesystem):
"""
Perform operations on files that cause metadata update.
Determine if every such operation results in increased writes to cached metadata.
Exact values may not be tested as each file system has different metadata structure.
title: Metadata IO classification.
description: |
Determine if every operation on files that cause metadata update results in increased
writes to cached metadata.
pass_criteria:
- No kernel bug.
- Metadata is classified properly.
"""
# Exact values are not checked, as each filesystem has a different metadata structure.
test_dir_path = f"{mountpoint}/test_dir"
with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Prepare and load IO class config file."):
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# metadata IO class
ioclass_config.add_ioclass(
@ -269,21 +305,20 @@ def test_ioclass_metadata(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Create 20 test files."):
requests_to_metadata_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
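# Baseline: number of write requests to the metadata io class before any file operations.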
TestRun.LOGGER.info("Creating 20 test files")
files = []
for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}"
dd = (
Dd()
.input("/dev/urandom")
Dd().input("/dev/urandom")
.output(file_path)
.count(random.randint(5, 50))
.block_size(Size(1, Unit.MebiByte))
@ -292,27 +327,26 @@ def test_ioclass_metadata(filesystem):
dd.run()
files.append(File(file_path))
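# Each created file updates filesystem metadata (inode, directory entry),
# which should be counted as metadata writes.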
TestRun.LOGGER.info("Checking requests to metadata")
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while creating files!")
with TestRun.step("Rename all test files."):
requests_to_metadata_before = requests_to_metadata_after
TestRun.LOGGER.info("Renaming all test files")
for file in files:
file.move(f"{file.full_path}_renamed")
sync()
TestRun.LOGGER.info("Checking requests to metadata")
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while renaming files!")
with TestRun.step(f"Create directory {test_dir_path}."):
requests_to_metadata_before = requests_to_metadata_after
test_dir_path = f"{mountpoint}/test_dir"
TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
fs_utils.create_directory(path=test_dir_path)
TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
@ -320,16 +354,16 @@ def test_ioclass_metadata(filesystem):
file.move(test_dir_path)
sync()
TestRun.LOGGER.info("Checking requests to metadata")
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while moving files!")
TestRun.LOGGER.info(f"Removing {test_dir_path}")
with TestRun.step(f"Remove {test_dir_path}."):
fs_utils.remove(path=test_dir_path, force=True, recursive=True)
TestRun.LOGGER.info("Checking requests to metadata")
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
@ -342,16 +376,23 @@ def test_ioclass_metadata(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_id_as_condition(filesystem):
"""
title: IO class as a condition.
description: |
Load config in which IO class ids are used as conditions in other IO class definitions.
Check if performed IO is properly classified.
pass_criteria:
- No kernel bug.
- IO is classified properly as described in IO class config.
"""
cache, core = prepare()
Udev.disable()
base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
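# Assumption: the size conditions in the config below use 25 MiB as the boundary,
# so files of 25-50 MiB fulfill them while the smaller files written later do not.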
with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config file."):
# directory condition
ioclass_config.add_ioclass(
ioclass_id=1,
@ -402,14 +443,15 @@ def test_ioclass_id_as_condition(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
sync()
# IO fulfilling IO class 1 condition (and not IO class 2)
with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
"it is classified properly."):
# Should be classified as IO class 4
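# (io class 4 presumably references io class 1 via an io_class condition
# in the config loaded above)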
base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
@ -424,9 +466,11 @@ def test_ioclass_id_as_condition(filesystem):
if new_occupancy != base_occupancy + non_ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}")
f"Expected: {base_occupancy + non_ioclass_file_size}, "
f"actual: {new_occupancy}")
# IO fulfilling IO class 2 condition (and not IO class 1)
with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
"it is classified properly."):
# Should be classified as IO class 5
base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
(Fio().create_command()
@ -442,7 +486,8 @@ def test_ioclass_id_as_condition(filesystem):
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
# IO fulfilling IO class 1 and 2 conditions
with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if "
"it is classified properly."):
# Should be classified as IO class 5
base_occupancy = new_occupancy
(Fio().create_command()
@ -458,7 +503,8 @@ def test_ioclass_id_as_condition(filesystem):
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
# Same IO but direct
with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if "
"it is classified properly."):
# Should be classified as IO class 6
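# (io class 6 presumably combines the io_class conditions with a direct condition)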
base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
(Fio().create_command()
@ -482,12 +528,19 @@ def test_ioclass_id_as_condition(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_or(filesystem):
"""
title: IO class condition 'or'.
description: |
Load config with IO class combining 5 contradicting conditions connected by OR operator.
Check if every IO fulfilling one condition is classified properly.
pass_criteria:
- No kernel bug.
- Every IO fulfilling one condition is classified properly.
"""
with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config file."):
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
@ -499,15 +552,15 @@ def test_ioclass_conditions_or(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
fs_utils.create_directory(f"{mountpoint}/dir{i}")
sync()
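# Each of dir1-dir5 matches a different one of the directory conditions
# OR-ed together in io class 1.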
# Perform IO fulfilling each condition and check if occupancy raises
with TestRun.step("Perform IO fulfilling each condition and check if occupancy raises."):
for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
@ -531,15 +584,23 @@ def test_ioclass_conditions_or(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_and(filesystem):
"""
Load config with IO class combining 5 conditions contradicting at least one other condition
connected by AND operator.
Check if every IO fulfilling one of the conditions is not classified.
title: IO class condition 'and'.
description: |
Load config with IO class combining 5 conditions connected by AND operator,
where at least one condition contradicts another.
pass_criteria:
- No kernel bug.
- Every IO fulfilling one of the conditions is not classified.
"""
cache, core = prepare()
Udev.disable()
file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte))
with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config file."):
# contradicting conditions connected by AND operator
ioclass_config.add_ioclass(
ioclass_id=1,