Merge pull request #515 from katlapinka/io-class-renew

Rewrite IO class tests to use latest Test Framework API
Michał Mielewczyk 2020-09-08 15:43:42 +02:00 committed by GitHub
commit fea55bca42
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 1323 additions and 1173 deletions
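The bulk of the rewrite is structural: flat test bodies are split into named TestRun.step blocks, the step API of the newer Test Framework. A minimal sketch of the pattern used throughout the diff below (prepare comes from this suite's io_class_common module; the step bodies are illustrative only):

from core.test_run import TestRun
from tests.io_class.io_class_common import prepare

def test_example():
    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
    with TestRun.step("Verify an expected statistic."):
        occupancy = cache.get_occupancy()
        if occupancy.get_value() < 0:
            TestRun.LOGGER.error(f"Unexpected occupancy: {occupancy}")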

View File

@@ -3,8 +3,6 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import time
import pytest
from test_tools.disk_utils import Filesystem
@@ -12,7 +10,7 @@ from api.cas import ioclass_config, casadm
from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev, drop_caches, DropCachesMode
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from core.test_run import TestRun
@@ -29,7 +27,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached"
def test_ioclass_core_id(filesystem):
"""
title: Test for `core_id` classification rule
dsecription: |
description: |
Test if IO to core with selective allocation enabled is cached and IO to core with
selective allocation disabled is redirected to pass-through mode
pass_criteria:

View File

@@ -4,10 +4,10 @@
#
import random
from datetime import datetime
import pytest
from datetime import datetime
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.dd import Dd
@@ -15,7 +15,8 @@ from test_tools.disk_utils import Filesystem
from test_utils.filesystem.directory import Directory
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev
from .io_class_common import *
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
@pytest.mark.os_dependent
@@ -24,263 +25,99 @@ from .io_class_common import *
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_depth(filesystem):
"""
Test if directory classification works properly for deeply nested directories for read and
write operations.
title: Test IO classification by directory.
description: |
Test if directory classification works properly for deeply nested directories for read and
write operations.
pass_criteria:
- No kernel bug.
- Read and write operations to directories are classified properly.
"""
cache, core = prepare()
Udev.disable()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_dir_path = f"{mountpoint}/base_dir"
TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
fs_utils.create_directory(base_dir_path)
nested_dir_path = base_dir_path
random_depth = random.randint(40, 80)
for i in range(random_depth):
nested_dir_path += f"/dir_{i}"
TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
fs_utils.create_directory(path=nested_dir_path, parents=True)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
Udev.disable()
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step(f"Create the base directory: {base_dir_path}."):
fs_utils.create_directory(base_dir_path)
with TestRun.step(f"Create a nested directory."):
nested_dir_path = base_dir_path
random_depth = random.randint(40, 80)
for i in range(random_depth):
nested_dir_path += f"/dir_{i}"
fs_utils.create_directory(path=nested_dir_path, parents=True)
# Test classification in nested dir by reading a previously unclassified file
TestRun.LOGGER.info("Creating the first file in the nested directory")
test_file_1 = File(f"{nested_dir_path}/test_file_1")
dd = (
Dd()
.input("/dev/urandom")
.output(test_file_1.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_1.refresh_item()
with TestRun.step("Create the first file in the nested directory."):
test_file_1 = File(f"{nested_dir_path}/test_file_1")
dd = (
Dd().input("/dev/urandom")
.output(test_file_1.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_1.refresh_item()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Load IO class config."):
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
TestRun.LOGGER.info("Reading the file in the nested directory")
dd = (
Dd()
.input(test_file_1.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
with TestRun.step("Read the file in the nested directory"):
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
dd = (
Dd().input(test_file_1.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
assert new_occupancy == base_occupancy + test_file_1.size, \
"Wrong occupancy after reading file!\n" \
f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
with TestRun.step("Check occupancy after creating the file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + test_file_1.size:
TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
"Expected: {base_occupancy + test_file_1.size}, "
f"actual: {new_occupancy}")
# Test classification in nested dir by creating a file
base_occupancy = new_occupancy
TestRun.LOGGER.info("Creating the second file in the nested directory")
test_file_2 = File(f"{nested_dir_path}/test_file_2")
dd = (
Dd()
.input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_2.refresh_item()
with TestRun.step("Create the second file in the nested directory"):
base_occupancy = new_occupancy
test_file_2 = File(f"{nested_dir_path}/test_file_2")
dd = (
Dd().input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_2.refresh_item()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
assert new_occupancy == base_occupancy + test_file_2.size, \
"Wrong occupancy after creating file!\n" \
f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_dir_operations(filesystem):
"""
Test if directory classification works properly after directory operations like move or rename.
The operations themselves should not cause reclassification but IO after those operations
should be reclassified to proper IO class.
Directory classification may work with a delay after loading IO class configuration or
move/rename operations. Test checks if maximum delay is not exceeded.
"""
def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int):
start_time = datetime.now()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
dd_blocks = 10
dd_size = Size(dd_blocks, Unit.Blocks4096)
file_counter = 0
unclassified_files = []
time_from_start = datetime.now() - start_time
while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
occupancy_before = occupancy_after
file_path = f"{directory.full_path}/test_file_{file_counter}"
file_counter += 1
time_from_start = datetime.now() - start_time
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
if occupancy_after - occupancy_before < dd_size:
unclassified_files.append(file_path)
if len(unclassified_files) == file_counter:
pytest.xfail("No files were properly classified within max delay time!")
if len(unclassified_files):
TestRun.LOGGER.info("Rewriting unclassified test files...")
for file_path in unclassified_files:
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
def read_files_with_reclassification_check(
target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
start_time = datetime.now()
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
unclassified_files = []
for file in [item for item in directory.ls() if isinstance(item, File)]:
target_occupancy_before = target_occupancy_after
source_occupancy_before = source_occupancy_after
time_from_start = datetime.now() - start_time
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
if target_occupancy_after < target_occupancy_before:
pytest.xfail("Target IO class occupancy lowered!")
elif target_occupancy_after - target_occupancy_before < file.size:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Target IO class occupancy not changed properly!")
if source_occupancy_after >= source_occupancy_before:
if file not in unclassified_files:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Source IO class occupancy not changed properly!")
if len(unclassified_files):
TestRun.LOGGER.info("Rereading unclassified test files...")
sync()
drop_caches(DropCachesMode.ALL)
for file in unclassified_files:
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
cache, core = prepare()
Udev.disable()
proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
ioclass_id_1 = proper_ids[0]
classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
ioclass_id_2 = proper_ids[1]
classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
# directory IO classes
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
non_classified_dir_path = f"{mountpoint}/non_classified"
TestRun.LOGGER.info(
f"Creating a non-classified directory: {non_classified_dir_path}")
dir_1 = Directory.create_directory(path=non_classified_dir_path)
TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
dir_1.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1)
TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
dir_2.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
dir_2.move(destination=mountpoint)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
dir_1.move(destination=classified_dir_path_2)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
dir_1.move(destination=non_classified_dir_path)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
with TestRun.step("Check occupancy after creating the second file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + test_file_2.size:
TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
f"Expected: {base_occupancy + test_file_2.size}, "
f"actual: {new_occupancy}")
@pytest.mark.os_dependent
@@ -289,109 +126,309 @@ def test_ioclass_directory_dir_operations(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_file_operations(filesystem):
"""
Test if directory classification works properly after file operations like move or rename.
The operations themselves should not cause reclassification but IO after those operations
should be reclassified to proper IO class.
title: Test IO classification by file operations.
description: |
Test if directory classification works properly after file operations like move or rename.
pass_criteria:
- No kernel bug.
- The operations themselves should not cause reclassification but IO after those
operations should be reclassified to proper IO class.
"""
def check_occupancy(expected: Size, actual: Size):
if expected != actual:
pytest.xfail("Occupancy check failed!\n"
f"Expected: {expected}, actual: {actual}")
cache, core = prepare()
Udev.disable()
test_dir_path = f"{mountpoint}/test_dir"
nested_dir_path = f"{test_dir_path}/nested_dir"
dd_blocks = random.randint(5, 50)
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
Udev.disable()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
with TestRun.step("Create and load IO class config file."):
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
Directory.create_directory(path=nested_dir_path, parents=True)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
TestRun.LOGGER.info("Creating test file")
classified_before = cache.get_io_class_statistics(
with TestRun.step(f"Create directory {nested_dir_path}."):
Directory.create_directory(path=nested_dir_path, parents=True)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Create test file."):
classified_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
sync()
drop_caches(DropCachesMode.ALL)
test_file = File(file_path).refresh_item()
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
with TestRun.step("Move test file out of classified directory."):
classified_before = classified_after
non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
test_file.move(destination=mountpoint)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
with TestRun.step("Read test file."):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before + test_file.size, non_classified_after)
with TestRun.step(f"Move test file to {nested_dir_path}."):
classified_before = classified_after
non_classified_before = non_classified_after
test_file.move(destination=nested_dir_path)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
with TestRun.step("Read test file."):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
with TestRun.step("Check non-classified occupancy."):
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before - test_file.size, non_classified_after)
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_directory_dir_operations(filesystem):
"""
title: Test IO classification by directory operations.
description: |
Test if directory classification works properly after directory operations like move or
rename.
pass_criteria:
- No kernel bug.
- The operations themselves should not cause reclassification but IO after those
operations should be reclassified to proper IO class.
- Directory classification may work with a delay after loading IO class configuration or
move/rename operations. Test checks if maximum delay is not exceeded.
"""
non_classified_dir_path = f"{mountpoint}/non_classified"
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
Udev.disable()
with TestRun.step("Create and load IO class config file."):
proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
ioclass_id_1 = proper_ids[0]
classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
ioclass_id_2 = proper_ids[1]
classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
# directory IO classes
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
with TestRun.step(f"Create a non-classified directory: {non_classified_dir_path}."):
dir_1 = Directory.create_directory(path=non_classified_dir_path)
with TestRun.step(f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
dir_1.move(destination=classified_dir_path_1)
with TestRun.step("Create files with delay check."):
create_files_with_classification_delay_check(
cache, directory=dir_1, ioclass_id=ioclass_id_1)
with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
with TestRun.step("Create files with delay check."):
create_files_with_classification_delay_check(cache, directory=dir_2,
ioclass_id=ioclass_id_2)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
dir_2.move(destination=classified_dir_path_1)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=ioclass_id_1,
source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
dir_2.move(destination=mountpoint)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
with TestRun.step(f"Remove {classified_dir_path_2}."):
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step(f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
dir_1.move(destination=classified_dir_path_2)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=ioclass_id_2,
source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
dir_1.move(destination=non_classified_dir_path)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int):
start_time = datetime.now()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
sync()
drop_caches(DropCachesMode.ALL)
test_file = File(file_path).refresh_item()
dd_blocks = 10
dd_size = Size(dd_blocks, Unit.Blocks4096)
file_counter = 0
unclassified_files = []
time_from_start = datetime.now() - start_time
while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
occupancy_before = occupancy_after
file_path = f"{directory.full_path}/test_file_{file_counter}"
file_counter += 1
time_from_start = datetime.now() - start_time
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
if occupancy_after - occupancy_before < dd_size:
unclassified_files.append(file_path)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
if len(unclassified_files) == file_counter:
pytest.xfail("No files were properly classified within max delay time!")
TestRun.LOGGER.info("Moving test file out of classified directory")
classified_before = classified_after
non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
test_file.move(destination=mountpoint)
sync()
drop_caches(DropCachesMode.ALL)
if len(unclassified_files):
TestRun.LOGGER.info("Rewriting unclassified test files...")
for file_path in unclassified_files:
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
directory: Directory, with_delay: bool):
start_time = datetime.now()
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
unclassified_files = []
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before + test_file.size, non_classified_after)
for file in [item for item in directory.ls() if isinstance(item, File)]:
target_occupancy_before = target_occupancy_after
source_occupancy_before = source_occupancy_after
time_from_start = datetime.now() - start_time
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
if target_occupancy_after < target_occupancy_before:
pytest.xfail("Target IO class occupancy lowered!")
elif target_occupancy_after - target_occupancy_before < file.size:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Target IO class occupancy not changed properly!")
if source_occupancy_after >= source_occupancy_before:
if file not in unclassified_files:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Source IO class occupancy not changed properly!")
TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
classified_before = classified_after
non_classified_before = non_classified_after
test_file.move(destination=nested_dir_path)
sync()
drop_caches(DropCachesMode.ALL)
if len(unclassified_files):
TestRun.LOGGER.info("Rereading unclassified test files...")
sync()
drop_caches(DropCachesMode.ALL)
for file in unclassified_files:
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
check_occupancy(non_classified_before - test_file.size, non_classified_after)
def check_occupancy(expected: Size, actual: Size):
if expected != actual:
pytest.xfail("Occupancy check failed!\n"
f"Expected: {expected}, actual: {actual}")

View File

@@ -4,236 +4,267 @@
#
import random
import pytest
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches
from .io_class_common import *
from test_utils.os_utils import sync, DropCachesMode, drop_caches
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension():
cache, core = prepare()
"""
title: Test IO classification by file extension.
description: Test if file extension classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file extension.
"""
iterations = 50
ioclass_id = 1
tested_extension = "tmp"
wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Check if file with proper extension is cached
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for i in range(iterations):
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing to file with no cached extension.")
for ext in wrong_extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step(f"Write to file with cached extension and check if it is properly cached."):
for i in range(iterations):
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step(f"Write to file with not cached extension and check if it is not cached."):
for ext in wrong_extensions:
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_name_prefix():
cache, core = prepare()
"""
title: Test IO classification by file name prefix.
description: Test if file name prefix classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file name prefix.
"""
ioclass_id = 1
cached_files = ["test", "test.txt", "test1", "test1.txt"]
not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
# Avoid caching anything else than files with specified prefix
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=255,
allocation=False,
rule=f"unclassified",
ioclass_config_path=ioclass_config_path,
)
# Enables file with specified prefix to be cached
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_name_prefix:test&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Create and load IO class config."):
ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
# Avoid caching anything else than files with specified prefix
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=255,
allocation=False,
rule=f"unclassified",
ioclass_config_path=ioclass_config_path,
)
# Enables file with specified prefix to be cached
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_name_prefix:test&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
previous_occupancy = cache.get_occupancy()
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
previous_occupancy = cache.get_occupancy()
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
current_occupancy = cache.get_occupancy()
if previous_occupancy.get_value() > current_occupancy.get_value():
TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
f"than before ({str(previous_occupancy)}).")
current_occupancy = cache.get_occupancy()
if previous_occupancy.get_value() > current_occupancy.get_value():
TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
f"than before ({str(previous_occupancy)}).")
# Filesystem creation caused metadata IO which is not supposed
# to be cached
# Filesystem creation caused metadata IO which is not supposed
# to be cached
# Check if files with proper prefix are cached
TestRun.LOGGER.info(f"Writing files which are supposed to be cached.")
for f in cached_files:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
current_occupancy = cache.get_occupancy()
expected_occupancy = previous_occupancy + (dd_size * dd_count)
if current_occupancy != expected_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(expected_occupancy)}, actual: {str(current_occupancy)})")
previous_occupancy = current_occupancy
with TestRun.step(f"Write files which are supposed to be cached and check "
f"if they are cached."):
for f in cached_files:
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
current_occupancy = cache.get_occupancy()
expected_occupancy = previous_occupancy + (dd_size * dd_count)
if current_occupancy != expected_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(expected_occupancy)}, "
f"actual: {str(current_occupancy)})")
previous_occupancy = current_occupancy
cache.flush_cache()
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing files which are not supposed to be cached.")
for f in not_cached_files:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
current_occupancy = cache.get_occupancy()
if current_occupancy != previous_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(previous_occupancy)}, actual: {str(current_occupancy)})")
with TestRun.step(f"Write files which are not supposed to be cached and check if "
f"they are not cached."):
for f in not_cached_files:
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
current_occupancy = cache.get_occupancy()
if current_occupancy != previous_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(previous_occupancy)}, "
f"actual: {str(current_occupancy)})")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension_preexisting_filesystem():
"""Create files on filesystem, add device with filesystem as a core,
write data to files and check if they are cached properly"""
cache, core = prepare()
"""
title: Test IO classification by file extension with preexisting filesystem on core device.
description: |
Test if file extension classification works properly when there is an existing
filesystem on core device.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file extension
after mounting core device.
"""
ioclass_id = 1
extensions = ["tmp", "tm", "out", "txt", "log", "123"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
TestRun.LOGGER.info(f"Preparing files on raw block device")
casadm.remove_core(cache.cache_id, core_id=core.core_id)
core.core_device.create_filesystem(Filesystem.ext3)
core.core_device.mount(mountpoint)
with TestRun.step("Prepare cache and core devices."):
cache, core = prepare()
# Prepare files
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
with TestRun.step(f"Prepare files on raw block device."):
casadm.remove_core(cache.cache_id, core_id=core.core_id)
core.core_device.create_filesystem(Filesystem.ext3)
core.core_device.mount(mountpoint)
for ext in extensions:
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
core.core_device.unmount()
with TestRun.step("Create IO class config."):
rule = "|".join([f"extension:{ext}" for ext in extensions])
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
)
dd.run()
core.core_device.unmount()
# Prepare ioclass config
rule = "|".join([f"extension:{ext}" for ext in extensions])
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
)
with TestRun.step(f"Add device with preexisting data as a core."):
core = casadm.add_core(cache, core_dev=core.core_device)
# Prepare cache for test
TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
core = casadm.add_core(cache, core_dev=core.core_device)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Load IO class config."):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
core.mount(mountpoint)
cache.flush_cache()
with TestRun.step("Mount core and flush cache."):
core.mount(mountpoint)
cache.flush_cache()
# Check if files with proper extensions are cached
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
with TestRun.step(f"Write to file with cached extension and check if they are cached."):
for ext in extensions:
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_offset():
cache, core = prepare()
"""
title: Test IO classification by file offset.
description: Test if file offset classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file offset.
"""
ioclass_id = 1
iterations = 100
dd_size = Size(4, Unit.KibiByte)
@@ -241,66 +272,70 @@ def test_ioclass_file_offset():
min_cached_offset = 16384
max_cached_offset = 65536
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
# nor last sector
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int(
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
TestRun.LOGGER.info(f"Writing to file within cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
with TestRun.step("Create and load IO class config file."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
ioclass_config_path=ioclass_config_path,
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 1:
TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
with TestRun.step("Flush cache."):
cache.flush_cache()
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
with TestRun.step("Write to file within cached offset range and check if it is cached."):
# Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
# nor last sector
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int(
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Inappropriately cached offset: {file_offset}")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 1:
TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
cache.flush_cache()
with TestRun.step(
"Write to file outside of cached offset range and check if it is not cached."):
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Inappropriately cached offset: {file_offset}")
@pytest.mark.os_dependent
@@ -309,53 +344,44 @@ def test_ioclass_file_offset():
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_file_size(filesystem):
"""
File size IO class rules are configured in a way that each tested file size is unambiguously
classified.
Firstly write operations are tested (creation of files), secondly read operations.
title: Test IO classification by file size.
description: Test if file size classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file size.
"""
def load_file_size_io_classes():
# IO class order intentional, do not change
base_size_bytes = int(base_size.get_value(Unit.Byte))
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:lt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule=f"file_size:le:{int(base_size_bytes / 2)}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"file_size:ge:{2 * base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
def create_files_and_check_classification():
TestRun.LOGGER.info("Creating files belonging to different IO classes "
"(classification by writes).")
# File size IO class rules are configured in a way that each tested file size is unambiguously
# classified.
# Firstly write operations are tested (creation of files), secondly read operations.
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Create files belonging to different IO classes (classification by writes)."):
test_files = []
for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
@@ -371,27 +397,7 @@ def test_ioclass_file_size(filesystem):
sync()
drop_caches(DropCachesMode.ALL)
def reclassify_files():
TestRun.LOGGER.info("Reading files belonging to different IO classes "
"(classification by reads).")
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
def remove_files_classification():
TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
with TestRun.step("Move all files to 'unclassified' IO class."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
@@ -416,8 +422,7 @@ def test_ioclass_file_size(filesystem):
sync()
drop_caches(DropCachesMode.ALL)
def restore_classification_config():
TestRun.LOGGER.info("Restoring IO class configuration")
with TestRun.step("Restore IO class configuration."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
@@ -429,39 +434,66 @@ def test_ioclass_file_size(filesystem):
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
load_file_size_io_classes()
load_file_size_io_classes(cache, base_size)
cache, core = prepare()
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
with TestRun.step("Read files belonging to different IO classes (classification by reads)."):
# CAS device should be unmounted and mounted again because data can sometimes still be
# cached by the OS cache, so occupancy statistics would not match
core.unmount()
core.mount(mountpoint)
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
load_file_size_io_classes()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
test_files = []
create_files_and_check_classification()
remove_files_classification()
restore_classification_config()
# CAS device should be unmounted and mounted again because data can sometimes still be
# cached by the OS cache, so occupancy statistics would not match
core.unmount()
core.mount(mountpoint)
reclassify_files()
def load_file_size_io_classes(cache, base_size):
# IO class order intentional, do not change
base_size_bytes = int(base_size.get_value(Unit.Byte))
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:lt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule=f"file_size:le:{int(base_size_bytes / 2)}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"file_size:ge:{2 * base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
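The size_to_class mapping together with the 'order intentional' comment implies that when several rules match, the highest-numbered class wins (base_size * 2 satisfies both the gt and ge rules yet is expected in class 5). A pure-Python model of that expectation, as an editorial sketch rather than the actual CAS classifier:

def expected_class(size_bytes, base_bytes):
    # Mirror of the five rules above; last matching rule (highest id) wins.
    rules = [
        (1, size_bytes == base_bytes),
        (2, size_bytes < base_bytes),
        (3, size_bytes > base_bytes),
        (4, size_bytes <= base_bytes // 2),
        (5, size_bytes >= 2 * base_bytes),
    ]
    matching = [class_id for class_id, hit in rules if hit]
    return max(matching) if matching else 0  # 0 = unclassified

base = 200 * 4096
assert expected_class(base // 2, base) == 4        # matches rules 2 and 4
assert expected_class(base * 2, base) == 5         # matches rules 3 and 5
assert expected_class(base * 2 - 4096, base) == 3  # matches rule 3 only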

View File

@@ -7,110 +7,132 @@ import time
import pytest
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev
from .io_class_common import *
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import prepare, ioclass_config_path
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_process_name():
"""Check if data generated by process with particular name is cached"""
cache, core = prepare()
"""
title: Test IO classification by process name.
description: Check if data generated by process with particular name is cached.
pass_criteria:
- No kernel bug.
- IO is classified properly based on the name of the process generating IO.
"""
ioclass_id = 1
dd_size = Size(4, Unit.KibiByte)
dd_count = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"process_name:dd&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
cache.flush_cache()
Udev.disable()
TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.")
for i in range(iterations):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
with TestRun.step("Create and load IO class config file."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"process_name:dd&done",
ioclass_config_path=ioclass_config_path,
)
dd.run()
sync()
time.sleep(0.1)
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Flush cache and disable udev."):
cache.flush_cache()
Udev.disable()
with TestRun.step("Check if all data generated by dd process is cached."):
for i in range(iterations):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
)
dd.run()
sync()
time.sleep(0.1)
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_pid():
cache, core = prepare()
"""
title: Test IO classification by process id.
description: Check if data generated by process with particular id is cached.
pass_criteria:
- No kernel bug.
- IO is classified properly based on the id of the process generating IO.
"""
ioclass_id = 1
iterations = 20
dd_count = 100
dd_size = Size(4, Unit.KibiByte)
Udev.disable()
with TestRun.step("Prepare cache, core and disable udev."):
cache, core = prepare()
Udev.disable()
# Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
# 'dd' command is created and is appended to 'echo' command instead of running it
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
)
for i in range(iterations):
cache.flush_cache()
output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
if output.exit_code != 0:
raise Exception(
f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
)
# Few pids might be used by system during test preparation
pid = int(output.stdout) + 50
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path,
with TestRun.step("Prepare dd command."):
# Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
# 'dd' command is created and is appended to 'echo' command instead of running it
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Running dd with pid {pid}")
# pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
dd_and_pid_command = (
f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
)
output = TestRun.executor.run(dd_and_pid_command)
if output.exit_code != 0:
raise Exception(
f"Failed to run dd with target pid. "
f"stdout: {output.stdout} \n stderr :{output.stderr}"
for _ in TestRun.iteration(range(iterations)):
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step("Prepare and load IO class config."):
output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
if output.exit_code != 0:
raise Exception(
f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
)
# Few pids might be used by system during test preparation
pid = int(output.stdout) + 50
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path,
)
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
ioclass_config.remove_ioclass(ioclass_id)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Run dd with pid {pid}."):
# pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
dd_and_pid_command = (
f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
)
output = TestRun.executor.run(dd_and_pid_command)
if output.exit_code != 0:
raise Exception(
f"Failed to run dd with target pid. "
f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
sync()
with TestRun.step("Check if data was cached properly."):
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
ioclass_config.remove_ioclass(ioclass_id)
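The pid pinning hinges on /proc/sys/kernel/ns_last_pid holding the last allocated pid: writing pid - 1 makes the very next process spawned receive pid, which is why dd is chained into one compound command and why 50 pids of headroom are added over the value read during preparation. Condensed from the code above (dd_command as prepared earlier):

pid = int(TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid").stdout) + 50
# Writing pid - 1 means the next fork() -- the dd below -- gets exactly `pid`.
# Inherently racy, hence the headroom and the single compound shell command.
TestRun.executor.run(f"echo {pid - 1} > /proc/sys/kernel/ns_last_pid && {dd_command}")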

File diff suppressed because it is too large Load Diff