Rewrite IO class tests to use latest Test Framework API

Signed-off-by: Katarzyna Lapinska <katarzyna.lapinska@intel.com>
Katarzyna Lapinska 2020-08-17 09:32:44 +02:00
parent f9f3ce100b
commit 6ac104c3b8
5 changed files with 1323 additions and 1173 deletions
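
The rewrite follows one pattern throughout: flat test bodies that logged progress via TestRun.LOGGER.info(...) become step-based tests in which each phase is wrapped in a TestRun.step(...) context manager, docstrings gain title/description/pass_criteria metadata, and shared helpers move from nested functions to module level. A minimal before/after sketch of the pattern (the test bodies here are hypothetical illustrations; TestRun, TestRun.step and the docstring layout are taken from the diffs below):

    # Hypothetical minimal example of the migration pattern used in this commit.
    from core.test_run import TestRun


    def test_example_old_style():
        # Old style: one flat body, progress reported with ad-hoc log lines.
        TestRun.LOGGER.info("Preparing cache and core")
        ...  # setup and assertions inline


    def test_example_new_style():
        """
            title: Example test rewritten to the latest Test Framework API.
            description: Each logical phase runs inside a named TestRun.step().
            pass_criteria:
              - No kernel bug.
        """
        with TestRun.step("Prepare cache and core."):
            ...  # setup code for this phase
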

==== File 1 of 5 ====

@@ -3,8 +3,6 @@
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
-import time
-
 import pytest
 from test_tools.disk_utils import Filesystem
@@ -12,7 +10,7 @@ from api.cas import ioclass_config, casadm
 from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
-from test_utils.os_utils import sync, Udev, drop_caches, DropCachesMode
+from test_utils.os_utils import sync, Udev, drop_caches
 from test_utils.size import Unit, Size
 from core.test_run import TestRun
@@ -29,7 +27,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached"
 def test_ioclass_core_id(filesystem):
     """
         title: Test for `core_id` classification rule
-        dsecription: |
+        description: |
           Test if IO to core with selective allocation enabled is cached and IO to core with
           selective allocation disabled is redirected to pass-through mode
         pass_criteria:

==== File 2 of 5 ====

@@ -4,10 +4,10 @@
 #
 import random
-from datetime import datetime
 
 import pytest
+from datetime import datetime
+
+from api.cas import ioclass_config, casadm
+from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.dd import Dd
@@ -15,7 +15,8 @@ from test_tools.disk_utils import Filesystem
 from test_utils.filesystem.directory import Directory
 from test_utils.filesystem.file import File
 from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev
-from .io_class_common import *
+from test_utils.size import Size, Unit
+from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
 
 
 @pytest.mark.os_dependent
@@ -24,263 +25,99 @@ from .io_class_common import *
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_depth(filesystem):
     """
-    Test if directory classification works properly for deeply nested directories for read and
-    write operations.
+        title: Test IO classification by directory.
+        description: |
+          Test if directory classification works properly for deeply nested directories for read and
+          write operations.
+        pass_criteria:
+          - No kernel bug.
+          - Read and write operations to directories are classified properly.
     """
-    cache, core = prepare()
-    Udev.disable()
-
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(filesystem)
-    core.mount(mountpoint)
-    sync()
-
     base_dir_path = f"{mountpoint}/base_dir"
-    TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
-    fs_utils.create_directory(base_dir_path)
 
-    nested_dir_path = base_dir_path
-    random_depth = random.randint(40, 80)
-    for i in range(random_depth):
-        nested_dir_path += f"/dir_{i}"
-    TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
-    fs_utils.create_directory(path=nested_dir_path, parents=True)
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
+                      f"at {mountpoint}."):
+        core.create_filesystem(filesystem)
+        core.mount(mountpoint)
+        sync()
+
+    with TestRun.step(f"Create the base directory: {base_dir_path}."):
+        fs_utils.create_directory(base_dir_path)
+
+    with TestRun.step(f"Create a nested directory."):
+        nested_dir_path = base_dir_path
+        random_depth = random.randint(40, 80)
+        for i in range(random_depth):
+            nested_dir_path += f"/dir_{i}"
+        fs_utils.create_directory(path=nested_dir_path, parents=True)
 
     # Test classification in nested dir by reading a previously unclassified file
-    TestRun.LOGGER.info("Creating the first file in the nested directory")
-    test_file_1 = File(f"{nested_dir_path}/test_file_1")
-    dd = (
-        Dd()
-        .input("/dev/urandom")
-        .output(test_file_1.full_path)
-        .count(random.randint(1, 200))
-        .block_size(Size(1, Unit.MebiByte))
-    )
-    dd.run()
-    sync()
-    drop_caches(DropCachesMode.ALL)
-    test_file_1.refresh_item()
+    with TestRun.step("Create the first file in the nested directory."):
+        test_file_1 = File(f"{nested_dir_path}/test_file_1")
+        dd = (
+            Dd().input("/dev/urandom")
+                .output(test_file_1.full_path)
+                .count(random.randint(1, 200))
+                .block_size(Size(1, Unit.MebiByte))
+        )
+        dd.run()
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        test_file_1.refresh_item()
 
-    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
-    # directory IO class
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{base_dir_path}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    with TestRun.step("Load IO class config."):
+        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
+        # directory IO class
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{base_dir_path}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    TestRun.LOGGER.info("Reading the file in the nested directory")
-    dd = (
-        Dd()
-        .input(test_file_1.full_path)
-        .output("/dev/null")
-        .block_size(Size(1, Unit.MebiByte))
-    )
-    dd.run()
+    with TestRun.step("Read the file in the nested directory"):
+        base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
+        dd = (
+            Dd().input(test_file_1.full_path)
+                .output("/dev/null")
+                .block_size(Size(1, Unit.MebiByte))
+        )
+        dd.run()
 
-    new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    assert new_occupancy == base_occupancy + test_file_1.size, \
-        "Wrong occupancy after reading file!\n" \
-        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
+    with TestRun.step("Check occupancy after reading the file."):
+        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
+        if new_occupancy != base_occupancy + test_file_1.size:
+            TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
+                                 f"Expected: {base_occupancy + test_file_1.size}, "
+                                 f"actual: {new_occupancy}")
 
     # Test classification in nested dir by creating a file
-    base_occupancy = new_occupancy
-    TestRun.LOGGER.info("Creating the second file in the nested directory")
-    test_file_2 = File(f"{nested_dir_path}/test_file_2")
-    dd = (
-        Dd()
-        .input("/dev/urandom")
-        .output(test_file_2.full_path)
-        .count(random.randint(1, 200))
-        .block_size(Size(1, Unit.MebiByte))
-    )
-    dd.run()
-    sync()
-    drop_caches(DropCachesMode.ALL)
-    test_file_2.refresh_item()
+    with TestRun.step("Create the second file in the nested directory"):
+        base_occupancy = new_occupancy
+        test_file_2 = File(f"{nested_dir_path}/test_file_2")
+        dd = (
+            Dd().input("/dev/urandom")
+                .output(test_file_2.full_path)
+                .count(random.randint(1, 200))
+                .block_size(Size(1, Unit.MebiByte))
+        )
+        dd.run()
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        test_file_2.refresh_item()
 
-    new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    assert new_occupancy == base_occupancy + test_file_2.size, \
-        "Wrong occupancy after creating file!\n" \
-        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
-
-
-@pytest.mark.os_dependent
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("filesystem", Filesystem)
-def test_ioclass_directory_dir_operations(filesystem):
-    """
-    Test if directory classification works properly after directory operations like move or rename.
-    The operations themselves should not cause reclassification but IO after those operations
-    should be reclassified to proper IO class.
-    Directory classification may work with a delay after loading IO class configuration or
-    move/rename operations. Test checks if maximum delay is not exceeded.
-    """
-    def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int):
-        start_time = datetime.now()
-        occupancy_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
-        dd_blocks = 10
-        dd_size = Size(dd_blocks, Unit.Blocks4096)
-        file_counter = 0
-        unclassified_files = []
-        time_from_start = datetime.now() - start_time
-        while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
-            occupancy_before = occupancy_after
-            file_path = f"{directory.full_path}/test_file_{file_counter}"
-            file_counter += 1
-            time_from_start = datetime.now() - start_time
-            (Dd().input("/dev/zero").output(file_path).oflag("sync")
-             .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
-            occupancy_after = cache.get_io_class_statistics(
-                io_class_id=ioclass_id).usage_stats.occupancy
-            if occupancy_after - occupancy_before < dd_size:
-                unclassified_files.append(file_path)
-        if len(unclassified_files) == file_counter:
-            pytest.xfail("No files were properly classified within max delay time!")
-        if len(unclassified_files):
-            TestRun.LOGGER.info("Rewriting unclassified test files...")
-            for file_path in unclassified_files:
-                (Dd().input("/dev/zero").output(file_path).oflag("sync")
-                 .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
-
-    def read_files_with_reclassification_check(
-            target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
-        start_time = datetime.now()
-        target_occupancy_after = cache.get_io_class_statistics(
-            io_class_id=target_ioclass_id).usage_stats.occupancy
-        source_occupancy_after = cache.get_io_class_statistics(
-            io_class_id=source_ioclass_id).usage_stats.occupancy
-        unclassified_files = []
-        for file in [item for item in directory.ls() if isinstance(item, File)]:
-            target_occupancy_before = target_occupancy_after
-            source_occupancy_before = source_occupancy_after
-            time_from_start = datetime.now() - start_time
-            (Dd().input(file.full_path).output("/dev/null")
-             .block_size(Size(1, Unit.Blocks4096)).run())
-            target_occupancy_after = cache.get_io_class_statistics(
-                io_class_id=target_ioclass_id).usage_stats.occupancy
-            source_occupancy_after = cache.get_io_class_statistics(
-                io_class_id=source_ioclass_id).usage_stats.occupancy
-            if target_occupancy_after < target_occupancy_before:
-                pytest.xfail("Target IO class occupancy lowered!")
-            elif target_occupancy_after - target_occupancy_before < file.size:
-                unclassified_files.append(file)
-                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
-                    continue
-                pytest.xfail("Target IO class occupancy not changed properly!")
-            if source_occupancy_after >= source_occupancy_before:
-                if file not in unclassified_files:
-                    unclassified_files.append(file)
-                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
-                    continue
-                pytest.xfail("Source IO class occupancy not changed properly!")
-        if len(unclassified_files):
-            TestRun.LOGGER.info("Rereading unclassified test files...")
-            sync()
-            drop_caches(DropCachesMode.ALL)
-            for file in unclassified_files:
-                (Dd().input(file.full_path).output("/dev/null")
-                 .block_size(Size(1, Unit.Blocks4096)).run())
-
-    cache, core = prepare()
-    Udev.disable()
-
-    proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
-    ioclass_id_1 = proper_ids[0]
-    classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
-    ioclass_id_2 = proper_ids[1]
-    classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
-    # directory IO classes
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id_1,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{classified_dir_path_1}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id_2,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{classified_dir_path_2}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(fs_type=filesystem)
-    core.mount(mount_point=mountpoint)
-    sync()
-
-    non_classified_dir_path = f"{mountpoint}/non_classified"
-    TestRun.LOGGER.info(
-        f"Creating a non-classified directory: {non_classified_dir_path}")
-    dir_1 = Directory.create_directory(path=non_classified_dir_path)
-
-    TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
-    dir_1.move(destination=classified_dir_path_1)
-
-    TestRun.LOGGER.info("Creating files with delay check")
-    create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1)
-
-    TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
-    dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
-
-    TestRun.LOGGER.info("Creating files with delay check")
-    create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
-    dir_2.move(destination=classified_dir_path_1)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2,
-        directory=dir_2, with_delay=False)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
-    dir_2.move(destination=mountpoint)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
-        directory=dir_2, with_delay=False)
-
-    TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
-    fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
-    dir_1.move(destination=classified_dir_path_2)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1,
-        directory=dir_1, with_delay=True)
-
-    TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
-    dir_1.move(destination=non_classified_dir_path)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
-        directory=dir_1, with_delay=True)
+    with TestRun.step("Check occupancy after creating the second file."):
+        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
+        if new_occupancy != base_occupancy + test_file_2.size:
+            TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
+                                 f"Expected: {base_occupancy + test_file_2.size}, "
+                                 f"actual: {new_occupancy}")
 
 
 @pytest.mark.os_dependent
@@ -289,109 +126,309 @@ def test_ioclass_directory_dir_operations(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_file_operations(filesystem):
     """
-    Test if directory classification works properly after file operations like move or rename.
-    The operations themselves should not cause reclassification but IO after those operations
-    should be reclassified to proper IO class.
+        title: Test IO classification by file operations.
+        description: |
+          Test if directory classification works properly after file operations like move or rename.
+        pass_criteria:
+          - No kernel bug.
+          - The operations themselves should not cause reclassification but IO after those
+            operations should be reclassified to proper IO class.
     """
-    def check_occupancy(expected: Size, actual: Size):
-        if expected != actual:
-            pytest.xfail("Occupancy check failed!\n"
-                         f"Expected: {expected}, actual: {actual}")
-
-    cache, core = prepare()
-    Udev.disable()
     test_dir_path = f"{mountpoint}/test_dir"
     nested_dir_path = f"{test_dir_path}/nested_dir"
-
     dd_blocks = random.randint(5, 50)
-    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
-    # directory IO class
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{test_dir_path}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(fs_type=filesystem)
-    core.mount(mount_point=mountpoint)
-    sync()
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+        Udev.disable()
 
-    TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
-    Directory.create_directory(path=nested_dir_path, parents=True)
-    sync()
-    drop_caches(DropCachesMode.ALL)
+    with TestRun.step("Create and load IO class config file."):
+        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
+        # directory IO class
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{test_dir_path}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info("Creating test file")
-    classified_before = cache.get_io_class_statistics(
-        io_class_id=ioclass_id).usage_stats.occupancy
-    file_path = f"{test_dir_path}/test_file"
-    (Dd().input("/dev/urandom").output(file_path).oflag("sync")
-     .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
-    sync()
-    drop_caches(DropCachesMode.ALL)
-    test_file = File(file_path).refresh_item()
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mounting {core.system_path} at {mountpoint}."):
+        core.create_filesystem(fs_type=filesystem)
+        core.mount(mount_point=mountpoint)
+        sync()
 
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(
-        io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before + test_file.size, classified_after)
+    with TestRun.step(f"Create directory {nested_dir_path}."):
+        Directory.create_directory(path=nested_dir_path, parents=True)
+        sync()
+        drop_caches(DropCachesMode.ALL)
 
-    TestRun.LOGGER.info("Moving test file out of classified directory")
-    classified_before = classified_after
-    non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    test_file.move(destination=mountpoint)
-    sync()
-    drop_caches(DropCachesMode.ALL)
+    with TestRun.step("Create test file."):
+        classified_before = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        file_path = f"{test_dir_path}/test_file"
+        (Dd().input("/dev/urandom").output(file_path).oflag("sync")
+         .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        test_file = File(file_path).refresh_item()
 
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before, non_classified_after)
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before + test_file.size, classified_after)
 
-    TestRun.LOGGER.info("Reading test file")
-    classified_before = classified_after
-    non_classified_before = non_classified_after
-    (Dd().input(test_file.full_path).output("/dev/null")
-     .block_size(Size(1, Unit.MebiByte)).run())
+    with TestRun.step("Move test file out of classified directory."):
+        classified_before = classified_after
+        non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        test_file.move(destination=mountpoint)
+        sync()
+        drop_caches(DropCachesMode.ALL)
 
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before - test_file.size, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before + test_file.size, non_classified_after)
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before, classified_after)
+        TestRun.LOGGER.info("Checking non-classified occupancy")
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before, non_classified_after)
 
-    TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
-    classified_before = classified_after
-    non_classified_before = non_classified_after
-    test_file.move(destination=nested_dir_path)
-    sync()
-    drop_caches(DropCachesMode.ALL)
+    with TestRun.step("Read test file."):
+        classified_before = classified_after
+        non_classified_before = non_classified_after
+        (Dd().input(test_file.full_path).output("/dev/null")
+         .block_size(Size(1, Unit.MebiByte)).run())
 
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before, non_classified_after)
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before - test_file.size, classified_after)
+        TestRun.LOGGER.info("Checking non-classified occupancy")
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before + test_file.size, non_classified_after)
 
-    TestRun.LOGGER.info("Reading test file")
-    classified_before = classified_after
-    non_classified_before = non_classified_after
-    (Dd().input(test_file.full_path).output("/dev/null")
-     .block_size(Size(1, Unit.MebiByte)).run())
+    with TestRun.step(f"Move test file to {nested_dir_path}."):
+        classified_before = classified_after
+        non_classified_before = non_classified_after
+        test_file.move(destination=nested_dir_path)
+        sync()
+        drop_caches(DropCachesMode.ALL)
 
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before + test_file.size, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before - test_file.size, non_classified_after)
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before, classified_after)
+        TestRun.LOGGER.info("Checking non-classified occupancy")
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before, non_classified_after)
+
+    with TestRun.step("Read test file."):
+        classified_before = classified_after
+        non_classified_before = non_classified_after
+        (Dd().input(test_file.full_path).output("/dev/null")
+         .block_size(Size(1, Unit.MebiByte)).run())
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before + test_file.size, classified_after)
+
+    with TestRun.step("Check non-classified occupancy."):
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before - test_file.size, non_classified_after)
+
+
+@pytest.mark.os_dependent
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+@pytest.mark.parametrizex("filesystem", Filesystem)
+def test_ioclass_directory_dir_operations(filesystem):
+    """
+        title: Test IO classification by directory operations.
+        description: |
+          Test if directory classification works properly after directory operations like move or
+          rename.
+        pass_criteria:
+          - No kernel bug.
+          - The operations themselves should not cause reclassification but IO after those
+            operations should be reclassified to proper IO class.
+          - Directory classification may work with a delay after loading IO class configuration or
+            move/rename operations. Test checks if maximum delay is not exceeded.
+    """
+    non_classified_dir_path = f"{mountpoint}/non_classified"
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config file."):
+        proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
+        ioclass_id_1 = proper_ids[0]
+        classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
+        ioclass_id_2 = proper_ids[1]
+        classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
+        # directory IO classes
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id_1,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{classified_dir_path_1}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id_2,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{classified_dir_path_2}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mount {core.system_path} at {mountpoint}."):
+        core.create_filesystem(fs_type=filesystem)
+        core.mount(mount_point=mountpoint)
+        sync()
+
+    with TestRun.step(f"Create a non-classified directory: {non_classified_dir_path}."):
+        dir_1 = Directory.create_directory(path=non_classified_dir_path)
+
+    with TestRun.step(f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
+        dir_1.move(destination=classified_dir_path_1)
+
+    with TestRun.step("Create files with delay check."):
+        create_files_with_classification_delay_check(
+            cache, directory=dir_1, ioclass_id=ioclass_id_1)
+
+    with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
+        dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
+
+    with TestRun.step("Create files with delay check."):
+        create_files_with_classification_delay_check(cache, directory=dir_2,
+                                                     ioclass_id=ioclass_id_2)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
+        dir_2.move(destination=classified_dir_path_1)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=ioclass_id_1,
+                                               source_ioclass_id=ioclass_id_2,
+                                               directory=dir_2, with_delay=False)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
+        dir_2.move(destination=mountpoint)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
+                                               directory=dir_2, with_delay=False)
+
+    with TestRun.step(f"Remove {classified_dir_path_2}."):
+        fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step(f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
+        dir_1.move(destination=classified_dir_path_2)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=ioclass_id_2,
+                                               source_ioclass_id=ioclass_id_1,
+                                               directory=dir_1, with_delay=True)
+
+    with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
+        dir_1.move(destination=non_classified_dir_path)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
+                                               directory=dir_1, with_delay=True)
+
+
+def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int):
+    start_time = datetime.now()
+    occupancy_after = cache.get_io_class_statistics(
+        io_class_id=ioclass_id).usage_stats.occupancy
+    dd_blocks = 10
+    dd_size = Size(dd_blocks, Unit.Blocks4096)
+    file_counter = 0
+    unclassified_files = []
+    time_from_start = datetime.now() - start_time
+    while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
+        occupancy_before = occupancy_after
+        file_path = f"{directory.full_path}/test_file_{file_counter}"
+        file_counter += 1
+        time_from_start = datetime.now() - start_time
+        (Dd().input("/dev/zero").output(file_path).oflag("sync")
+         .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
+        occupancy_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        if occupancy_after - occupancy_before < dd_size:
+            unclassified_files.append(file_path)
+
+    if len(unclassified_files) == file_counter:
+        pytest.xfail("No files were properly classified within max delay time!")
+
+    if len(unclassified_files):
+        TestRun.LOGGER.info("Rewriting unclassified test files...")
+        for file_path in unclassified_files:
+            (Dd().input("/dev/zero").output(file_path).oflag("sync")
+             .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
+
+
+def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
+                                           directory: Directory, with_delay: bool):
+    start_time = datetime.now()
+    target_occupancy_after = cache.get_io_class_statistics(
+        io_class_id=target_ioclass_id).usage_stats.occupancy
+    source_occupancy_after = cache.get_io_class_statistics(
+        io_class_id=source_ioclass_id).usage_stats.occupancy
+    unclassified_files = []
+
+    for file in [item for item in directory.ls() if isinstance(item, File)]:
+        target_occupancy_before = target_occupancy_after
+        source_occupancy_before = source_occupancy_after
+        time_from_start = datetime.now() - start_time
+        (Dd().input(file.full_path).output("/dev/null")
+         .block_size(Size(1, Unit.Blocks4096)).run())
+        target_occupancy_after = cache.get_io_class_statistics(
+            io_class_id=target_ioclass_id).usage_stats.occupancy
+        source_occupancy_after = cache.get_io_class_statistics(
+            io_class_id=source_ioclass_id).usage_stats.occupancy
+        if target_occupancy_after < target_occupancy_before:
+            pytest.xfail("Target IO class occupancy lowered!")
+        elif target_occupancy_after - target_occupancy_before < file.size:
+            unclassified_files.append(file)
+            if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
+                continue
+            pytest.xfail("Target IO class occupancy not changed properly!")
+        if source_occupancy_after >= source_occupancy_before:
+            if file not in unclassified_files:
+                unclassified_files.append(file)
+            if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
+                continue
+            pytest.xfail("Source IO class occupancy not changed properly!")
+
+    if len(unclassified_files):
+        TestRun.LOGGER.info("Rereading unclassified test files...")
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        for file in unclassified_files:
+            (Dd().input(file.full_path).output("/dev/null")
+             .block_size(Size(1, Unit.Blocks4096)).run())
+
+
+def check_occupancy(expected: Size, actual: Size):
+    if expected != actual:
+        pytest.xfail("Occupancy check failed!\n"
+                     f"Expected: {expected}, actual: {actual}")

==== File 3 of 5 ====

@ -4,236 +4,267 @@
# #
import random import random
import pytest import pytest
from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches from test_utils.os_utils import sync, DropCachesMode, drop_caches
from .io_class_common import * from test_utils.size import Size, Unit
from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension(): def test_ioclass_file_extension():
cache, core = prepare() """
title: Test IO classification by file extension.
description: Test if file extension classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file extension.
"""
iterations = 50 iterations = 50
ioclass_id = 1 ioclass_id = 1
tested_extension = "tmp" tested_extension = "tmp"
wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"] wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
dd_size = Size(4, Unit.KibiByte) dd_size = Size(4, Unit.KibiByte)
dd_count = 10 dd_count = 10
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Check if file with proper extension is cached
dd = ( dd = (
Dd() Dd().input("/dev/zero")
.input("/dev/zero") .output(f"{mountpoint}/test_file.{tested_extension}")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for i in range(iterations):
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing to file with no cached extension.")
for ext in wrong_extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count) .count(dd_count)
.block_size(dd_size) .block_size(dd_size)
)
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config."):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
) )
dd.run() casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
if dirty.get_value(Unit.Blocks4096) != 0: core.create_filesystem(Filesystem.ext3)
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).") core.mount(mountpoint)
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step(f"Write to file with cached extension and check if it is properly cached."):
for i in range(iterations):
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
with TestRun.step("Flush cache."):
cache.flush_cache()
with TestRun.step(f"Write to file with not cached extension and check if it is not cached."):
for ext in wrong_extensions:
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_name_prefix(): def test_ioclass_file_name_prefix():
cache, core = prepare() """
title: Test IO classification by file name prefix.
description: Test if file name prefix classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file name prefix.
"""
ioclass_id = 1 ioclass_id = 1
cached_files = ["test", "test.txt", "test1", "test1.txt"] cached_files = ["test", "test.txt", "test1", "test1.txt"]
not_cached_files = ["file1", "file2", "file4", "file5", "tes"] not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
dd_size = Size(4, Unit.KibiByte) dd_size = Size(4, Unit.KibiByte)
dd_count = 10 dd_count = 10
ioclass_config.remove_ioclass_config() with TestRun.step("Prepare cache and core."):
ioclass_config.create_ioclass_config(False) cache, core = prepare()
# Avoid caching anything else than files with specified prefix with TestRun.step("Create and load IO class config."):
ioclass_config.add_ioclass( ioclass_config.remove_ioclass_config()
ioclass_id=0, ioclass_config.create_ioclass_config(False)
eviction_priority=255,
allocation=False,
rule=f"unclassified",
ioclass_config_path=ioclass_config_path,
)
# Enables file with specified prefix to be cached
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_name_prefix:test&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info( # Avoid caching anything else than files with specified prefix
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}" ioclass_config.add_ioclass(
) ioclass_id=0,
eviction_priority=255,
allocation=False,
rule=f"unclassified",
ioclass_config_path=ioclass_config_path,
)
# Enables file with specified prefix to be cached
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_name_prefix:test&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
previous_occupancy = cache.get_occupancy() with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
previous_occupancy = cache.get_occupancy()
core.create_filesystem(Filesystem.ext3) core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint) core.mount(mountpoint)
current_occupancy = cache.get_occupancy() current_occupancy = cache.get_occupancy()
if previous_occupancy.get_value() > current_occupancy.get_value(): if previous_occupancy.get_value() > current_occupancy.get_value():
TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower " TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
f"than before ({str(previous_occupancy)}).") f"than before ({str(previous_occupancy)}).")
# Filesystem creation caused metadata IO which is not supposed # Filesystem creation caused metadata IO which is not supposed
# to be cached # to be cached
# Check if files with proper prefix are cached # Check if files with proper prefix are cached
TestRun.LOGGER.info(f"Writing files which are supposed to be cached.") with TestRun.step(f"Write files which are supposed to be cached and check "
for f in cached_files: f"if they are cached."):
dd = ( for f in cached_files:
Dd() dd = (
.input("/dev/zero") Dd().input("/dev/zero")
.output(f"{mountpoint}/{f}") .output(f"{mountpoint}/{f}")
.count(dd_count) .count(dd_count)
.block_size(dd_size) .block_size(dd_size)
) )
dd.run() dd.run()
sync() sync()
current_occupancy = cache.get_occupancy() current_occupancy = cache.get_occupancy()
expected_occupancy = previous_occupancy + (dd_size * dd_count) expected_occupancy = previous_occupancy + (dd_size * dd_count)
if current_occupancy != expected_occupancy: if current_occupancy != expected_occupancy:
TestRun.fail(f"Current occupancy value is not valid. " TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(expected_occupancy)}, actual: {str(current_occupancy)})") f"(Expected: {str(expected_occupancy)}, "
previous_occupancy = current_occupancy f"actual: {str(current_occupancy)})")
previous_occupancy = current_occupancy
cache.flush_cache() with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if file with improper extension is not cached # Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing files which are not supposed to be cached.") with TestRun.step(f"Write files which are not supposed to be cached and check if "
for f in not_cached_files: f"they are not cached."):
dd = ( for f in not_cached_files:
Dd() dd = (
.input("/dev/zero") Dd().input("/dev/zero")
.output(f"{mountpoint}/{f}") .output(f"{mountpoint}/{f}")
.count(dd_count) .count(dd_count)
.block_size(dd_size) .block_size(dd_size)
) )
dd.run() dd.run()
sync() sync()
current_occupancy = cache.get_occupancy() current_occupancy = cache.get_occupancy()
if current_occupancy != previous_occupancy: if current_occupancy != previous_occupancy:
TestRun.fail(f"Current occupancy value is not valid. " TestRun.fail(f"Current occupancy value is not valid. "
f"(Expected: {str(previous_occupancy)}, actual: {str(current_occupancy)})") f"(Expected: {str(previous_occupancy)}, "
f"actual: {str(current_occupancy)})")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension_preexisting_filesystem(): def test_ioclass_file_extension_preexisting_filesystem():
"""Create files on filesystem, add device with filesystem as a core, """
write data to files and check if they are cached properly""" title: Test IO classification by file extension with preexisting filesystem on core device.
cache, core = prepare() description: |
Test if file extension classification works properly when there is an existing
filesystem on core device.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file extension
after mounting core device.
"""
ioclass_id = 1 ioclass_id = 1
extensions = ["tmp", "tm", "out", "txt", "log", "123"] extensions = ["tmp", "tm", "out", "txt", "log", "123"]
dd_size = Size(4, Unit.KibiByte) dd_size = Size(4, Unit.KibiByte)
dd_count = 10 dd_count = 10
TestRun.LOGGER.info(f"Preparing files on raw block device") with TestRun.step("Prepare cache and core devices."):
casadm.remove_core(cache.cache_id, core_id=core.core_id) cache, core = prepare()
core.core_device.create_filesystem(Filesystem.ext3)
core.core_device.mount(mountpoint)
# Prepare files with TestRun.step(f"Prepare files on raw block device."):
for ext in extensions: casadm.remove_core(cache.cache_id, core_id=core.core_id)
dd = ( core.core_device.create_filesystem(Filesystem.ext3)
Dd() core.core_device.mount(mountpoint)
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}") for ext in extensions:
.count(dd_count) dd = (
.block_size(dd_size) Dd().input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
core.core_device.unmount()
with TestRun.step("Create IO class config."):
rule = "|".join([f"extension:{ext}" for ext in extensions])
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
) )
dd.run()
core.core_device.unmount()
# Prepare ioclass config with TestRun.step(f"Add device with preexisting data as a core."):
rule = "|".join([f"extension:{ext}" for ext in extensions]) core = casadm.add_core(cache, core_dev=core.core_device)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test with TestRun.step("Load IO class config."):
TestRun.LOGGER.info(f"Adding device with preexisting data as a core") casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
core = casadm.add_core(cache, core_dev=core.core_device)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
core.mount(mountpoint) with TestRun.step("Mount core and flush cache."):
cache.flush_cache() core.mount(mountpoint)
cache.flush_cache()
# Check if files with proper extensions are cached with TestRun.step(f"Write to file with cached extension and check if they are cached."):
TestRun.LOGGER.info(f"Writing to file with cached extension.") for ext in extensions:
for ext in extensions: dd = (
dd = ( Dd().input("/dev/zero")
Dd() .output(f"{mountpoint}/test_file.{ext}")
.input("/dev/zero") .count(dd_count)
.output(f"{mountpoint}/test_file.{ext}") .block_size(dd_size)
.count(dd_count) )
.block_size(dd_size) dd.run()
) sync()
dd.run() dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
sync() if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_offset(): def test_ioclass_file_offset():
cache, core = prepare() """
title: Test IO classification by file offset.
description: Test if file offset classification works properly.
pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file offset.
"""
ioclass_id = 1 ioclass_id = 1
iterations = 100 iterations = 100
dd_size = Size(4, Unit.KibiByte) dd_size = Size(4, Unit.KibiByte)
@ -241,66 +272,70 @@ def test_ioclass_file_offset():
min_cached_offset = 16384 min_cached_offset = 16384
max_cached_offset = 65536 max_cached_offset = 65536
ioclass_config.add_ioclass( with TestRun.step("Prepare cache and core."):
ioclass_id=ioclass_id, cache, core = prepare()
eviction_priority=1,
allocation=True,
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info( with TestRun.step("Create and load IO class config file."):
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}" ioclass_config.add_ioclass(
) ioclass_id=ioclass_id,
core.create_filesystem(Filesystem.ext3) eviction_priority=1,
core.mount(mountpoint) allocation=True,
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
cache.flush_cache() ioclass_config_path=ioclass_config_path,
# Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
# nor last sector
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int(
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
TestRun.LOGGER.info(f"Writing to file within cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
) )
dd.run() casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
if dirty.get_value(Unit.Blocks4096) != 1: core.create_filesystem(Filesystem.ext3)
TestRun.LOGGER.error(f"Offset not cached: {file_offset}") core.mount(mountpoint)
with TestRun.step("Flush cache."):
cache.flush_cache() cache.flush_cache()
min_seek = 0 with TestRun.step("Write to file within cached offset range and check if it is cached."):
max_seek = int(min_cached_offset / Unit.Blocks4096.value) # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
TestRun.LOGGER.info(f"Writing to file outside of cached offset range") # nor last sector
for i in range(iterations): min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
file_offset = random.choice(range(min_seek, max_seek)) max_seek = int(
dd = ( (max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
Dd() / Unit.Blocks4096.value
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
) )
dd.run()
sync() for i in range(iterations):
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty file_offset = random.choice(range(min_seek, max_seek))
if dirty.get_value(Unit.Blocks4096) != 0: dd = (
TestRun.LOGGER.error(f"Inappropriately cached offset: {file_offset}") Dd().input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 1:
TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
cache.flush_cache()
with TestRun.step(
"Write to file outside of cached offset range and check if it is not cached."):
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd().input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Inappropriately cached offset: {file_offset}")
@pytest.mark.os_dependent @pytest.mark.os_dependent
@ -309,53 +344,44 @@ def test_ioclass_file_offset():
@pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_file_size(filesystem): def test_ioclass_file_size(filesystem):
""" """
File size IO class rules are configured in a way that each tested file size is unambiguously title: Test IO classification by file size.
classified. description: Test if file size classification works properly.
Firstly write operations are tested (creation of files), secondly read operations. pass_criteria:
- No kernel bug.
- IO is classified properly based on IO class rule with file size.
""" """
def load_file_size_io_classes():
# IO class order intentional, do not change
base_size_bytes = int(base_size.get_value(Unit.Byte))
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:lt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule=f"file_size:le:{int(base_size_bytes / 2)}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"file_size:ge:{2 * base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
def create_files_and_check_classification(): # File size IO class rules are configured in a way that each tested file size is unambiguously
TestRun.LOGGER.info("Creating files belonging to different IO classes " # classified.
"(classification by writes).") # Firstly write operations are tested (creation of files), secondly read operations.
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Create files belonging to different IO classes (classification by writes)."):
test_files = []
for size, ioclass_id in size_to_class.items(): for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_io_class_statistics( occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy io_class_id=ioclass_id).usage_stats.occupancy
@ -371,27 +397,7 @@ def test_ioclass_file_size(filesystem):
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
def reclassify_files(): with TestRun.step("Move all files to 'unclassified' IO class."):
TestRun.LOGGER.info("Reading files belonging to different IO classes "
"(classification by reads).")
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
def remove_files_classification():
TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path) ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config( ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path add_default_rule=False, ioclass_config_path=ioclass_config_path
@ -416,8 +422,7 @@ def test_ioclass_file_size(filesystem):
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
def restore_classification_config(): with TestRun.step("Restore IO class configuration."):
TestRun.LOGGER.info("Restoring IO class configuration")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path) ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config( ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path add_default_rule=False, ioclass_config_path=ioclass_config_path
@ -429,39 +434,66 @@ def test_ioclass_file_size(filesystem):
rule="unclassified", rule="unclassified",
ioclass_config_path=ioclass_config_path, ioclass_config_path=ioclass_config_path,
) )
load_file_size_io_classes() load_file_size_io_classes(cache, base_size)
cache, core = prepare() with TestRun.step("Read files belonging to different IO classes (classification by reads)."):
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096) # CAS device should be unmounted and mounted because data can be sometimes still cached by
size_to_class = { # OS cache so occupancy statistics will not match
base_size: 1, core.unmount()
base_size - Unit.Blocks4096: 2, core.mount(mountpoint)
base_size + Unit.Blocks4096: 3, for file in test_files:
base_size / 2: 4, ioclass_id = size_to_class[file.size]
base_size / 2 - Unit.Blocks4096: 4, occupancy_before = cache.get_io_class_statistics(
base_size / 2 + Unit.Blocks4096: 2, io_class_id=ioclass_id).usage_stats.occupancy
base_size * 2: 5, Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
base_size * 2 - Unit.Blocks4096: 3, occupancy_after = cache.get_io_class_statistics(
base_size * 2 + Unit.Blocks4096: 5, io_class_id=ioclass_id).usage_stats.occupancy
} actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
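The check above is plain block arithmetic: reading a whole file through the CAS device should grow its IO class occupancy by exactly the file size, counted in 4 KiB blocks. A worked example with illustrative numbers (not taken from a real run):

# Illustrative numbers only - standalone arithmetic behind the occupancy check.
BLOCK_4K = 4096

occupancy_before = 1200 * BLOCK_4K  # hypothetical occupancy stat, in bytes
file_size = 100 * BLOCK_4K          # file spanning 100 cache blocks

expected_blocks = (occupancy_before + file_size) // BLOCK_4K
occupancy_after = 1300 * BLOCK_4K   # what the cache should report after the read

assert occupancy_after // BLOCK_4K == expected_blocks  # 1300 == 1200 + 100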

def load_file_size_io_classes(cache, base_size):
    # IO class order intentional, do not change
    base_size_bytes = int(base_size.get_value(Unit.Byte))
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:eq:{base_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=2,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:lt:{base_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=3,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:gt:{base_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=4,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:le:{int(base_size_bytes / 2)}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=5,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:ge:{2 * base_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
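A note on the "IO class order intentional, do not change" comment: several of the tested sizes match more than one of the five rules (for example base_size / 2 matches both file_size:lt and file_size:le), so the size_to_class expectations only hold if the highest-numbered matching class wins. A minimal standalone model of that assumption follows; it sketches the test's expectations, not the actual OCF classifier:

# Standalone model (an assumption, not OCF code): classify a size against the
# five rules from load_file_size_io_classes(), letting the last match win.
def classify(size_bytes, base_bytes):
    matched = 0  # 0 = unclassified
    rules = [
        (1, size_bytes == base_bytes),        # file_size:eq:base
        (2, size_bytes < base_bytes),         # file_size:lt:base
        (3, size_bytes > base_bytes),         # file_size:gt:base
        (4, size_bytes <= base_bytes // 2),   # file_size:le:base/2
        (5, size_bytes >= 2 * base_bytes),    # file_size:ge:2*base
    ]
    for ioclass_id, matches in rules:
        if matches:
            matched = ioclass_id
    return matched

BLOCK = 4096
base = 100 * BLOCK
# Mirrors size_to_class: each tested size lands in exactly one expected class.
assert classify(base, base) == 1
assert classify(base - BLOCK, base) == 2
assert classify(base + BLOCK, base) == 3
assert classify(base // 2, base) == 4
assert classify(base // 2 + BLOCK, base) == 2
assert classify(2 * base, base) == 5
assert classify(2 * base - BLOCK, base) == 3

Under this model the eq rule for class 1 is never shadowed, and the le/ge rules deliberately come last so they override lt/gt for the half- and double-size files.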

View File

@ -7,110 +7,132 @@ import time
import pytest

from api.cas import ioclass_config, casadm
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import prepare, ioclass_config_path


@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_process_name():
    """
    title: Test IO classification by process name.
    description: Check if data generated by a process with a particular name is cached.
    pass_criteria:
      - No kernel bug.
      - IO is classified properly based on the name of the process generating it.
    """
    ioclass_id = 1
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    iterations = 100
ioclass_config.add_ioclass( with TestRun.step("Prepare cache and core."):
ioclass_id=ioclass_id, cache, core = prepare()
eviction_priority=1,
allocation=True,
rule=f"process_name:dd&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache() with TestRun.step("Create and load IO class config file."):
ioclass_config.add_ioclass(
Udev.disable() ioclass_id=ioclass_id,
eviction_priority=1,
TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.") allocation=True,
for i in range(iterations): rule=f"process_name:dd&done",
dd = ( ioclass_config_path=ioclass_config_path,
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
) )
dd.run() casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
sync()
time.sleep(0.1) with TestRun.step("Flush cache and disable udev."):
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty cache.flush_cache()
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count: Udev.disable()
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
with TestRun.step("Check if all data generated by dd process is cached."):
for i in range(iterations):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
)
dd.run()
sync()
time.sleep(0.1)
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_pid(): def test_ioclass_pid():
cache, core = prepare() """
title: Test IO classification by process id.
description: Check if data generated by process with particular id is cached.
pass_criteria:
- No kernel bug.
- IO is classified properly based on process generating IO id.
"""
ioclass_id = 1 ioclass_id = 1
iterations = 20 iterations = 20
dd_count = 100 dd_count = 100
dd_size = Size(4, Unit.KibiByte) dd_size = Size(4, Unit.KibiByte)
Udev.disable() with TestRun.step("Prepare cache, core and disable udev."):
cache, core = prepare()
Udev.disable()

    with TestRun.step("Prepare dd command."):
        # Since 'dd' has to be executed right after writing the pid to 'ns_last_pid',
        # the 'dd' command is built as a string and appended to the 'echo' command
        # instead of being run directly.
        dd_command = str(
            Dd()
            .input("/dev/zero")
            .output(core.system_path)
            .count(dd_count)
            .block_size(dd_size)
        )

    for _ in TestRun.iteration(range(iterations)):
        with TestRun.step("Flush cache."):
            cache.flush_cache()

        with TestRun.step("Prepare and load IO class config."):
            output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
            if output.exit_code != 0:
                raise Exception(
                    f"Failed to retrieve pid. stdout: {output.stdout}\nstderr: {output.stderr}"
                )
            # A few pids might be used up by the system during test preparation.
            pid = int(output.stdout) + 50
            ioclass_config.add_ioclass(
                ioclass_id=ioclass_id,
                eviction_priority=1,
                allocation=True,
                rule=f"pid:eq:{pid}&done",
                ioclass_config_path=ioclass_config_path,
            )
            casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

        with TestRun.step(f"Run dd with pid {pid}."):
            # The pid saved in 'ns_last_pid' has to be smaller by one than the
            # target dd pid.
            dd_and_pid_command = (
                f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
            )
            output = TestRun.executor.run(dd_and_pid_command)
            if output.exit_code != 0:
                raise Exception(
                    f"Failed to run dd with target pid. "
                    f"stdout: {output.stdout}\nstderr: {output.stderr}"
                )
            sync()

        with TestRun.step("Check if data was cached properly."):
            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
            ioclass_config.remove_ioclass(ioclass_id)
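The echo-and-dd pipeline above relies on how Linux hands out pids: the next process forked in the namespace gets ns_last_pid + 1. A standalone sketch of the same trick (assumes root and a quiet system; any process spawned in between takes the pid instead):

# Standalone sketch of the ns_last_pid trick (not test framework code).
# Requires root; racy by design - another fork in between steals the pid.
import os

target_pid = 100000  # hypothetical target, analogous to 'pid' in the test

with open("/proc/sys/kernel/ns_last_pid", "w") as f:
    f.write(str(target_pid - 1))  # next fork should be assigned target_pid

child = os.fork()
if child == 0:
    os._exit(0)             # child does nothing; it only proves the pid assignment
print(child == target_pid)  # True when nothing else forked in the meantime
os.waitpid(child, 0)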

File diff suppressed because it is too large.