Merge pull request #515 from katlapinka/io-class-renew

Rewrite IO class tests to use latest Test Framework API

commit fea55bca42
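Note on the change: the rewrite replaces free-floating TestRun.LOGGER.info() banners around flat test bodies with TestRun.step() context managers, one per logical test phase. A minimal sketch of the two styles, using prepare() from io_class_common exactly as the tests below do (the precise step semantics are defined by the test-framework repository, not shown in this diff):

    # Old style, removed by this PR: a log line next to unstructured code.
    TestRun.LOGGER.info("Preparing cache and core")
    cache, core = prepare()

    # New style, introduced by this PR: each phase is a named step, so the
    # framework can attribute failures, logs and timings to a specific step.
    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()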
@@ -3,8 +3,6 @@
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
 
-import time
-
 import pytest
 
 from test_tools.disk_utils import Filesystem
@@ -12,7 +10,7 @@ from api.cas import ioclass_config, casadm
 from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
-from test_utils.os_utils import sync, Udev, drop_caches, DropCachesMode
+from test_utils.os_utils import sync, Udev, drop_caches
 from test_utils.size import Unit, Size
 from core.test_run import TestRun
 
@@ -29,7 +27,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached"
 def test_ioclass_core_id(filesystem):
     """
     title: Test for `core_id` classification rule
-    dsecription: |
+    description: |
         Test if IO to core with selective allocation enabled is cached and IO to core with
         selective allocation disabled is redirected to pass-through mode
     pass_criteria:
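The rewritten tests also standardize their docstrings into title / description / pass_criteria fields, as in the hunk above. A sketch of the layout this PR applies across all tests (field names taken from the diff; whether the framework parses them or they are purely a documentation convention is not shown here):

    def test_example():
        """
        title: One-line test name.
        description: |
            What the test verifies, possibly wrapped
            over several lines.
        pass_criteria:
          - No kernel bug.
          - Expected behaviour is observed.
        """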
@@ -4,10 +4,10 @@
 #
 
 import random
-from datetime import datetime
 
 import pytest
+from datetime import datetime
+from api.cas import ioclass_config, casadm
+from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.dd import Dd
@@ -15,7 +15,8 @@ from test_tools.disk_utils import Filesystem
 from test_utils.filesystem.directory import Directory
 from test_utils.filesystem.file import File
 from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev
-from .io_class_common import *
+from test_utils.size import Size, Unit
+from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
 
 
 @pytest.mark.os_dependent
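Both rewritten files drop the wildcard import of the shared helper module in favour of explicit names, so each symbol's origin is visible and linters can flag unused imports:

    # Before
    from .io_class_common import *

    # After: only what the file actually uses
    from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path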
@@ -24,35 +25,41 @@ from .io_class_common import *
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_depth(filesystem):
     """
+    title: Test IO classification by directory.
+    description: |
         Test if directory classification works properly for deeply nested directories for read and
         write operations.
+    pass_criteria:
+      - No kernel bug.
+      - Read and write operations to directories are classified properly.
     """
+    base_dir_path = f"{mountpoint}/base_dir"
+
+    with TestRun.step("Prepare cache and core."):
         cache, core = prepare()
         Udev.disable()
 
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
+    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
+                      f"at {mountpoint}."):
         core.create_filesystem(filesystem)
         core.mount(mountpoint)
         sync()
 
-    base_dir_path = f"{mountpoint}/base_dir"
-    TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
+    with TestRun.step(f"Create the base directory: {base_dir_path}."):
         fs_utils.create_directory(base_dir_path)
 
+    with TestRun.step("Create a nested directory."):
         nested_dir_path = base_dir_path
         random_depth = random.randint(40, 80)
         for i in range(random_depth):
            nested_dir_path += f"/dir_{i}"
-    TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
         fs_utils.create_directory(path=nested_dir_path, parents=True)
 
     # Test classification in nested dir by reading a previously unclassified file
-    TestRun.LOGGER.info("Creating the first file in the nested directory")
+    with TestRun.step("Create the first file in the nested directory."):
         test_file_1 = File(f"{nested_dir_path}/test_file_1")
         dd = (
-            Dd()
-            .input("/dev/urandom")
+            Dd().input("/dev/urandom")
             .output(test_file_1.full_path)
             .count(random.randint(1, 200))
             .block_size(Size(1, Unit.MebiByte))
@@ -62,6 +69,7 @@ def test_ioclass_directory_depth(filesystem):
         drop_caches(DropCachesMode.ALL)
         test_file_1.refresh_item()
 
+    with TestRun.step("Load IO class config."):
         ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
         # directory IO class
         ioclass_config.add_ioclass(
@@ -73,28 +81,28 @@ def test_ioclass_directory_depth(filesystem):
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
+    with TestRun.step("Read the file in the nested directory."):
         base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    TestRun.LOGGER.info("Reading the file in the nested directory")
         dd = (
-            Dd()
-            .input(test_file_1.full_path)
+            Dd().input(test_file_1.full_path)
             .output("/dev/null")
             .block_size(Size(1, Unit.MebiByte))
         )
         dd.run()
 
+    with TestRun.step("Check occupancy after reading the file."):
         new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    assert new_occupancy == base_occupancy + test_file_1.size, \
-        "Wrong occupancy after reading file!\n" \
-        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
+        if new_occupancy != base_occupancy + test_file_1.size:
+            TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
+                                 f"Expected: {base_occupancy + test_file_1.size}, "
+                                 f"actual: {new_occupancy}")
 
     # Test classification in nested dir by creating a file
+    with TestRun.step("Create the second file in the nested directory."):
         base_occupancy = new_occupancy
-    TestRun.LOGGER.info("Creating the second file in the nested directory")
         test_file_2 = File(f"{nested_dir_path}/test_file_2")
         dd = (
-            Dd()
-            .input("/dev/urandom")
+            Dd().input("/dev/urandom")
             .output(test_file_2.full_path)
             .count(random.randint(1, 200))
             .block_size(Size(1, Unit.MebiByte))
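The occupancy checks above change from bare assert statements to TestRun.LOGGER.error() calls. An assert raises immediately and aborts the test; logging an error records the failure while letting the remaining steps run (that is the behaviour this diff relies on; the exact failure semantics are defined by the framework). The resulting pattern:

    new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
    expected = base_occupancy + test_file_1.size
    if new_occupancy != expected:
        # Recorded as a failure, but execution continues with the next step.
        TestRun.LOGGER.error(f"Wrong occupancy after reading file! "
                             f"Expected: {expected}, actual: {new_occupancy}")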
@@ -104,10 +112,133 @@ def test_ioclass_directory_depth(filesystem):
         drop_caches(DropCachesMode.ALL)
         test_file_2.refresh_item()
 
+    with TestRun.step("Check occupancy after creating the second file."):
         new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    assert new_occupancy == base_occupancy + test_file_2.size, \
-        "Wrong occupancy after creating file!\n" \
-        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
+        if new_occupancy != base_occupancy + test_file_2.size:
+            TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
+                                 f"Expected: {base_occupancy + test_file_2.size}, "
+                                 f"actual: {new_occupancy}")
+
+
+@pytest.mark.os_dependent
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+@pytest.mark.parametrizex("filesystem", Filesystem)
+def test_ioclass_directory_file_operations(filesystem):
+    """
+    title: Test IO classification by file operations.
+    description: |
+        Test if directory classification works properly after file operations like move or rename.
+    pass_criteria:
+      - No kernel bug.
+      - The operations themselves should not cause reclassification but IO after those
+        operations should be reclassified to proper IO class.
+    """
+    test_dir_path = f"{mountpoint}/test_dir"
+    nested_dir_path = f"{test_dir_path}/nested_dir"
+    dd_blocks = random.randint(5, 50)
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config file."):
+        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
+        # directory IO class
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{test_dir_path}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mount {core.system_path} at {mountpoint}."):
+        core.create_filesystem(fs_type=filesystem)
+        core.mount(mount_point=mountpoint)
+        sync()
+
+    with TestRun.step(f"Create directory {nested_dir_path}."):
+        Directory.create_directory(path=nested_dir_path, parents=True)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step("Create test file."):
+        classified_before = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        file_path = f"{test_dir_path}/test_file"
+        (Dd().input("/dev/urandom").output(file_path).oflag("sync")
+         .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
+        sync()
+        drop_caches(DropCachesMode.ALL)
+        test_file = File(file_path).refresh_item()
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before + test_file.size, classified_after)
+
+    with TestRun.step("Move test file out of classified directory."):
+        classified_before = classified_after
+        non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        test_file.move(destination=mountpoint)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before, classified_after)
+        TestRun.LOGGER.info("Checking non-classified occupancy")
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before, non_classified_after)
+
+    with TestRun.step("Read test file."):
+        classified_before = classified_after
+        non_classified_before = non_classified_after
+        (Dd().input(test_file.full_path).output("/dev/null")
+         .block_size(Size(1, Unit.MebiByte)).run())
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before - test_file.size, classified_after)
+        TestRun.LOGGER.info("Checking non-classified occupancy")
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before + test_file.size, non_classified_after)
+
+    with TestRun.step(f"Move test file to {nested_dir_path}."):
+        classified_before = classified_after
+        non_classified_before = non_classified_after
+        test_file.move(destination=nested_dir_path)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before, classified_after)
+        TestRun.LOGGER.info("Checking non-classified occupancy")
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before, non_classified_after)
+
+    with TestRun.step("Read test file."):
+        classified_before = classified_after
+        non_classified_before = non_classified_after
+        (Dd().input(test_file.full_path).output("/dev/null")
+         .block_size(Size(1, Unit.MebiByte)).run())
+
+    with TestRun.step("Check classified occupancy."):
+        classified_after = cache.get_io_class_statistics(
+            io_class_id=ioclass_id).usage_stats.occupancy
+        check_occupancy(classified_before + test_file.size, classified_after)
+
+    with TestRun.step("Check non-classified occupancy."):
+        non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
+        check_occupancy(non_classified_before - test_file.size, non_classified_after)
+
+
 @pytest.mark.os_dependent
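The Dd builder used throughout the new test mirrors the dd command line: each chained call sets one option and run() executes it. The file-creation chain in the step above corresponds roughly to dd if=/dev/urandom of=<file> oflag=sync bs=1M count=<dd_blocks>:

    (Dd().input("/dev/urandom")
         .output(file_path)
         .oflag("sync")
         .block_size(Size(1, Unit.MebiByte))
         .count(dd_blocks)
         .run())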
@@ -116,13 +247,115 @@ def test_ioclass_directory_depth(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_dir_operations(filesystem):
     """
-    Test if directory classification works properly after directory operations like move or rename.
-    The operations themselves should not cause reclassification but IO after those operations
-    should be reclassified to proper IO class.
-    Directory classification may work with a delay after loading IO class configuration or
+    title: Test IO classification by directory operations.
+    description: |
+        Test if directory classification works properly after directory operations like move or
+        rename.
+    pass_criteria:
+      - No kernel bug.
+      - The operations themselves should not cause reclassification but IO after those
+        operations should be reclassified to proper IO class.
+      - Directory classification may work with a delay after loading IO class configuration or
         move/rename operations. Test checks if maximum delay is not exceeded.
     """
-    def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int):
+    non_classified_dir_path = f"{mountpoint}/non_classified"
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config file."):
+        proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
+        ioclass_id_1 = proper_ids[0]
+        classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
+        ioclass_id_2 = proper_ids[1]
+        classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
+        # directory IO classes
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id_1,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{classified_dir_path_1}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        ioclass_config.add_ioclass(
+            ioclass_id=ioclass_id_2,
+            eviction_priority=1,
+            allocation=True,
+            rule=f"directory:{classified_dir_path_2}",
+            ioclass_config_path=ioclass_config_path,
+        )
+        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mount {core.system_path} at {mountpoint}."):
+        core.create_filesystem(fs_type=filesystem)
+        core.mount(mount_point=mountpoint)
+        sync()
+
+    with TestRun.step(f"Create a non-classified directory: {non_classified_dir_path}."):
+        dir_1 = Directory.create_directory(path=non_classified_dir_path)
+
+    with TestRun.step(f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
+        dir_1.move(destination=classified_dir_path_1)
+
+    with TestRun.step("Create files with delay check."):
+        create_files_with_classification_delay_check(
+            cache, directory=dir_1, ioclass_id=ioclass_id_1)
+
+    with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
+        dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
+
+    with TestRun.step("Create files with delay check."):
+        create_files_with_classification_delay_check(cache, directory=dir_2,
+                                                     ioclass_id=ioclass_id_2)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
+        dir_2.move(destination=classified_dir_path_1)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=ioclass_id_1,
+                                               source_ioclass_id=ioclass_id_2,
+                                               directory=dir_2, with_delay=False)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
+        dir_2.move(destination=mountpoint)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
+                                               directory=dir_2, with_delay=False)
+
+    with TestRun.step(f"Remove {classified_dir_path_2}."):
+        fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
+        sync()
+        drop_caches(DropCachesMode.ALL)
+
+    with TestRun.step(f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
+        dir_1.move(destination=classified_dir_path_2)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=ioclass_id_2,
+                                               source_ioclass_id=ioclass_id_1,
+                                               directory=dir_1, with_delay=True)
+
+    with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
+        dir_1.move(destination=non_classified_dir_path)
+
+    with TestRun.step("Read files with reclassification check."):
+        read_files_with_reclassification_check(cache,
+                                               target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
+                                               directory=dir_1, with_delay=True)
+
+
+def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int):
     start_time = datetime.now()
     occupancy_after = cache.get_io_class_statistics(
         io_class_id=ioclass_id).usage_stats.occupancy
@@ -152,8 +385,9 @@ def test_ioclass_directory_dir_operations(filesystem):
         (Dd().input("/dev/zero").output(file_path).oflag("sync")
          .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
 
-def read_files_with_reclassification_check(
-        target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
+
+def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
+                                           directory: Directory, with_delay: bool):
     start_time = datetime.now()
     target_occupancy_after = cache.get_io_class_statistics(
         io_class_id=target_ioclass_id).usage_stats.occupancy
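Because the step-based tests are now flat functions, the helpers that used to be nested inside test_ioclass_directory_dir_operations are hoisted to module level, and the cache object becomes an explicit parameter instead of a closure variable:

    # Before: nested, capturing `cache` from the enclosing test.
    def read_files_with_reclassification_check(
            target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
        ...

    # After: module level, reusable by every test in the file.
    def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
                                               directory: Directory, with_delay: bool):
        ...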
@@ -193,205 +427,8 @@ def test_ioclass_directory_dir_operations(filesystem):
         (Dd().input(file.full_path).output("/dev/null")
          .block_size(Size(1, Unit.Blocks4096)).run())
 
-    cache, core = prepare()
-    Udev.disable()
-
-    proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
-    ioclass_id_1 = proper_ids[0]
-    classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
-    ioclass_id_2 = proper_ids[1]
-    classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
-    # directory IO classes
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id_1,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{classified_dir_path_1}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id_2,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{classified_dir_path_2}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(fs_type=filesystem)
-    core.mount(mount_point=mountpoint)
-    sync()
-
-    non_classified_dir_path = f"{mountpoint}/non_classified"
-    TestRun.LOGGER.info(
-        f"Creating a non-classified directory: {non_classified_dir_path}")
-    dir_1 = Directory.create_directory(path=non_classified_dir_path)
-
-    TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
-    dir_1.move(destination=classified_dir_path_1)
-
-    TestRun.LOGGER.info("Creating files with delay check")
-    create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1)
-
-    TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
-    dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
-
-    TestRun.LOGGER.info("Creating files with delay check")
-    create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
-    dir_2.move(destination=classified_dir_path_1)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2,
-        directory=dir_2, with_delay=False)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
-    dir_2.move(destination=mountpoint)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
-        directory=dir_2, with_delay=False)
-
-    TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
-    fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
-    dir_1.move(destination=classified_dir_path_2)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1,
-        directory=dir_1, with_delay=True)
-
-    TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
-    dir_1.move(destination=non_classified_dir_path)
-
-    TestRun.LOGGER.info("Reading files with reclassification check")
-    read_files_with_reclassification_check(
-        target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
-        directory=dir_1, with_delay=True)
-
-
-@pytest.mark.os_dependent
-@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
-@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("filesystem", Filesystem)
-def test_ioclass_directory_file_operations(filesystem):
-    """
-    Test if directory classification works properly after file operations like move or rename.
-    The operations themselves should not cause reclassification but IO after those operations
-    should be reclassified to proper IO class.
-    """
-    def check_occupancy(expected: Size, actual: Size):
+def check_occupancy(expected: Size, actual: Size):
     if expected != actual:
         pytest.xfail("Occupancy check failed!\n"
                      f"Expected: {expected}, actual: {actual}")
 
-    cache, core = prepare()
-    Udev.disable()
-    test_dir_path = f"{mountpoint}/test_dir"
-    nested_dir_path = f"{test_dir_path}/nested_dir"
-
-    dd_blocks = random.randint(5, 50)
-
-    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
-    # directory IO class
-    ioclass_config.add_ioclass(
-        ioclass_id=ioclass_id,
-        eviction_priority=1,
-        allocation=True,
-        rule=f"directory:{test_dir_path}",
-        ioclass_config_path=ioclass_config_path,
-    )
-    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(fs_type=filesystem)
-    core.mount(mount_point=mountpoint)
-    sync()
-
-    TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
-    Directory.create_directory(path=nested_dir_path, parents=True)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info("Creating test file")
-    classified_before = cache.get_io_class_statistics(
-        io_class_id=ioclass_id).usage_stats.occupancy
-    file_path = f"{test_dir_path}/test_file"
-    (Dd().input("/dev/urandom").output(file_path).oflag("sync")
-     .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
-    sync()
-    drop_caches(DropCachesMode.ALL)
-    test_file = File(file_path).refresh_item()
-
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(
-        io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before + test_file.size, classified_after)
-
-    TestRun.LOGGER.info("Moving test file out of classified directory")
-    classified_before = classified_after
-    non_classified_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    test_file.move(destination=mountpoint)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before, non_classified_after)
-
-    TestRun.LOGGER.info("Reading test file")
-    classified_before = classified_after
-    non_classified_before = non_classified_after
-    (Dd().input(test_file.full_path).output("/dev/null")
-     .block_size(Size(1, Unit.MebiByte)).run())
-
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before - test_file.size, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before + test_file.size, non_classified_after)
-
-    TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
-    classified_before = classified_after
-    non_classified_before = non_classified_after
-    test_file.move(destination=nested_dir_path)
-    sync()
-    drop_caches(DropCachesMode.ALL)
-
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before, non_classified_after)
-
-    TestRun.LOGGER.info("Reading test file")
-    classified_before = classified_after
-    non_classified_before = non_classified_after
-    (Dd().input(test_file.full_path).output("/dev/null")
-     .block_size(Size(1, Unit.MebiByte)).run())
-
-    TestRun.LOGGER.info("Checking classified occupancy")
-    classified_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-    check_occupancy(classified_before + test_file.size, classified_after)
-    TestRun.LOGGER.info("Checking non-classified occupancy")
-    non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-    check_occupancy(non_classified_before - test_file.size, non_classified_after)
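The large deletion above is a relocation, not a removal: the old flat bodies of the directory tests were restructured into the step-based versions earlier in this diff. Of the nested helpers, check_occupancy survives at module level with its behaviour unchanged; note that it reports a mismatch through pytest.xfail, which marks the test as an expected failure rather than a hard error (the diff does not state the motivation; presumably occupancy accounting is known to be fragile here):

    def check_occupancy(expected: Size, actual: Size):
        if expected != actual:
            pytest.xfail("Occupancy check failed!\n"
                         f"Expected: {expected}, actual: {actual}")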
@@ -4,28 +4,46 @@
 #
 
 import random
 
 import pytest
 
+from api.cas import ioclass_config, casadm
+from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
 from test_tools.disk_utils import Filesystem
 from test_utils.filesystem.file import File
-from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches
-from .io_class_common import *
+from test_utils.os_utils import sync, DropCachesMode, drop_caches
+from test_utils.size import Size, Unit
+from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_path
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_extension():
-    cache, core = prepare()
+    """
+    title: Test IO classification by file extension.
+    description: Test if file extension classification works properly.
+    pass_criteria:
+      - No kernel bug.
+      - IO is classified properly based on IO class rule with file extension.
+    """
     iterations = 50
     ioclass_id = 1
     tested_extension = "tmp"
     wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 10
+    dd = (
+        Dd().input("/dev/zero")
+        .output(f"{mountpoint}/test_file.{tested_extension}")
+        .count(dd_count)
+        .block_size(dd_size)
+    )
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+
+    with TestRun.step("Create and load IO class config."):
         ioclass_config.add_ioclass(
             ioclass_id=ioclass_id,
             eviction_priority=1,
@@ -35,24 +53,14 @@ def test_ioclass_file_extension():
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(
-        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
-    )
-
+    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
         core.create_filesystem(Filesystem.ext3)
         core.mount(mountpoint)
 
+    with TestRun.step("Flush cache."):
         cache.flush_cache()
 
-    # Check if file with proper extension is cached
-    dd = (
-        Dd()
-        .input("/dev/zero")
-        .output(f"{mountpoint}/test_file.{tested_extension}")
-        .count(dd_count)
-        .block_size(dd_size)
-    )
-    TestRun.LOGGER.info(f"Writing to file with cached extension.")
+    with TestRun.step("Write to file with cached extension and check if it is properly cached."):
         for i in range(iterations):
             dd.run()
             sync()
@@ -60,14 +68,13 @@ def test_ioclass_file_extension():
             if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
                 TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
 
+    with TestRun.step("Flush cache."):
         cache.flush_cache()
 
-    # Check if file with improper extension is not cached
-    TestRun.LOGGER.info(f"Writing to file with no cached extension.")
+    with TestRun.step("Write to file with not cached extension and check if it is not cached."):
         for ext in wrong_extensions:
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/test_file.{ext}")
                 .count(dd_count)
                 .block_size(dd_size)
@@ -82,13 +89,24 @@ def test_ioclass_file_extension():
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_name_prefix():
-    cache, core = prepare()
+    """
+    title: Test IO classification by file name prefix.
+    description: Test if file name prefix classification works properly.
+    pass_criteria:
+      - No kernel bug.
+      - IO is classified properly based on IO class rule with file name prefix.
+    """
+
     ioclass_id = 1
     cached_files = ["test", "test.txt", "test1", "test1.txt"]
     not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 10
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+
+    with TestRun.step("Create and load IO class config."):
         ioclass_config.remove_ioclass_config()
         ioclass_config.create_ioclass_config(False)
 
@@ -110,10 +128,7 @@ def test_ioclass_file_name_prefix():
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(
-        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
-    )
-
+    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
         previous_occupancy = cache.get_occupancy()
 
         core.create_filesystem(Filesystem.ext3)
@@ -128,11 +143,11 @@ def test_ioclass_file_name_prefix():
     # to be cached
 
     # Check if files with proper prefix are cached
-    TestRun.LOGGER.info(f"Writing files which are supposed to be cached.")
+    with TestRun.step("Write files which are supposed to be cached and check "
+                      "if they are cached."):
         for f in cached_files:
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/{f}")
                 .count(dd_count)
                 .block_size(dd_size)
@@ -143,17 +158,19 @@ def test_ioclass_file_name_prefix():
             expected_occupancy = previous_occupancy + (dd_size * dd_count)
             if current_occupancy != expected_occupancy:
                 TestRun.fail(f"Current occupancy value is not valid. "
-                             f"(Expected: {str(expected_occupancy)}, actual: {str(current_occupancy)})")
+                             f"(Expected: {str(expected_occupancy)}, "
+                             f"actual: {str(current_occupancy)})")
             previous_occupancy = current_occupancy
 
+    with TestRun.step("Flush cache."):
         cache.flush_cache()
 
     # Check if file with improper extension is not cached
-    TestRun.LOGGER.info(f"Writing files which are not supposed to be cached.")
+    with TestRun.step("Write files which are not supposed to be cached and check if "
+                      "they are not cached."):
         for f in not_cached_files:
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/{f}")
                 .count(dd_count)
                 .block_size(dd_size)
@@ -163,30 +180,39 @@ def test_ioclass_file_name_prefix():
             current_occupancy = cache.get_occupancy()
             if current_occupancy != previous_occupancy:
                 TestRun.fail(f"Current occupancy value is not valid. "
-                             f"(Expected: {str(previous_occupancy)}, actual: {str(current_occupancy)})")
+                             f"(Expected: {str(previous_occupancy)}, "
+                             f"actual: {str(current_occupancy)})")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_extension_preexisting_filesystem():
-    """Create files on filesystem, add device with filesystem as a core,
-    write data to files and check if they are cached properly"""
-    cache, core = prepare()
+    """
+    title: Test IO classification by file extension with preexisting filesystem on core device.
+    description: |
+        Test if file extension classification works properly when there is an existing
+        filesystem on core device.
+    pass_criteria:
+      - No kernel bug.
+      - IO is classified properly based on IO class rule with file extension
+        after mounting core device.
+    """
    ioclass_id = 1
     extensions = ["tmp", "tm", "out", "txt", "log", "123"]
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 10
 
-    TestRun.LOGGER.info(f"Preparing files on raw block device")
+    with TestRun.step("Prepare cache and core devices."):
+        cache, core = prepare()
+
+    with TestRun.step("Prepare files on raw block device."):
         casadm.remove_core(cache.cache_id, core_id=core.core_id)
         core.core_device.create_filesystem(Filesystem.ext3)
         core.core_device.mount(mountpoint)
 
-    # Prepare files
         for ext in extensions:
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/test_file.{ext}")
                 .count(dd_count)
                 .block_size(dd_size)
@@ -194,7 +220,7 @@ def test_ioclass_file_extension_preexisting_filesystem():
             dd.run()
         core.core_device.unmount()
 
-    # Prepare ioclass config
+    with TestRun.step("Create IO class config."):
         rule = "|".join([f"extension:{ext}" for ext in extensions])
         ioclass_config.add_ioclass(
             ioclass_id=ioclass_id,
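The rule built in this step combines several extension conditions into a single IO class using the pipe separator of the IO class rule syntax (judging by the test's expectations, any matching alternative classifies the IO):

    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    rule = "|".join([f"extension:{ext}" for ext in extensions])
    # "extension:tmp|extension:tm|extension:out|extension:txt|extension:log|extension:123"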
@@ -204,20 +230,20 @@ def test_ioclass_file_extension_preexisting_filesystem():
             ioclass_config_path=ioclass_config_path,
         )
 
-    # Prepare cache for test
-    TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
+    with TestRun.step("Add device with preexisting data as a core."):
         core = casadm.add_core(cache, core_dev=core.core_device)
 
+    with TestRun.step("Load IO class config."):
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
+    with TestRun.step("Mount core and flush cache."):
         core.mount(mountpoint)
         cache.flush_cache()
 
-    # Check if files with proper extensions are cached
-    TestRun.LOGGER.info(f"Writing to file with cached extension.")
+    with TestRun.step("Write to files with cached extensions and check if they are cached."):
         for ext in extensions:
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/test_file.{ext}")
                 .count(dd_count)
                 .block_size(dd_size)
@@ -232,8 +258,13 @@ def test_ioclass_file_extension_preexisting_filesystem():
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_offset():
-    cache, core = prepare()
+    """
+    title: Test IO classification by file offset.
+    description: Test if file offset classification works properly.
+    pass_criteria:
+      - No kernel bug.
+      - IO is classified properly based on IO class rule with file offset.
+    """
     ioclass_id = 1
     iterations = 100
     dd_size = Size(4, Unit.KibiByte)
@@ -241,6 +272,10 @@ def test_ioclass_file_offset():
     min_cached_offset = 16384
     max_cached_offset = 65536
 
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+
+    with TestRun.step("Create and load IO class config file."):
         ioclass_config.add_ioclass(
             ioclass_id=ioclass_id,
             eviction_priority=1,
@@ -250,14 +285,14 @@ def test_ioclass_file_offset():
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(
-        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
-    )
+    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
         core.create_filesystem(Filesystem.ext3)
         core.mount(mountpoint)
 
+    with TestRun.step("Flush cache."):
         cache.flush_cache()
 
+    with TestRun.step("Write to file within cached offset range and check if it is cached."):
         # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
         # nor last sector
         min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
@@ -265,12 +300,11 @@ def test_ioclass_file_offset():
             (max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
             / Unit.Blocks4096.value
         )
-    TestRun.LOGGER.info(f"Writing to file within cached offset range")
         for i in range(iterations):
             file_offset = random.choice(range(min_seek, max_seek))
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/tmp_file")
                 .count(dd_count)
                 .block_size(dd_size)
@@ -283,14 +317,15 @@ def test_ioclass_file_offset():
                 TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
         cache.flush_cache()
 
+    with TestRun.step(
+            "Write to file outside of cached offset range and check if it is not cached."):
         min_seek = 0
         max_seek = int(min_cached_offset / Unit.Blocks4096.value)
         TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
         for i in range(iterations):
             file_offset = random.choice(range(min_seek, max_seek))
             dd = (
-                Dd()
-                .input("/dev/zero")
+                Dd().input("/dev/zero")
                 .output(f"{mountpoint}/tmp_file")
                 .count(dd_count)
                 .block_size(dd_size)
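As the in-diff comment says, the file_offset rule uses strict inequalities, so the first and last 4 KiB blocks of the configured range are excluded from the seek choices. Plugging in the constants from this test (min_cached_offset = 16384, max_cached_offset = 65536, Unit.Blocks4096.value = 4096):

    min_seek = int((16384 + 4096) / 4096)          # 5
    max_seek = int((65536 - 16384 - 4096) / 4096)  # 11
    # random.choice(range(5, 11)) -> seek blocks 5..10,
    # i.e. byte offsets 20480..40960, strictly inside (16384, 65536)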
@ -309,11 +344,121 @@ def test_ioclass_file_offset():
|
|||||||
@pytest.mark.parametrizex("filesystem", Filesystem)
|
@pytest.mark.parametrizex("filesystem", Filesystem)
|
||||||
def test_ioclass_file_size(filesystem):
|
def test_ioclass_file_size(filesystem):
|
||||||
"""
|
"""
|
||||||
File size IO class rules are configured in a way that each tested file size is unambiguously
|
title: Test IO classification by file size.
|
||||||
classified.
|
description: Test if file size classification works properly.
|
||||||
Firstly write operations are tested (creation of files), secondly read operations.
|
pass_criteria:
|
||||||
|
- No kernel bug.
|
||||||
|
- IO is classified properly based on IO class rule with file size.
|
||||||
"""
|
"""
|
||||||
def load_file_size_io_classes():
|
|
||||||
|
# File size IO class rules are configured in a way that each tested file size is unambiguously
|
||||||
|
# classified.
|
||||||
|
# Firstly write operations are tested (creation of files), secondly read operations.
|
||||||
|
|
||||||
|
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
|
||||||
|
size_to_class = {
|
||||||
|
base_size: 1,
|
||||||
|
base_size - Unit.Blocks4096: 2,
|
||||||
|
base_size + Unit.Blocks4096: 3,
|
||||||
|
base_size / 2: 4,
|
||||||
|
base_size / 2 - Unit.Blocks4096: 4,
|
||||||
|
base_size / 2 + Unit.Blocks4096: 2,
|
||||||
|
base_size * 2: 5,
|
||||||
|
base_size * 2 - Unit.Blocks4096: 3,
|
||||||
|
base_size * 2 + Unit.Blocks4096: 5,
|
||||||
|
}
|
||||||
|
|
||||||
|
with TestRun.step("Prepare cache and core."):
|
||||||
|
cache, core = prepare()
|
||||||
|
|
||||||
|
with TestRun.step("Prepare and load IO class config."):
|
||||||
|
load_file_size_io_classes(cache, base_size)
|
||||||
|
|
||||||
|
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
|
||||||
|
f"at {mountpoint}."):
|
||||||
|
core.create_filesystem(filesystem)
|
||||||
|
core.mount(mountpoint)
|
||||||
|
sync()
|
||||||
|
|
||||||
|
with TestRun.step("Create files belonging to different IO classes (classification by writes)."):
|
||||||
|
test_files = []
|
||||||
|
for size, ioclass_id in size_to_class.items():
|
||||||
|
occupancy_before = cache.get_io_class_statistics(
|
||||||
|
io_class_id=ioclass_id).usage_stats.occupancy
|
||||||
|
file_path = f"{mountpoint}/test_file_{size.get_value()}"
|
||||||
|
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
|
||||||
|
occupancy_after = cache.get_io_class_statistics(
|
||||||
|
io_class_id=ioclass_id).usage_stats.occupancy
|
||||||
|
if occupancy_after != occupancy_before + size:
|
||||||
|
TestRun.fail("File not cached properly!\n"
|
||||||
|
f"Expected {occupancy_before + size}\n"
|
||||||
|
f"Actual {occupancy_after}")
|
||||||
|
test_files.append(File(file_path).refresh_item())
|
||||||
|
sync()
|
||||||
|
drop_caches(DropCachesMode.ALL)
|
||||||
|
|
||||||
|
with TestRun.step("Move all files to 'unclassified' IO class."):
|
||||||
|
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
|
||||||
|
ioclass_config.create_ioclass_config(
|
||||||
|
add_default_rule=False, ioclass_config_path=ioclass_config_path
|
||||||
|
)
|
||||||
|
ioclass_config.add_ioclass(
|
||||||
|
ioclass_id=0,
|
||||||
|
eviction_priority=22,
|
||||||
|
allocation=False,
|
||||||
|
rule="unclassified",
|
||||||
|
ioclass_config_path=ioclass_config_path,
|
||||||
|
)
|
||||||
|
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
|
||||||
|
occupancy_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
|
||||||
|
for file in test_files:
|
||||||
|
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
|
||||||
|
occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
|
||||||
|
if occupancy_after != occupancy_before + file.size:
|
||||||
|
TestRun.fail("File not reclassified properly!\n"
|
||||||
|
f"Expected {occupancy_before + file.size}\n"
|
||||||
|
f"Actual {occupancy_after}")
|
||||||
|
occupancy_before = occupancy_after
|
||||||
|
sync()
|
||||||
|
drop_caches(DropCachesMode.ALL)
|
||||||
|
|
||||||
|
with TestRun.step("Restore IO class configuration."):
|
||||||
|
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
|
||||||
|
ioclass_config.create_ioclass_config(
|
||||||
|
add_default_rule=False, ioclass_config_path=ioclass_config_path
|
||||||
|
)
|
||||||
|
ioclass_config.add_ioclass(
|
||||||
|
ioclass_id=0,
|
||||||
|
eviction_priority=22,
|
||||||
|
allocation=False,
|
||||||
|
rule="unclassified",
|
||||||
|
ioclass_config_path=ioclass_config_path,
|
||||||
|
)
|
||||||
|
load_file_size_io_classes(cache, base_size)
|
||||||
|
|
||||||
|
with TestRun.step("Read files belonging to different IO classes (classification by reads)."):
|
||||||
|
# CAS device should be unmounted and mounted because data can be sometimes still cached by
|
||||||
|
# OS cache so occupancy statistics will not match
|
||||||
|
core.unmount()
|
||||||
|
core.mount(mountpoint)
|
||||||
|
for file in test_files:
|
||||||
|
ioclass_id = size_to_class[file.size]
|
||||||
|
occupancy_before = cache.get_io_class_statistics(
|
||||||
|
io_class_id=ioclass_id).usage_stats.occupancy
|
||||||
|
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
|
||||||
|
occupancy_after = cache.get_io_class_statistics(
|
||||||
|
io_class_id=ioclass_id).usage_stats.occupancy
|
||||||
|
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
|
||||||
|
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
|
||||||
|
if actual_blocks != expected_blocks:
|
||||||
|
TestRun.fail("File not reclassified properly!\n"
|
||||||
|
f"Expected {occupancy_before + file.size}\n"
|
||||||
|
f"Actual {occupancy_after}")
|
||||||
|
sync()
|
||||||
|
drop_caches(DropCachesMode.ALL)
|
||||||
|
|
||||||
|
|
||||||
|
def load_file_size_io_classes(cache, base_size):
|
||||||
# IO class order intentional, do not change
|
# IO class order intentional, do not change
|
||||||
base_size_bytes = int(base_size.get_value(Unit.Byte))
|
base_size_bytes = int(base_size.get_value(Unit.Byte))
|
||||||
ioclass_config.add_ioclass(
|
ioclass_config.add_ioclass(
|
||||||
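The body of load_file_size_io_classes is cut off above, so the exact rule strings are not visible here; based on the size_to_class map, each class id presumably pairs with a file_size condition using the matching operator. A minimal sketch of one such rule, with the operator and threshold assumed for illustration:

    # Sketch only - the real load_file_size_io_classes() continues past the
    # truncation; the threshold is assumed from the size_to_class map above.
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:eq:{base_size_bytes}&done",
        ioclass_config_path=ioclass_config_path,
    )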
@@ -352,116 +497,3 @@ def test_ioclass_file_size(filesystem):
         ioclass_config_path=ioclass_config_path,
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-
-    def create_files_and_check_classification():
-        TestRun.LOGGER.info("Creating files belonging to different IO classes "
-                            "(classification by writes).")
-        for size, ioclass_id in size_to_class.items():
-            occupancy_before = cache.get_io_class_statistics(
-                io_class_id=ioclass_id).usage_stats.occupancy
-            file_path = f"{mountpoint}/test_file_{size.get_value()}"
-            Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
-            occupancy_after = cache.get_io_class_statistics(
-                io_class_id=ioclass_id).usage_stats.occupancy
-            if occupancy_after != occupancy_before + size:
-                TestRun.fail("File not cached properly!\n"
-                             f"Expected {occupancy_before + size}\n"
-                             f"Actual {occupancy_after}")
-            test_files.append(File(file_path).refresh_item())
-        sync()
-        drop_caches(DropCachesMode.ALL)
-
-    def reclassify_files():
-        TestRun.LOGGER.info("Reading files belonging to different IO classes "
-                            "(classification by reads).")
-        for file in test_files:
-            ioclass_id = size_to_class[file.size]
-            occupancy_before = cache.get_io_class_statistics(
-                io_class_id=ioclass_id).usage_stats.occupancy
-            Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
-            occupancy_after = cache.get_io_class_statistics(
-                io_class_id=ioclass_id).usage_stats.occupancy
-            actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
-            expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
-            if actual_blocks != expected_blocks:
-                TestRun.fail("File not reclassified properly!\n"
-                             f"Expected {occupancy_before + file.size}\n"
-                             f"Actual {occupancy_after}")
-        sync()
-        drop_caches(DropCachesMode.ALL)
-
-    def remove_files_classification():
-        TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
-        ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
-        ioclass_config.create_ioclass_config(
-            add_default_rule=False, ioclass_config_path=ioclass_config_path
-        )
-        ioclass_config.add_ioclass(
-            ioclass_id=0,
-            eviction_priority=22,
-            allocation=False,
-            rule="unclassified",
-            ioclass_config_path=ioclass_config_path,
-        )
-        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
-        occupancy_before = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-        for file in test_files:
-            Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
-            occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
-            if occupancy_after != occupancy_before + file.size:
-                TestRun.fail("File not reclassified properly!\n"
-                             f"Expected {occupancy_before + file.size}\n"
-                             f"Actual {occupancy_after}")
-            occupancy_before = occupancy_after
-        sync()
-        drop_caches(DropCachesMode.ALL)
-
-    def restore_classification_config():
-        TestRun.LOGGER.info("Restoring IO class configuration")
-        ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
-        ioclass_config.create_ioclass_config(
-            add_default_rule=False, ioclass_config_path=ioclass_config_path
-        )
-        ioclass_config.add_ioclass(
-            ioclass_id=0,
-            eviction_priority=22,
-            allocation=False,
-            rule="unclassified",
-            ioclass_config_path=ioclass_config_path,
-        )
-        load_file_size_io_classes()
-
-    cache, core = prepare()
-    base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
-    size_to_class = {
-        base_size: 1,
-        base_size - Unit.Blocks4096: 2,
-        base_size + Unit.Blocks4096: 3,
-        base_size / 2: 4,
-        base_size / 2 - Unit.Blocks4096: 4,
-        base_size / 2 + Unit.Blocks4096: 2,
-        base_size * 2: 5,
-        base_size * 2 - Unit.Blocks4096: 3,
-        base_size * 2 + Unit.Blocks4096: 5,
-    }
-
-    load_file_size_io_classes()
-
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
-    core.create_filesystem(filesystem)
-    core.mount(mountpoint)
-    sync()
-
-    test_files = []
-    create_files_and_check_classification()
-
-    remove_files_classification()
-
-    restore_classification_config()
-
-    # CAS device should be unmounted and mounted because data can be sometimes still cached by
-    # OS cache so occupancy statistics will not match
-    core.unmount()
-    core.mount(mountpoint)
-    reclassify_files()
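Throughout this PR the nested helper functions and TestRun.LOGGER.info calls give way to TestRun.step blocks. Assuming step() behaves as a context manager that logs and groups each phase (the framework internals are not part of this diff), the resulting pattern looks like:

    # Minimal sketch of the step-based structure used by the rewritten tests:
    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()  # failures here are reported against this step

    with TestRun.step("Run IO and verify statistics."):
        occupancy_before = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
        Dd().input("/dev/zero").output(core.system_path) \
            .block_size(Size(1, Unit.Blocks4096)).count(1).run()
        occupancy_after = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
        if occupancy_after == occupancy_before:
            TestRun.fail("IO was not cached!")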
@@ -7,23 +7,34 @@ import time
 
 import pytest
 
+from api.cas import ioclass_config, casadm
+from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
 from test_utils.os_utils import sync, Udev
-from .io_class_common import *
+from test_utils.size import Size, Unit
+from tests.io_class.io_class_common import prepare, ioclass_config_path
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_process_name():
-    """Check if data generated by process with particular name is cached"""
-    cache, core = prepare()
+    """
+        title: Test IO classification by process name.
+        description: Check if data generated by process with particular name is cached.
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on process generating IO name.
+    """
     ioclass_id = 1
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 1
     iterations = 100
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+
+    with TestRun.step("Create and load IO class config file."):
     ioclass_config.add_ioclass(
         ioclass_id=ioclass_id,
         eviction_priority=1,
@@ -33,11 +44,11 @@ def test_ioclass_process_name():
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
+    with TestRun.step("Flush cache and disable udev."):
     cache.flush_cache()
 
     Udev.disable()
 
-    TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.")
+    with TestRun.step("Check if all data generated by dd process is cached."):
     for i in range(iterations):
         dd = (
             Dd()
@@ -58,15 +69,23 @@ def test_ioclass_process_name():
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_pid():
-    cache, core = prepare()
+    """
+        title: Test IO classification by process id.
+        description: Check if data generated by process with particular id is cached.
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on process generating IO id.
+    """
     ioclass_id = 1
     iterations = 20
     dd_count = 100
     dd_size = Size(4, Unit.KibiByte)
+
+    with TestRun.step("Prepare cache, core and disable udev."):
+        cache, core = prepare()
     Udev.disable()
 
+    with TestRun.step("Prepare dd command."):
     # Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
     # 'dd' command is created and is appended to 'echo' command instead of running it
     dd_command = str(
@@ -77,9 +96,11 @@ def test_ioclass_pid():
         .block_size(dd_size)
     )
 
-    for i in range(iterations):
+    for _ in TestRun.iteration(range(iterations)):
+        with TestRun.step("Flush cache."):
         cache.flush_cache()
 
+        with TestRun.step("Prepare and load IO class config."):
         output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
         if output.exit_code != 0:
             raise Exception(
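The echo-and-dd chaining exists because pid assignment is only predictable for the very next process spawned in the namespace. A sketch of the mechanism each iteration relies on; the rule string is assumed, since the config lines sit in the elided part of the hunk:

        # ns_last_pid holds the last pid handed out, so the next process
        # started after "echo {pid - 1}" receives exactly {pid}.
        pid = int(TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid").stdout) + 1
        # assumed shape of the per-iteration rule targeting that one process:
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"pid:eq:{pid}&done",
            ioclass_config_path=ioclass_config_path,
        )
        TestRun.executor.run(f"echo {pid - 1} > /proc/sys/kernel/ns_last_pid && {dd_command}")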
@@ -98,7 +119,7 @@ def test_ioclass_pid():
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-        TestRun.LOGGER.info(f"Running dd with pid {pid}")
+        with TestRun.step(f"Run dd with pid {pid}."):
         # pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
         dd_and_pid_command = (
             f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
@@ -110,6 +131,7 @@ def test_ioclass_pid():
             f"stdout: {output.stdout} \n stderr :{output.stderr}"
         )
         sync()
+        with TestRun.step("Check if data was cached properly."):
         dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
         if dirty.get_value(Unit.Blocks4096) != dd_count:
             TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
@@ -8,7 +8,9 @@ from itertools import permutations
 
 import pytest
 
+from api.cas import ioclass_config, casadm
 from api.cas.ioclass_config import IoClass
+from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.dd import Dd
@@ -17,15 +19,23 @@ from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 from test_utils.filesystem.file import File
 from test_utils.os_utils import sync, Udev
-from .io_class_common import *
+from test_utils.size import Size, Unit
+from tests.io_class.io_class_common import prepare, ioclass_config_path, mountpoint
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_lba():
-    """Write data to random lba and check if it is cached according to range
-    defined in ioclass rule"""
-    cache, core = prepare()
+    """
+        title: Test IO classification by lba.
+        description: |
+          Write data to random lba and check if it is cached according to range
+          defined in ioclass rule
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on lba range defined in config.
+    """
 
     ioclass_id = 1
     min_cached_lba = 56
     max_cached_lba = 200
@@ -33,7 +43,10 @@ def test_ioclass_lba():
     dd_size = Size(1, Unit.Blocks512)
     dd_count = 1
 
-    # Prepare ioclass config
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+
+    with TestRun.step("Prepare and load IO class config."):
     ioclass_config.add_ioclass(
         ioclass_id=ioclass_id,
         eviction_priority=1,
@@ -41,20 +54,18 @@ def test_ioclass_lba():
         rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
         ioclass_config_path=ioclass_config_path,
     )
 
-    # Prepare cache for test
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
+    with TestRun.step("Flush cache."):
     cache.flush_cache()
 
-    # Check if lbas from defined range are cached
+    with TestRun.step("Run IO and check if lbas from defined range are cached."):
     dirty_count = 0
     # '8' step is set to prevent writing cache line more than once
     TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
     for lba in range(min_cached_lba, max_cached_lba, 8):
         dd = (
-            Dd()
-            .input("/dev/zero")
+            Dd().input("/dev/zero")
             .output(f"{core.system_path}")
             .count(dd_count)
             .block_size(dd_size)
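The '8' in the loop step follows directly from the geometry: dd writes one 512-byte sector while the dirty statistic counts 4 KiB cache lines, so eight consecutive LBAs share a line. The arithmetic, using the values from the test itself:

    # Why the lba loop steps by 8 (assuming the default 4 KiB cache line):
    sector_size = Size(1, Unit.Blocks512)      # what each dd writes
    cache_line = Size(1, Unit.Blocks4096)      # unit of the dirty statistic
    sectors_per_line = int(cache_line.get_value() / sector_size.get_value())  # = 8
    # stepping by 8 touches every cache line at most once, so each write
    # should bump the dirty counter by exactly one 4 KiB block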
@@ -68,17 +79,17 @@ def test_ioclass_lba():
         if dirty.get_value(Unit.Blocks4096) != dirty_count:
             TestRun.LOGGER.error(f"LBA {lba} not cached")
 
+    with TestRun.step("Flush cache."):
     cache.flush_cache()
 
-    # Check if lba outside of defined range are not cached
+    with TestRun.step("Run IO and check if lba outside of defined range are not cached."):
     TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.")
     for i in range(iterations):
         rand_lba = random.randrange(2000)
         if min_cached_lba <= rand_lba <= max_cached_lba:
             continue
         dd = (
-            Dd()
-            .input("/dev/zero")
+            Dd().input("/dev/zero")
             .output(f"{core.system_path}")
             .count(dd_count)
             .block_size(dd_size)
@@ -95,11 +106,21 @@ def test_ioclass_lba():
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_request_size():
-    cache, core = prepare()
+    """
+        title: Test IO classification by request size.
+        description: Check if requests with size within defined range are cached.
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly based on request size range defined in config.
+    """
 
     ioclass_id = 1
     iterations = 100
+
+    with TestRun.step("Prepare cache and core."):
+        cache, core = prepare()
+
+    with TestRun.step("Create and load IO class config."):
     ioclass_config.add_ioclass(
         ioclass_id=ioclass_id,
         eviction_priority=1,
@@ -109,19 +130,16 @@ def test_ioclass_request_size():
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
+    with TestRun.step("Disable udev."):
     Udev.disable()
 
-    # Check if requests with appropriate size are cached
-    TestRun.LOGGER.info(
-        f"Check if requests with size within defined range are cached"
-    )
+    with TestRun.step("Check if requests with size within defined range are cached."):
     cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
     for i in range(iterations):
         cache.flush_cache()
         req_size = random.choice(cached_req_sizes)
         dd = (
-            Dd()
-            .input("/dev/zero")
+            Dd().input("/dev/zero")
            .output(core.system_path)
             .count(1)
             .block_size(req_size)
@@ -132,12 +150,10 @@ def test_ioclass_request_size():
         if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
             TestRun.fail("Incorrect number of dirty blocks!")
 
+    with TestRun.step("Flush cache."):
     cache.flush_cache()
 
-    # Check if requests with inappropriate size are not cached
-    TestRun.LOGGER.info(
-        f"Check if requests with size outside defined range are not cached"
-    )
+    with TestRun.step("Check if requests with size outside of defined range are not cached"):
     not_cached_req_sizes = [
         Size(1, Unit.Blocks4096),
         Size(8, Unit.Blocks4096),
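The chosen sizes line up with a request_size rule spanning two to four 4 KiB blocks; the rule line itself sits in the elided part of the config hunk, so the bounds below are an assumption inferred from the cached/not-cached lists:

    # Presumed window behind the size lists (rule line not visible here):
    lower_bound = Size(2, Unit.Blocks4096)   # 8 KiB
    upper_bound = Size(4, Unit.Blocks4096)   # 16 KiB
    for req in [Size(1, Unit.Blocks4096), Size(8, Unit.Blocks4096)]:
        outside = (req.get_value() < lower_bound.get_value()
                   or req.get_value() > upper_bound.get_value())
        assert outside  # 4 KiB and 32 KiB requests must miss the window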
@@ -146,8 +162,7 @@ def test_ioclass_request_size():
     for i in range(iterations):
         req_size = random.choice(not_cached_req_sizes)
         dd = (
-            Dd()
-            .input("/dev/zero")
+            Dd().input("/dev/zero")
             .output(core.system_path)
             .count(1)
             .block_size(req_size)
@@ -165,17 +180,23 @@ def test_ioclass_request_size():
 @pytest.mark.parametrizex("filesystem", list(Filesystem) + [False])
 def test_ioclass_direct(filesystem):
     """
-    Perform buffered/direct IO to/from files or raw block device.
-    Data from buffered IO should be cached.
-    Data from buffered IO should not be cached and if performed to/from already cached data
+        title: Direct IO classification.
+        description: Check if direct requests are properly cached.
+        pass_criteria:
+          - No kernel bug.
+          - Data from direct IO should be cached.
+          - Data from buffered IO should not be cached and if performed to/from already cached data
     should cause reclassification to unclassified IO class.
     """
-    cache, core = prepare()
-    Udev.disable()
 
     ioclass_id = 1
     io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)
+
+    with TestRun.step("Prepare cache and core. Disable udev."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config."):
     # direct IO class
     ioclass_config.add_ioclass(
         ioclass_id=ioclass_id,
@@ -186,15 +207,14 @@ def test_ioclass_direct(filesystem):
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    fio = (
-        Fio().create_command()
-        .io_engine(IoEngine.libaio)
-        .size(io_size)
-        .offset(io_size)
-        .read_write(ReadWrite.write)
+    with TestRun.step("Prepare fio command."):
+        fio = Fio().create_command() \
+            .io_engine(IoEngine.libaio) \
+            .size(io_size).offset(io_size) \
+            .read_write(ReadWrite.write) \
         .target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
-    )
 
+    with TestRun.step("Prepare filesystem."):
     if filesystem:
         TestRun.LOGGER.info(
             f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
@@ -204,41 +224,48 @@ def test_ioclass_direct(filesystem):
         core.mount(mountpoint)
         sync()
     else:
-        TestRun.LOGGER.info("Testing on raw exported object")
+        TestRun.LOGGER.info("Testing on raw exported object.")
 
+    with TestRun.step(f"Run buffered writes to {'file' if filesystem else 'device'}"):
     base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
 
-    TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}")
     fio.run()
     sync()
 
+    with TestRun.step("Check if buffered writes are not cached."):
     new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
     if new_occupancy != base_occupancy:
         TestRun.fail("Buffered writes were cached!\n"
                      f"Expected: {base_occupancy}, actual: {new_occupancy}")
 
-    TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
+    with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"):
     fio.direct()
     fio.run()
     sync()
 
+    with TestRun.step("Check if direct writes are cached."):
     new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
     if new_occupancy != base_occupancy + io_size:
         TestRun.fail("Wrong number of direct writes was cached!\n"
                      f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
 
-    TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
+    with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"):
     fio.remove_param("readwrite").remove_param("direct")
     fio.read_write(ReadWrite.read)
     fio.run()
     sync()
 
+    with TestRun.step("Check if buffered reads caused reclassification."):
     new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
     if new_occupancy != base_occupancy:
         TestRun.fail("Buffered reads did not cause reclassification!"
                      f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")
 
-    TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
+    with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"):
     fio.direct()
     fio.run()
     sync()
 
+    with TestRun.step("Check if direct reads are cached."):
     new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
     if new_occupancy != base_occupancy + io_size:
         TestRun.fail("Wrong number of direct reads was cached!\n"
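One fio command object is mutated between phases: .direct() adds the direct flag and remove_param strips it again, so all four runs reuse a single definition. A compressed view of the sequence the occupancy checks assume, using only the methods that appear above:

    # Phase sequence and expected occupancy deltas (sketch):
    fio.run()                                          # buffered write -> unchanged
    fio.direct(); fio.run()                            # direct write   -> +io_size
    fio.remove_param("readwrite").remove_param("direct")
    fio.read_write(ReadWrite.read); fio.run()          # buffered read  -> back to base
    fio.direct(); fio.run()                            # direct read    -> +io_size again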
@@ -251,13 +278,22 @@ def test_ioclass_direct(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_metadata(filesystem):
     """
-    Perform operations on files that cause metadata update.
-    Determine if every such operation results in increased writes to cached metadata.
-    Exact values may not be tested as each file system has different metadata structure.
+        title: Metadata IO classification.
+        description: |
+          Determine if every operation on files that cause metadata update results in increased
+          writes to cached metadata.
+        pass_criteria:
+          - No kernel bug.
+          - Metadata is classified properly.
     """
+    # Exact values may not be tested as each file system has different metadata structure.
+    test_dir_path = f"{mountpoint}/test_dir"
+
+    with TestRun.step("Prepare cache and core. Disable udev."):
     cache, core = prepare()
     Udev.disable()
 
+    with TestRun.step("Prepare and load IO class config file."):
     ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
     # metadata IO class
     ioclass_config.add_ioclass(
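The rule loaded for this class sits in the elided lines of the hunk; for filesystem metadata it is presumably the classifier's metadata condition. A sketch of the assumed shape:

        # Assumed shape of the elided metadata rule:
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="metadata&done",
            ioclass_config_path=ioclass_config_path,
        )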
@@ -269,21 +305,20 @@ def test_ioclass_metadata(filesystem):
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
+    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
+                      f"at {mountpoint}."):
     core.create_filesystem(filesystem)
     core.mount(mountpoint)
     sync()
 
+    with TestRun.step("Create 20 test files."):
     requests_to_metadata_before = cache.get_io_class_statistics(
         io_class_id=ioclass_id).request_stats.write
-    TestRun.LOGGER.info("Creating 20 test files")
     files = []
     for i in range(1, 21):
         file_path = f"{mountpoint}/test_file_{i}"
         dd = (
-            Dd()
-            .input("/dev/urandom")
+            Dd().input("/dev/urandom")
             .output(file_path)
             .count(random.randint(5, 50))
             .block_size(Size(1, Unit.MebiByte))
@@ -292,27 +327,26 @@ def test_ioclass_metadata(filesystem):
         dd.run()
         files.append(File(file_path))
 
-    TestRun.LOGGER.info("Checking requests to metadata")
+    with TestRun.step("Check requests to metadata."):
     requests_to_metadata_after = cache.get_io_class_statistics(
         io_class_id=ioclass_id).request_stats.write
     if requests_to_metadata_after == requests_to_metadata_before:
         TestRun.fail("No requests to metadata while creating files!")
 
+    with TestRun.step("Rename all test files."):
     requests_to_metadata_before = requests_to_metadata_after
-    TestRun.LOGGER.info("Renaming all test files")
     for file in files:
         file.move(f"{file.full_path}_renamed")
     sync()
 
-    TestRun.LOGGER.info("Checking requests to metadata")
+    with TestRun.step("Check requests to metadata."):
     requests_to_metadata_after = cache.get_io_class_statistics(
         io_class_id=ioclass_id).request_stats.write
     if requests_to_metadata_after == requests_to_metadata_before:
         TestRun.fail("No requests to metadata while renaming files!")
 
+    with TestRun.step(f"Create directory {test_dir_path}."):
     requests_to_metadata_before = requests_to_metadata_after
-    test_dir_path = f"{mountpoint}/test_dir"
-    TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
     fs_utils.create_directory(path=test_dir_path)
 
     TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
@@ -320,16 +354,16 @@ def test_ioclass_metadata(filesystem):
         file.move(test_dir_path)
     sync()
 
-    TestRun.LOGGER.info("Checking requests to metadata")
+    with TestRun.step("Check requests to metadata."):
     requests_to_metadata_after = cache.get_io_class_statistics(
         io_class_id=ioclass_id).request_stats.write
     if requests_to_metadata_after == requests_to_metadata_before:
         TestRun.fail("No requests to metadata while moving files!")
 
-    TestRun.LOGGER.info(f"Removing {test_dir_path}")
+    with TestRun.step(f"Remove {test_dir_path}."):
     fs_utils.remove(path=test_dir_path, force=True, recursive=True)
 
-    TestRun.LOGGER.info("Checking requests to metadata")
+    with TestRun.step("Check requests to metadata."):
     requests_to_metadata_after = cache.get_io_class_statistics(
         io_class_id=ioclass_id).request_stats.write
     if requests_to_metadata_after == requests_to_metadata_before:
@@ -342,16 +376,23 @@ def test_ioclass_metadata(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_id_as_condition(filesystem):
     """
+        title: IO class as a condition.
+        description: |
         Load config in which IO class ids are used as conditions in other IO class definitions.
-    Check if performed IO is properly classified.
+        pass_criteria:
+          - No kernel bug.
+          - IO is classified properly as described in IO class config.
     """
-    cache, core = prepare()
-    Udev.disable()
 
     base_dir_path = f"{mountpoint}/base_dir"
     ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
     ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
+
+    with TestRun.step("Prepare cache and core. Disable udev."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config file."):
     # directory condition
     ioclass_config.add_ioclass(
         ioclass_id=1,
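Only the first of the class definitions is visible in this hunk; the chained ones presumably reuse an io_class condition, which matches IO already classified under another id. A hypothetical shape of the chain, inferred from the assertions further down (ids and rules for classes 2-6 are assumptions, not shown in the diff):

    # Hypothetical rule chain consistent with the expected classifications:
    #   class 1: directory:{base_dir_path}               (visible above)
    #   class 2: file_size:eq:{ioclass_file_size_bytes}  (assumed)
    #   class 4: io_class:1          -> IO matching class 1 only
    #   class 5: io_class:2          -> IO matching class 2 (also wins when both match)
    #   class 6: io_class:2&direct   -> the same IO issued with O_DIRECT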
@@ -402,14 +443,15 @@ def test_ioclass_id_as_condition(filesystem):
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mount {core.system_path} at {mountpoint}."):
     core.create_filesystem(filesystem)
     core.mount(mountpoint)
     fs_utils.create_directory(base_dir_path)
     sync()
 
-    # IO fulfilling IO class 1 condition (and not IO class 2)
+    with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
+                      "it is classified properly."):
     # Should be classified as IO class 4
     base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
     non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
@@ -424,9 +466,11 @@ def test_ioclass_id_as_condition(filesystem):
 
     if new_occupancy != base_occupancy + non_ioclass_file_size:
         TestRun.fail("Writes were not properly cached!\n"
-                     f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}")
+                     f"Expected: {base_occupancy + non_ioclass_file_size}, "
+                     f"actual: {new_occupancy}")
 
-    # IO fulfilling IO class 2 condition (and not IO class 1)
+    with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
+                      "it is classified properly."):
     # Should be classified as IO class 5
     base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
     (Fio().create_command()
@@ -442,7 +486,8 @@ def test_ioclass_id_as_condition(filesystem):
         TestRun.fail("Writes were not properly cached!\n"
                      f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
 
-    # IO fulfilling IO class 1 and 2 conditions
+    with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if "
+                      "it is classified properly."):
     # Should be classified as IO class 5
     base_occupancy = new_occupancy
     (Fio().create_command()
@@ -458,7 +503,8 @@ def test_ioclass_id_as_condition(filesystem):
         TestRun.fail("Writes were not properly cached!\n"
                      f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
 
-    # Same IO but direct
+    with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if "
+                      "it is classified properly."):
     # Should be classified as IO class 6
     base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
     (Fio().create_command()
@@ -482,12 +528,19 @@ def test_ioclass_id_as_condition(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_conditions_or(filesystem):
     """
+        title: IO class condition 'or'.
+        description: |
         Load config with IO class combining 5 contradicting conditions connected by OR operator.
-    Check if every IO fulfilling one condition is classified properly.
+        pass_criteria:
+          - No kernel bug.
+          - Every IO fulfilling one condition is classified properly.
     """
+
+    with TestRun.step("Prepare cache and core. Disable udev."):
     cache, core = prepare()
     Udev.disable()
 
+    with TestRun.step("Create and load IO class config file."):
     # directories OR condition
     ioclass_config.add_ioclass(
         ioclass_id=1,
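The OR rule itself sits in the elided config lines; presumably the five directory conditions are joined with the classifier's '|' operator, matching the five directories created below. A sketch of the assumed shape:

        # Presumed shape of the elided OR rule:
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|"
                 f"directory:{mountpoint}/dir3|directory:{mountpoint}/dir4|"
                 f"directory:{mountpoint}/dir5",
            ioclass_config_path=ioclass_config_path,
        )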
@@ -499,15 +552,15 @@ def test_ioclass_conditions_or(filesystem):
     )
     casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
-                        f"and mounting {core.system_path} at {mountpoint}")
+    with TestRun.step(f"Prepare {filesystem.name} filesystem "
+                      f"and mount {core.system_path} at {mountpoint}."):
     core.create_filesystem(filesystem)
     core.mount(mountpoint)
     for i in range(1, 6):
         fs_utils.create_directory(f"{mountpoint}/dir{i}")
     sync()
 
-    # Perform IO fulfilling each condition and check if occupancy raises
+    with TestRun.step("Perform IO fulfilling each condition and check if occupancy raises."):
     for i in range(1, 6):
         file_size = Size(random.randint(25, 50), Unit.MebiByte)
         base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
@@ -531,15 +584,23 @@ def test_ioclass_conditions_or(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_conditions_and(filesystem):
     """
-    Load config with IO class combining 5 conditions contradicting at least one other condition
-    connected by AND operator.
-    Check if every IO fulfilling one of the conditions is not classified.
+        title: IO class condition 'and'.
+        description: |
+          Load config with IO class combining 5 conditions contradicting
+          at least one other condition.
+        pass_criteria:
+          - No kernel bug.
+          - Every IO fulfilling one of the conditions is not classified.
     """
-    cache, core = prepare()
-    Udev.disable()
     file_size = Size(random.randint(25, 50), Unit.MebiByte)
     file_size_bytes = int(file_size.get_value(Unit.Byte))
+
+    with TestRun.step("Prepare cache and core. Disable udev."):
+        cache, core = prepare()
+        Udev.disable()
+
+    with TestRun.step("Create and load IO class config file."):
     # directories OR condition
     ioclass_config.add_ioclass(
         ioclass_id=1,
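Here the elided rule presumably chains its conditions with '&', with at least one pair mutually exclusive, so no single IO can ever satisfy the whole rule; for instance a size required to be simultaneously above and below the same bound. A hypothetical sketch (the real rule is not visible in this hunk):

        # Hypothetical contradicting AND rule for illustration only:
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}"
                 f"&directory:{mountpoint}/dir1&done",
            ioclass_config_path=ioclass_config_path,
        )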