Refactor IO class tests

Signed-off-by: Klaudia Jablonska <klaudia.jablonska@intel.com>
Klaudia Jablonska 2022-08-29 16:04:22 +02:00
parent 2da9753a10
commit 1cf2af7ed4
17 changed files with 1100 additions and 883 deletions


@ -1,5 +1,5 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -20,7 +20,6 @@ from test_utils.os_utils import Udev, sync
from test_utils.os_utils import drop_caches, DropCachesMode
from test_utils.size import Size, Unit
ioclass_config_path = "/etc/opencas/ioclass.conf"
template_config_path = "/etc/opencas/ioclass-config.csv"
mountpoint = "/tmp/cas1-1"
@ -31,9 +30,9 @@ def prepare(
core_size=Size(40, Unit.GibiByte),
cache_mode=CacheMode.WB,
cache_line_size=CacheLineSize.LINE_4KiB,
default_allocation="0.00"
default_allocation="0.00",
):
ioclass_config.remove_ioclass_config()
ioclass_config.remove_ioclass_config(ioclass_config_path)
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
@ -60,6 +59,7 @@ def prepare(
)
# To make test more precise all workload except of tested ioclass should be
# put in pass-through mode
# Avoid caching anything else than files with specified prefix
ioclass_config.add_ioclass(
ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID,
eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
@ -99,9 +99,9 @@ def generate_and_load_random_io_class_config(cache):
def compare_io_classes_list(expected, actual):
if not IoClass.compare_ioclass_lists(expected, actual):
TestRun.LOGGER.error("IO classes configuration is not as expected.")
expected = '\n'.join(str(i) for i in expected)
expected = "\n".join(str(i) for i in expected)
TestRun.LOGGER.error(f"Expected IO classes:\n{expected}")
actual = '\n'.join(str(i) for i in actual)
actual = "\n".join(str(i) for i in actual)
TestRun.LOGGER.error(f"Actual IO classes:\n{actual}")
@ -131,14 +131,14 @@ def run_io_dir_read(path):
drop_caches(DropCachesMode.ALL)
def run_fio_count(core, blocksize, num_ios):
def run_fio_count(core, block_size, num_ios):
(
Fio()
.create_command()
.target(core)
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.randread)
.block_size(blocksize)
.block_size(block_size)
.direct()
.file_size(Size(10, Unit.GibiByte))
.num_ios(num_ios)


@ -1,5 +1,5 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -20,8 +20,7 @@ ioclass_config_path = "/tmp/opencas_ioclass.conf"
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
def test_ioclass_export_configuration(cache_mode):
def test_io_class_export_configuration():
"""
title: Export IO class configuration to a file
description: |
@ -30,6 +29,8 @@ def test_ioclass_export_configuration(cache_mode):
- CAS default IO class configuration contains unclassified class only
- CAS properly imports previously exported configuration
"""
cache_mode = CacheMode.WB
with TestRun.LOGGER.step(f"Test prepare"):
cache, core = prepare(cache_mode)
saved_config_path = "/tmp/opencas_saved.conf"
@ -38,28 +39,37 @@ def test_ioclass_export_configuration(cache_mode):
with TestRun.LOGGER.step(f"Check IO class configuration (should contain only default class)"):
csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), default_list):
TestRun.LOGGER.error("Default configuration does not match expected\n"
TestRun.LOGGER.error(
"Default configuration does not match expected\n"
f"Current:\n{csv}\n"
f"Expected:{IoClass.list_to_csv(default_list)}")
f"Expected:{IoClass.list_to_csv(default_list)}"
)
with TestRun.LOGGER.step("Create and load configuration file for 33 IO classes "
"with random names, allocation and priority values"):
with TestRun.LOGGER.step(
"Create and load configuration file for 33 IO classes "
"with random names, allocation and priority values"
):
random_list = IoClass.generate_random_ioclass_list(33)
IoClass.save_list_to_config_file(random_list, ioclass_config_path=ioclass_config_path)
casadm.load_io_classes(cache.cache_id, ioclass_config_path)
with TestRun.LOGGER.step("Display and export IO class configuration - displayed configuration "
"should be the same as created"):
with TestRun.LOGGER.step(
"Display and export IO class configuration - displayed configuration "
"should be the same as created"
):
TestRun.executor.run(
f"{casadm.list_io_classes_cmd(str(cache.cache_id), OutputFormat.csv.name)}"
f" > {saved_config_path}")
f" > {saved_config_path}"
)
csv = fs_utils.read_file(saved_config_path)
if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), random_list):
TestRun.LOGGER.error("Exported configuration does not match expected\n"
TestRun.LOGGER.error(
"Exported configuration does not match expected\n"
f"Current:\n{csv}\n"
f"Expected:{IoClass.list_to_csv(random_list)}")
f"Expected:{IoClass.list_to_csv(random_list)}"
)
with TestRun.LOGGER.step("Stop Intel CAS"):
with TestRun.LOGGER.step("Stop Open CAS"):
casadm.stop_cache(cache.cache_id)
with TestRun.LOGGER.step("Start cache and add core"):
@ -69,9 +79,11 @@ def test_ioclass_export_configuration(cache_mode):
with TestRun.LOGGER.step("Check IO class configuration (should contain only default class)"):
csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), default_list):
TestRun.LOGGER.error("Default configuration does not match expected\n"
TestRun.LOGGER.error(
"Default configuration does not match expected\n"
f"Current:\n{csv}\n"
f"Expected:{IoClass.list_to_csv(default_list)}")
f"Expected:{IoClass.list_to_csv(default_list)}"
)
with TestRun.LOGGER.step("Load exported configuration file for 33 IO classes"):
casadm.load_io_classes(cache.cache_id, saved_config_path)
@ -79,18 +91,20 @@ def test_ioclass_export_configuration(cache_mode):
with TestRun.LOGGER.step("Display IO class configuration - should be the same as created"):
csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), random_list):
TestRun.LOGGER.error("Exported configuration does not match expected\n"
TestRun.LOGGER.error(
"Exported configuration does not match expected\n"
f"Current:\n{csv}\n"
f"Expected:{IoClass.list_to_csv(random_list)}")
f"Expected:{IoClass.list_to_csv(random_list)}"
)
with TestRun.LOGGER.step(f"Test cleanup"):
fs_utils.remove(saved_config_path)
def prepare(cache_mode: CacheMode = None):
ioclass_config.remove_ioclass_config()
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
ioclass_config.remove_ioclass_config(ioclass_config_path)
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(150, Unit.MebiByte)])
core_device.create_partitions([Size(300, Unit.MebiByte)])


@ -1,19 +1,17 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from test_tools.disk_utils import Filesystem
from api.cas import ioclass_config, casadm
from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from core.test_run import TestRun
dd_bs = Size(1, Unit.Blocks4096)
dd_count = 1230
@ -23,8 +21,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached"
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", [fs for fs in Filesystem] + [None])
def test_ioclass_core_id(filesystem):
def test_ioclass_core_id():
"""
title: Test for `core_id` classification rule
description: |
@ -34,12 +31,11 @@ def test_ioclass_core_id(filesystem):
- IO to core with enabled selective allocation is cached
- IO to core with disabled selective allocation is not cached
"""
fs_info = f"with {filesystem}" if filesystem else ""
with TestRun.step(
f"Start cache with two cores on created partitions {fs_info}, "
"with NOP, disabled seq cutoff"
f"Start cache with two cores on created partitions with NOP, disabled seq cutoff"
):
cache, cores = prepare(filesystem, 2)
cache, cores = prepare(2)
core_1, core_2 = cores[0], cores[1]
with TestRun.step(f"Add core_id based classification rules"):
@ -66,11 +62,6 @@ def test_ioclass_core_id(filesystem):
cache_id=cache.cache_id, file=ioclass_config.default_config_file_path
)
if filesystem:
with TestRun.step(f"Mount cores"):
core_1.mount(cached_mountpoint)
core_2.mount(not_cached_mountpoint)
with TestRun.step(f"Reset counters"):
sync()
drop_caches()
@ -78,21 +69,18 @@ def test_ioclass_core_id(filesystem):
cache.reset_counters()
with TestRun.step(f"Trigger IO to both cores"):
if filesystem:
dd_dst_paths = [cached_mountpoint + "/test_file", not_cached_mountpoint + "/test_file"]
else:
dd_dst_paths = [core_1.path, core_2.path]
for path in dd_dst_paths:
dd = (
(
Dd()
.input("/dev/zero")
.output(path)
.count(dd_count)
.block_size(dd_bs)
.oflag("sync")
.run()
)
dd.run()
sync()
drop_caches()
@ -105,7 +93,7 @@ def test_ioclass_core_id(filesystem):
if core_1_occupancy < dd_size:
TestRun.LOGGER.error(
f"First core's occupancy is {core_1_occupancy} "
f"- it is less than {dd_size} - triggerd IO size!"
f"- it is less than {dd_size} - triggered IO size!"
)
if core_2_occupancy.get_value() != 0:
@ -122,7 +110,7 @@ def test_ioclass_core_id(filesystem):
if cached_ioclass_occupancy < dd_size:
TestRun.LOGGER.error(
f"Cached ioclass occupancy is {cached_ioclass_occupancy} "
f"- it is less than {dd_size} - triggerd IO size!"
f"- it is less than {dd_size} - triggered IO size!"
)
if not_cached_ioclass_occupancy.get_value() != 0:
TestRun.LOGGER.error(
@ -138,7 +126,7 @@ def test_ioclass_core_id(filesystem):
)
def prepare(filesystem, cores_number):
def prepare(cores_number):
ioclass_config.remove_ioclass_config()
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
@ -155,14 +143,13 @@ def prepare(filesystem, cores_number):
cores = []
for part in core_device.partitions:
if filesystem:
part.create_filesystem(filesystem)
cores.append(casadm.add_core(cache, core_dev=part))
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config.default_config_file_path
add_default_rule=False,
ioclass_config_path=ioclass_config.default_config_file_path,
)
# To make test more precise all workload except of tested ioclass should be
# put in pass-through mode


@ -1,11 +1,11 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import random
from datetime import datetime
import time
from datetime import datetime
import pytest
@ -42,8 +42,9 @@ def test_ioclass_directory_depth(filesystem):
cache, core = prepare()
Udev.disable()
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem and mount {core.path} " f"at {mountpoint}."
):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
@ -61,13 +62,14 @@ def test_ioclass_directory_depth(filesystem):
# Test classification in nested dir by reading a previously unclassified file
with TestRun.step("Create the first file in the nested directory."):
test_file_1 = File(f"{nested_dir_path}/test_file_1")
dd = (
Dd().input("/dev/urandom")
(
Dd()
.input("/dev/urandom")
.output(test_file_1.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
.run()
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_1.refresh_item()
@ -86,42 +88,48 @@ def test_ioclass_directory_depth(filesystem):
with TestRun.step("Read the file in the nested directory"):
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
dd = (
Dd().input(test_file_1.full_path)
(
Dd()
.input(test_file_1.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.MebiByte))
.run()
)
dd.run()
with TestRun.step("Check occupancy after creating the file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + test_file_1.size:
TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
TestRun.LOGGER.error(
"Wrong occupancy after reading file!\n"
f"Expected: {base_occupancy + test_file_1.size}, "
f"actual: {new_occupancy}")
f"actual: {new_occupancy}"
)
# Test classification in nested dir by creating a file
with TestRun.step("Create the second file in the nested directory"):
base_occupancy = new_occupancy
test_file_2 = File(f"{nested_dir_path}/test_file_2")
dd = (
Dd().input("/dev/urandom")
(
Dd()
.input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(25600, 51200)) # 100MB to 200MB
.count(random.randint(25600, 51200)) # count from 100MB to 200MB
.block_size(Size(1, Unit.Blocks4096))
.run()
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_2.refresh_item()
with TestRun.step("Check occupancy after creating the second file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
expected_occpuancy = (base_occupancy + test_file_2.size).set_unit(Unit.Blocks4096)
expected_occupancy = (base_occupancy + test_file_2.size).set_unit(Unit.Blocks4096)
if new_occupancy != base_occupancy + test_file_2.size:
TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
f"Expected: {expected_occpuancy}, "
f"actual: {new_occupancy}")
TestRun.LOGGER.error(
"Wrong occupancy after creating file!\n"
f"Expected: {expected_occupancy}, "
f"actual: {new_occupancy}"
)
@pytest.mark.os_dependent
@ -149,11 +157,13 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Create and load IO class config file."):
ioclass_id = random.randint(2, ioclass_config.MAX_IO_CLASS_ID)
ioclass_config.add_ioclass(ioclass_id=1,
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation="1.00",
rule="metadata",
ioclass_config_path=ioclass_config_path)
ioclass_config_path=ioclass_config_path,
)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
@ -164,8 +174,9 @@ def test_ioclass_directory_file_operations(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mounting {core.path} at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem " f"and mounting {core.path} at {mountpoint}."
):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
@ -178,10 +189,18 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Create test file."):
classified_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
(
Dd()
.input("/dev/urandom")
.output(file_path)
.oflag("sync")
.block_size(Size(1, Unit.MebiByte))
.count(dd_blocks)
.run()
)
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
sync()
drop_caches(DropCachesMode.ALL)
@ -189,7 +208,8 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
with TestRun.step("Move test file out of classified directory."):
@ -202,7 +222,8 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
@ -211,15 +232,15 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Read test file."):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.iflag("sync").run())
Dd().input(test_file.full_path).output("/dev/null").iflag("sync").run()
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
@ -235,7 +256,8 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
@ -244,15 +266,21 @@ def test_ioclass_directory_file_operations(filesystem):
with TestRun.step("Read test file."):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
(
Dd()
.input(test_file.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.Blocks4096))
.run()
)
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
check_occupancy(classified_before + test_file.size, classified_after)
with TestRun.step("Check non-classified occupancy."):
@ -314,8 +342,9 @@ def test_ioclass_directory_dir_operations(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.path} at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem " f"and mount {core.path} at {mountpoint}."
):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
@ -328,14 +357,16 @@ def test_ioclass_directory_dir_operations(filesystem):
with TestRun.step("Create files with delay check."):
create_files_with_classification_delay_check(
cache, directory=dir_1, ioclass_id=ioclass_id_1)
cache, directory=dir_1, ioclass_id=ioclass_id_1
)
with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
with TestRun.step("Create files with delay check."):
create_files_with_classification_delay_check(cache, directory=dir_2,
ioclass_id=ioclass_id_2)
create_files_with_classification_delay_check(
cache, directory=dir_2, ioclass_id=ioclass_id_2
)
sync()
drop_caches(DropCachesMode.ALL)
@ -343,10 +374,13 @@ def test_ioclass_directory_dir_operations(filesystem):
dir_2.move(destination=classified_dir_path_1)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
read_files_with_reclassification_check(
cache,
target_ioclass_id=ioclass_id_1,
source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
directory=dir_2,
with_delay=False,
)
sync()
drop_caches(DropCachesMode.ALL)
@ -354,9 +388,13 @@ def test_ioclass_directory_dir_operations(filesystem):
dir_2.move(destination=mountpoint)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=True)
read_files_with_reclassification_check(
cache,
target_ioclass_id=0,
source_ioclass_id=ioclass_id_1,
directory=dir_2,
with_delay=True,
)
with TestRun.step(f"Remove {classified_dir_path_2}."):
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
@ -367,24 +405,30 @@ def test_ioclass_directory_dir_operations(filesystem):
dir_1.move(destination=classified_dir_path_2)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
read_files_with_reclassification_check(
cache,
target_ioclass_id=ioclass_id_2,
source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
directory=dir_1,
with_delay=True,
)
with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
dir_1.move(destination=non_classified_dir_path)
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
read_files_with_reclassification_check(
cache,
target_ioclass_id=0,
source_ioclass_id=ioclass_id_2,
directory=dir_1,
with_delay=True,
)
def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int):
start_time = datetime.now()
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
occupancy_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
dd_blocks = 10
dd_size = Size(dd_blocks, Unit.Blocks4096)
file_counter = 0
@ -395,10 +439,18 @@ def create_files_with_classification_delay_check(cache, directory: Directory, io
file_path = f"{directory.full_path}/test_file_{file_counter}"
file_counter += 1
time_from_start = datetime.now() - start_time
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
(
Dd()
.input("/dev/zero")
.output(file_path)
.oflag("sync")
.block_size(Size(1, Unit.Blocks4096))
.count(dd_blocks)
.run()
)
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
if occupancy_after - occupancy_before < dd_size:
unclassified_files.append(file_path)
@ -408,17 +460,31 @@ def create_files_with_classification_delay_check(cache, directory: Directory, io
if len(unclassified_files):
TestRun.LOGGER.info("Rewriting unclassified test files...")
for file_path in unclassified_files:
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
(
Dd()
.input("/dev/zero")
.output(file_path)
.oflag("sync")
.block_size(Size(1, Unit.Blocks4096))
.count(dd_blocks)
.run()
)
def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
directory: Directory, with_delay: bool):
def read_files_with_reclassification_check(
cache,
target_ioclass_id: int,
source_ioclass_id: int,
directory: Directory,
with_delay: bool,
):
start_time = datetime.now()
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
io_class_id=target_ioclass_id
).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
io_class_id=source_ioclass_id
).usage_stats.occupancy
files_to_reclassify = []
target_ioclass_is_enabled = ioclass_is_enabled(cache, target_ioclass_id)
@ -426,12 +492,13 @@ def read_files_with_reclassification_check(cache, target_ioclass_id: int, source
target_occupancy_before = target_occupancy_after
source_occupancy_before = source_occupancy_after
time_from_start = datetime.now() - start_time
dd = Dd().input(file.full_path).output("/dev/null").block_size(Size(1, Unit.Blocks4096))
dd.run()
Dd().input(file.full_path).output("/dev/null").block_size(Size(1, Unit.Blocks4096)).run()
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
io_class_id=target_ioclass_id
).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
io_class_id=source_ioclass_id
).usage_stats.occupancy
if target_ioclass_is_enabled:
if target_occupancy_after < target_occupancy_before:
@ -464,8 +531,13 @@ def read_files_with_reclassification_check(cache, target_ioclass_id: int, source
sync()
drop_caches(DropCachesMode.ALL)
for file in files_to_reclassify:
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
(
Dd()
.input(file.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.Blocks4096))
.run()
)
def check_occupancy(expected: Size, actual: Size):


@ -1,5 +1,5 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -8,21 +8,17 @@ from math import isclose
import pytest
from api.cas.cache_config import CacheMode, CacheLineSize
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_eviction_priority(cache_line_size):
def test_io_class_eviction_priority():
"""
title: Check whether eviction priorites are respected.
title: Check whether eviction priorities are respected.
description: |
Create io class for 4 different directories, each with different
eviction priority configured. Saturate 3 of them and check if the
@ -30,6 +26,8 @@ def test_ioclass_eviction_priority(cache_line_size):
pass_criteria:
- Partitions are evicted in specified order
"""
cache_line_size = CacheLineSize.LINE_64KiB
with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=CacheMode.WT, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size
@ -37,35 +35,23 @@ def test_ioclass_eviction_priority(cache_line_size):
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step(
f"Preparing filesystem and mounting {core.path} at {mountpoint}"
):
with TestRun.step(f"Preparing filesystem and mounting {core.path} at {mountpoint}"):
filesystem = Filesystem.xfs
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Prepare test dirs"):
IoclassConfig = namedtuple(
"IoclassConfig", "id eviction_prio max_occupancy dir_path"
)
IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
io_classes = [
IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
IoclassConfig(3, 5, 0.40, f"{mountpoint}/C"),
IoclassConfig(4, 1, 1.00, f"{mountpoint}/D"),
]
for io_class in io_classes:
fs_utils.create_directory(io_class.dir_path, parents=True)
with TestRun.step("Remove old ioclass config"):
ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False)
with TestRun.step("Adding default ioclasses"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
with TestRun.step("Adding io classes for all dirs"):
for io_class in io_classes:
ioclass_config.add_ioclass(
@ -73,8 +59,8 @@ def test_ioclass_eviction_priority(cache_line_size):
f"directory:{io_class.dir_path}&done",
io_class.eviction_prio,
f"{io_class.max_occupancy:0.2f}",
ioclass_config_path
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Resetting cache stats"):
@ -86,22 +72,20 @@ def test_ioclass_eviction_priority(cache_line_size):
occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0:
TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}."
f"Incorrect initial occupancy for io class id: {io_class.id}."
f" Expected 0, got: {occupancy}"
)
with TestRun.step(
f"To A, B and C directories perform IO with size of max io_class occupancy"
):
with TestRun.step(f"To A, B and C directories perform IO with size of max io_class occupancy"):
for io_class in io_classes[0:3]:
run_io_dir(
f"{io_class.dir_path}/tmp_file",
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value()),
)
with TestRun.step("Check if each io class reached it's occupancy limit"):
for io_class in io_classes[0:3]:
actuall_occupancy = get_io_class_occupancy(cache, io_class.id)
actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = (
(io_class.max_occupancy * cache_size)
@ -109,16 +93,16 @@ def test_ioclass_eviction_priority(cache_line_size):
.set_unit(Unit.Blocks4096)
)
if not isclose(actuall_occupancy.value, occupancy_limit.value, rel_tol=0.1):
if not isclose(actual_occupancy.value, occupancy_limit.value, rel_tol=0.1):
TestRun.LOGGER.error(
f"Occupancy for io class {io_class.id} does not match. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
)
if get_io_class_occupancy(cache, io_classes[3].id).value != 0:
TestRun.LOGGER.error(
f"Occupancy for io class {io_classes[3].id} should be 0. "
f"Actuall: {actuall_occupancy}"
f"Actual: {actual_occupancy}"
)
with TestRun.step(
@ -126,22 +110,14 @@ def test_ioclass_eviction_priority(cache_line_size):
"if other partitions are evicted in a good order"
):
target_io_class = io_classes[3]
io_classes_to_evict = io_classes[:3][
::-1
] # List is ordered by eviction priority
io_classes_to_evict = io_classes[:3][::-1] # List is ordered by eviction priority
io_classes_evicted = []
io_offset = 0
for io_class in io_classes_to_evict:
io_size = int((io_class.max_occupancy * cache_size) / Unit.Blocks4096)
run_io_dir(
f"{target_io_class.dir_path}/tmp_file_{io_class.id}",
io_size,
io_offset
)
io_size = int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value())
run_io_dir(f"{target_io_class.dir_path}/tmp_file_{io_class.id}", io_size, io_offset)
io_offset += io_size
part_to_evict_end_occupancy = get_io_class_occupancy(
cache, io_class.id, percent=True
)
part_to_evict_end_occupancy = get_io_class_occupancy(cache, io_class.id, percent=True)
# Since number of evicted cache lines is always >= 128, occupancy is checked
# with approximation


@ -1,5 +1,5 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -35,7 +35,8 @@ def test_ioclass_file_extension():
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
dd = (
Dd().input("/dev/zero")
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
@ -74,13 +75,14 @@ def test_ioclass_file_extension():
with TestRun.step(f"Write to file with not cached extension and check if it is not cached."):
for ext in wrong_extensions:
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
.run()
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
@ -107,25 +109,14 @@ def test_ioclass_file_name_prefix():
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
with TestRun.step("Create and load IO class config."):
ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False)
# Avoid caching anything else than files with specified prefix
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=255,
allocation="0.00",
rule=f"unclassified",
ioclass_config_path=ioclass_config_path,
)
with TestRun.step("Add io class for specific file name prefix."):
# Enables file with specified prefix to be cached
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation="1.00",
rule=f"file_name_prefix:test&done",
ioclass_config_path=ioclass_config_path,
ioclass_config_path=ioclass_config_path
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
@ -137,52 +128,62 @@ def test_ioclass_file_name_prefix():
current_occupancy = cache.get_occupancy()
if previous_occupancy.get_value() > current_occupancy.get_value():
TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower "
f"than before ({str(previous_occupancy)}).")
TestRun.fail(
f"Current occupancy ({str(current_occupancy)}) is lower "
f"than before ({str(previous_occupancy)})."
)
# Filesystem creation caused metadata IO which is not supposed
# to be cached
# Check if files with proper prefix are cached
with TestRun.step(f"Write files which are supposed to be cached and check "
f"if they are cached."):
with TestRun.step(
f"Write files which are supposed to be cached and check " f"if they are cached."
):
for f in cached_files:
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
.run()
)
dd.run()
sync()
current_occupancy = cache.get_occupancy()
expected_occupancy = previous_occupancy + (dd_size * dd_count)
if current_occupancy != expected_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
TestRun.fail(
f"Current occupancy value is not valid. "
f"(Expected: {str(expected_occupancy)}, "
f"actual: {str(current_occupancy)})")
f"actual: {str(current_occupancy)})"
)
previous_occupancy = current_occupancy
with TestRun.step("Flush cache."):
cache.flush_cache()
# Check if file with improper extension is not cached
with TestRun.step(f"Write files which are not supposed to be cached and check if "
f"they are not cached."):
with TestRun.step(
f"Write files which are not supposed to be cached and check if " f"they are not cached."
):
for f in not_cached_files:
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/{f}")
.count(dd_count)
.block_size(dd_size)
.run()
)
dd.run()
sync()
current_occupancy = cache.get_occupancy()
if current_occupancy != previous_occupancy:
TestRun.fail(f"Current occupancy value is not valid. "
TestRun.fail(
f"Current occupancy value is not valid. "
f"(Expected: {str(previous_occupancy)}, "
f"actual: {str(current_occupancy)})")
f"actual: {str(current_occupancy)})"
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -212,13 +213,14 @@ def test_ioclass_file_extension_preexisting_filesystem():
core.core_device.mount(mountpoint)
for ext in extensions:
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
.run()
)
dd.run()
core.core_device.unmount()
with TestRun.step("Create IO class config."):
@ -243,13 +245,14 @@ def test_ioclass_file_extension_preexisting_filesystem():
with TestRun.step(f"Write to file with cached extension and check if they are cached."):
for ext in extensions:
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
.run()
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
@ -272,6 +275,7 @@ def test_ioclass_file_offset():
dd_count = 1
min_cached_offset = 16384
max_cached_offset = 65536
blocks4096 = Unit.Blocks4096.get_value()
with TestRun.step("Prepare cache and core."):
cache, core = prepare()
@ -296,22 +300,21 @@ def test_ioclass_file_offset():
with TestRun.step("Write to file within cached offset range and check if it is cached."):
# Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
# nor last sector
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int(
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
min_seek = int((min_cached_offset + blocks4096) / blocks4096)
max_seek = int((max_cached_offset - min_cached_offset - blocks4096) / blocks4096)
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
.run()
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 1:
@ -319,20 +322,22 @@ def test_ioclass_file_offset():
cache.flush_cache()
with TestRun.step(
"Write to file outside of cached offset range and check if it is not cached."):
"Write to file outside of cached offset range and check if it is not cached."
):
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
max_seek = int(min_cached_offset / blocks4096)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
.run()
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
@ -375,8 +380,9 @@ def test_ioclass_file_size(filesystem):
with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem and mount {core.path} " f"at {mountpoint}."
):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
@ -385,26 +391,28 @@ def test_ioclass_file_size(filesystem):
test_files = []
for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
file_path = f"{mountpoint}/test_file_{size.get_value()}"
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
sync()
drop_caches(DropCachesMode.ALL)
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
if occupancy_after != occupancy_before + size:
TestRun.fail("File not cached properly!\n"
TestRun.fail(
"File not cached properly!\n"
f"Expected {occupancy_before + size}\n"
f"Actual {occupancy_after}")
f"Actual {occupancy_after}"
)
test_files.append(File(file_path).refresh_item())
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Move all files to 'unclassified' IO class."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.remove_ioclass_config(ioclass_config_path)
ioclass_config.create_ioclass_config(False, ioclass_config_path)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
@ -428,18 +436,18 @@ def test_ioclass_file_size(filesystem):
occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
occupancy_expected = occupancy_before + file.size
if occupancy_after != occupancy_expected:
TestRun.fail("File not reclassified properly!\n"
TestRun.fail(
"File not reclassified properly!\n"
f"Expected {occupancy_expected}\n"
f"Actual {occupancy_after}")
f"Actual {occupancy_after}"
)
occupancy_before = occupancy_after
sync()
drop_caches(DropCachesMode.ALL)
with TestRun.step("Restore IO class configuration."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.remove_ioclass_config(ioclass_config_path)
ioclass_config.create_ioclass_config(False, ioclass_config_path)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
@ -457,18 +465,22 @@ def test_ioclass_file_size(filesystem):
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
sync()
drop_caches(DropCachesMode.ALL)
occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy
io_class_id=ioclass_id
).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n"
TestRun.fail(
"File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
f"Actual {occupancy_after}"
)
sync()
drop_caches(DropCachesMode.ALL)


@ -18,26 +18,33 @@ from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
from test_utils.size import Unit, Size
from tests.io_class.io_class_common import prepare, mountpoint, run_io_dir, \
get_io_class_occupancy, run_io_dir_read, get_io_class_usage
from tests.io_class.io_class_common import (
prepare,
mountpoint,
run_io_dir,
get_io_class_occupancy,
run_io_dir_read,
get_io_class_usage,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("io_size_multiplication", [0.5, 2])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, cache_line_size):
def test_io_class_occupancy_directory_write(io_size_multiplication, cache_mode):
"""
title: Test for max occupancy set for ioclass based on directory
description: |
Create ioclass for 3 different directories, each with different
max cache occupancy configured. Run IO against each directory and see
if occupancy limit is repected.
if occupancy limit is respected.
pass_criteria:
- Max occupancy is set correctly for each ioclass
- Each ioclass does not exceed max occupancy
"""
cache_line_size = CacheLineSize.LINE_64KiB
with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size
@ -94,7 +101,8 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
)
with TestRun.step(
f"To each directory perform IO with size of {io_size_multiplication} max io_class occupancy"
f"To each directory perform IO"
f" with size of {io_size_multiplication} max io_class occupancy"
):
for io_class in io_classes:
original_occupancies = {}
@ -123,7 +131,7 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
io_count = get_io_count(i, cache_size, cache_line_size, io_size_multiplication)
if (
original_occupancies[i.id] != actual_occupancy
and io_count * Unit.Blocks4096.value < actual_occupancy.value
and io_count * Unit.Blocks4096.get_value() < actual_occupancy.value
):
TestRun.LOGGER.error(
f"Occupancy for ioclass {i.id} should not change "
@ -141,7 +149,7 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
.set_unit(Unit.Blocks4096)
)
# Divergency may be caused by rounding max occupancy
# Divergence may be caused by rounding max occupancy
if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. "
@ -152,20 +160,20 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("io_size_multiplication", [0.5, 2])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_size, cache_mode):
def test_io_class_occupancy_directory_read(io_size_multiplication):
"""
title: Test for max occupancy set for ioclass based on directory - read
description: |
Set cache mode to pass-through and create files on mounted core
device. Swtich cache to write through, and load ioclasses applaying
to different files. Read files and check if occupancy threshold is
respected.
Set cache mode to pass-through and create files on mounted core device.
Switch cache to write through, and load io classes applying to different files.
Read files and check if occupancy threshold is respected.
pass_criteria:
- Max occupancy is set correctly for each ioclass
- Each ioclass does not exceed max occupancy
"""
cache_line_size = CacheLineSize.LINE_64KiB
cache_mode = CacheMode.WB
with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size
@ -225,7 +233,7 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0:
TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}."
f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}"
)
@ -256,7 +264,7 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
io_count = get_io_count(i, cache_size, cache_line_size, io_size_multiplication)
if (
original_occupancies[i.id] != actual_occupancy
and io_count * Unit.Blocks4096.value < actual_occupancy.value
and io_count * Unit.Blocks4096.get_value() < actual_occupancy.value
):
TestRun.LOGGER.error(
f"Occupancy for ioclass {i.id} should not change "
@ -274,7 +282,7 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
.set_unit(Unit.Blocks4096)
)
# Divergency may be caused by rounding max occupancy
# Divergence may be caused by rounding max occupancy
if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. "
@ -293,7 +301,7 @@ def test_ioclass_occupancy_sum_cache():
if sum of their Usage stats is equal to cache Usage stats.
pass_criteria:
- Max occupancy is set correctly for each ioclass
- Sum of ioclassess stats is equal to cache stats
- Sum of io classes stats is equal to cache stats
"""
with TestRun.step("Prepare CAS device"):
cache, core = prepare()
@ -365,7 +373,7 @@ def test_ioclass_occupancy_sum_cache():
for io_class in io_classes:
run_io_dir(
f"{io_class.dir_path}/tmp_file",
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value()),
)
with TestRun.step("Verify stats after IO"):


@ -16,23 +16,32 @@ from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
from tests.io_class.io_class_common import prepare, mountpoint, TestRun, Unit, \
run_io_dir, get_io_class_dirty, get_io_class_usage, get_io_class_occupancy
from tests.io_class.io_class_common import (
prepare,
mountpoint,
TestRun,
Unit,
run_io_dir,
get_io_class_dirty,
get_io_class_usage,
get_io_class_occupancy,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occuppancy_load(cache_line_size):
def test_ioclass_occupancy_load():
"""
title: Load cache with occupancy limit specified
description: |
Load cache and verify if occupancy limits are loaded correctly and if
each part has assigned apropriate number of
dirty blocks.
each part has assigned appropriate number of dirty blocks.
pass_criteria:
- Occupancy thresholds have correct values for each ioclass after load
"""
cache_line_size = CacheLineSize.LINE_64KiB
blocks4096 = Unit.Blocks4096.get_value()
with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=CacheMode.WB, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size
@ -40,18 +49,14 @@ def test_ioclass_occuppancy_load(cache_line_size):
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step(
f"Prepare filesystem and mount {core.path} at {mountpoint}"
):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
filesystem = Filesystem.xfs
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Prepare test dirs"):
IoclassConfig = namedtuple(
"IoclassConfig", "id eviction_prio max_occupancy dir_path"
)
IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
io_classes = [
IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
@ -88,32 +93,28 @@ def test_ioclass_occuppancy_load(cache_line_size):
occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0:
TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}."
f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}"
)
with TestRun.step(f"Perform IO with size equal to cache size"):
for io_class in io_classes:
run_io_dir(
f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096)
)
run_io_dir(f"{io_class.dir_path}/tmp_file", int(cache_size / blocks4096))
with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
for io_class in io_classes:
actuall_dirty = get_io_class_dirty(cache, io_class.id)
actual_dirty = get_io_class_dirty(cache, io_class.id)
dirty_limit = (
(io_class.max_occupancy * cache_size)
.align_down(Unit.Blocks4096.get_value())
.align_down(blocks4096)
.set_unit(Unit.Blocks4096)
)
if not isclose(
actuall_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1
):
if not isclose(actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1):
TestRun.LOGGER.error(
f"Dirty for ioclass id: {io_class.id} doesn't match expected."
f"Expected: {dirty_limit}, actuall: {actuall_dirty}"
f"Expected: {dirty_limit}, actual: {actual_dirty}"
)
with TestRun.step("Stop cache without flushing the data"):
@ -131,20 +132,18 @@ def test_ioclass_occuppancy_load(cache_line_size):
with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
for io_class in io_classes:
actuall_dirty = get_io_class_dirty(cache, io_class.id)
actual_dirty = get_io_class_dirty(cache, io_class.id)
dirty_limit = (
(io_class.max_occupancy * cache_size)
.align_down(Unit.Blocks4096.get_value())
.align_down(blocks4096)
.set_unit(Unit.Blocks4096)
)
if not isclose(
actuall_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1
):
if not isclose(actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1):
TestRun.LOGGER.error(
f"Dirty for ioclass id: {io_class.id} doesn't match expected."
f"Expected: {dirty_limit}, actuall: {actuall_dirty}"
f"Expected: {dirty_limit}, actual: {actual_dirty}"
)
with TestRun.step("Compare ioclass configs"):
@ -172,10 +171,10 @@ def test_ioclass_occuppancy_load(cache_line_size):
with TestRun.step("Compare usage stats before and after the load"):
for io_class in io_classes:
actuall_usage_stats = get_io_class_usage(cache, io_class.id)
if original_usage_stats[io_class.id] != actuall_usage_stats:
actual_usage_stats = get_io_class_usage(cache, io_class.id)
if original_usage_stats[io_class.id] != actual_usage_stats:
TestRun.LOGGER.error(
f"Usage stats doesn't match for ioclass {io_class.id}. "
f"Original: {original_usage_stats[io_class.id]}, "
f"loaded: {actuall_usage_stats}"
f"loaded: {actual_usage_stats}"
)


@ -17,16 +17,20 @@ from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
from test_utils.size import Unit
from tests.io_class.io_class_common import prepare, mountpoint, ioclass_config_path, \
get_io_class_occupancy, run_io_dir, run_io_dir_read
from tests.io_class.io_class_common import (
prepare,
mountpoint,
ioclass_config_path,
get_io_class_occupancy,
run_io_dir,
run_io_dir_read,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("cache_mode", [CacheMode.WB, CacheMode.WT])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrize("ioclass_size_multiplicatior", [0.5, 1])
def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior):
@pytest.mark.parametrize("io_class_size_multiplication", [0.5, 1])
def test_ioclass_repart(io_class_size_multiplication):
"""
title: Check whether occupancy limit is respected during repart
description: |
@ -37,6 +41,9 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
pass_criteria:
- Partitions are evicted in specified order
"""
cache_mode = CacheMode.WB
cache_line_size = CacheLineSize.LINE_64KiB
with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size
@ -72,7 +79,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
rule="metadata",
eviction_priority=1,
allocation="1.00",
ioclass_config_path=ioclass_config_path
ioclass_config_path=ioclass_config_path,
)
with TestRun.step("Add io classes for all dirs"):
@ -81,7 +88,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
io_class.id,
f"directory:{io_class.dir_path}&done",
io_class.eviction_prio,
f"{io_class.max_occupancy*ioclass_size_multiplicatior:0.2f}",
f"{io_class.max_occupancy * io_class_size_multiplication:0.2f}",
)
casadm.load_io_classes(cache_id=cache.cache_id, file=default_config_file_path)
@ -93,7 +100,8 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
with TestRun.step(f"Create 3 files classified in default ioclass"):
for i, io_class in enumerate(io_classes[0:3]):
run_io_dir(
f"{mountpoint}/{i}", int((io_class.max_occupancy * cache_size) / Unit.Blocks4096)
f"{mountpoint}/{i}",
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value()),
)
if not isclose(
@ -108,7 +116,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0:
TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}."
f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}"
)
@ -119,7 +127,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
with TestRun.step("Check if each ioclass reached it's occupancy limit"):
for io_class in io_classes[0:3]:
actuall_occupancy = get_io_class_occupancy(cache, io_class.id)
actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = (
(io_class.max_occupancy * cache_size)
@ -127,8 +135,8 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
.set_unit(Unit.Blocks4096)
)
if not isclose(actuall_occupancy.value, occupancy_limit.value, rel_tol=0.1):
if not isclose(actual_occupancy.value, occupancy_limit.value, rel_tol=0.1):
TestRun.LOGGER.error(
f"Occupancy for ioclass {io_class.id} does not match. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
)


@ -15,7 +15,12 @@ from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
from test_utils.size import Unit
from tests.io_class.io_class_common import mountpoint, prepare, get_io_class_occupancy, run_io_dir
from tests.io_class.io_class_common import (
mountpoint,
prepare,
get_io_class_occupancy,
run_io_dir,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -60,7 +65,7 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
rule="metadata&done",
eviction_priority=1,
allocation="1.00",
ioclass_config_path=default_config_file_path
ioclass_config_path=default_config_file_path,
)
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
@ -82,15 +87,15 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0:
TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}."
f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}"
)
with TestRun.step(f"Perform IO with size equal to cache size"):
run_io_dir(f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096))
run_io_dir(f"{io_class.dir_path}/tmp_file", int(cache_size / Unit.Blocks4096))
with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
actuall_occupancy = get_io_class_occupancy(cache, io_class.id)
actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = (
(io_class.max_occupancy * cache_size)
@ -98,15 +103,16 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
.set_unit(Unit.Blocks4096)
)
# Divergency may be casued be rounding max occupancy
if actuall_occupancy > occupancy_limit * 1.01:
# Divergence may be caused by rounding max occupancy
if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
)
with TestRun.step(
f"Resize ioclass from {io_class.max_occupancy*100}% to {new_occupancy}%" " cache occupancy"
f"Resize ioclass from {io_class.max_occupancy * 100}% to {new_occupancy}%"
" cache occupancy"
):
io_class.max_occupancy = new_occupancy / 100
ioclass_config.remove_ioclass_config()
@ -119,7 +125,7 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
rule="metadata&done",
eviction_priority=1,
allocation="1.00",
ioclass_config_path=default_config_file_path
ioclass_config_path=default_config_file_path,
)
ioclass_config.add_ioclass(
io_class.id,
@ -131,10 +137,10 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
casadm.load_io_classes(cache_id=cache.cache_id, file=default_config_file_path)
with TestRun.step(f"Perform IO with size equal to cache size"):
run_io_dir(f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096))
run_io_dir(f"{io_class.dir_path}/tmp_file", int(cache_size / Unit.Blocks4096))
with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
actuall_occupancy = get_io_class_occupancy(cache, io_class.id)
actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = (
(io_class.max_occupancy * cache_size)
@ -142,9 +148,9 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
.set_unit(Unit.Blocks4096)
)
# Divergency may be casued be rounding max occupancy
if actuall_occupancy > occupancy_limit * 1.01:
# Divergence may be caused by rounding max occupancy
if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
)
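
The 1% slack above compensates for the limit being rounded down to a whole number of cache lines when the percentage is applied to the cache size. A rough standalone illustration of that arithmetic in plain integers (cache and line sizes are invented):

# Hypothetical cache geometry
cache_size_bytes = 10 * 1024 ** 3    # 10 GiB cache
cache_line_bytes = 64 * 1024         # 64 KiB cache line
max_occupancy = 0.35                 # 35% occupancy limit for the IO class

# Align the raw limit down to whole cache lines...
raw_limit = max_occupancy * cache_size_bytes
occupancy_limit = int(raw_limit // cache_line_bytes) * cache_line_bytes
# ...and allow 1% on top of it when validating the reported occupancy
allowed_maximum = occupancy_limit * 1.01

reported_occupancy = occupancy_limit + cache_line_bytes  # e.g. one extra cache line cached
print(reported_occupancy <= allowed_maximum)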

View File

@ -10,8 +10,10 @@ from api.cas.ioclass_config import IoClass
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import compare_io_classes_list, \
generate_and_load_random_io_class_config
from tests.io_class.io_class_common import (
compare_io_classes_list,
generate_and_load_random_io_class_config,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -20,16 +22,16 @@ def test_io_class_preserve_configuration():
"""
title: Preserve IO class configuration after load.
description: |
Check Open CAS ability to preserve IO class configuration after starting CAS with
load option.
Check Open CAS ability to preserve IO class configuration
after starting CAS with load option.
pass_criteria:
- No system crash
- Cache loads successfully
- IO class configuration is the same before and after reboot
"""
with TestRun.step("Prepare devices."):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(150, Unit.MebiByte)])
core_device.create_partitions([Size(300, Unit.MebiByte)])
@ -41,19 +43,24 @@ def test_io_class_preserve_configuration():
cache = casadm.start_cache(cache_device, force=True)
with TestRun.step("Display IO class configuration shall be only Unclassified IO class."):
default_io_class = [IoClass(
default_io_class = [
IoClass(
ioclass_config.DEFAULT_IO_CLASS_ID,
ioclass_config.DEFAULT_IO_CLASS_RULE,
ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
allocation="1.00")]
allocation="1.00",
)
]
actual = cache.list_io_classes()
compare_io_classes_list(default_io_class, actual)
with TestRun.step("Add core device."):
cache.add_core(core_device)
with TestRun.step("Create and load configuration file for 33 IO classes with random names, "
"allocation and priority values."):
with TestRun.step(
"Create and load configuration file for 33 IO classes with random names, "
"allocation and priority values."
):
generated_io_classes = generate_and_load_random_io_class_config(cache)
with TestRun.step("Display IO class configuration shall be the same as created."):
@ -64,7 +71,8 @@ def test_io_class_preserve_configuration():
cache.stop()
with TestRun.step(
"Load cache and check IO class configuration - shall be the same as created."):
"Load cache and check IO class configuration - shall be the same as created."
):
cache = casadm.load_cache(cache_device)
actual = cache.list_io_classes()
compare_io_classes_list(generated_io_classes, actual)
@ -73,7 +81,8 @@ def test_io_class_preserve_configuration():
TestRun.executor.reboot()
with TestRun.step(
"Load cache and check IO class configuration - shall be the same as created."):
"Load cache and check IO class configuration - shall be the same as created."
):
cache = casadm.load_cache(cache_device)
actual = cache.list_io_classes()
compare_io_classes_list(generated_io_classes, actual)

View File

@ -4,14 +4,15 @@
#
import pytest
from api.cas import ioclass_config, cli_messages
from api.cas.ioclass_config import IoClass
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_utils.output import CmdException
from test_utils.size import Unit, Size
from tests.io_class.io_class_common import prepare, ioclass_config_path
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
headerless_configuration = "1,unclassified,22,1.00"
double_io_class_configuration = "2,file_size:le:4096,1,1.00\n2,file_size:le:4096,1,1.00"
@ -28,7 +29,6 @@ illegal_io_class_configurations = {
",,1,": cli_messages.illegal_io_class_config_L2C1,
",1,,": cli_messages.illegal_io_class_config_L2C1,
"1,,,": cli_messages.illegal_io_class_config_L2C2,
# 2 parameters
",,1,1": cli_messages.illegal_io_class_config_L2C1,
",1,,1": cli_messages.illegal_io_class_config_L2C1,
@ -36,34 +36,28 @@ illegal_io_class_configurations = {
"1,,1,": cli_messages.illegal_io_class_config_L2C2,
"1,,,1": cli_messages.illegal_io_class_config_L2C2,
"1,1,,": cli_messages.illegal_io_class_config_L2C4,
# 3 parameters
",1,1,1": cli_messages.illegal_io_class_config_L2C1,
"1,,1,1": cli_messages.illegal_io_class_config_L2C2,
"1,1,1,": cli_messages.illegal_io_class_config_L2C4,
# 5 parameters
"1,1,1,1,1": cli_messages.illegal_io_class_config_L2,
# Try to configure IO class ID as: string, negative value or 33
"IllegalInput,Superblock,22,1": cli_messages.illegal_io_class_invalid_id,
"-2,Superblock,22,1": cli_messages.illegal_io_class_invalid_id_number,
"33,Superblock,22,1": cli_messages.illegal_io_class_invalid_id_number,
# Try to use semicolon, dots or new line as csv delimiters
"1;1;1;1": cli_messages.illegal_io_class_config_L2,
"1.1.1.1": cli_messages.illegal_io_class_config_L2,
"1\n1\n1\n1": cli_messages.illegal_io_class_config_L2,
# Try to configure eviction priority as: string, negative value or 256
"1,Superblock,IllegalInput,1": cli_messages.illegal_io_class_invalid_priority,
"1,Superblock,-2,1": cli_messages.illegal_io_class_invalid_priority_number,
"1,Superblock,256,1": cli_messages.illegal_io_class_invalid_priority_number,
# Try to configure allocation as: string, negative value or 2
"1,Superblock,22,IllegalInput": cli_messages.illegal_io_class_invalid_allocation,
"1,Superblock,255,-2": cli_messages.illegal_io_class_invalid_allocation_number,
"1,Superblock,255,2": cli_messages.illegal_io_class_invalid_allocation_number
"1,Superblock,255,2": cli_messages.illegal_io_class_invalid_allocation_number,
}
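
Every key in the dictionary above is a single line of the four-field CSV format used for IO class rules (id, rule/name, eviction priority, allocation). A small, framework-independent sketch of that layout:

from collections import namedtuple

IoClassLine = namedtuple("IoClassLine", "id rule eviction_priority allocation")

def parse_io_class_line(line):
    # A well-formed line has exactly four comma-separated fields,
    # e.g. "2,file_size:le:4096,1,1.00"
    fields = line.split(",")
    if len(fields) != 4:
        raise ValueError(f"Expected 4 fields, got {len(fields)}: {line!r}")
    return IoClassLine(*fields)

print(parse_io_class_line("2,file_size:le:4096,1,1.00"))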
@ -86,49 +80,67 @@ def test_io_class_prevent_wrong_configuration():
with TestRun.step("Display IO class configuration shall be default"):
create_and_load_default_io_class_config(cache)
loaded_io_classes = cache.list_io_classes()
loaded_io_classes_str = '\n'.join(str(i) for i in loaded_io_classes)
TestRun.LOGGER.info(f"Loaded IO class configuration is:\n"
f"{IoClass.default_header()}\n{loaded_io_classes_str}")
loaded_io_classes_str = "\n".join(str(i) for i in loaded_io_classes)
TestRun.LOGGER.info(
f"Loaded IO class configuration is:\n"
f"{IoClass.default_header()}\n{loaded_io_classes_str}"
)
config_io_classes = IoClass.csv_to_list(fs_utils.read_file(ioclass_config_path))
if not IoClass.compare_ioclass_lists(config_io_classes, loaded_io_classes):
TestRun.fail("Default IO class configuration not loaded correctly.")
with TestRun.step("Create illegal configuration file containing IO configuration "
"without header and check if it can not be loaded."):
TestRun.LOGGER.info(f"Preparing headerless configuration file with following content:\n"
f"{headerless_configuration}")
with TestRun.step(
"Create illegal configuration file containing IO configuration "
"without header and check if it can not be loaded."
):
TestRun.LOGGER.info(
f"Preparing headerless configuration file with following content:\n"
f"{headerless_configuration}"
)
fs_utils.write_file(ioclass_config_path, headerless_configuration)
try_load_malformed_config(cache, config_io_classes,
expected_err_msg=cli_messages.headerless_io_class_config)
try_load_malformed_config(
cache,
config_io_classes,
expected_err_msg=cli_messages.headerless_io_class_config,
)
with TestRun.step("Create illegal configuration file containing IO configuration with "
"malformed header and check if it can not be loaded."):
with TestRun.step(
"Create illegal configuration file containing IO configuration with "
"malformed header and check if it can not be loaded."
):
for header, err_message in setup_headers().items():
config_content = f"{header}\n{IoClass.default()}"
TestRun.LOGGER.info(f"Testing following header with default IO class:\n"
f"{config_content}")
TestRun.LOGGER.info(
f"Testing following header with default IO class:\n" f"{config_content}"
)
fs_utils.write_file(ioclass_config_path, config_content)
try_load_malformed_config(cache, config_io_classes,
expected_err_msg=err_message)
try_load_malformed_config(cache, config_io_classes, expected_err_msg=err_message)
with TestRun.step("Create illegal configuration file containing double IO class configuration "
"and check if it can not be loaded."):
with TestRun.step(
"Create illegal configuration file containing double IO class configuration "
"and check if it can not be loaded."
):
config_content = f"{IoClass.default_header()}\n{double_io_class_configuration}"
TestRun.LOGGER.info(f"Testing following configuration file:\n{config_content}")
fs_utils.write_file(ioclass_config_path, config_content)
try_load_malformed_config(cache, config_io_classes,
expected_err_msg=cli_messages.double_io_class_config)
try_load_malformed_config(
cache,
config_io_classes,
expected_err_msg=cli_messages.double_io_class_config,
)
with TestRun.step("Create illegal configuration file containing malformed IO configuration "
"with correct header and check if it can not be loaded."):
with TestRun.step(
"Create illegal configuration file containing malformed IO configuration "
"with correct header and check if it can not be loaded."
):
for io_config, err_message in illegal_io_class_configurations.items():
config_content = f"{IoClass.default_header()}\n{io_config}"
TestRun.LOGGER.info(
f"Testing following header with default IO class:\n{config_content}")
f"Testing following header with default IO class:\n{config_content}"
)
fs_utils.write_file(ioclass_config_path, config_content)
try_load_malformed_config(cache, config_io_classes,
expected_err_msg=err_message)
try_load_malformed_config(cache, config_io_classes, expected_err_msg=err_message)
def try_load_malformed_config(cache, config_io_classes, expected_err_msg):
@ -141,9 +153,10 @@ def try_load_malformed_config(cache, config_io_classes, expected_err_msg):
cli_messages.check_stderr_msg(e.output, expected_err_msg)
output_io_classes = cache.list_io_classes()
if not IoClass.compare_ioclass_lists(output_io_classes, config_io_classes):
output_str = '\n'.join(str(i) for i in output_io_classes)
output_str = "\n".join(str(i) for i in output_io_classes)
TestRun.LOGGER.error(
f"Loaded IO class config should be default but it is different:\n{output_str}")
f"Loaded IO class config should be default but it is different:\n{output_str}"
)
def create_and_load_default_io_class_config(cache):
@ -153,36 +166,51 @@ def create_and_load_default_io_class_config(cache):
def setup_headers():
default_header = IoClass.default_header_dict()
correct_id_header = default_header['id']
correct_name_header = default_header['name']
correct_eviction_priority_header = default_header['eviction_prio']
correct_allocation_header = default_header['allocation']
correct_id_header = default_header["id"]
correct_name_header = default_header["name"]
correct_eviction_priority_header = default_header["eviction_prio"]
correct_allocation_header = default_header["allocation"]
malformed_io_class_id_header = f"{malformed_io_class_id}," \
f"{correct_name_header}," \
f"{correct_eviction_priority_header}," \
malformed_io_class_id_header = (
f"{malformed_io_class_id},"
f"{correct_name_header},"
f"{correct_eviction_priority_header},"
f"{correct_allocation_header}"
malformed_io_class_name_header = f"{correct_id_header}," \
f"{malformed_io_class_name}," \
f"{correct_eviction_priority_header}," \
)
malformed_io_class_name_header = (
f"{correct_id_header},"
f"{malformed_io_class_name},"
f"{correct_eviction_priority_header},"
f"{correct_allocation_header}"
malformed_eviction_priority_header = f"{correct_id_header}," \
f"{correct_name_header}," \
f"{malformed_io_class_eviction_priority}," \
)
malformed_eviction_priority_header = (
f"{correct_id_header},"
f"{correct_name_header},"
f"{malformed_io_class_eviction_priority},"
f"{correct_allocation_header}"
malformed_allocation_header = f"{correct_id_header}," \
f"{correct_name_header}," \
f"{correct_eviction_priority_header}," \
)
malformed_allocation_header = (
f"{correct_id_header},"
f"{correct_name_header},"
f"{correct_eviction_priority_header},"
f"{malformed_io_class_allocation}"
)
return {
malformed_io_class_id_header: [m.replace("value_template", malformed_io_class_id)
for m in cli_messages.malformed_io_class_header],
malformed_io_class_name_header: [m.replace("value_template", malformed_io_class_name)
for m in cli_messages.malformed_io_class_header],
malformed_eviction_priority_header: [m.replace("value_template",
malformed_io_class_eviction_priority)
for m in cli_messages.malformed_io_class_header],
malformed_allocation_header: [m.replace("value_template", malformed_io_class_allocation)
for m in cli_messages.malformed_io_class_header]
malformed_io_class_id_header: [
m.replace("value_template", malformed_io_class_id)
for m in cli_messages.malformed_io_class_header
],
malformed_io_class_name_header: [
m.replace("value_template", malformed_io_class_name)
for m in cli_messages.malformed_io_class_header
],
malformed_eviction_priority_header: [
m.replace("value_template", malformed_io_class_eviction_priority)
for m in cli_messages.malformed_io_class_header
],
malformed_allocation_header: [
m.replace("value_template", malformed_io_class_allocation)
for m in cli_messages.malformed_io_class_header
],
}
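
The expected-error lists returned above are produced by substituting the malformed header value into message templates. A tiny standalone example of that substitution pattern (the template strings below are invented, not the real cli_messages content):

# Invented templates containing the "value_template" placeholder
malformed_io_class_header_templates = [
    "Malformed IO class configuration header",
    "Unknown column name: value_template",
]

malformed_value = "1d"  # e.g. a mistyped 'id' column name
expected_messages = [
    m.replace("value_template", malformed_value)
    for m in malformed_io_class_header_templates
]
print(expected_messages)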

View File

@ -1,5 +1,5 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -50,15 +50,15 @@ def test_ioclass_process_name():
with TestRun.step("Check if all data generated by dd process is cached."):
for i in range(iterations):
dd = (
(
Dd()
.input("/dev/zero")
.output(core.path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
.run()
)
dd.run()
sync()
time.sleep(0.1)
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
@ -89,11 +89,7 @@ def test_ioclass_pid():
# Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
# 'dd' command is created and is appended to 'echo' command instead of running it
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.path)
.count(dd_count)
.block_size(dd_size)
Dd().input("/dev/zero").output(core.path).count(dd_count).block_size(dd_size)
)
for _ in TestRun.iteration(range(iterations)):
@ -117,7 +113,7 @@ def test_ioclass_pid():
rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
casadm.load_io_classes(cache.cache_id, ioclass_config_path)
with TestRun.step(f"Run dd with pid {pid}."):
# pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
@ -136,4 +132,4 @@ def test_ioclass_pid():
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
ioclass_config.remove_ioclass(ioclass_id)
ioclass_config.remove_ioclass(ioclass_id, ioclass_config_path)
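
The pid-based test above leans on the /proc/sys/kernel/ns_last_pid mechanism: writing N there makes the next process spawned in that namespace receive pid N+1, which is why the test chains 'echo' and 'dd' in a single command. A rough standalone illustration (needs root and is racy if anything else forks in between):

import subprocess

target_pid = 20000  # hypothetical pid we want the next process to receive

# Write (target_pid - 1) so the kernel hands out target_pid to the next fork
with open("/proc/sys/kernel/ns_last_pid", "w") as f:
    f.write(str(target_pid - 1))

proc = subprocess.Popen(["sleep", "0"])
print(proc.pid == target_pid)
proc.wait()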

View File

@ -1,5 +1,5 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -26,7 +26,7 @@ def test_ioclass_usage_sum():
title: Test for ioclass stats after purge
description: |
Create io classes for 3 different directories. Run IO against each
directory, check usage stats correctness before and after purge
directory, check usage stats correctness before and after purge.
pass_criteria:
- Usage stats are consistent on each test step
- Usage stats don't exceed cache size
@ -38,9 +38,7 @@ def test_ioclass_usage_sum():
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step(
f"Prepare filesystem and mount {core.path} at {mountpoint}"
):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
filesystem = Filesystem.xfs
core.create_filesystem(filesystem)
core.mount(mountpoint)
@ -72,9 +70,7 @@ def test_ioclass_usage_sum():
# Since default ioclass is already present in cache and no directory should be
# created, it is added to io classes list after setup is done
io_classes.append(
IoclassConfig(default_ioclass_id, 22, f"{mountpoint}", cache_size * 0.2)
)
io_classes.append(IoclassConfig(default_ioclass_id, 22, f"{mountpoint}", cache_size * 0.2))
with TestRun.step("Verify stats of newly started cache device"):
sync()
@ -83,7 +79,7 @@ def test_ioclass_usage_sum():
with TestRun.step("Trigger IO to each partition and verify stats"):
for io_class in io_classes:
run_io_dir(io_class.dir_path, int((io_class.io_size) / Unit.Blocks4096))
run_io_dir(io_class.dir_path, int(io_class.io_size / Unit.Blocks4096.get_value()))
verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
@ -92,11 +88,9 @@ def test_ioclass_usage_sum():
verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
with TestRun.step(
"Trigger IO to each partition for the second time and verify stats"
):
with TestRun.step("Trigger IO to each partition for the second time and verify stats"):
for io_class in io_classes:
run_io_dir(io_class.dir_path, int((io_class.io_size) / Unit.Blocks4096))
run_io_dir(io_class.dir_path, int(io_class.io_size / Unit.Blocks4096.get_value()))
verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
@ -141,13 +135,13 @@ def add_io_class(class_id, eviction_prio, rule):
def run_io_dir(path, num_ios):
dd = (
(
Dd()
.input("/dev/zero")
.output(f"{path}/tmp_file")
.count(num_ios)
.block_size(Size(1, Unit.Blocks4096))
.run()
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
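
The stats verification in this test boils down to summing per-class usage counters and making sure the total stays within the cache size, as the pass criteria state. A toy version of that check with invented numbers:

# Hypothetical per-IO-class occupancy, in 4 KiB blocks
per_class_occupancy = {1: 1200, 2: 800, 3: 500, 22: 100}
cache_size_blocks = 4096  # total cache capacity in 4 KiB blocks

total = sum(per_class_occupancy.values())
if total > cache_size_blocks:
    print(f"Summed occupancy ({total}) exceeds cache size ({cache_size_blocks})")
else:
    print(f"Summed occupancy {total} of {cache_size_blocks} blocks - OK")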

View File

@ -20,8 +20,14 @@ from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils import os_utils
from test_utils.os_utils import Runlevel
from test_utils.size import Size, Unit
from tests.io_class.io_class_common import prepare, mountpoint, ioclass_config_path, \
compare_io_classes_list, run_io_dir_read, template_config_path
from tests.io_class.io_class_common import (
prepare,
mountpoint,
ioclass_config_path,
compare_io_classes_list,
run_io_dir_read,
template_config_path,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -37,8 +43,7 @@ def test_io_class_service_load(runlevel):
- IO class configuration is the same before and after reboot
"""
with TestRun.step("Prepare devices."):
cache, core = prepare(core_size=Size(300, Unit.MebiByte),
cache_mode=CacheMode.WT)
cache, core = prepare(core_size=Size(300, Unit.MebiByte), cache_mode=CacheMode.WT)
with TestRun.step("Read the whole CAS device."):
run_io_dir_read(core.path)
@ -47,30 +52,37 @@ def test_io_class_service_load(runlevel):
core.create_filesystem(Filesystem.ext4)
core.mount(mountpoint)
with TestRun.step("Load IO class configuration file with rules that metadata will not be "
"cached and all other IO will be cached as unclassified."):
with TestRun.step(
"Load IO class configuration file with rules that metadata will not be "
"cached and all other IO will be cached as unclassified."
):
config_io_classes = prepare_and_load_io_class_config(cache, metadata_not_cached=True)
with TestRun.step("Run IO."):
run_io()
with TestRun.step("Save IO class usage and configuration statistic."):
saved_usage_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[
StatsFilter.usage]).usage_stats
saved_conf_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[
StatsFilter.conf]).config_stats
saved_usage_stats = cache.get_io_class_statistics(
io_class_id=0, stat_filter=[StatsFilter.usage]
).usage_stats
saved_conf_stats = cache.get_io_class_statistics(
io_class_id=0, stat_filter=[StatsFilter.conf]
).config_stats
with TestRun.step("Create init config from running CAS configuration."):
InitConfig.create_init_config_from_running_configuration(
cache_extra_flags=f"ioclass_file={ioclass_config_path}")
cache_extra_flags=f"ioclass_file={ioclass_config_path}"
)
os_utils.sync()
with TestRun.step(f"Reboot system to runlevel {runlevel}."):
os_utils.change_runlevel(runlevel)
TestRun.executor.reboot()
with TestRun.step("Check if CAS device loads properly - "
"IO class configuration and statistics shall not change"):
with TestRun.step(
"Check if CAS device loads properly - "
"IO class configuration and statistics shall not change"
):
caches = casadm_parser.get_caches()
if len(caches) != 1:
TestRun.fail("Cache did not start at boot time.")
@ -85,18 +97,26 @@ def test_io_class_service_load(runlevel):
# Reads from core can invalidate some data so it is possible that occupancy after reboot
# is lower than before
reads_from_core = cache.get_statistics(stat_filter=[StatsFilter.blk]).block_stats.core.reads
read_usage_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[
StatsFilter.usage]).usage_stats
read_conf_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[
StatsFilter.conf]).config_stats
read_usage_stats = cache.get_io_class_statistics(
io_class_id=0, stat_filter=[StatsFilter.usage]
).usage_stats
read_conf_stats = cache.get_io_class_statistics(
io_class_id=0, stat_filter=[StatsFilter.conf]
).config_stats
if read_conf_stats != saved_conf_stats:
TestRun.LOGGER.error(f"Statistics do not match. Before: {str(saved_conf_stats)} "
f"After: {str(read_conf_stats)}")
if read_usage_stats != saved_usage_stats and \
saved_usage_stats.occupancy - read_usage_stats.occupancy > reads_from_core:
TestRun.LOGGER.error(f"Statistics do not match. Before: {str(saved_usage_stats)} "
f"After: {str(read_usage_stats)}")
TestRun.LOGGER.error(
f"Statistics do not match. Before: {str(saved_conf_stats)} "
f"After: {str(read_conf_stats)}"
)
if (
read_usage_stats != saved_usage_stats
and saved_usage_stats.occupancy - read_usage_stats.occupancy > reads_from_core
):
TestRun.LOGGER.error(
f"Statistics do not match. Before: {str(saved_usage_stats)} "
f"After: {str(read_usage_stats)}"
)
with TestRun.step("Mount CAS device and run IO again."):
core.mount(mountpoint)
@ -108,24 +128,28 @@ def test_io_class_service_load(runlevel):
read_total = cache_stats.request_stats.read.total
read_hits_percentage = read_hits / read_total * 100
if read_hits_percentage <= 95:
TestRun.LOGGER.error(f"Read hits percentage too low: {read_hits_percentage}%\n"
f"Read hits: {read_hits}, read total: {read_total}")
TestRun.LOGGER.error(
f"Read hits percentage too low: {read_hits_percentage}%\n"
f"Read hits: {read_hits}, read total: {read_total}"
)
def run_io():
fio = Fio() \
.create_command() \
.block_size(Size(1, Unit.Blocks4096)) \
.io_engine(IoEngine.libaio) \
.read_write(ReadWrite.read) \
.directory(os.path.join(mountpoint)) \
.sync() \
.do_verify() \
.num_jobs(32) \
.run_time(timedelta(minutes=1)) \
.time_based()\
.nr_files(30)\
fio = (
Fio()
.create_command()
.block_size(Size(1, Unit.Blocks4096))
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.read)
.directory(os.path.join(mountpoint))
.sync()
.do_verify()
.num_jobs(32)
.run_time(timedelta(minutes=1))
.time_based()
.nr_files(30)
.file_size(Size(250, Unit.KiB))
)
fio.run()
os_utils.sync()

View File

@ -6,12 +6,12 @@
import pytest
from api.cas import ioclass_config, casadm
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from tests.io_class.io_class_common import prepare
@ -21,7 +21,7 @@ def test_ioclass_wlth():
"""
title: Test for `wlth` classification rule
description: |
Test CAS ability to cache IO based on write life time hints.
Test CAS ability to cache IO based on 'write-life-time-hints' classification rule.
pass_criteria:
- IO with wlth is cached
- IO without wlth is not cached
@ -60,7 +60,7 @@ def test_ioclass_wlth():
cache.reset_counters()
with TestRun.step(f"Trigger IO with a write life time hint"):
# Fio adds hints only to direct IO. Even if `write_hint` param isn't proveded, direct IO
# Fio adds hints only to direct IO. Even if `write_hint` param isn't provided, direct IO
# has assigned a hint by default
io_count = 12345
io_size = Size(io_count, Unit.Blocks4096)

View File

@ -1,5 +1,5 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@ -30,8 +30,7 @@ def test_ioclass_lba():
"""
title: Test IO classification by lba.
description: |
Write data to random lba and check if it is cached according to range
defined in ioclass rule
Write data to random lba and check if it is cached according to range defined in ioclass rule.
pass_criteria:
- No kernel bug.
- IO is classified properly based on lba range defined in config.
@ -64,15 +63,16 @@ def test_ioclass_lba():
# '8' step is set to prevent writing cache line more than once
TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
.oflag("direct")
.run()
)
dd.run()
dirty_count += 1
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
@ -84,22 +84,24 @@ def test_ioclass_lba():
test_lba = [max_cached_lba + 1] + random.sample(
[
*range(0, min_cached_lba),
*range(max_cached_lba + 1, int(core.size.get_value(Unit.Blocks512)))
*range(max_cached_lba + 1, int(core.size.get_value(Unit.Blocks512))),
],
k=100)
k=100,
)
for lba in test_lba:
prev_dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
.oflag("direct")
.run()
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if prev_dirty != dirty:
@ -141,14 +143,15 @@ def test_ioclass_request_size():
for i in range(iterations):
cache.flush_cache()
req_size = random.choice(cached_req_sizes)
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
.run()
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
TestRun.fail("Incorrect number of dirty blocks!")
@ -164,14 +167,15 @@ def test_ioclass_request_size():
]
for i in range(iterations):
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd().input("/dev/zero")
(
Dd()
.input("/dev/zero")
.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
.run()
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.fail("Dirty data present!")
@ -211,11 +215,15 @@ def test_ioclass_direct(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Prepare fio command."):
fio = Fio().create_command() \
.io_engine(IoEngine.libaio) \
.size(io_size).offset(io_size) \
.read_write(ReadWrite.write) \
fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/tmp_file" if filesystem else core.path)
)
with TestRun.step("Prepare filesystem."):
if filesystem:
@ -237,8 +245,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if buffered writes are not cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered writes were cached!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
TestRun.fail(
"Buffered writes were cached!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}"
)
with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"):
fio.direct()
@ -248,8 +258,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if direct writes are cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct writes was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
TestRun.fail(
"Wrong number of direct writes was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
)
with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"):
fio.remove_param("readwrite").remove_param("direct")
@ -260,8 +272,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if buffered reads caused reclassification."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered reads did not cause reclassification!"
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")
TestRun.fail(
"Buffered reads did not cause reclassification!"
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}"
)
with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"):
fio.direct()
@ -271,8 +285,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if direct reads are cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct reads was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
TestRun.fail(
"Wrong number of direct reads was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
)
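
For context on the 'direct' condition exercised above: direct IO bypasses the page cache via O_DIRECT, which is what fio's .direct() flag requests. A bare-bones example of issuing one such write outside the test framework (Linux only; the path is a placeholder and must live on a filesystem that supports O_DIRECT, so not tmpfs):

import mmap
import os

path = "/mnt/example/direct_io_file"  # placeholder target
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_DIRECT, 0o644)

# O_DIRECT needs block-aligned buffers; an anonymous mmap is page-aligned and zero-filled
buf = mmap.mmap(-1, 4096)
try:
    os.write(fd, buf)
finally:
    os.close(fd)
    buf.close()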
@pytest.mark.os_dependent
@ -308,31 +324,35 @@ def test_ioclass_metadata(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem and mount {core.path} " f"at {mountpoint}."
):
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
with TestRun.step("Create 20 test files."):
requests_to_metadata_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
io_class_id=ioclass_id
).request_stats.write
files = []
for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}"
dd = (
Dd().input("/dev/urandom")
(
Dd()
.input("/dev/urandom")
.output(file_path)
.count(random.randint(5, 50))
.block_size(Size(1, Unit.MebiByte))
.oflag("sync")
.run()
)
dd.run()
files.append(File(file_path))
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while creating files!")
@ -344,7 +364,8 @@ def test_ioclass_metadata(filesystem):
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while renaming files!")
@ -359,7 +380,8 @@ def test_ioclass_metadata(filesystem):
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while moving files!")
@ -368,7 +390,8 @@ def test_ioclass_metadata(filesystem):
with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while deleting directory with files!")
@ -376,8 +399,7 @@ def test_ioclass_metadata(filesystem):
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_id_as_condition(filesystem):
def test_ioclass_id_as_condition():
"""
title: IO class as a condition.
description: |
@ -386,7 +408,7 @@ def test_ioclass_id_as_condition(filesystem):
- No kernel bug.
- IO is classified properly as described in IO class config.
"""
filesystem = Filesystem.xfs
base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
@ -448,8 +470,9 @@ def test_ioclass_id_as_condition(filesystem):
# CAS needs some time to resolve directory to inode
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.path} at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem " f"and mount {core.path} at {mountpoint}."
):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
@ -457,83 +480,109 @@ def test_ioclass_id_as_condition(filesystem):
# CAS needs some time to resolve directory to inode
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
"it is classified properly."):
with TestRun.step(
"Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
"it is classified properly."
):
# Should be classified as IO class 4
base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
(Fio().create_command()
(
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(non_ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_1")
.run())
.run()
)
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
if new_occupancy != base_occupancy + non_ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
TestRun.fail(
"Writes were not properly cached!\n"
f"Expected: {base_occupancy + non_ioclass_file_size}, "
f"actual: {new_occupancy}")
f"actual: {new_occupancy}"
)
with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
"it is classified properly."):
with TestRun.step(
"Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
"it is classified properly."
):
# Should be classified as IO class 5
base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
(Fio().create_command()
(
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file_2")
.run())
.run()
)
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
TestRun.fail(
"Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
)
with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if "
"it is classified properly."):
with TestRun.step(
"Run IO fulfilling IO class 1 and 2 conditions and check if " "it is classified properly."
):
# Should be classified as IO class 5
base_occupancy = new_occupancy
(Fio().create_command()
(
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.run())
.run()
)
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
TestRun.fail(
"Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
)
with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if "
"it is classified properly."):
with TestRun.step(
"Run direct IO fulfilling IO class 1 and 2 conditions and check if "
"it is classified properly."
):
# Should be classified as IO class 6
base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
(Fio().create_command()
(
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.direct()
.run())
.run()
)
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
TestRun.fail(
"Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
)
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_or(filesystem):
def test_ioclass_conditions_or():
"""
title: IO class condition 'or'.
description: |
@ -542,6 +591,7 @@ def test_ioclass_conditions_or(filesystem):
- No kernel bug.
- Every IO fulfilling one condition is classified properly.
"""
filesystem = Filesystem.xfs
with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare()
@ -553,14 +603,17 @@ def test_ioclass_conditions_or(filesystem):
ioclass_id=1,
eviction_priority=1,
allocation="1.00",
rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
rule=(
f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5"
),
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.path} at {mountpoint}."):
with TestRun.step(
f"Prepare {filesystem.name} filesystem " f"and mount {core.path} at {mountpoint}."
):
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
@ -571,35 +624,38 @@ def test_ioclass_conditions_or(filesystem):
for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
(Fio().create_command()
(
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/dir{i}/test_file")
.run())
.run()
)
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.fail("Occupancy has not increased correctly!\n"
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}")
TestRun.fail(
"Occupancy has not increased correctly!\n"
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
)
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_and(filesystem):
def test_ioclass_conditions_and():
"""
title: IO class condition 'and'.
description: |
Load config with IO class combining 5 conditions contradicting
at least one other condition.
Load config with IO class combining 5 conditions contradicting at least one other condition.
pass_criteria:
- No kernel bug.
- Every IO fulfilling one of the conditions is not classified.
"""
filesystem = Filesystem.xfs
file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte))
@ -613,41 +669,52 @@ def test_ioclass_conditions_and(filesystem):
ioclass_id=1,
eviction_priority=1,
allocation="1.00",
rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
rule=(
f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}",
f"file_size:eq:{file_size_bytes}"
),
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.path} at {mountpoint}")
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem " f"and mounting {core.path} at {mountpoint}"
)
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
# Perform IO
for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]:
(Fio().create_command()
for size in [
file_size,
file_size + Size(1, Unit.MebiByte),
file_size - Size(1, Unit.MebiByte),
]:
(
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file")
.run())
.run()
)
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Unexpected occupancy increase!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
TestRun.fail(
"Unexpected occupancy increase!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}"
)
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_effective_ioclass(filesystem):
def test_ioclass_effective_ioclass():
"""
title: Effective IO class with multiple non-exclusive conditions
description: |
@ -657,19 +724,24 @@ def test_ioclass_effective_ioclass(filesystem):
- In every iteration first IO is classified to the last in order IO class
- In every iteration second IO is classified to the IO class with '&done' annotation
"""
filesystem = Filesystem.xfs
with TestRun.LOGGER.step(f"Test prepare"):
cache, core = prepare(default_allocation="1.00")
Udev.disable()
file_size = Size(10, Unit.Blocks4096)
file_size_bytes = int(file_size.get_value(Unit.Byte))
test_dir = f"{mountpoint}/test"
rules = ["direct", # rule contradicting other rules
rules = [
"direct", # rule contradicting other rules
f"directory:{test_dir}",
f"file_size:le:{2 * file_size_bytes}",
f"file_size:ge:{file_size_bytes // 2}"]
f"file_size:ge:{file_size_bytes // 2}",
]
with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.path} at {mountpoint}"):
with TestRun.LOGGER.step(
f"Preparing {filesystem.name} filesystem " f"and mounting {core.path} at {mountpoint}"
):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(test_dir)
@ -682,41 +754,54 @@ def test_ioclass_effective_ioclass(filesystem):
with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
fio = (Fio().create_command()
io_class_id=io_class_id
).usage_stats.occupancy
fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{test_dir}/test_file{i}"))
.target(f"{test_dir}/test_file{i}")
)
fio.run()
sync()
with TestRun.LOGGER.step("Check if IO was properly classified "
"(to the last non-contradicting IO class)"):
with TestRun.LOGGER.step(
"Check if IO was properly classified " "(to the last non-contradicting IO class)"
):
new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
io_class_id=io_class_id
).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n"
TestRun.LOGGER.error(
"Wrong IO classification!\n"
f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}")
f"actual: {new_occupancy}"
)
with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)
with TestRun.LOGGER.step("Repeat IO"):
base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
io_class_id=io_class_id
).usage_stats.occupancy
fio.run()
sync()
with TestRun.LOGGER.step("Check if IO was properly classified "
"(to the IO class with '&done' annotation)"):
with TestRun.LOGGER.step(
"Check if IO was properly classified " "(to the IO class with '&done' annotation)"
):
new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
io_class_id=io_class_id
).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n"
TestRun.LOGGER.error(
"Wrong IO classification!\n"
f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}")
f"actual: {new_occupancy}"
)
def load_io_classes_in_permutation_order(rules, permutation, cache):
@ -729,9 +814,9 @@ def load_io_classes_in_permutation_order(rules, permutation, cache):
ioclass_list = [IoClass.default(allocation="0.0")]
for n in range(len(rules)):
ioclass_list.append(IoClass(class_id=permutation[n], rule=rules[n]))
IoClass.save_list_to_config_file(ioclass_list,
add_default_rule=False,
ioclass_config_path=ioclass_config_path)
IoClass.save_list_to_config_file(
ioclass_list, add_default_rule=False, ioclass_config_path=ioclass_config_path
)
casadm.load_io_classes(cache.cache_id, file=ioclass_config_path)
@ -745,8 +830,7 @@ def add_done_to_second_non_exclusive_condition(rules, permutation, cache):
if non_exclusive_conditions == 2:
break
second_class_id += 1
fs_utils.replace_first_pattern_occurrence(ioclass_config_path,
rules[idx], f"{rules[idx]}&done")
fs_utils.replace_first_pattern_occurrence(ioclass_config_path, rules[idx], f"{rules[idx]}&done")
sync()
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
return second_class_id
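
A condensed sketch of the classification rule the last two helpers exercise, matching the pass criteria above: without '&done' the last matching IO class in order wins, while '&done' stops evaluation at that class. Standalone and illustrative only:

def effective_io_class(rules_in_order, io_matches):
    # rules_in_order: rule strings as loaded, possibly carrying an '&done' suffix
    # io_matches: parallel list of booleans - does the IO satisfy each rule?
    winner = None
    for index, (rule, matches) in enumerate(zip(rules_in_order, io_matches)):
        if not matches:
            continue
        winner = index
        if rule.endswith("&done"):
            break  # '&done' makes this class final
    return winner

# The same IO lands in an earlier class once '&done' is added to its rule
print(effective_io_class(["directory:/mnt/test", "file_size:le:81920"], [True, True]))       # -> 1
print(effective_io_class(["directory:/mnt/test&done", "file_size:le:81920"], [True, True]))  # -> 0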