Merge pull request #1326 from KlaudiaJ/io-class-tests-refactor

Refactor IO class tests
Karolina Rogowska, 2022-09-26 11:55:08 +02:00, committed by GitHub
commit 90106d545a
17 changed files with 1100 additions and 883 deletions

View File

@@ -1,5 +1,5 @@
 #
-# Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2019-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -20,7 +20,6 @@ from test_utils.os_utils import Udev, sync
 from test_utils.os_utils import drop_caches, DropCachesMode
 from test_utils.size import Size, Unit
-
 ioclass_config_path = "/etc/opencas/ioclass.conf"
 template_config_path = "/etc/opencas/ioclass-config.csv"
 mountpoint = "/tmp/cas1-1"
@@ -31,9 +30,9 @@ def prepare(
     core_size=Size(40, Unit.GibiByte),
     cache_mode=CacheMode.WB,
     cache_line_size=CacheLineSize.LINE_4KiB,
-    default_allocation="0.00"
+    default_allocation="0.00",
 ):
-    ioclass_config.remove_ioclass_config()
+    ioclass_config.remove_ioclass_config(ioclass_config_path)
     cache_device = TestRun.disks["cache"]
     core_device = TestRun.disks["core"]
@@ -60,6 +59,7 @@ def prepare(
     )
     # To make test more precise all workload except of tested ioclass should be
     # put in pass-through mode
+    # Avoid caching anything else than files with specified prefix
     ioclass_config.add_ioclass(
         ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID,
         eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
@@ -99,9 +99,9 @@ def generate_and_load_random_io_class_config(cache):
 
 def compare_io_classes_list(expected, actual):
     if not IoClass.compare_ioclass_lists(expected, actual):
         TestRun.LOGGER.error("IO classes configuration is not as expected.")
-        expected = '\n'.join(str(i) for i in expected)
+        expected = "\n".join(str(i) for i in expected)
         TestRun.LOGGER.error(f"Expected IO classes:\n{expected}")
-        actual = '\n'.join(str(i) for i in actual)
+        actual = "\n".join(str(i) for i in actual)
         TestRun.LOGGER.error(f"Actual IO classes:\n{actual}")
@@ -131,14 +131,14 @@ def run_io_dir_read(path):
     drop_caches(DropCachesMode.ALL)
 
-def run_fio_count(core, blocksize, num_ios):
+def run_fio_count(core, block_size, num_ios):
     (
         Fio()
         .create_command()
         .target(core)
         .io_engine(IoEngine.libaio)
         .read_write(ReadWrite.randread)
-        .block_size(blocksize)
+        .block_size(block_size)
         .direct()
         .file_size(Size(10, Unit.GibiByte))
         .num_ios(num_ios)
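
The renamed helper keeps the fluent fio builder style used throughout these tests. For orientation, a call site would look roughly like this (a minimal sketch built only from the signatures visible in this diff; the concrete argument values are illustrative assumptions, not part of the commit):

    # Hypothetical call site; argument values chosen for illustration only.
    cache, core = prepare(cache_mode=CacheMode.WB, default_allocation="0.00")
    run_fio_count(core, block_size=Size(4, Unit.KibiByte), num_ios=100000)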

View File

@@ -1,5 +1,5 @@
 #
-# Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2019-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -20,8 +20,7 @@ ioclass_config_path = "/tmp/opencas_ioclass.conf"
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("cache_mode", CacheMode)
-def test_ioclass_export_configuration(cache_mode):
+def test_io_class_export_configuration():
     """
     title: Export IO class configuration to a file
     description: |
@@ -30,6 +29,8 @@ def test_ioclass_export_configuration(cache_mode):
     - CAS default IO class configuration contains unclassified class only
     - CAS properly imports previously exported configuration
     """
+    cache_mode = CacheMode.WB
+
     with TestRun.LOGGER.step(f"Test prepare"):
         cache, core = prepare(cache_mode)
         saved_config_path = "/tmp/opencas_saved.conf"
@@ -38,28 +39,37 @@ def test_ioclass_export_configuration(cache_mode):
     with TestRun.LOGGER.step(f"Check IO class configuration (should contain only default class)"):
         csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
         if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), default_list):
-            TestRun.LOGGER.error("Default configuration does not match expected\n"
-                                 f"Current:\n{csv}\n"
-                                 f"Expected:{IoClass.list_to_csv(default_list)}")
+            TestRun.LOGGER.error(
+                "Default configuration does not match expected\n"
+                f"Current:\n{csv}\n"
+                f"Expected:{IoClass.list_to_csv(default_list)}"
+            )
 
-    with TestRun.LOGGER.step("Create and load configuration file for 33 IO classes "
-                             "with random names, allocation and priority values"):
+    with TestRun.LOGGER.step(
+        "Create and load configuration file for 33 IO classes "
+        "with random names, allocation and priority values"
+    ):
         random_list = IoClass.generate_random_ioclass_list(33)
         IoClass.save_list_to_config_file(random_list, ioclass_config_path=ioclass_config_path)
         casadm.load_io_classes(cache.cache_id, ioclass_config_path)
 
-    with TestRun.LOGGER.step("Display and export IO class configuration - displayed configuration "
-                             "should be the same as created"):
+    with TestRun.LOGGER.step(
+        "Display and export IO class configuration - displayed configuration "
+        "should be the same as created"
+    ):
         TestRun.executor.run(
             f"{casadm.list_io_classes_cmd(str(cache.cache_id), OutputFormat.csv.name)}"
-            f" > {saved_config_path}")
+            f" > {saved_config_path}"
+        )
         csv = fs_utils.read_file(saved_config_path)
         if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), random_list):
-            TestRun.LOGGER.error("Exported configuration does not match expected\n"
-                                 f"Current:\n{csv}\n"
-                                 f"Expected:{IoClass.list_to_csv(random_list)}")
+            TestRun.LOGGER.error(
+                "Exported configuration does not match expected\n"
+                f"Current:\n{csv}\n"
+                f"Expected:{IoClass.list_to_csv(random_list)}"
+            )
 
-    with TestRun.LOGGER.step("Stop Intel CAS"):
+    with TestRun.LOGGER.step("Stop Open CAS"):
         casadm.stop_cache(cache.cache_id)
 
     with TestRun.LOGGER.step("Start cache and add core"):
@@ -69,9 +79,11 @@ def test_ioclass_export_configuration(cache_mode):
     with TestRun.LOGGER.step("Check IO class configuration (should contain only default class)"):
         csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
         if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), default_list):
-            TestRun.LOGGER.error("Default configuration does not match expected\n"
-                                 f"Current:\n{csv}\n"
-                                 f"Expected:{IoClass.list_to_csv(default_list)}")
+            TestRun.LOGGER.error(
+                "Default configuration does not match expected\n"
+                f"Current:\n{csv}\n"
+                f"Expected:{IoClass.list_to_csv(default_list)}"
+            )
 
     with TestRun.LOGGER.step("Load exported configuration file for 33 IO classes"):
         casadm.load_io_classes(cache.cache_id, saved_config_path)
@@ -79,18 +91,20 @@ def test_ioclass_export_configuration(cache_mode):
     with TestRun.LOGGER.step("Display IO class configuration - should be the same as created"):
         csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
         if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), random_list):
-            TestRun.LOGGER.error("Exported configuration does not match expected\n"
-                                 f"Current:\n{csv}\n"
-                                 f"Expected:{IoClass.list_to_csv(random_list)}")
+            TestRun.LOGGER.error(
+                "Exported configuration does not match expected\n"
+                f"Current:\n{csv}\n"
+                f"Expected:{IoClass.list_to_csv(random_list)}"
+            )
 
     with TestRun.LOGGER.step(f"Test cleanup"):
         fs_utils.remove(saved_config_path)
 
 
 def prepare(cache_mode: CacheMode = None):
-    ioclass_config.remove_ioclass_config()
-    cache_device = TestRun.disks['cache']
-    core_device = TestRun.disks['core']
+    ioclass_config.remove_ioclass_config(ioclass_config_path)
+    cache_device = TestRun.disks["cache"]
+    core_device = TestRun.disks["core"]
     cache_device.create_partitions([Size(150, Unit.MebiByte)])
     core_device.create_partitions([Size(300, Unit.MebiByte)])
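
The test above boils down to a save/load/compare round trip. Condensed to its essential calls (all taken from this diff; the assert stands in for the LOGGER.error reporting the test actually uses):

    # Round-trip sketch: generate, load, export, re-import, compare.
    random_list = IoClass.generate_random_ioclass_list(33)
    IoClass.save_list_to_config_file(random_list, ioclass_config_path=ioclass_config_path)
    casadm.load_io_classes(cache.cache_id, ioclass_config_path)
    csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
    assert IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv), random_list)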

View File

@@ -1,19 +1,17 @@
 #
-# Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2019-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
 import pytest
-from test_tools.disk_utils import Filesystem
 from api.cas import ioclass_config, casadm
 from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
-from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
 from test_utils.os_utils import sync, Udev, drop_caches
 from test_utils.size import Unit, Size
+from core.test_run import TestRun
 
 dd_bs = Size(1, Unit.Blocks4096)
 dd_count = 1230
@@ -23,8 +21,7 @@ not_cached_mountpoint = "/tmp/ioclass_core_id_test/not_cached"
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("filesystem", [fs for fs in Filesystem] + [None])
-def test_ioclass_core_id(filesystem):
+def test_ioclass_core_id():
     """
     title: Test for `core_id` classification rule
     description: |
@@ -34,12 +31,11 @@ def test_ioclass_core_id(filesystem):
     - IO to core with enabled selective allocation is cached
     - IO to core with disabled selective allocation is not cached
     """
-    fs_info = f"with {filesystem}" if filesystem else ""
     with TestRun.step(
-        f"Start cache with two cores on created partitions {fs_info}, "
-        "with NOP, disabled seq cutoff"
+        f"Start cache with two cores on created partitions with NOP, disabled seq cutoff"
     ):
-        cache, cores = prepare(filesystem, 2)
+        cache, cores = prepare(2)
         core_1, core_2 = cores[0], cores[1]
 
     with TestRun.step(f"Add core_id based classification rules"):
@@ -66,11 +62,6 @@ def test_ioclass_core_id(filesystem):
             cache_id=cache.cache_id, file=ioclass_config.default_config_file_path
         )
 
-    if filesystem:
-        with TestRun.step(f"Mount cores"):
-            core_1.mount(cached_mountpoint)
-            core_2.mount(not_cached_mountpoint)
-
     with TestRun.step(f"Reset counters"):
         sync()
         drop_caches()
@@ -78,21 +69,18 @@ def test_ioclass_core_id(filesystem):
         cache.reset_counters()
 
     with TestRun.step(f"Trigger IO to both cores"):
-        if filesystem:
-            dd_dst_paths = [cached_mountpoint + "/test_file", not_cached_mountpoint + "/test_file"]
-        else:
-            dd_dst_paths = [core_1.path, core_2.path]
+        dd_dst_paths = [core_1.path, core_2.path]
 
         for path in dd_dst_paths:
-            dd = (
+            (
                 Dd()
                 .input("/dev/zero")
                 .output(path)
                 .count(dd_count)
                 .block_size(dd_bs)
                 .oflag("sync")
+                .run()
             )
-            dd.run()
 
         sync()
         drop_caches()
@@ -105,13 +93,13 @@ def test_ioclass_core_id(filesystem):
         if core_1_occupancy < dd_size:
             TestRun.LOGGER.error(
                 f"First core's occupancy is {core_1_occupancy} "
-                f"- it is less than {dd_size} - triggerd IO size!"
+                f"- it is less than {dd_size} - triggered IO size!"
             )
 
         if core_2_occupancy.get_value() != 0:
             TestRun.LOGGER.error(f"First core's occupancy is {core_2_occupancy} instead of 0!")
 
-    with TestRun.step(f"Check ioclasses occupancy"):
+    with TestRun.step(f"Check io classes occupancy"):
         cached_ioclass_occupancy = cache.get_io_class_statistics(
             io_class_id=cached_ioclass_id
         ).usage_stats.occupancy
@@ -122,7 +110,7 @@ def test_ioclass_core_id(filesystem):
         if cached_ioclass_occupancy < dd_size:
             TestRun.LOGGER.error(
                 f"Cached ioclass occupancy is {cached_ioclass_occupancy} "
-                f"- it is less than {dd_size} - triggerd IO size!"
+                f"- it is less than {dd_size} - triggered IO size!"
             )
         if not_cached_ioclass_occupancy.get_value() != 0:
             TestRun.LOGGER.error(
@@ -138,7 +126,7 @@ def test_ioclass_core_id(filesystem):
         )
 
 
-def prepare(filesystem, cores_number):
+def prepare(cores_number):
     ioclass_config.remove_ioclass_config()
     cache_device = TestRun.disks["cache"]
     core_device = TestRun.disks["core"]
@@ -155,14 +143,13 @@ def prepare(filesystem, cores_number):
     cores = []
     for part in core_device.partitions:
-        if filesystem:
-            part.create_filesystem(filesystem)
         cores.append(casadm.add_core(cache, core_dev=part))
 
     cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
 
     ioclass_config.create_ioclass_config(
-        add_default_rule=False, ioclass_config_path=ioclass_config.default_config_file_path
+        add_default_rule=False,
+        ioclass_config_path=ioclass_config.default_config_file_path,
    )
     # To make test more precise all workload except of tested ioclass should be
     # put in pass-through mode
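
The dd changes in this file are purely stylistic: instead of binding the builder to a dd variable and calling dd.run() afterwards, the chain now ends in .run() and executes inline. Both forms are equivalent:

    # Before: builder bound to a name, executed in a second step.
    dd = Dd().input("/dev/zero").output(path).count(dd_count).block_size(dd_bs).oflag("sync")
    dd.run()

    # After: the chain ends in .run(), so no intermediate name is needed.
    (
        Dd()
        .input("/dev/zero")
        .output(path)
        .count(dd_count)
        .block_size(dd_bs)
        .oflag("sync")
        .run()
    )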

View File

@@ -1,11 +1,11 @@
 #
-# Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2019-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
 import random
-from datetime import datetime
 import time
+from datetime import datetime
 
 import pytest
@@ -28,13 +28,13 @@ from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_p
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_depth(filesystem):
     """
-        title: Test IO classification by directory.
-        description: |
-          Test if directory classification works properly for deeply nested directories for read and
-          write operations.
-        pass_criteria:
-          - No kernel bug.
-          - Read and write operations to directories are classified properly.
+    title: Test IO classification by directory.
+    description: |
+        Test if directory classification works properly for deeply nested directories for read and
+        write operations.
+    pass_criteria:
+        - No kernel bug.
+        - Read and write operations to directories are classified properly.
     """
     base_dir_path = f"{mountpoint}/base_dir"
@@ -42,8 +42,9 @@ def test_ioclass_directory_depth(filesystem):
         cache, core = prepare()
         Udev.disable()
 
-    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
-                      f"at {mountpoint}."):
+    with TestRun.step(
+        f"Prepare {filesystem.name} filesystem and mount {core.path} " f"at {mountpoint}."
+    ):
         core.create_filesystem(filesystem)
         core.mount(mountpoint)
         sync()
@@ -61,13 +62,14 @@ def test_ioclass_directory_depth(filesystem):
     # Test classification in nested dir by reading a previously unclassified file
     with TestRun.step("Create the first file in the nested directory."):
         test_file_1 = File(f"{nested_dir_path}/test_file_1")
-        dd = (
-            Dd().input("/dev/urandom")
+        (
+            Dd()
+            .input("/dev/urandom")
             .output(test_file_1.full_path)
             .count(random.randint(1, 200))
             .block_size(Size(1, Unit.MebiByte))
+            .run()
         )
-        dd.run()
         sync()
         drop_caches(DropCachesMode.ALL)
         test_file_1.refresh_item()
@@ -86,42 +88,48 @@ def test_ioclass_directory_depth(filesystem):
     with TestRun.step("Read the file in the nested directory"):
         base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-        dd = (
-            Dd().input(test_file_1.full_path)
+        (
+            Dd()
+            .input(test_file_1.full_path)
             .output("/dev/null")
             .block_size(Size(1, Unit.MebiByte))
+            .run()
         )
-        dd.run()
 
     with TestRun.step("Check occupancy after creating the file."):
         new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
         if new_occupancy != base_occupancy + test_file_1.size:
-            TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
-                                 f"Expected: {base_occupancy + test_file_1.size}, "
-                                 f"actual: {new_occupancy}")
+            TestRun.LOGGER.error(
+                "Wrong occupancy after reading file!\n"
+                f"Expected: {base_occupancy + test_file_1.size}, "
+                f"actual: {new_occupancy}"
+            )
 
     # Test classification in nested dir by creating a file
     with TestRun.step("Create the second file in the nested directory"):
         base_occupancy = new_occupancy
         test_file_2 = File(f"{nested_dir_path}/test_file_2")
-        dd = (
-            Dd().input("/dev/urandom")
+        (
+            Dd()
+            .input("/dev/urandom")
             .output(test_file_2.full_path)
-            .count(random.randint(25600, 51200))  # 100MB to 200MB
+            .count(random.randint(25600, 51200))  # count from 100MB to 200MB
             .block_size(Size(1, Unit.Blocks4096))
+            .run()
         )
-        dd.run()
         sync()
         drop_caches(DropCachesMode.ALL)
         test_file_2.refresh_item()
 
     with TestRun.step("Check occupancy after creating the second file."):
         new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
-        expected_occpuancy = (base_occupancy + test_file_2.size).set_unit(Unit.Blocks4096)
+        expected_occupancy = (base_occupancy + test_file_2.size).set_unit(Unit.Blocks4096)
         if new_occupancy != base_occupancy + test_file_2.size:
-            TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
-                                 f"Expected: {expected_occpuancy}, "
-                                 f"actual: {new_occupancy}")
+            TestRun.LOGGER.error(
+                "Wrong occupancy after creating file!\n"
+                f"Expected: {expected_occupancy}, "
+                f"actual: {new_occupancy}"
+            )
 
 
 @pytest.mark.os_dependent
@@ -130,13 +138,13 @@ def test_ioclass_directory_depth(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_file_operations(filesystem):
     """
-        title: Test IO classification by file operations.
-        description: |
-          Test if directory classification works properly after file operations like move or rename.
-        pass_criteria:
-          - No kernel bug.
-          - The operations themselves should not cause reclassification but IO after those
-            operations should be reclassified to proper IO class.
+    title: Test IO classification by file operations.
+    description: |
+        Test if directory classification works properly after file operations like move or rename.
+    pass_criteria:
+        - No kernel bug.
+        - The operations themselves should not cause reclassification but IO after those
+          operations should be reclassified to proper IO class.
     """
     test_dir_path = f"{mountpoint}/test_dir"
@@ -149,11 +157,13 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Create and load IO class config file."):
         ioclass_id = random.randint(2, ioclass_config.MAX_IO_CLASS_ID)
-        ioclass_config.add_ioclass(ioclass_id=1,
-                                   eviction_priority=1,
-                                   allocation="1.00",
-                                   rule="metadata",
-                                   ioclass_config_path=ioclass_config_path)
+        ioclass_config.add_ioclass(
+            ioclass_id=1,
+            eviction_priority=1,
+            allocation="1.00",
+            rule="metadata",
+            ioclass_config_path=ioclass_config_path,
+        )
         # directory IO class
         ioclass_config.add_ioclass(
             ioclass_id=ioclass_id,
@@ -164,8 +174,9 @@ def test_ioclass_directory_file_operations(filesystem):
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    with TestRun.step(f"Prepare {filesystem.name} filesystem "
-                      f"and mounting {core.path} at {mountpoint}."):
+    with TestRun.step(
+        f"Prepare {filesystem.name} filesystem " f"and mounting {core.path} at {mountpoint}."
+    ):
         core.create_filesystem(fs_type=filesystem)
         core.mount(mount_point=mountpoint)
         sync()
@@ -178,10 +189,18 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Create test file."):
         classified_before = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         file_path = f"{test_dir_path}/test_file"
-        (Dd().input("/dev/urandom").output(file_path).oflag("sync")
-         .block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
+        (
+            Dd()
+            .input("/dev/urandom")
+            .output(file_path)
+            .oflag("sync")
+            .block_size(Size(1, Unit.MebiByte))
+            .count(dd_blocks)
+            .run()
+        )
         time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
         sync()
         drop_caches(DropCachesMode.ALL)
@@ -189,7 +208,8 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Check classified occupancy."):
         classified_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         check_occupancy(classified_before + test_file.size, classified_after)
 
     with TestRun.step("Move test file out of classified directory."):
@@ -202,7 +222,8 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Check classified occupancy."):
         classified_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         check_occupancy(classified_before, classified_after)
         TestRun.LOGGER.info("Checking non-classified occupancy")
         non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
@@ -211,15 +232,15 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Read test file."):
         classified_before = classified_after
         non_classified_before = non_classified_after
-        (Dd().input(test_file.full_path).output("/dev/null")
-         .iflag("sync").run())
+        Dd().input(test_file.full_path).output("/dev/null").iflag("sync").run()
         time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
         sync()
         drop_caches(DropCachesMode.ALL)
 
     with TestRun.step("Check classified occupancy."):
         classified_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         check_occupancy(classified_before - test_file.size, classified_after)
         TestRun.LOGGER.info("Checking non-classified occupancy")
         non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
@@ -235,7 +256,8 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Check classified occupancy."):
         classified_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         check_occupancy(classified_before, classified_after)
         TestRun.LOGGER.info("Checking non-classified occupancy")
         non_classified_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
@@ -244,15 +266,21 @@ def test_ioclass_directory_file_operations(filesystem):
     with TestRun.step("Read test file."):
         classified_before = classified_after
         non_classified_before = non_classified_after
-        (Dd().input(test_file.full_path).output("/dev/null")
-         .block_size(Size(1, Unit.Blocks4096)).run())
+        (
+            Dd()
+            .input(test_file.full_path)
+            .output("/dev/null")
+            .block_size(Size(1, Unit.Blocks4096))
+            .run()
+        )
         time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
         sync()
         drop_caches(DropCachesMode.ALL)
 
     with TestRun.step("Check classified occupancy."):
         classified_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         check_occupancy(classified_before + test_file.size, classified_after)
 
     with TestRun.step("Check non-classified occupancy."):
@@ -266,16 +294,16 @@ def test_ioclass_directory_file_operations(filesystem):
 @pytest.mark.parametrizex("filesystem", Filesystem)
 def test_ioclass_directory_dir_operations(filesystem):
     """
-        title: Test IO classification by directory operations.
-        description: |
-          Test if directory classification works properly after directory operations like move or
-          rename.
-        pass_criteria:
-          - No kernel bug.
-          - The operations themselves should not cause reclassification but IO after those
-            operations should be reclassified to proper IO class.
-          - Directory classification may work with a delay after loading IO class configuration or
-            move/rename operations. Test checks if maximum delay is not exceeded.
+    title: Test IO classification by directory operations.
+    description: |
+        Test if directory classification works properly after directory operations like move or
+        rename.
+    pass_criteria:
+        - No kernel bug.
+        - The operations themselves should not cause reclassification but IO after those
+          operations should be reclassified to proper IO class.
+        - Directory classification may work with a delay after loading IO class configuration or
+          move/rename operations. Test checks if maximum delay is not exceeded.
     """
     non_classified_dir_path = f"{mountpoint}/non_classified"
@@ -314,8 +342,9 @@ def test_ioclass_directory_dir_operations(filesystem):
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
-    with TestRun.step(f"Prepare {filesystem.name} filesystem "
-                      f"and mount {core.path} at {mountpoint}."):
+    with TestRun.step(
+        f"Prepare {filesystem.name} filesystem " f"and mount {core.path} at {mountpoint}."
+    ):
         core.create_filesystem(fs_type=filesystem)
         core.mount(mount_point=mountpoint)
         sync()
@@ -328,14 +357,16 @@ def test_ioclass_directory_dir_operations(filesystem):
     with TestRun.step("Create files with delay check."):
         create_files_with_classification_delay_check(
-            cache, directory=dir_1, ioclass_id=ioclass_id_1)
+            cache, directory=dir_1, ioclass_id=ioclass_id_1
+        )
 
     with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
         dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
 
     with TestRun.step("Create files with delay check."):
-        create_files_with_classification_delay_check(cache, directory=dir_2,
-                                                     ioclass_id=ioclass_id_2)
+        create_files_with_classification_delay_check(
+            cache, directory=dir_2, ioclass_id=ioclass_id_2
+        )
         sync()
         drop_caches(DropCachesMode.ALL)
@@ -343,10 +374,13 @@ def test_ioclass_directory_dir_operations(filesystem):
         dir_2.move(destination=classified_dir_path_1)
 
     with TestRun.step("Read files with reclassification check."):
-        read_files_with_reclassification_check(cache,
-                                               target_ioclass_id=ioclass_id_1,
-                                               source_ioclass_id=ioclass_id_2,
-                                               directory=dir_2, with_delay=False)
+        read_files_with_reclassification_check(
+            cache,
+            target_ioclass_id=ioclass_id_1,
+            source_ioclass_id=ioclass_id_2,
+            directory=dir_2,
+            with_delay=False,
+        )
         sync()
         drop_caches(DropCachesMode.ALL)
@@ -354,9 +388,13 @@ def test_ioclass_directory_dir_operations(filesystem):
         dir_2.move(destination=mountpoint)
 
     with TestRun.step("Read files with reclassification check."):
-        read_files_with_reclassification_check(cache,
-                                               target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
-                                               directory=dir_2, with_delay=True)
+        read_files_with_reclassification_check(
+            cache,
+            target_ioclass_id=0,
+            source_ioclass_id=ioclass_id_1,
+            directory=dir_2,
+            with_delay=True,
+        )
 
     with TestRun.step(f"Remove {classified_dir_path_2}."):
         fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
@@ -367,24 +405,30 @@ def test_ioclass_directory_dir_operations(filesystem):
         dir_1.move(destination=classified_dir_path_2)
 
     with TestRun.step("Read files with reclassification check."):
-        read_files_with_reclassification_check(cache,
-                                               target_ioclass_id=ioclass_id_2,
-                                               source_ioclass_id=ioclass_id_1,
-                                               directory=dir_1, with_delay=True)
+        read_files_with_reclassification_check(
+            cache,
+            target_ioclass_id=ioclass_id_2,
+            source_ioclass_id=ioclass_id_1,
+            directory=dir_1,
+            with_delay=True,
+        )
 
     with TestRun.step(f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
         dir_1.move(destination=non_classified_dir_path)
 
     with TestRun.step("Read files with reclassification check."):
-        read_files_with_reclassification_check(cache,
-                                               target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
-                                               directory=dir_1, with_delay=True)
+        read_files_with_reclassification_check(
+            cache,
+            target_ioclass_id=0,
+            source_ioclass_id=ioclass_id_2,
+            directory=dir_1,
+            with_delay=True,
+        )
 
 
 def create_files_with_classification_delay_check(cache, directory: Directory, ioclass_id: int):
     start_time = datetime.now()
-    occupancy_after = cache.get_io_class_statistics(
-        io_class_id=ioclass_id).usage_stats.occupancy
+    occupancy_after = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
     dd_blocks = 10
     dd_size = Size(dd_blocks, Unit.Blocks4096)
     file_counter = 0
@@ -395,10 +439,18 @@ def create_files_with_classification_delay_check(cache, directory: Directory, io
         file_path = f"{directory.full_path}/test_file_{file_counter}"
         file_counter += 1
         time_from_start = datetime.now() - start_time
-        (Dd().input("/dev/zero").output(file_path).oflag("sync")
-         .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
+        (
+            Dd()
+            .input("/dev/zero")
+            .output(file_path)
+            .oflag("sync")
+            .block_size(Size(1, Unit.Blocks4096))
+            .count(dd_blocks)
+            .run()
+        )
         occupancy_after = cache.get_io_class_statistics(
-            io_class_id=ioclass_id).usage_stats.occupancy
+            io_class_id=ioclass_id
+        ).usage_stats.occupancy
         if occupancy_after - occupancy_before < dd_size:
             unclassified_files.append(file_path)
@@ -408,17 +460,31 @@ def create_files_with_classification_delay_check(cache, directory: Directory, io
     if len(unclassified_files):
         TestRun.LOGGER.info("Rewriting unclassified test files...")
         for file_path in unclassified_files:
-            (Dd().input("/dev/zero").output(file_path).oflag("sync")
-             .block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
+            (
+                Dd()
+                .input("/dev/zero")
+                .output(file_path)
+                .oflag("sync")
+                .block_size(Size(1, Unit.Blocks4096))
+                .count(dd_blocks)
+                .run()
+            )
 
 
-def read_files_with_reclassification_check(cache, target_ioclass_id: int, source_ioclass_id: int,
-                                           directory: Directory, with_delay: bool):
+def read_files_with_reclassification_check(
+    cache,
+    target_ioclass_id: int,
+    source_ioclass_id: int,
+    directory: Directory,
+    with_delay: bool,
+):
     start_time = datetime.now()
     target_occupancy_after = cache.get_io_class_statistics(
-        io_class_id=target_ioclass_id).usage_stats.occupancy
+        io_class_id=target_ioclass_id
+    ).usage_stats.occupancy
     source_occupancy_after = cache.get_io_class_statistics(
-        io_class_id=source_ioclass_id).usage_stats.occupancy
+        io_class_id=source_ioclass_id
+    ).usage_stats.occupancy
     files_to_reclassify = []
     target_ioclass_is_enabled = ioclass_is_enabled(cache, target_ioclass_id)
@@ -426,12 +492,13 @@ def read_files_with_reclassification_check(cache, target_ioclass_id: int, source
         target_occupancy_before = target_occupancy_after
         source_occupancy_before = source_occupancy_after
         time_from_start = datetime.now() - start_time
-        dd = Dd().input(file.full_path).output("/dev/null").block_size(Size(1, Unit.Blocks4096))
-        dd.run()
+        Dd().input(file.full_path).output("/dev/null").block_size(Size(1, Unit.Blocks4096)).run()
         target_occupancy_after = cache.get_io_class_statistics(
-            io_class_id=target_ioclass_id).usage_stats.occupancy
+            io_class_id=target_ioclass_id
+        ).usage_stats.occupancy
         source_occupancy_after = cache.get_io_class_statistics(
-            io_class_id=source_ioclass_id).usage_stats.occupancy
+            io_class_id=source_ioclass_id
+        ).usage_stats.occupancy
 
         if target_ioclass_is_enabled:
             if target_occupancy_after < target_occupancy_before:
@@ -464,8 +531,13 @@ def read_files_with_reclassification_check(cache, target_ioclass_id: int, source
     sync()
     drop_caches(DropCachesMode.ALL)
     for file in files_to_reclassify:
-        (Dd().input(file.full_path).output("/dev/null")
-         .block_size(Size(1, Unit.Blocks4096)).run())
+        (
+            Dd()
+            .input(file.full_path)
+            .output("/dev/null")
+            .block_size(Size(1, Unit.Blocks4096))
+            .run()
+        )
 
 
 def check_occupancy(expected: Size, actual: Size):
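
All the reclassification checks in this file follow one occupancy-delta pattern: read the per-class occupancy, issue IO, read it again, and compare. A condensed sketch using only calls present in this diff (variable names follow the test's own):

    # Occupancy-delta pattern behind read_files_with_reclassification_check:
    before = cache.get_io_class_statistics(io_class_id=target_ioclass_id).usage_stats.occupancy
    Dd().input(file.full_path).output("/dev/null").block_size(Size(1, Unit.Blocks4096)).run()
    after = cache.get_io_class_statistics(io_class_id=target_ioclass_id).usage_stats.occupancy
    if after < before:
        TestRun.LOGGER.error("Target IO class occupancy dropped after a classified read.")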

View File

@@ -1,5 +1,5 @@
 #
-# Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2020-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -8,28 +8,26 @@ from math import isclose
 
 import pytest
 
-from api.cas.cache_config import CacheMode, CacheLineSize
-from api.cas.ioclass_config import IoClass
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.disk_utils import Filesystem
-from test_utils.os_utils import sync, Udev
 from .io_class_common import *
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
-def test_ioclass_eviction_priority(cache_line_size):
+def test_io_class_eviction_priority():
     """
-    title: Check whether eviction priorites are respected.
+    title: Check whether eviction priorities are respected.
     description: |
-        Create ioclass for 4 different directories, each with different
+        Create io class for 4 different directories, each with different
         eviction priority configured. Saturate 3 of them and check if the
         partitions are evicted in a good order during IO to the fourth
     pass_criteria:
         - Partitions are evicted in specified order
     """
+    cache_line_size = CacheLineSize.LINE_64KiB
+
     with TestRun.step("Prepare CAS device"):
         cache, core = prepare(cache_mode=CacheMode.WT, cache_line_size=cache_line_size)
         cache_size = cache.get_statistics().config_stats.cache_size
@@ -37,44 +35,32 @@ def test_ioclass_eviction_priority(cache_line_size):
     with TestRun.step("Disable udev"):
         Udev.disable()
 
-    with TestRun.step(
-        f"Preparing filesystem and mounting {core.path} at {mountpoint}"
-    ):
+    with TestRun.step(f"Preparing filesystem and mounting {core.path} at {mountpoint}"):
         filesystem = Filesystem.xfs
         core.create_filesystem(filesystem)
         core.mount(mountpoint)
         sync()
 
     with TestRun.step("Prepare test dirs"):
-        IoclassConfig = namedtuple(
-            "IoclassConfig", "id eviction_prio max_occupancy dir_path"
-        )
+        IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
         io_classes = [
             IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
             IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
             IoclassConfig(3, 5, 0.40, f"{mountpoint}/C"),
             IoclassConfig(4, 1, 1.00, f"{mountpoint}/D"),
         ]
 
         for io_class in io_classes:
             fs_utils.create_directory(io_class.dir_path, parents=True)
 
-    with TestRun.step("Remove old ioclass config"):
-        ioclass_config.remove_ioclass_config()
-        ioclass_config.create_ioclass_config(False)
-
-    with TestRun.step("Adding default ioclasses"):
-        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
-
-    with TestRun.step("Adding ioclasses for all dirs"):
+    with TestRun.step("Adding io classes for all dirs"):
         for io_class in io_classes:
             ioclass_config.add_ioclass(
                 io_class.id,
                 f"directory:{io_class.dir_path}&done",
                 io_class.eviction_prio,
                 f"{io_class.max_occupancy:0.2f}",
-                ioclass_config_path
             )
 
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
 
     with TestRun.step("Resetting cache stats"):
@@ -86,22 +72,20 @@ def test_ioclass_eviction_priority(cache_line_size):
             occupancy = get_io_class_occupancy(cache, io_class.id)
             if occupancy.get_value() != 0:
                 TestRun.LOGGER.error(
-                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
+                    f"Incorrect initial occupancy for io class id: {io_class.id}."
                     f" Expected 0, got: {occupancy}"
                 )
 
-    with TestRun.step(
-        f"To A, B and C directories perform IO with size of max io_class occupancy"
-    ):
+    with TestRun.step(f"To A, B and C directories perform IO with size of max io_class occupancy"):
         for io_class in io_classes[0:3]:
             run_io_dir(
                 f"{io_class.dir_path}/tmp_file",
-                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
+                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value()),
             )
 
-    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
+    with TestRun.step("Check if each io class reached it's occupancy limit"):
         for io_class in io_classes[0:3]:
-            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)
+            actual_occupancy = get_io_class_occupancy(cache, io_class.id)
             occupancy_limit = (
                 (io_class.max_occupancy * cache_size)
@@ -109,16 +93,16 @@ def test_ioclass_eviction_priority(cache_line_size):
                 .set_unit(Unit.Blocks4096)
             )
 
-            if not isclose(actuall_occupancy.value, occupancy_limit.value, rel_tol=0.1):
+            if not isclose(actual_occupancy.value, occupancy_limit.value, rel_tol=0.1):
                 TestRun.LOGGER.error(
-                    f"Occupancy for ioclass {io_class.id} does not match. "
-                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
+                    f"Occupancy for io class {io_class.id} does not match. "
+                    f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
                 )
 
         if get_io_class_occupancy(cache, io_classes[3].id).value != 0:
             TestRun.LOGGER.error(
-                f"Occupancy for ioclass {io_classes[3].id} should be 0. "
-                f"Actuall: {actuall_occupancy}"
+                f"Occupancy for io class {io_classes[3].id} should be 0. "
+                f"Actual: {actual_occupancy}"
             )
 
     with TestRun.step(
@@ -126,30 +110,22 @@ def test_ioclass_eviction_priority(cache_line_size):
         "if other partitions are evicted in a good order"
     ):
         target_io_class = io_classes[3]
-        io_classes_to_evict = io_classes[:3][
-            ::-1
-        ]  # List is ordered by eviction priority
+        io_classes_to_evict = io_classes[:3][::-1]  # List is ordered by eviction priority
        io_classes_evicted = []
 
         io_offset = 0
         for io_class in io_classes_to_evict:
-            io_size = int((io_class.max_occupancy * cache_size) / Unit.Blocks4096)
-            run_io_dir(
-                f"{target_io_class.dir_path}/tmp_file_{io_class.id}",
-                io_size,
-                io_offset
-            )
+            io_size = int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value())
+            run_io_dir(f"{target_io_class.dir_path}/tmp_file_{io_class.id}", io_size, io_offset)
             io_offset += io_size
 
-            part_to_evict_end_occupancy = get_io_class_occupancy(
-                cache, io_class.id, percent=True
-            )
+            part_to_evict_end_occupancy = get_io_class_occupancy(cache, io_class.id, percent=True)
 
-            # Since number of evicted cachelines is always >= 128, occupancy is checked
+            # Since number of evicted cache lines is always >= 128, occupancy is checked
             # with approximation
             if not isclose(part_to_evict_end_occupancy, 0, abs_tol=4):
                 TestRun.LOGGER.error(
                     f"Wrong percent of cache lines evicted from part {io_class.id}. "
-                    f"Meant to be evicted {io_class.max_occupancy*100}%, actaully evicted "
-                    f"{io_class.max_occupancy*100-part_to_evict_end_occupancy}%"
+                    f"Meant to be evicted {io_class.max_occupancy * 100}%, actaully evicted "
+                    f"{io_class.max_occupancy * 100 - part_to_evict_end_occupancy}%"
                 )
 
             io_classes_evicted.append(io_class)
@@ -161,4 +137,4 @@ def test_ioclass_eviction_priority(cache_line_size):
             occupancy = get_io_class_occupancy(cache, i.id, percent=True)
             if not isclose(occupancy, i.max_occupancy * 100, abs_tol=4):
-                TestRun.LOGGER.error(f"Ioclass {i.id} evicted incorrectly")
+                TestRun.LOGGER.error(f"Io class {i.id} evicted incorrectly")

View File

@@ -1,5 +1,5 @@
 #
-# Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2019-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -22,11 +22,11 @@ from tests.io_class.io_class_common import mountpoint, prepare, ioclass_config_p
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_extension():
     """
-        title: Test IO classification by file extension.
-        description: Test if file extension classification works properly.
-        pass_criteria:
-          - No kernel bug.
-          - IO is classified properly based on IO class rule with file extension.
+    title: Test IO classification by file extension.
+    description: Test if file extension classification works properly.
+    pass_criteria:
+        - No kernel bug.
+        - IO is classified properly based on IO class rule with file extension.
     """
     iterations = 50
     ioclass_id = 1
@@ -35,10 +35,11 @@ def test_ioclass_file_extension():
     dd_size = Size(4, Unit.KibiByte)
     dd_count = 10
     dd = (
-        Dd().input("/dev/zero")
+        Dd()
+        .input("/dev/zero")
         .output(f"{mountpoint}/test_file.{tested_extension}")
         .count(dd_count)
         .block_size(dd_size)
     )
 
     with TestRun.step("Prepare cache and core."):
@@ -74,13 +75,14 @@ def test_ioclass_file_extension():
     with TestRun.step(f"Write to file with not cached extension and check if it is not cached."):
         for ext in wrong_extensions:
-            dd = (
-                Dd().input("/dev/zero")
+            (
+                Dd()
+                .input("/dev/zero")
                 .output(f"{mountpoint}/test_file.{ext}")
                 .count(dd_count)
                 .block_size(dd_size)
+                .run()
             )
-            dd.run()
             sync()
             dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
             if dirty.get_value(Unit.Blocks4096) != 0:
@@ -91,11 +93,11 @@ def test_ioclass_file_extension():
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_ioclass_file_name_prefix():
     """
-        title: Test IO classification by file name prefix.
-        description: Test if file name prefix classification works properly.
-        pass_criteria:
-          - No kernel bug.
-          - IO is classified properly based on IO class rule with file name prefix.
+    title: Test IO classification by file name prefix.
+    description: Test if file name prefix classification works properly.
+    pass_criteria:
+        - No kernel bug.
+        - IO is classified properly based on IO class rule with file name prefix.
     """
     ioclass_id = 1
@@ -107,25 +109,14 @@ def test_ioclass_file_name_prefix():
     with TestRun.step("Prepare cache and core."):
         cache, core = prepare()
 
-    with TestRun.step("Create and load IO class config."):
-        ioclass_config.remove_ioclass_config()
-        ioclass_config.create_ioclass_config(False)
-
-        # Avoid caching anything else than files with specified prefix
-        ioclass_config.add_ioclass(
-            ioclass_id=0,
-            eviction_priority=255,
-            allocation="0.00",
-            rule=f"unclassified",
-            ioclass_config_path=ioclass_config_path,
-        )
-
+    with TestRun.step("Add io class for specific file name prefix."):
         # Enables file with specified prefix to be cached
         ioclass_config.add_ioclass(
             ioclass_id=ioclass_id,
             eviction_priority=1,
             allocation="1.00",
             rule=f"file_name_prefix:test&done",
-            ioclass_config_path=ioclass_config_path,
+            ioclass_config_path=ioclass_config_path
         )
         casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
@ -137,66 +128,76 @@ def test_ioclass_file_name_prefix():
current_occupancy = cache.get_occupancy() current_occupancy = cache.get_occupancy()
if previous_occupancy.get_value() > current_occupancy.get_value(): if previous_occupancy.get_value() > current_occupancy.get_value():
TestRun.fail(f"Current occupancy ({str(current_occupancy)}) is lower " TestRun.fail(
f"than before ({str(previous_occupancy)}).") f"Current occupancy ({str(current_occupancy)}) is lower "
f"than before ({str(previous_occupancy)})."
)
# Filesystem creation caused metadata IO which is not supposed # Filesystem creation caused metadata IO which is not supposed
# to be cached # to be cached
# Check if files with proper prefix are cached # Check if files with proper prefix are cached
with TestRun.step(f"Write files which are supposed to be cached and check " with TestRun.step(
f"if they are cached."): f"Write files which are supposed to be cached and check " f"if they are cached."
):
for f in cached_files: for f in cached_files:
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{mountpoint}/{f}") .input("/dev/zero")
.count(dd_count) .output(f"{mountpoint}/{f}")
.block_size(dd_size) .count(dd_count)
.block_size(dd_size)
.run()
) )
dd.run()
sync() sync()
current_occupancy = cache.get_occupancy() current_occupancy = cache.get_occupancy()
expected_occupancy = previous_occupancy + (dd_size * dd_count) expected_occupancy = previous_occupancy + (dd_size * dd_count)
if current_occupancy != expected_occupancy: if current_occupancy != expected_occupancy:
TestRun.fail(f"Current occupancy value is not valid. " TestRun.fail(
f"(Expected: {str(expected_occupancy)}, " f"Current occupancy value is not valid. "
f"actual: {str(current_occupancy)})") f"(Expected: {str(expected_occupancy)}, "
f"actual: {str(current_occupancy)})"
)
previous_occupancy = current_occupancy previous_occupancy = current_occupancy
with TestRun.step("Flush cache."): with TestRun.step("Flush cache."):
cache.flush_cache() cache.flush_cache()
# Check if file with improper extension is not cached # Check if file with improper extension is not cached
with TestRun.step(f"Write files which are not supposed to be cached and check if " with TestRun.step(
f"they are not cached."): f"Write files which are not supposed to be cached and check if " f"they are not cached."
):
for f in not_cached_files: for f in not_cached_files:
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{mountpoint}/{f}") .input("/dev/zero")
.count(dd_count) .output(f"{mountpoint}/{f}")
.block_size(dd_size) .count(dd_count)
.block_size(dd_size)
.run()
) )
dd.run()
sync() sync()
current_occupancy = cache.get_occupancy() current_occupancy = cache.get_occupancy()
if current_occupancy != previous_occupancy: if current_occupancy != previous_occupancy:
TestRun.fail(f"Current occupancy value is not valid. " TestRun.fail(
f"(Expected: {str(previous_occupancy)}, " f"Current occupancy value is not valid. "
f"actual: {str(current_occupancy)})") f"(Expected: {str(previous_occupancy)}, "
f"actual: {str(current_occupancy)})"
)
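# Both occupancy checks above are plain delta accounting. A self-contained
# sketch with illustrative numbers (these names are not the framework's API):
block_size = 4096                # dd_size in bytes, illustrative
count = 10                       # dd_count, illustrative
previous_occupancy = 1_000_000   # bytes, read back before the write
expected_occupancy = previous_occupancy + block_size * count  # 1_040_960
# A cached file must raise occupancy by exactly block_size * count,
# while a pass-through file must leave it unchanged.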
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_extension_preexisting_filesystem(): def test_ioclass_file_extension_preexisting_filesystem():
""" """
title: Test IO classification by file extension with preexisting filesystem on core device. title: Test IO classification by file extension with preexisting filesystem on core device.
description: | description: |
Test if file extension classification works properly when there is an existing Test if file extension classification works properly when there is an existing
filesystem on core device. filesystem on core device.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly based on IO class rule with file extension - IO is classified properly based on IO class rule with file extension
after mounting core device. after mounting core device.
""" """
ioclass_id = 1 ioclass_id = 1
extensions = ["tmp", "tm", "out", "txt", "log", "123"] extensions = ["tmp", "tm", "out", "txt", "log", "123"]
@ -212,13 +213,14 @@ def test_ioclass_file_extension_preexisting_filesystem():
core.core_device.mount(mountpoint) core.core_device.mount(mountpoint)
for ext in extensions: for ext in extensions:
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{mountpoint}/test_file.{ext}") .input("/dev/zero")
.count(dd_count) .output(f"{mountpoint}/test_file.{ext}")
.block_size(dd_size) .count(dd_count)
.block_size(dd_size)
.run()
) )
dd.run()
core.core_device.unmount() core.core_device.unmount()
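# The IO class rule built in the next step matches on file extension. A hedged
# sketch of how the tested extensions can be OR-ed into one rule string
# (the "|" operator is an assumption about the ioclass rule grammar):
rule = "|".join(f"extension:{ext}" for ext in ["tmp", "tm", "out", "txt", "log", "123"])
# -> "extension:tmp|extension:tm|extension:out|extension:txt|extension:log|extension:123"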
with TestRun.step("Create IO class config."): with TestRun.step("Create IO class config."):
@ -243,13 +245,14 @@ def test_ioclass_file_extension_preexisting_filesystem():
with TestRun.step(f"Write to file with cached extension and check if they are cached."): with TestRun.step(f"Write to file with cached extension and check if they are cached."):
for ext in extensions: for ext in extensions:
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{mountpoint}/test_file.{ext}") .input("/dev/zero")
.count(dd_count) .output(f"{mountpoint}/test_file.{ext}")
.block_size(dd_size) .count(dd_count)
.block_size(dd_size)
.run()
) )
dd.run()
sync() sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count: if dirty.get_value(Unit.Blocks4096) != (extensions.index(ext) + 1) * dd_count:
@ -260,11 +263,11 @@ def test_ioclass_file_extension_preexisting_filesystem():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_file_offset(): def test_ioclass_file_offset():
""" """
title: Test IO classification by file offset. title: Test IO classification by file offset.
description: Test if file offset classification works properly. description: Test if file offset classification works properly.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly based on IO class rule with file offset. - IO is classified properly based on IO class rule with file offset.
""" """
ioclass_id = 1 ioclass_id = 1
iterations = 100 iterations = 100
@ -272,6 +275,7 @@ def test_ioclass_file_offset():
dd_count = 1 dd_count = 1
min_cached_offset = 16384 min_cached_offset = 16384
max_cached_offset = 65536 max_cached_offset = 65536
blocks4096 = Unit.Blocks4096.get_value()
with TestRun.step("Prepare cache and core."): with TestRun.step("Prepare cache and core."):
cache, core = prepare() cache, core = prepare()
@ -296,22 +300,21 @@ def test_ioclass_file_offset():
with TestRun.step("Write to file within cached offset range and check if it is cached."): with TestRun.step("Write to file within cached offset range and check if it is cached."):
# Since ioclass rule consists of strict inequalities, 'seek' can't be set to first # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
# nor last sector # nor last sector
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int( min_seek = int((min_cached_offset + blocks4096) / blocks4096)
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value) max_seek = int((max_cached_offset - min_cached_offset - blocks4096) / blocks4096)
/ Unit.Blocks4096.value
)
for i in range(iterations): for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek)) file_offset = random.choice(range(min_seek, max_seek))
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{mountpoint}/tmp_file") .input("/dev/zero")
.count(dd_count) .output(f"{mountpoint}/tmp_file")
.block_size(dd_size) .count(dd_count)
.seek(file_offset) .block_size(dd_size)
.seek(file_offset)
.run()
) )
dd.run()
sync() sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 1: if dirty.get_value(Unit.Blocks4096) != 1:
@ -319,20 +322,22 @@ def test_ioclass_file_offset():
cache.flush_cache() cache.flush_cache()
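# Worked numbers for the two seek windows used in this test (pure arithmetic):
blocks4096 = 4096
min_cached_offset, max_cached_offset = 16384, 65536
# Inside the cached range - strict inequalities exclude the boundary sectors:
min_seek = int((min_cached_offset + blocks4096) / blocks4096)                      # 5
max_seek = int((max_cached_offset - min_cached_offset - blocks4096) / blocks4096)  # 11
# Outside the cached range, used in the next step:
out_min_seek, out_max_seek = 0, int(min_cached_offset / blocks4096)                # 0, 4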
with TestRun.step( with TestRun.step(
"Write to file outside of cached offset range and check if it is not cached."): "Write to file outside of cached offset range and check if it is not cached."
):
min_seek = 0 min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value) max_seek = int(min_cached_offset / blocks4096)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range") TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations): for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek)) file_offset = random.choice(range(min_seek, max_seek))
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{mountpoint}/tmp_file") .input("/dev/zero")
.count(dd_count) .output(f"{mountpoint}/tmp_file")
.block_size(dd_size) .count(dd_count)
.seek(file_offset) .block_size(dd_size)
.seek(file_offset)
.run()
) )
dd.run()
sync() sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0: if dirty.get_value(Unit.Blocks4096) != 0:
@ -345,11 +350,11 @@ def test_ioclass_file_offset():
@pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_file_size(filesystem): def test_ioclass_file_size(filesystem):
""" """
title: Test IO classification by file size. title: Test IO classification by file size.
description: Test if file size classification works properly. description: Test if file size classification works properly.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly based on IO class rule with file size. - IO is classified properly based on IO class rule with file size.
""" """
# File size IO class rules are configured in a way that each tested file size is unambiguously # File size IO class rules are configured in a way that each tested file size is unambiguously
@ -375,8 +380,9 @@ def test_ioclass_file_size(filesystem):
with TestRun.step("Prepare and load IO class config."): with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size) load_file_size_io_classes(cache, base_size)
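# load_file_size_io_classes() is defined earlier in this module. Conceptually
# it emits rules that partition the size axis around base_size; a hedged
# sketch of such rules (thresholds and ids are illustrative; only the
# file_size operator syntax is taken from configs used elsewhere in these tests):
#
#   1,file_size:eq:4096,1,1.00
#   2,file_size:lt:4096,1,1.00
#   3,file_size:gt:4096,1,1.00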
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} " with TestRun.step(
f"at {mountpoint}."): f"Prepare {filesystem.name} filesystem and mount {core.path} " f"at {mountpoint}."
):
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
sync() sync()
@ -385,26 +391,28 @@ def test_ioclass_file_size(filesystem):
test_files = [] test_files = []
for size, ioclass_id in size_to_class.items(): for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_io_class_statistics( occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy io_class_id=ioclass_id
).usage_stats.occupancy
file_path = f"{mountpoint}/test_file_{size.get_value()}" file_path = f"{mountpoint}/test_file_{size.get_value()}"
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run() Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
occupancy_after = cache.get_io_class_statistics( occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy io_class_id=ioclass_id
).usage_stats.occupancy
if occupancy_after != occupancy_before + size: if occupancy_after != occupancy_before + size:
TestRun.fail("File not cached properly!\n" TestRun.fail(
f"Expected {occupancy_before + size}\n" "File not cached properly!\n"
f"Actual {occupancy_after}") f"Expected {occupancy_before + size}\n"
f"Actual {occupancy_after}"
)
test_files.append(File(file_path).refresh_item()) test_files.append(File(file_path).refresh_item())
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
with TestRun.step("Move all files to 'unclassified' IO class."): with TestRun.step("Move all files to 'unclassified' IO class."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path) ioclass_config.remove_ioclass_config(ioclass_config_path)
ioclass_config.create_ioclass_config( ioclass_config.create_ioclass_config(False, ioclass_config_path)
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
ioclass_id=0, ioclass_id=0,
eviction_priority=22, eviction_priority=22,
@ -428,18 +436,18 @@ def test_ioclass_file_size(filesystem):
occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy occupancy_after = cache.get_io_class_statistics(io_class_id=0).usage_stats.occupancy
occupancy_expected = occupancy_before + file.size occupancy_expected = occupancy_before + file.size
if occupancy_after != occupancy_expected: if occupancy_after != occupancy_expected:
TestRun.fail("File not reclassified properly!\n" TestRun.fail(
f"Expected {occupancy_expected}\n" "File not reclassified properly!\n"
f"Actual {occupancy_after}") f"Expected {occupancy_expected}\n"
f"Actual {occupancy_after}"
)
occupancy_before = occupancy_after occupancy_before = occupancy_after
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
with TestRun.step("Restore IO class configuration."): with TestRun.step("Restore IO class configuration."):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path) ioclass_config.remove_ioclass_config(ioclass_config_path)
ioclass_config.create_ioclass_config( ioclass_config.create_ioclass_config(False, ioclass_config_path)
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
ioclass_id=0, ioclass_id=0,
eviction_priority=22, eviction_priority=22,
@ -457,18 +465,22 @@ def test_ioclass_file_size(filesystem):
for file in test_files: for file in test_files:
ioclass_id = size_to_class[file.size] ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_io_class_statistics( occupancy_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy io_class_id=ioclass_id
).usage_stats.occupancy
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run() Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
occupancy_after = cache.get_io_class_statistics( occupancy_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).usage_stats.occupancy io_class_id=ioclass_id
).usage_stats.occupancy
actual_blocks = occupancy_after.get_value(Unit.Blocks4096) actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096) expected_blocks = (occupancy_before + file.size).get_value(Unit.Blocks4096)
if actual_blocks != expected_blocks: if actual_blocks != expected_blocks:
TestRun.fail("File not reclassified properly!\n" TestRun.fail(
f"Expected {occupancy_before + file.size}\n" "File not reclassified properly!\n"
f"Actual {occupancy_after}") f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}"
)
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)

View File

@ -18,26 +18,33 @@ from test_tools import fs_utils
from test_tools.disk_utils import Filesystem from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev from test_utils.os_utils import sync, Udev
from test_utils.size import Unit, Size from test_utils.size import Unit, Size
from tests.io_class.io_class_common import prepare, mountpoint, run_io_dir, \ from tests.io_class.io_class_common import (
get_io_class_occupancy, run_io_dir_read, get_io_class_usage prepare,
mountpoint,
run_io_dir,
get_io_class_occupancy,
run_io_dir_read,
get_io_class_usage,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("io_size_multiplication", [0.5, 2]) @pytest.mark.parametrize("io_size_multiplication", [0.5, 2])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB]) @pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize) def test_io_class_occupancy_directory_write(io_size_multiplication, cache_mode):
def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, cache_line_size):
""" """
title: Test for max occupancy set for ioclass based on directory title: Test for max occupancy set for ioclass based on directory
description: | description: |
Create ioclass for 3 different directories, each with different Create ioclass for 3 different directories, each with different
max cache occupancy configured. Run IO against each directory and see max cache occupancy configured. Run IO against each directory and see
if occupancy limit is repected. if occupancy limit is respected.
pass_criteria: pass_criteria:
- Max occupancy is set correctly for each ioclass - Max occupancy is set correctly for each ioclass
- Each ioclass does not exceed max occupancy - Each ioclass does not exceed max occupancy
""" """
cache_line_size = CacheLineSize.LINE_64KiB
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size) cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size cache_size = cache.get_statistics().config_stats.cache_size
@ -66,10 +73,10 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False) ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"): with TestRun.step("Add default io classes"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(",")) ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
with TestRun.step("Add ioclasses for all dirs"): with TestRun.step("Add io classes for all dirs"):
for io_class in io_classes: for io_class in io_classes:
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
io_class.id, io_class.id,
@ -94,7 +101,8 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
) )
with TestRun.step( with TestRun.step(
f"To each directory perform IO with size of {io_size_multiplication} max io_class occupancy" f"To each directory perform IO"
f" with size of {io_size_multiplication} max io_class occupancy"
): ):
for io_class in io_classes: for io_class in io_classes:
original_occupancies = {} original_occupancies = {}
@ -123,7 +131,7 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
io_count = get_io_count(i, cache_size, cache_line_size, io_size_multiplication) io_count = get_io_count(i, cache_size, cache_line_size, io_size_multiplication)
if ( if (
original_occupancies[i.id] != actual_occupancy original_occupancies[i.id] != actual_occupancy
and io_count * Unit.Blocks4096.value < actual_occupancy.value and io_count * Unit.Blocks4096.get_value() < actual_occupancy.value
): ):
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass {i.id} should not change " f"Occupancy for ioclass {i.id} should not change "
@ -141,7 +149,7 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
# Divergency may be caused by rounding max occupancy # Divergence may be caused by rounding max occupancy
if actual_occupancy > occupancy_limit * 1.01: if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. " f"Occupancy for ioclass id exceeded: {io_class.id}. "
@ -152,20 +160,20 @@ def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, c
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("io_size_multiplication", [0.5, 2]) @pytest.mark.parametrize("io_size_multiplication", [0.5, 2])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB]) def test_io_class_occupancy_directory_read(io_size_multiplication):
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_size, cache_mode):
""" """
title: Test for max occupancy set for ioclass based on directory - read title: Test for max occupancy set for ioclass based on directory - read
description: | description: |
Set cache mode to pass-through and create files on mounted core Set cache mode to pass-through and create files on mounted core device.
device. Swtich cache to write through, and load ioclasses applaying Switch cache to write through, and load io classes applying to different files.
to different files. Read files and check if occupancy threshold is Read files and check if occupancy threshold is respected.
respected. pass_criteria:
pass_criteria: - Max occupancy is set correctly for each ioclass
- Max occupancy is set correctly for each ioclass - Each ioclass does not exceed max occupancy
- Each ioclass does not exceed max occupancy
""" """
cache_line_size = CacheLineSize.LINE_64KiB
cache_mode = CacheMode.WB
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size) cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size cache_size = cache.get_statistics().config_stats.cache_size
@ -202,10 +210,10 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False) ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"): with TestRun.step("Add default io classes"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(",")) ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
with TestRun.step("Add ioclasses for all dirs"): with TestRun.step("Add io classes for all dirs"):
for io_class in io_classes: for io_class in io_classes:
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
io_class.id, io_class.id,
@ -225,7 +233,7 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
occupancy = get_io_class_occupancy(cache, io_class.id) occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0: if occupancy.get_value() != 0:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}." f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}" f" Expected 0, got: {occupancy}"
) )
@ -256,7 +264,7 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
io_count = get_io_count(i, cache_size, cache_line_size, io_size_multiplication) io_count = get_io_count(i, cache_size, cache_line_size, io_size_multiplication)
if ( if (
original_occupancies[i.id] != actual_occupancy original_occupancies[i.id] != actual_occupancy
and io_count * Unit.Blocks4096.value < actual_occupancy.value and io_count * Unit.Blocks4096.get_value() < actual_occupancy.value
): ):
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass {i.id} should not change " f"Occupancy for ioclass {i.id} should not change "
@ -274,7 +282,7 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
# Divergency may be caused by rounding max occupancy # Divergence may be caused by rounding max occupancy
if actual_occupancy > occupancy_limit * 1.01: if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. " f"Occupancy for ioclass id exceeded: {io_class.id}. "
@ -286,14 +294,14 @@ def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_siz
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_occupancy_sum_cache(): def test_ioclass_occupancy_sum_cache():
""" """
title: Test for ioclasses occupancy sum title: Test for io classes occupancy sum
description: | description: |
Create ioclass for 3 different directories, each with different Create ioclass for 3 different directories, each with different
max cache occupancy configured. Trigger IO to each ioclass and check max cache occupancy configured. Trigger IO to each ioclass and check
if sum of their Usage stats is equal to cache Usage stats. if sum of their Usage stats is equal to cache Usage stats.
pass_criteria: pass_criteria:
- Max occupancy is set correctly for each ioclass - Max occupancy is set correctly for each ioclass
- Sum of ioclassess stats is equal to cache stats - Sum of io classes stats is equal to cache stats
""" """
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare() cache, core = prepare()
@ -324,7 +332,7 @@ def test_ioclass_occupancy_sum_cache():
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False) ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"): with TestRun.step("Add default io classes"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(",")) ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
with TestRun.step("Add ioclasses for all dirs"): with TestRun.step("Add ioclasses for all dirs"):
@ -365,7 +373,7 @@ def test_ioclass_occupancy_sum_cache():
for io_class in io_classes: for io_class in io_classes:
run_io_dir( run_io_dir(
f"{io_class.dir_path}/tmp_file", f"{io_class.dir_path}/tmp_file",
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096), int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value()),
) )
with TestRun.step("Verify stats after IO"): with TestRun.step("Verify stats after IO"):

View File

@ -16,23 +16,32 @@ from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils from test_tools import fs_utils
from test_tools.disk_utils import Filesystem from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev from test_utils.os_utils import sync, Udev
from tests.io_class.io_class_common import prepare, mountpoint, TestRun, Unit, \ from tests.io_class.io_class_common import (
run_io_dir, get_io_class_dirty, get_io_class_usage, get_io_class_occupancy prepare,
mountpoint,
TestRun,
Unit,
run_io_dir,
get_io_class_dirty,
get_io_class_usage,
get_io_class_occupancy,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize) def test_ioclass_occupancy_load():
def test_ioclass_occuppancy_load(cache_line_size):
""" """
title: Load cache with occupancy limit specified title: Load cache with occupancy limit specified
description: | description: |
Load cache and verify if occupancy limits are loaded correctly and if Load cache and verify if occupancy limits are loaded correctly and if
each part has assigned apropriate number of each part has an appropriate number of dirty blocks assigned.
dirty blocks. pass_criteria:
pass_criteria: - Occupancy thresholds have correct values for each ioclass after load
- Occupancy thresholds have correct values for each ioclass after load
""" """
cache_line_size = CacheLineSize.LINE_64KiB
blocks4096 = Unit.Blocks4096.get_value()
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=CacheMode.WB, cache_line_size=cache_line_size) cache, core = prepare(cache_mode=CacheMode.WB, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size cache_size = cache.get_statistics().config_stats.cache_size
@ -40,18 +49,14 @@ def test_ioclass_occuppancy_load(cache_line_size):
with TestRun.step("Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step( with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
f"Prepare filesystem and mount {core.path} at {mountpoint}"
):
filesystem = Filesystem.xfs filesystem = Filesystem.xfs
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
sync() sync()
with TestRun.step("Prepare test dirs"): with TestRun.step("Prepare test dirs"):
IoclassConfig = namedtuple( IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
"IoclassConfig", "id eviction_prio max_occupancy dir_path"
)
io_classes = [ io_classes = [
IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"), IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"), IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
@ -65,10 +70,10 @@ def test_ioclass_occuppancy_load(cache_line_size):
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False) ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"): with TestRun.step("Add default io classes"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(",")) ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
with TestRun.step("Add ioclasses for all dirs"): with TestRun.step("Add io classes for all dirs"):
for io_class in io_classes: for io_class in io_classes:
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
io_class.id, io_class.id,
@ -88,32 +93,28 @@ def test_ioclass_occuppancy_load(cache_line_size):
occupancy = get_io_class_occupancy(cache, io_class.id) occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0: if occupancy.get_value() != 0:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}." f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}" f" Expected 0, got: {occupancy}"
) )
with TestRun.step(f"Perform IO with size equal to cache size"): with TestRun.step(f"Perform IO with size equal to cache size"):
for io_class in io_classes: for io_class in io_classes:
run_io_dir( run_io_dir(f"{io_class.dir_path}/tmp_file", int(cache_size / blocks4096))
f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096)
)
with TestRun.step("Check if the ioclass did not exceed specified occupancy"): with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
for io_class in io_classes: for io_class in io_classes:
actuall_dirty = get_io_class_dirty(cache, io_class.id) actual_dirty = get_io_class_dirty(cache, io_class.id)
dirty_limit = ( dirty_limit = (
(io_class.max_occupancy * cache_size) (io_class.max_occupancy * cache_size)
.align_down(Unit.Blocks4096.get_value()) .align_down(blocks4096)
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
if not isclose( if not isclose(actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1):
actuall_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1
):
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Dirty for ioclass id: {io_class.id} doesn't match expected." f"Dirty for ioclass id: {io_class.id} doesn't match expected."
f"Expected: {dirty_limit}, actuall: {actuall_dirty}" f"Expected: {dirty_limit}, actual: {actual_dirty}"
) )
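# math.isclose with rel_tol=0.1 accepts up to 10% relative divergence,
# measured against the larger of the two operands:
from math import isclose
assert isclose(95, 100, rel_tol=0.1) is True   # |95-100| = 5  <= 0.1 * 100
assert isclose(89, 100, rel_tol=0.1) is False  # |89-100| = 11 >  0.1 * 100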
with TestRun.step("Stop cache without flushing the data"): with TestRun.step("Stop cache without flushing the data"):
@ -131,20 +132,18 @@ def test_ioclass_occuppancy_load(cache_line_size):
with TestRun.step("Check if the ioclass did not exceed specified occupancy"): with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
for io_class in io_classes: for io_class in io_classes:
actuall_dirty = get_io_class_dirty(cache, io_class.id) actual_dirty = get_io_class_dirty(cache, io_class.id)
dirty_limit = ( dirty_limit = (
(io_class.max_occupancy * cache_size) (io_class.max_occupancy * cache_size)
.align_down(Unit.Blocks4096.get_value()) .align_down(blocks4096)
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
if not isclose( if not isclose(actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1):
actuall_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1
):
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Dirty for ioclass id: {io_class.id} doesn't match expected." f"Dirty for ioclass id: {io_class.id} doesn't match expected."
f"Expected: {dirty_limit}, actuall: {actuall_dirty}" f"Expected: {dirty_limit}, actual: {actual_dirty}"
) )
with TestRun.step("Compare ioclass configs"): with TestRun.step("Compare ioclass configs"):
@ -172,10 +171,10 @@ def test_ioclass_occuppancy_load(cache_line_size):
with TestRun.step("Compare usage stats before and after the load"): with TestRun.step("Compare usage stats before and after the load"):
for io_class in io_classes: for io_class in io_classes:
actuall_usage_stats = get_io_class_usage(cache, io_class.id) actual_usage_stats = get_io_class_usage(cache, io_class.id)
if original_usage_stats[io_class.id] != actuall_usage_stats: if original_usage_stats[io_class.id] != actual_usage_stats:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Usage stats doesn't match for ioclass {io_class.id}. " f"Usage stats doesn't match for ioclass {io_class.id}. "
f"Original: {original_usage_stats[io_class.id]}, " f"Original: {original_usage_stats[io_class.id]}, "
f"loaded: {actuall_usage_stats}" f"loaded: {actual_usage_stats}"
) )

View File

@ -17,26 +17,33 @@ from test_tools import fs_utils
from test_tools.disk_utils import Filesystem from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev from test_utils.os_utils import sync, Udev
from test_utils.size import Unit from test_utils.size import Unit
from tests.io_class.io_class_common import prepare, mountpoint, ioclass_config_path, \ from tests.io_class.io_class_common import (
get_io_class_occupancy, run_io_dir, run_io_dir_read prepare,
mountpoint,
ioclass_config_path,
get_io_class_occupancy,
run_io_dir,
run_io_dir_read,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("cache_mode", [CacheMode.WB, CacheMode.WT]) @pytest.mark.parametrize("io_class_size_multiplication", [0.5, 1])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize) def test_ioclass_repart(io_class_size_multiplication):
@pytest.mark.parametrize("ioclass_size_multiplicatior", [0.5, 1])
def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior):
""" """
title: Check whether occupancy limit is respected during repart title: Check whether occupancy limit is respected during repart
description: | description: |
Create ioclass for 3 different directories, each with different max Create ioclass for 3 different directories, each with different max
occupancy threshold. Create 3 files classified on default ioclass. occupancy threshold. Create 3 files classified on default ioclass.
Move files to directories created earlier and force repart by reading Move files to directories created earlier and force repart by reading
their contents. their contents.
pass_criteria: pass_criteria:
- Partitions are evicted in specified order - Partitions are evicted in specified order
""" """
cache_mode = CacheMode.WB
cache_line_size = CacheLineSize.LINE_64KiB
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size) cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
cache_size = cache.get_statistics().config_stats.cache_size cache_size = cache.get_statistics().config_stats.cache_size
@ -61,27 +68,27 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
for io_class in io_classes: for io_class in io_classes:
fs_utils.create_directory(io_class.dir_path, parents=True) fs_utils.create_directory(io_class.dir_path, parents=True)
with TestRun.step("Remove old ioclass config"): with TestRun.step("Remove old io class config"):
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False) ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"): with TestRun.step("Add default io classes"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="1.00")).split(",")) ioclass_config.add_ioclass(*str(IoClass.default(allocation="1.00")).split(","))
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
ioclass_id=5, ioclass_id=5,
rule="metadata", rule="metadata",
eviction_priority=1, eviction_priority=1,
allocation="1.00", allocation="1.00",
ioclass_config_path=ioclass_config_path ioclass_config_path=ioclass_config_path,
) )
with TestRun.step("Add ioclasses for all dirs"): with TestRun.step("Add io classes for all dirs"):
for io_class in io_classes: for io_class in io_classes:
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
io_class.id, io_class.id,
f"directory:{io_class.dir_path}&done", f"directory:{io_class.dir_path}&done",
io_class.eviction_prio, io_class.eviction_prio,
f"{io_class.max_occupancy*ioclass_size_multiplicatior:0.2f}", f"{io_class.max_occupancy * io_class_size_multiplication:0.2f}",
) )
casadm.load_io_classes(cache_id=cache.cache_id, file=default_config_file_path) casadm.load_io_classes(cache_id=cache.cache_id, file=default_config_file_path)
@ -93,7 +100,8 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
with TestRun.step(f"Create 3 files classified in default ioclass"): with TestRun.step(f"Create 3 files classified in default ioclass"):
for i, io_class in enumerate(io_classes[0:3]): for i, io_class in enumerate(io_classes[0:3]):
run_io_dir( run_io_dir(
f"{mountpoint}/{i}", int((io_class.max_occupancy * cache_size) / Unit.Blocks4096) f"{mountpoint}/{i}",
int((io_class.max_occupancy * cache_size) / Unit.Blocks4096.get_value()),
) )
if not isclose( if not isclose(
@ -108,7 +116,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
occupancy = get_io_class_occupancy(cache, io_class.id) occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0: if occupancy.get_value() != 0:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}." f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}" f" Expected 0, got: {occupancy}"
) )
@ -119,7 +127,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
with TestRun.step("Check if each ioclass reached it's occupancy limit"): with TestRun.step("Check if each ioclass reached it's occupancy limit"):
for io_class in io_classes[0:3]: for io_class in io_classes[0:3]:
actuall_occupancy = get_io_class_occupancy(cache, io_class.id) actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = ( occupancy_limit = (
(io_class.max_occupancy * cache_size) (io_class.max_occupancy * cache_size)
@ -127,8 +135,8 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
if not isclose(actuall_occupancy.value, occupancy_limit.value, rel_tol=0.1): if not isclose(actual_occupancy.value, occupancy_limit.value, rel_tol=0.1):
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass {io_class.id} does not match. " f"Occupancy for ioclass {io_class.id} does not match. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}" f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
) )

View File

@ -15,7 +15,12 @@ from test_tools import fs_utils
from test_tools.disk_utils import Filesystem from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev from test_utils.os_utils import sync, Udev
from test_utils.size import Unit from test_utils.size import Unit
from tests.io_class.io_class_common import mountpoint, prepare, get_io_class_occupancy, run_io_dir from tests.io_class.io_class_common import (
mountpoint,
prepare,
get_io_class_occupancy,
run_io_dir,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -24,12 +29,12 @@ from tests.io_class.io_class_common import mountpoint, prepare, get_io_class_occ
@pytest.mark.parametrize("new_occupancy", [25, 50, 70, 100]) @pytest.mark.parametrize("new_occupancy", [25, 50, 70, 100])
def test_ioclass_resize(cache_line_size, new_occupancy): def test_ioclass_resize(cache_line_size, new_occupancy):
""" """
title: Resize ioclass title: Resize ioclass
description: | description: |
Add ioclass, fill it with data, change its size and check if new Add ioclass, fill it with data, change its size and check if new
limit is respected limit is respected
pass_criteria: pass_criteria:
- Occupancy threshold is respected - Occupancy threshold is respected
""" """
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_mode=CacheMode.WT, cache_line_size=cache_line_size) cache, core = prepare(cache_mode=CacheMode.WT, cache_line_size=cache_line_size)
@ -54,13 +59,13 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False) ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"): with TestRun.step("Add default io classes"):
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
ioclass_id=1, ioclass_id=1,
rule="metadata&done", rule="metadata&done",
eviction_priority=1, eviction_priority=1,
allocation="1.00", allocation="1.00",
ioclass_config_path=default_config_file_path ioclass_config_path=default_config_file_path,
) )
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(",")) ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
@ -82,15 +87,15 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
occupancy = get_io_class_occupancy(cache, io_class.id) occupancy = get_io_class_occupancy(cache, io_class.id)
if occupancy.get_value() != 0: if occupancy.get_value() != 0:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Incorrect inital occupancy for ioclass id: {io_class.id}." f"Incorrect initial occupancy for ioclass id: {io_class.id}."
f" Expected 0, got: {occupancy}" f" Expected 0, got: {occupancy}"
) )
with TestRun.step(f"Perform IO with size equal to cache size"): with TestRun.step(f"Perform IO with size equal to cache size"):
run_io_dir(f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096)) run_io_dir(f"{io_class.dir_path}/tmp_file", int(cache_size / Unit.Blocks4096))
with TestRun.step("Check if the ioclass did not exceed specified occupancy"): with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
actuall_occupancy = get_io_class_occupancy(cache, io_class.id) actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = ( occupancy_limit = (
(io_class.max_occupancy * cache_size) (io_class.max_occupancy * cache_size)
@ -98,15 +103,16 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
# Divergency may be casued be rounding max occupancy # Divergence may be caused by rounding max occupancy
if actuall_occupancy > occupancy_limit * 1.01: if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. " f"Occupancy for ioclass id exceeded: {io_class.id}. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}" f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
) )
with TestRun.step( with TestRun.step(
f"Resize ioclass from {io_class.max_occupancy*100}% to {new_occupancy}%" " cache occupancy" f"Resize ioclass from {io_class.max_occupancy * 100}% to {new_occupancy}%"
" cache occupancy"
): ):
io_class.max_occupancy = new_occupancy / 100 io_class.max_occupancy = new_occupancy / 100
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
@ -119,7 +125,7 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
rule="metadata&done", rule="metadata&done",
eviction_priority=1, eviction_priority=1,
allocation="1.00", allocation="1.00",
ioclass_config_path=default_config_file_path ioclass_config_path=default_config_file_path,
) )
ioclass_config.add_ioclass( ioclass_config.add_ioclass(
io_class.id, io_class.id,
@ -131,10 +137,10 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
casadm.load_io_classes(cache_id=cache.cache_id, file=default_config_file_path) casadm.load_io_classes(cache_id=cache.cache_id, file=default_config_file_path)
with TestRun.step(f"Perform IO with size equal to cache size"): with TestRun.step(f"Perform IO with size equal to cache size"):
run_io_dir(f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096)) run_io_dir(f"{io_class.dir_path}/tmp_file", int(cache_size / Unit.Blocks4096))
with TestRun.step("Check if the ioclass did not exceed specified occupancy"): with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
actuall_occupancy = get_io_class_occupancy(cache, io_class.id) actual_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = ( occupancy_limit = (
(io_class.max_occupancy * cache_size) (io_class.max_occupancy * cache_size)
@ -142,9 +148,9 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
.set_unit(Unit.Blocks4096) .set_unit(Unit.Blocks4096)
) )
# Divergency may be casued be rounding max occupancy # Divergence may be caused by rounding max occupancy
if actuall_occupancy > occupancy_limit * 1.01: if actual_occupancy > occupancy_limit * 1.01:
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. " f"Occupancy for ioclass id exceeded: {io_class.id}. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}" f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
) )

View File

@ -10,8 +10,10 @@ from api.cas.ioclass_config import IoClass
from core.test_run_utils import TestRun from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_utils.size import Size, Unit from test_utils.size import Size, Unit
from tests.io_class.io_class_common import compare_io_classes_list, \ from tests.io_class.io_class_common import (
generate_and_load_random_io_class_config compare_io_classes_list,
generate_and_load_random_io_class_config,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -20,16 +22,16 @@ def test_io_class_preserve_configuration():
""" """
title: Preserve IO class configuration after load. title: Preserve IO class configuration after load.
description: | description: |
Check Open CAS ability to preserve IO class configuration after starting CAS with Check Open CAS ability to preserve IO class configuration
load option. after starting CAS with load option.
pass_criteria: pass_criteria:
- No system crash - No system crash
- Cache loads successfully - Cache loads successfully
- IO class configuration is the same before and after reboot - IO class configuration is the same before and after reboot
""" """
with TestRun.step("Prepare devices."): with TestRun.step("Prepare devices."):
cache_device = TestRun.disks['cache'] cache_device = TestRun.disks["cache"]
core_device = TestRun.disks['core'] core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(150, Unit.MebiByte)]) cache_device.create_partitions([Size(150, Unit.MebiByte)])
core_device.create_partitions([Size(300, Unit.MebiByte)]) core_device.create_partitions([Size(300, Unit.MebiByte)])
@ -41,19 +43,24 @@ def test_io_class_preserve_configuration():
cache = casadm.start_cache(cache_device, force=True) cache = casadm.start_cache(cache_device, force=True)
with TestRun.step("Display IO class configuration shall be only Unclassified IO class."): with TestRun.step("Display IO class configuration shall be only Unclassified IO class."):
default_io_class = [IoClass( default_io_class = [
ioclass_config.DEFAULT_IO_CLASS_ID, IoClass(
ioclass_config.DEFAULT_IO_CLASS_RULE, ioclass_config.DEFAULT_IO_CLASS_ID,
ioclass_config.DEFAULT_IO_CLASS_PRIORITY, ioclass_config.DEFAULT_IO_CLASS_RULE,
allocation="1.00")] ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
allocation="1.00",
)
]
actual = cache.list_io_classes() actual = cache.list_io_classes()
compare_io_classes_list(default_io_class, actual) compare_io_classes_list(default_io_class, actual)
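    # str() on an IoClass yields the comma-separated config form - the
    # add_ioclass(*str(...).split(",")) idiom in these tests relies on it.
    # A hedged sketch (the exact rendering is an assumption):
    #
    #   io_class = IoClass(0, "unclassified", 255, allocation="1.00")
    #   str(io_class)  # -> "0,unclassified,255,1.00"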
with TestRun.step("Add core device."): with TestRun.step("Add core device."):
cache.add_core(core_device) cache.add_core(core_device)
with TestRun.step("Create and load configuration file for 33 IO classes with random names, " with TestRun.step(
"allocation and priority values."): "Create and load configuration file for 33 IO classes with random names, "
"allocation and priority values."
):
generated_io_classes = generate_and_load_random_io_class_config(cache) generated_io_classes = generate_and_load_random_io_class_config(cache)
with TestRun.step("Display IO class configuration shall be the same as created."): with TestRun.step("Display IO class configuration shall be the same as created."):
@ -64,7 +71,8 @@ def test_io_class_preserve_configuration():
cache.stop() cache.stop()
with TestRun.step( with TestRun.step(
"Load cache and check IO class configuration - shall be the same as created."): "Load cache and check IO class configuration - shall be the same as created."
):
cache = casadm.load_cache(cache_device) cache = casadm.load_cache(cache_device)
actual = cache.list_io_classes() actual = cache.list_io_classes()
compare_io_classes_list(generated_io_classes, actual) compare_io_classes_list(generated_io_classes, actual)
@ -73,7 +81,8 @@ def test_io_class_preserve_configuration():
TestRun.executor.reboot() TestRun.executor.reboot()
with TestRun.step( with TestRun.step(
"Load cache and check IO class configuration - shall be the same as created."): "Load cache and check IO class configuration - shall be the same as created."
):
cache = casadm.load_cache(cache_device) cache = casadm.load_cache(cache_device)
actual = cache.list_io_classes() actual = cache.list_io_classes()
compare_io_classes_list(generated_io_classes, actual) compare_io_classes_list(generated_io_classes, actual)

View File

@ -4,14 +4,15 @@
# #
import pytest import pytest
from api.cas import ioclass_config, cli_messages from api.cas import ioclass_config, cli_messages
from api.cas.ioclass_config import IoClass
from core.test_run import TestRun from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_utils.output import CmdException from test_utils.output import CmdException
from test_utils.size import Unit, Size from test_utils.size import Unit, Size
from tests.io_class.io_class_common import prepare, ioclass_config_path from tests.io_class.io_class_common import prepare, ioclass_config_path
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
headerless_configuration = "1,unclassified,22,1.00" headerless_configuration = "1,unclassified,22,1.00"
double_io_class_configuration = "2,file_size:le:4096,1,1.00\n2,file_size:le:4096,1,1.00" double_io_class_configuration = "2,file_size:le:4096,1,1.00\n2,file_size:le:4096,1,1.00"
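# double_io_class_configuration reuses id 2 twice, which the loader must
# reject. For contrast, a well-formed body under the assumed default header
# would read:
#
#   IO class id,IO class name,Eviction priority,Allocation
#   2,file_size:le:4096,1,1.00
#   3,file_size:gt:4096,1,1.00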
@ -28,7 +29,6 @@ illegal_io_class_configurations = {
",,1,": cli_messages.illegal_io_class_config_L2C1, ",,1,": cli_messages.illegal_io_class_config_L2C1,
",1,,": cli_messages.illegal_io_class_config_L2C1, ",1,,": cli_messages.illegal_io_class_config_L2C1,
"1,,,": cli_messages.illegal_io_class_config_L2C2, "1,,,": cli_messages.illegal_io_class_config_L2C2,
# 2 parameters # 2 parameters
",,1,1": cli_messages.illegal_io_class_config_L2C1, ",,1,1": cli_messages.illegal_io_class_config_L2C1,
",1,,1": cli_messages.illegal_io_class_config_L2C1, ",1,,1": cli_messages.illegal_io_class_config_L2C1,
@ -36,34 +36,28 @@ illegal_io_class_configurations = {
"1,,1,": cli_messages.illegal_io_class_config_L2C2, "1,,1,": cli_messages.illegal_io_class_config_L2C2,
"1,,,1": cli_messages.illegal_io_class_config_L2C2, "1,,,1": cli_messages.illegal_io_class_config_L2C2,
"1,1,,": cli_messages.illegal_io_class_config_L2C4, "1,1,,": cli_messages.illegal_io_class_config_L2C4,
# 3 parameters # 3 parameters
",1,1,1": cli_messages.illegal_io_class_config_L2C1, ",1,1,1": cli_messages.illegal_io_class_config_L2C1,
"1,,1,1": cli_messages.illegal_io_class_config_L2C2, "1,,1,1": cli_messages.illegal_io_class_config_L2C2,
"1,1,1,": cli_messages.illegal_io_class_config_L2C4, "1,1,1,": cli_messages.illegal_io_class_config_L2C4,
# 5 parameters # 5 parameters
"1,1,1,1,1": cli_messages.illegal_io_class_config_L2, "1,1,1,1,1": cli_messages.illegal_io_class_config_L2,
# Try to configure IO class ID as: string, negative value or 33 # Try to configure IO class ID as: string, negative value or 33
"IllegalInput,Superblock,22,1": cli_messages.illegal_io_class_invalid_id, "IllegalInput,Superblock,22,1": cli_messages.illegal_io_class_invalid_id,
"-2,Superblock,22,1": cli_messages.illegal_io_class_invalid_id_number, "-2,Superblock,22,1": cli_messages.illegal_io_class_invalid_id_number,
"33,Superblock,22,1": cli_messages.illegal_io_class_invalid_id_number, "33,Superblock,22,1": cli_messages.illegal_io_class_invalid_id_number,
# Try to use semicolon, dots or new line as csv delimiters # Try to use semicolon, dots or new line as csv delimiters
"1;1;1;1": cli_messages.illegal_io_class_config_L2, "1;1;1;1": cli_messages.illegal_io_class_config_L2,
"1.1.1.1": cli_messages.illegal_io_class_config_L2, "1.1.1.1": cli_messages.illegal_io_class_config_L2,
"1\n1\n1\n1": cli_messages.illegal_io_class_config_L2, "1\n1\n1\n1": cli_messages.illegal_io_class_config_L2,
# Try to configure eviction priority as: string, negative value or 256 # Try to configure eviction priority as: string, negative value or 256
"1,Superblock,IllegalInput,1": cli_messages.illegal_io_class_invalid_priority, "1,Superblock,IllegalInput,1": cli_messages.illegal_io_class_invalid_priority,
"1,Superblock,-2,1": cli_messages.illegal_io_class_invalid_priority_number, "1,Superblock,-2,1": cli_messages.illegal_io_class_invalid_priority_number,
"1,Superblock,256,1": cli_messages.illegal_io_class_invalid_priority_number, "1,Superblock,256,1": cli_messages.illegal_io_class_invalid_priority_number,
# Try to configure allocation as: string, negative value or 2 # Try to configure allocation as: string, negative value or 2
"1,Superblock,22,IllegalInput": cli_messages.illegal_io_class_invalid_allocation, "1,Superblock,22,IllegalInput": cli_messages.illegal_io_class_invalid_allocation,
"1,Superblock,255,-2": cli_messages.illegal_io_class_invalid_allocation_number, "1,Superblock,255,-2": cli_messages.illegal_io_class_invalid_allocation_number,
"1,Superblock,255,2": cli_messages.illegal_io_class_invalid_allocation_number "1,Superblock,255,2": cli_messages.illegal_io_class_invalid_allocation_number,
} }
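# try_load_malformed_config() is defined further down in this file. One
# plausible shape, assuming casadm is imported in this module and that
# cli_messages exposes a stderr matcher (both assumptions):
#
#   def try_load_malformed_config(cache, expected_io_classes, expected_err_msg):
#       try:
#           casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
#           TestRun.fail("Malformed IO class config was loaded without an error.")
#       except CmdException as exc:
#           cli_messages.check_stderr_msg(exc.output, expected_err_msg)
#       # A rejected load must leave the previous configuration untouched:
#       if not IoClass.compare_ioclass_lists(expected_io_classes, cache.list_io_classes()):
#           TestRun.fail("IO class configuration changed after a failed load.")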
@ -71,14 +65,14 @@ illegal_io_class_configurations = {
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_io_class_prevent_wrong_configuration(): def test_io_class_prevent_wrong_configuration():
""" """
title: Open CAS ability to prevent loading wrong configuration. title: Open CAS ability to prevent loading wrong configuration.
description: | description: |
Check Open CAS ability to prevent loading configuration from wrong configuration file Check Open CAS ability to prevent loading configuration from wrong configuration file
like: illegal number of parameters in a line, invalid IO class values, using a semicolon like: illegal number of parameters in a line, invalid IO class values, using a semicolon
instead of a comma, or wrong eviction priority and allocation values. instead of a comma, or wrong eviction priority and allocation values.
pass_criteria: pass_criteria:
- Wrong configuration must not be loaded - Wrong configuration must not be loaded
- There is an appropriate message about wrong io class configuration - There is an appropriate message about wrong io class configuration
""" """
with TestRun.step("Prepare CAS device"): with TestRun.step("Prepare CAS device"):
cache, core = prepare(cache_size=Size(150, Unit.MiB), core_size=Size(300, Unit.MiB)) cache, core = prepare(cache_size=Size(150, Unit.MiB), core_size=Size(300, Unit.MiB))
@ -86,49 +80,67 @@ def test_io_class_prevent_wrong_configuration():
with TestRun.step("Display IO class configuration shall be default"): with TestRun.step("Display IO class configuration shall be default"):
create_and_load_default_io_class_config(cache) create_and_load_default_io_class_config(cache)
loaded_io_classes = cache.list_io_classes() loaded_io_classes = cache.list_io_classes()
loaded_io_classes_str = '\n'.join(str(i) for i in loaded_io_classes) loaded_io_classes_str = "\n".join(str(i) for i in loaded_io_classes)
TestRun.LOGGER.info(f"Loaded IO class configuration is:\n" TestRun.LOGGER.info(
f"{IoClass.default_header()}\n{loaded_io_classes_str}") f"Loaded IO class configuration is:\n"
f"{IoClass.default_header()}\n{loaded_io_classes_str}"
)
config_io_classes = IoClass.csv_to_list(fs_utils.read_file(ioclass_config_path)) config_io_classes = IoClass.csv_to_list(fs_utils.read_file(ioclass_config_path))
if not IoClass.compare_ioclass_lists(config_io_classes, loaded_io_classes): if not IoClass.compare_ioclass_lists(config_io_classes, loaded_io_classes):
TestRun.fail("Default IO class configuration not loaded correctly.") TestRun.fail("Default IO class configuration not loaded correctly.")
with TestRun.step("Create illegal configuration file containing IO configuration " with TestRun.step(
"without header and check if it can not be loaded."): "Create illegal configuration file containing IO configuration "
TestRun.LOGGER.info(f"Preparing headerless configuration file with following content:\n" "without header and check if it can not be loaded."
f"{headerless_configuration}") ):
TestRun.LOGGER.info(
f"Preparing headerless configuration file with following content:\n"
f"{headerless_configuration}"
)
fs_utils.write_file(ioclass_config_path, headerless_configuration) fs_utils.write_file(ioclass_config_path, headerless_configuration)
try_load_malformed_config(cache, config_io_classes, try_load_malformed_config(
expected_err_msg=cli_messages.headerless_io_class_config) cache,
config_io_classes,
expected_err_msg=cli_messages.headerless_io_class_config,
)
with TestRun.step("Create illegal configuration file containing IO configuration with " with TestRun.step(
"malformed header and check if it can not be loaded."): "Create illegal configuration file containing IO configuration with "
"malformed header and check if it can not be loaded."
):
for header, err_message in setup_headers().items(): for header, err_message in setup_headers().items():
config_content = f"{header}\n{IoClass.default()}" config_content = f"{header}\n{IoClass.default()}"
TestRun.LOGGER.info(f"Testing following header with default IO class:\n" TestRun.LOGGER.info(
f"{config_content}") f"Testing following header with default IO class:\n" f"{config_content}"
)
fs_utils.write_file(ioclass_config_path, config_content) fs_utils.write_file(ioclass_config_path, config_content)
try_load_malformed_config(cache, config_io_classes, try_load_malformed_config(cache, config_io_classes, expected_err_msg=err_message)
expected_err_msg=err_message)
with TestRun.step("Create illegal configuration file containing double IO class configuration " with TestRun.step(
"and check if it can not be loaded."): "Create illegal configuration file containing double IO class configuration "
"and check if it can not be loaded."
):
config_content = f"{IoClass.default_header()}\n{double_io_class_configuration}" config_content = f"{IoClass.default_header()}\n{double_io_class_configuration}"
TestRun.LOGGER.info(f"Testing following configuration file:\n{config_content}") TestRun.LOGGER.info(f"Testing following configuration file:\n{config_content}")
fs_utils.write_file(ioclass_config_path, config_content) fs_utils.write_file(ioclass_config_path, config_content)
try_load_malformed_config(cache, config_io_classes, try_load_malformed_config(
expected_err_msg=cli_messages.double_io_class_config) cache,
config_io_classes,
expected_err_msg=cli_messages.double_io_class_config,
)
with TestRun.step("Create illegal configuration file containing malformed IO configuration " with TestRun.step(
"with correct header and check if it can not be loaded."): "Create illegal configuration file containing malformed IO configuration "
"with correct header and check if it can not be loaded."
):
for io_config, err_message in illegal_io_class_configurations.items(): for io_config, err_message in illegal_io_class_configurations.items():
config_content = f"{IoClass.default_header()}\n{io_config}" config_content = f"{IoClass.default_header()}\n{io_config}"
TestRun.LOGGER.info( TestRun.LOGGER.info(
f"Testing following header with default IO class:\n{config_content}") f"Testing following header with default IO class:\n{config_content}"
)
fs_utils.write_file(ioclass_config_path, config_content) fs_utils.write_file(ioclass_config_path, config_content)
try_load_malformed_config(cache, config_io_classes, try_load_malformed_config(cache, config_io_classes, expected_err_msg=err_message)
expected_err_msg=err_message)
def try_load_malformed_config(cache, config_io_classes, expected_err_msg): def try_load_malformed_config(cache, config_io_classes, expected_err_msg):
@ -141,9 +153,10 @@ def try_load_malformed_config(cache, config_io_classes, expected_err_msg):
cli_messages.check_stderr_msg(e.output, expected_err_msg) cli_messages.check_stderr_msg(e.output, expected_err_msg)
output_io_classes = cache.list_io_classes() output_io_classes = cache.list_io_classes()
if not IoClass.compare_ioclass_lists(output_io_classes, config_io_classes): if not IoClass.compare_ioclass_lists(output_io_classes, config_io_classes):
output_str = '\n'.join(str(i) for i in output_io_classes) output_str = "\n".join(str(i) for i in output_io_classes)
TestRun.LOGGER.error( TestRun.LOGGER.error(
f"Loaded IO class config should be default but it is different:\n{output_str}") f"Loaded IO class config should be default but it is different:\n{output_str}"
)
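The function above follows a fail-then-verify pattern: attempt the load, match the stderr message, then confirm the cache still reports the previously loaded classes. A condensed sketch of the same idea (the CmdException type is an assumption; the remaining names come from this module):

def expect_load_failure(cache, expected_classes, expected_err_msg):
    try:
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
        TestRun.fail("Malformed IO class config was loaded without an error")
    except CmdException as e:  # assumed exception type for failed casadm calls
        cli_messages.check_stderr_msg(e.output, expected_err_msg)
    if not IoClass.compare_ioclass_lists(cache.list_io_classes(), expected_classes):
        TestRun.LOGGER.error("IO class configuration changed after a failed load")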
def create_and_load_default_io_class_config(cache): def create_and_load_default_io_class_config(cache):
@ -153,36 +166,51 @@ def create_and_load_default_io_class_config(cache):
def setup_headers(): def setup_headers():
default_header = IoClass.default_header_dict() default_header = IoClass.default_header_dict()
correct_id_header = default_header['id'] correct_id_header = default_header["id"]
correct_name_header = default_header['name'] correct_name_header = default_header["name"]
correct_eviction_priority_header = default_header['eviction_prio'] correct_eviction_priority_header = default_header["eviction_prio"]
correct_allocation_header = default_header['allocation'] correct_allocation_header = default_header["allocation"]
malformed_io_class_id_header = f"{malformed_io_class_id}," \ malformed_io_class_id_header = (
f"{correct_name_header}," \ f"{malformed_io_class_id},"
f"{correct_eviction_priority_header}," \ f"{correct_name_header},"
f"{correct_allocation_header}" f"{correct_eviction_priority_header},"
malformed_io_class_name_header = f"{correct_id_header}," \ f"{correct_allocation_header}"
f"{malformed_io_class_name}," \ )
f"{correct_eviction_priority_header}," \ malformed_io_class_name_header = (
f"{correct_allocation_header}" f"{correct_id_header},"
malformed_eviction_priority_header = f"{correct_id_header}," \ f"{malformed_io_class_name},"
f"{correct_name_header}," \ f"{correct_eviction_priority_header},"
f"{malformed_io_class_eviction_priority}," \ f"{correct_allocation_header}"
f"{correct_allocation_header}" )
malformed_allocation_header = f"{correct_id_header}," \ malformed_eviction_priority_header = (
f"{correct_name_header}," \ f"{correct_id_header},"
f"{correct_eviction_priority_header}," \ f"{correct_name_header},"
f"{malformed_io_class_allocation}" f"{malformed_io_class_eviction_priority},"
f"{correct_allocation_header}"
)
malformed_allocation_header = (
f"{correct_id_header},"
f"{correct_name_header},"
f"{correct_eviction_priority_header},"
f"{malformed_io_class_allocation}"
)
return { return {
malformed_io_class_id_header: [m.replace("value_template", malformed_io_class_id) malformed_io_class_id_header: [
for m in cli_messages.malformed_io_class_header], m.replace("value_template", malformed_io_class_id)
malformed_io_class_name_header: [m.replace("value_template", malformed_io_class_name) for m in cli_messages.malformed_io_class_header
for m in cli_messages.malformed_io_class_header], ],
malformed_eviction_priority_header: [m.replace("value_template", malformed_io_class_name_header: [
malformed_io_class_eviction_priority) m.replace("value_template", malformed_io_class_name)
for m in cli_messages.malformed_io_class_header], for m in cli_messages.malformed_io_class_header
malformed_allocation_header: [m.replace("value_template", malformed_io_class_allocation) ],
for m in cli_messages.malformed_io_class_header] malformed_eviction_priority_header: [
m.replace("value_template", malformed_io_class_eviction_priority)
for m in cli_messages.malformed_io_class_header
],
malformed_allocation_header: [
m.replace("value_template", malformed_io_class_allocation)
for m in cli_messages.malformed_io_class_header
],
} }
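The dict built above pairs each malformed header with the framework's template messages, substituting the offending token for the "value_template" placeholder. A small illustration with a hypothetical token:

malformed_token = "IllegalId"  # hypothetical broken column value
expected_msgs = [
    m.replace("value_template", malformed_token)
    for m in cli_messages.malformed_io_class_header
]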
View File
@ -1,5 +1,5 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -20,11 +20,11 @@ from tests.io_class.io_class_common import prepare, ioclass_config_path
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_process_name(): def test_ioclass_process_name():
""" """
title: Test IO classification by process name. title: Test IO classification by process name.
description: Check if data generated by process with particular name is cached. description: Check if data generated by process with particular name is cached.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly based on the name of the process generating the IO. - IO is classified properly based on the name of the process generating the IO.
""" """
ioclass_id = 1 ioclass_id = 1
dd_size = Size(4, Unit.KibiByte) dd_size = Size(4, Unit.KibiByte)
@ -50,15 +50,15 @@ def test_ioclass_process_name():
with TestRun.step("Check if all data generated by dd process is cached."): with TestRun.step("Check if all data generated by dd process is cached."):
for i in range(iterations): for i in range(iterations):
dd = ( (
Dd() Dd()
.input("/dev/zero") .input("/dev/zero")
.output(core.path) .output(core.path)
.count(dd_count) .count(dd_count)
.block_size(dd_size) .block_size(dd_size)
.seek(i) .seek(i)
.run()
) )
dd.run()
sync() sync()
time.sleep(0.1) time.sleep(0.1)
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
@ -70,11 +70,11 @@ def test_ioclass_process_name():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_pid(): def test_ioclass_pid():
""" """
title: Test IO classification by process id. title: Test IO classification by process id.
description: Check if data generated by process with particular id is cached. description: Check if data generated by process with particular id is cached.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly based on the id of the process generating the IO. - IO is classified properly based on the id of the process generating the IO.
""" """
ioclass_id = 1 ioclass_id = 1
iterations = 20 iterations = 20
@ -89,11 +89,7 @@ def test_ioclass_pid():
# Since 'dd' has to be executed right after writing the pid to 'ns_last_pid', # Since 'dd' has to be executed right after writing the pid to 'ns_last_pid',
# the 'dd' command is built and appended to the 'echo' command instead of being run directly # the 'dd' command is built and appended to the 'echo' command instead of being run directly
dd_command = str( dd_command = str(
Dd() Dd().input("/dev/zero").output(core.path).count(dd_count).block_size(dd_size)
.input("/dev/zero")
.output(core.path)
.count(dd_count)
.block_size(dd_size)
) )
for _ in TestRun.iteration(range(iterations)): for _ in TestRun.iteration(range(iterations)):
@ -117,12 +113,12 @@ def test_ioclass_pid():
rule=f"pid:eq:{pid}&done", rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path, ioclass_config_path=ioclass_config_path,
) )
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) casadm.load_io_classes(cache.cache_id, ioclass_config_path)
with TestRun.step(f"Run dd with pid {pid}."): with TestRun.step(f"Run dd with pid {pid}."):
# the pid saved in 'ns_last_pid' has to be one less than the target dd pid # the pid saved in 'ns_last_pid' has to be one less than the target dd pid
dd_and_pid_command = ( dd_and_pid_command = (
f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command} " f"echo {pid - 1} > /proc/sys/kernel/ns_last_pid && {dd_command} "
f"&& cat /proc/sys/kernel/ns_last_pid" f"&& cat /proc/sys/kernel/ns_last_pid"
) )
output = TestRun.executor.run(dd_and_pid_command) output = TestRun.executor.run(dd_and_pid_command)
@ -136,4 +132,4 @@ def test_ioclass_pid():
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dd_count: if dirty.get_value(Unit.Blocks4096) != dd_count:
TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).") TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
ioclass_config.remove_ioclass(ioclass_id) ioclass_config.remove_ioclass(ioclass_id, ioclass_config_path)
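The pid-based test above relies on sequential PID allocation: writing target - 1 to /proc/sys/kernel/ns_last_pid makes the very next process spawned in that namespace receive target, which is why 'dd' is chained onto 'echo' in one shell command. A standalone sketch of the trick (racy if anything else forks in between):

import subprocess

def run_with_pid(target_pid: int, command: str) -> None:
    # Pin the next PID, then immediately spawn the command so it receives it.
    subprocess.run(
        f"echo {target_pid - 1} > /proc/sys/kernel/ns_last_pid && {command}",
        shell=True,
        check=True,
    )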
View File
@ -1,5 +1,5 @@
# #
# Copyright(c) 2020-2021 Intel Corporation # Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -23,13 +23,13 @@ from tests.io_class.io_class_common import prepare, mountpoint, ioclass_config_p
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_usage_sum(): def test_ioclass_usage_sum():
""" """
title: Test for ioclass stats after purge title: Test for ioclass stats after purge
description: | description: |
Create ioclasses for 3 different directories. Run IO against each Create io classes for 3 different directories. Run IO against each
directory, check usage stats correctness before and after purge directory, check usage stats correctness before and after purge.
pass_criteria: pass_criteria:
- Usage stats are consistent on each test step - Usage stats are consistent on each test step
- Usage stats don't exceed cache size - Usage stats don't exceed cache size
""" """
with TestRun.step("Prepare disks"): with TestRun.step("Prepare disks"):
cache, core = prepare() cache, core = prepare()
@ -38,9 +38,7 @@ def test_ioclass_usage_sum():
with TestRun.step("Disable udev"): with TestRun.step("Disable udev"):
Udev.disable() Udev.disable()
with TestRun.step( with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
f"Prepare filesystem and mount {core.path} at {mountpoint}"
):
filesystem = Filesystem.xfs filesystem = Filesystem.xfs
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
@ -58,7 +56,7 @@ def test_ioclass_usage_sum():
for io_class in io_classes: for io_class in io_classes:
fs_utils.create_directory(io_class.dir_path, parents=True) fs_utils.create_directory(io_class.dir_path, parents=True)
with TestRun.step("Add ioclasses for all dirs"): with TestRun.step("Add io classes for all dirs"):
ioclass_config.remove_ioclass_config() ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(True) ioclass_config.create_ioclass_config(True)
for io_class in io_classes: for io_class in io_classes:
@ -71,10 +69,8 @@ def test_ioclass_usage_sum():
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
# Since default ioclass is already present in cache and no directory should be # Since default ioclass is already present in cache and no directory should be
# created, it is added to ioclasses list after setup is done # created, it is added to io classes list after setup is done
io_classes.append( io_classes.append(IoclassConfig(default_ioclass_id, 22, f"{mountpoint}", cache_size * 0.2))
IoclassConfig(default_ioclass_id, 22, f"{mountpoint}", cache_size * 0.2)
)
with TestRun.step("Verify stats of newly started cache device"): with TestRun.step("Verify stats of newly started cache device"):
sync() sync()
@ -83,7 +79,7 @@ def test_ioclass_usage_sum():
with TestRun.step("Trigger IO to each partition and verify stats"): with TestRun.step("Trigger IO to each partition and verify stats"):
for io_class in io_classes: for io_class in io_classes:
run_io_dir(io_class.dir_path, int((io_class.io_size) / Unit.Blocks4096)) run_io_dir(io_class.dir_path, int(io_class.io_size / Unit.Blocks4096.get_value()))
verify_ioclass_usage_stats(cache, [i.id for i in io_classes]) verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
@ -92,11 +88,9 @@ def test_ioclass_usage_sum():
verify_ioclass_usage_stats(cache, [i.id for i in io_classes]) verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
with TestRun.step( with TestRun.step("Trigger IO to each partition for the second time and verify stats"):
"Trigger IO to each partition for the second time and verify stats"
):
for io_class in io_classes: for io_class in io_classes:
run_io_dir(io_class.dir_path, int((io_class.io_size) / Unit.Blocks4096)) run_io_dir(io_class.dir_path, int(io_class.io_size / Unit.Blocks4096.get_value()))
verify_ioclass_usage_stats(cache, [i.id for i in io_classes]) verify_ioclass_usage_stats(cache, [i.id for i in io_classes])
@ -116,9 +110,9 @@ def verify_ioclass_usage_stats(cache, ioclasses_ids):
if usage_stats_sum != cache_usage_stats: if usage_stats_sum != cache_usage_stats:
TestRun.LOGGER.error( TestRun.LOGGER.error(
"Sum of ioclasses usage stats doesn't match cache usage stats!" "Sum of io classes usage stats doesn't match cache usage stats!"
f" Cache stats:\n{cache_usage_stats} ioclasses sum:\n{usage_stats_sum}" f" Cache stats:\n{cache_usage_stats} io classes sum:\n{usage_stats_sum}"
f" Stats of particular ioclasses:\n" f" Stats of particular io classes:\n"
f"{[get_io_class_usage(cache, i) for i in ioclasses_ids]}" f"{[get_io_class_usage(cache, i) for i in ioclasses_ids]}"
) )
@ -141,13 +135,13 @@ def add_io_class(class_id, eviction_prio, rule):
def run_io_dir(path, num_ios): def run_io_dir(path, num_ios):
dd = ( (
Dd() Dd()
.input("/dev/zero") .input("/dev/zero")
.output(f"{path}/tmp_file") .output(f"{path}/tmp_file")
.count(num_ios) .count(num_ios)
.block_size(Size(1, Unit.Blocks4096)) .block_size(Size(1, Unit.Blocks4096))
.run()
) )
dd.run()
sync() sync()
drop_caches(DropCachesMode.ALL) drop_caches(DropCachesMode.ALL)
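Note the unit arithmetic fixed in the hunks above: io_size has to be divided by the numeric value of Unit.Blocks4096 (4096 bytes), not by the enum member itself, to yield a dd count. The same calculation in plain Python, with no framework imports:

BLOCK_4K = 4096                      # Unit.Blocks4096.get_value()
io_size_bytes = 8 * 1024 * 1024      # e.g. an 8 MiB io_size
num_ios = io_size_bytes // BLOCK_4K  # 2048 four-KiB writes for dd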
View File
@ -20,8 +20,14 @@ from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils import os_utils from test_utils import os_utils
from test_utils.os_utils import Runlevel from test_utils.os_utils import Runlevel
from test_utils.size import Size, Unit from test_utils.size import Size, Unit
from tests.io_class.io_class_common import prepare, mountpoint, ioclass_config_path, \ from tests.io_class.io_class_common import (
compare_io_classes_list, run_io_dir_read, template_config_path prepare,
mountpoint,
ioclass_config_path,
compare_io_classes_list,
run_io_dir_read,
template_config_path,
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@ -37,8 +43,7 @@ def test_io_class_service_load(runlevel):
- IO class configuration is the same before and after reboot - IO class configuration is the same before and after reboot
""" """
with TestRun.step("Prepare devices."): with TestRun.step("Prepare devices."):
cache, core = prepare(core_size=Size(300, Unit.MebiByte), cache, core = prepare(core_size=Size(300, Unit.MebiByte), cache_mode=CacheMode.WT)
cache_mode=CacheMode.WT)
with TestRun.step("Read the whole CAS device."): with TestRun.step("Read the whole CAS device."):
run_io_dir_read(core.path) run_io_dir_read(core.path)
@ -47,30 +52,37 @@ def test_io_class_service_load(runlevel):
core.create_filesystem(Filesystem.ext4) core.create_filesystem(Filesystem.ext4)
core.mount(mountpoint) core.mount(mountpoint)
with TestRun.step("Load IO class configuration file with rules that metadata will not be " with TestRun.step(
"cached and all other IO will be cached as unclassified."): "Load IO class configuration file with rules that metadata will not be "
"cached and all other IO will be cached as unclassified."
):
config_io_classes = prepare_and_load_io_class_config(cache, metadata_not_cached=True) config_io_classes = prepare_and_load_io_class_config(cache, metadata_not_cached=True)
with TestRun.step("Run IO."): with TestRun.step("Run IO."):
run_io() run_io()
with TestRun.step("Save IO class usage and configuration statistic."): with TestRun.step("Save IO class usage and configuration statistic."):
saved_usage_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[ saved_usage_stats = cache.get_io_class_statistics(
StatsFilter.usage]).usage_stats io_class_id=0, stat_filter=[StatsFilter.usage]
saved_conf_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[ ).usage_stats
StatsFilter.conf]).config_stats saved_conf_stats = cache.get_io_class_statistics(
io_class_id=0, stat_filter=[StatsFilter.conf]
).config_stats
with TestRun.step("Create init config from running CAS configuration."): with TestRun.step("Create init config from running CAS configuration."):
InitConfig.create_init_config_from_running_configuration( InitConfig.create_init_config_from_running_configuration(
cache_extra_flags=f"ioclass_file={ioclass_config_path}") cache_extra_flags=f"ioclass_file={ioclass_config_path}"
)
os_utils.sync() os_utils.sync()
with TestRun.step(f"Reboot system to runlevel {runlevel}."): with TestRun.step(f"Reboot system to runlevel {runlevel}."):
os_utils.change_runlevel(runlevel) os_utils.change_runlevel(runlevel)
TestRun.executor.reboot() TestRun.executor.reboot()
with TestRun.step("Check if CAS device loads properly - " with TestRun.step(
"IO class configuration and statistics shall not change"): "Check if CAS device loads properly - "
"IO class configuration and statistics shall not change"
):
caches = casadm_parser.get_caches() caches = casadm_parser.get_caches()
if len(caches) != 1: if len(caches) != 1:
TestRun.fail("Cache did not start at boot time.") TestRun.fail("Cache did not start at boot time.")
@ -85,18 +97,26 @@ def test_io_class_service_load(runlevel):
# Reads from core can invalidate some data so it is possible that occupancy after reboot # Reads from core can invalidate some data so it is possible that occupancy after reboot
# is lower than before # is lower than before
reads_from_core = cache.get_statistics(stat_filter=[StatsFilter.blk]).block_stats.core.reads reads_from_core = cache.get_statistics(stat_filter=[StatsFilter.blk]).block_stats.core.reads
read_usage_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[ read_usage_stats = cache.get_io_class_statistics(
StatsFilter.usage]).usage_stats io_class_id=0, stat_filter=[StatsFilter.usage]
read_conf_stats = cache.get_io_class_statistics(io_class_id=0, stat_filter=[ ).usage_stats
StatsFilter.conf]).config_stats read_conf_stats = cache.get_io_class_statistics(
io_class_id=0, stat_filter=[StatsFilter.conf]
).config_stats
if read_conf_stats != saved_conf_stats: if read_conf_stats != saved_conf_stats:
TestRun.LOGGER.error(f"Statistics do not match. Before: {str(saved_conf_stats)} " TestRun.LOGGER.error(
f"After: {str(read_conf_stats)}") f"Statistics do not match. Before: {str(saved_conf_stats)} "
if read_usage_stats != saved_usage_stats and \ f"After: {str(read_conf_stats)}"
saved_usage_stats.occupancy - read_usage_stats.occupancy > reads_from_core: )
TestRun.LOGGER.error(f"Statistics do not match. Before: {str(saved_usage_stats)} " if (
f"After: {str(read_usage_stats)}") read_usage_stats != saved_usage_stats
and saved_usage_stats.occupancy - read_usage_stats.occupancy > reads_from_core
):
TestRun.LOGGER.error(
f"Statistics do not match. Before: {str(saved_usage_stats)} "
f"After: {str(read_usage_stats)}"
)
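The comparison above encodes a tolerance: reads from the core can invalidate cached data, so occupancy may legitimately drop by up to the number of blocks read, and only a larger drop is treated as a mismatch. A condensed sketch of that predicate:

def usage_stats_mismatch(saved, read, reads_from_core):
    # Mismatch only if the stats differ AND the occupancy drop exceeds what
    # reads from core could have invalidated.
    return saved != read and (saved.occupancy - read.occupancy) > reads_from_core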
with TestRun.step("Mount CAS device and run IO again."): with TestRun.step("Mount CAS device and run IO again."):
core.mount(mountpoint) core.mount(mountpoint)
@ -108,24 +128,28 @@ def test_io_class_service_load(runlevel):
read_total = cache_stats.request_stats.read.total read_total = cache_stats.request_stats.read.total
read_hits_percentage = read_hits / read_total * 100 read_hits_percentage = read_hits / read_total * 100
if read_hits_percentage <= 95: if read_hits_percentage <= 95:
TestRun.LOGGER.error(f"Read hits percentage too low: {read_hits_percentage}%\n" TestRun.LOGGER.error(
f"Read hits: {read_hits}, read total: {read_total}") f"Read hits percentage too low: {read_hits_percentage}%\n"
f"Read hits: {read_hits}, read total: {read_total}"
)
def run_io(): def run_io():
fio = Fio() \ fio = (
.create_command() \ Fio()
.block_size(Size(1, Unit.Blocks4096)) \ .create_command()
.io_engine(IoEngine.libaio) \ .block_size(Size(1, Unit.Blocks4096))
.read_write(ReadWrite.read) \ .io_engine(IoEngine.libaio)
.directory(os.path.join(mountpoint)) \ .read_write(ReadWrite.read)
.sync() \ .directory(os.path.join(mountpoint))
.do_verify() \ .sync()
.num_jobs(32) \ .do_verify()
.run_time(timedelta(minutes=1)) \ .num_jobs(32)
.time_based()\ .run_time(timedelta(minutes=1))
.nr_files(30)\ .time_based()
.nr_files(30)
.file_size(Size(250, Unit.KiB)) .file_size(Size(250, Unit.KiB))
)
fio.run() fio.run()
os_utils.sync() os_utils.sync()
View File
@ -6,12 +6,12 @@
import pytest import pytest
from api.cas import ioclass_config, casadm from api.cas import ioclass_config, casadm
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from core.test_run import TestRun from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.fio.fio import Fio from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.os_utils import sync, Udev, drop_caches
from test_utils.size import Unit, Size
from tests.io_class.io_class_common import prepare from tests.io_class.io_class_common import prepare
@ -21,7 +21,7 @@ def test_ioclass_wlth():
""" """
title: Test for `wlth` classification rule title: Test for `wlth` classification rule
description: | description: |
Test CAS ability to cache IO based on write life time hints. Test CAS ability to cache IO based on 'write-life-time-hints' classification rule.
pass_criteria: pass_criteria:
- IO with wlth is cached - IO with wlth is cached
- IO without wlth is not cached - IO without wlth is not cached
@ -60,7 +60,7 @@ def test_ioclass_wlth():
cache.reset_counters() cache.reset_counters()
with TestRun.step(f"Trigger IO with a write life time hint"): with TestRun.step(f"Trigger IO with a write life time hint"):
# Fio adds hints only to direct IO. Even if `write_hint` param isn't proveded, direct IO # Fio adds hints only to direct IO. Even if `write_hint` param isn't provided, direct IO
# has assigned a hint by default # has assigned a hint by default
io_count = 12345 io_count = 12345
io_size = Size(io_count, Unit.Blocks4096) io_size = Size(io_count, Unit.Blocks4096)
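A minimal sketch of IO that should land in the wlth class, assembled from the fio wrapper calls already used in this module (per the comment above, direct IO carries a default write-life-time hint, so no explicit hint parameter is needed; ReadWrite.randwrite is assumed to exist alongside the randread used elsewhere):

(
    Fio()
    .create_command()
    .target(core)
    .io_engine(IoEngine.libaio)
    .read_write(ReadWrite.randwrite)  # assumed enum member
    .block_size(Size(1, Unit.Blocks4096))
    .direct()                         # hints are attached to direct IO only
    .num_ios(io_count)
    .run()
)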
View File
@ -1,5 +1,5 @@
# #
# Copyright(c) 2019-2021 Intel Corporation # Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause # SPDX-License-Identifier: BSD-3-Clause
# #
@ -28,13 +28,12 @@ from tests.io_class.io_class_common import prepare, ioclass_config_path, mountpo
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_lba(): def test_ioclass_lba():
""" """
title: Test IO classification by lba. title: Test IO classification by lba.
description: | description: |
Write data to random lba and check if it is cached according to range Write data to random lba and check if it is cached according to range defined in ioclass rule.
defined in ioclass rule pass_criteria:
pass_criteria: - No kernel bug.
- No kernel bug. - IO is classified properly based on lba range defined in config.
- IO is classified properly based on lba range defined in config.
""" """
ioclass_id = 1 ioclass_id = 1
@ -64,15 +63,16 @@ def test_ioclass_lba():
# The step of 8 prevents writing any cache line more than once # The step of 8 prevents writing any cache line more than once
TestRun.LOGGER.info("Writing to one sector in each cache line from the range.") TestRun.LOGGER.info("Writing to one sector in each cache line from the range.")
for lba in range(min_cached_lba, max_cached_lba, 8): for lba in range(min_cached_lba, max_cached_lba, 8):
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{core.path}") .input("/dev/zero")
.count(dd_count) .output(f"{core.path}")
.block_size(dd_size) .count(dd_count)
.seek(lba) .block_size(dd_size)
.oflag("direct") .seek(lba)
.oflag("direct")
.run()
) )
dd.run()
dirty_count += 1 dirty_count += 1
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
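The step of 8 follows from sector and cache-line sizes: with the 4 KiB cache line set in prepare() and 512 B LBAs, one cache line spans eight sectors, so stepping by 8 touches each cache line exactly once:

CACHE_LINE_BYTES = 4096   # CacheLineSize.LINE_4KiB
SECTOR_BYTES = 512        # one LBA (Unit.Blocks512)
step = CACHE_LINE_BYTES // SECTOR_BYTES
assert step == 8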
@ -84,22 +84,24 @@ def test_ioclass_lba():
test_lba = [max_cached_lba + 1] + random.sample( test_lba = [max_cached_lba + 1] + random.sample(
[ [
*range(0, min_cached_lba), *range(0, min_cached_lba),
*range(max_cached_lba + 1, int(core.size.get_value(Unit.Blocks512))) *range(max_cached_lba + 1, int(core.size.get_value(Unit.Blocks512))),
], ],
k=100) k=100,
)
for lba in test_lba: for lba in test_lba:
prev_dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty prev_dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
dd = ( (
Dd().input("/dev/zero") Dd()
.output(f"{core.path}") .input("/dev/zero")
.count(dd_count) .output(f"{core.path}")
.block_size(dd_size) .count(dd_count)
.seek(lba) .block_size(dd_size)
.oflag("direct") .seek(lba)
.oflag("direct")
.run()
) )
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if prev_dirty != dirty: if prev_dirty != dirty:
@ -110,11 +112,11 @@ def test_ioclass_lba():
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_request_size(): def test_ioclass_request_size():
""" """
title: Test IO classification by request size. title: Test IO classification by request size.
description: Check if requests with size within defined range are cached. description: Check if requests with size within defined range are cached.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly based on request size range defined in config. - IO is classified properly based on request size range defined in config.
""" """
ioclass_id = 1 ioclass_id = 1
@ -141,14 +143,15 @@ def test_ioclass_request_size():
for i in range(iterations): for i in range(iterations):
cache.flush_cache() cache.flush_cache()
req_size = random.choice(cached_req_sizes) req_size = random.choice(cached_req_sizes)
dd = ( (
Dd().input("/dev/zero") Dd()
.output(core.path) .input("/dev/zero")
.count(1) .output(core.path)
.block_size(req_size) .count(1)
.oflag("direct") .block_size(req_size)
.oflag("direct")
.run()
) )
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value: if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
TestRun.fail("Incorrect number of dirty blocks!") TestRun.fail("Incorrect number of dirty blocks!")
@ -164,14 +167,15 @@ def test_ioclass_request_size():
] ]
for i in range(iterations): for i in range(iterations):
req_size = random.choice(not_cached_req_sizes) req_size = random.choice(not_cached_req_sizes)
dd = ( (
Dd().input("/dev/zero") Dd()
.output(core.path) .input("/dev/zero")
.count(1) .output(core.path)
.block_size(req_size) .count(1)
.oflag("direct") .block_size(req_size)
.oflag("direct")
.run()
) )
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0: if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.fail("Dirty data present!") TestRun.fail("Dirty data present!")
@ -183,13 +187,13 @@ def test_ioclass_request_size():
@pytest.mark.parametrizex("filesystem", list(Filesystem) + [False]) @pytest.mark.parametrizex("filesystem", list(Filesystem) + [False])
def test_ioclass_direct(filesystem): def test_ioclass_direct(filesystem):
""" """
title: Direct IO classification. title: Direct IO classification.
description: Check if direct requests are properly cached. description: Check if direct requests are properly cached.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- Data from direct IO should be cached. - Data from direct IO should be cached.
- Data from buffered IO should not be cached and if performed to/from already cached data - Data from buffered IO should not be cached and if performed to/from already cached data
should cause reclassification to unclassified IO class. should cause reclassification to unclassified IO class.
""" """
ioclass_id = 1 ioclass_id = 1
@ -211,11 +215,15 @@ def test_ioclass_direct(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step("Prepare fio command."): with TestRun.step("Prepare fio command."):
fio = Fio().create_command() \ fio = (
.io_engine(IoEngine.libaio) \ Fio()
.size(io_size).offset(io_size) \ .create_command()
.read_write(ReadWrite.write) \ .io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/tmp_file" if filesystem else core.path) .target(f"{mountpoint}/tmp_file" if filesystem else core.path)
)
with TestRun.step("Prepare filesystem."): with TestRun.step("Prepare filesystem."):
if filesystem: if filesystem:
@ -237,8 +245,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if buffered writes are not cached."): with TestRun.step("Check if buffered writes are not cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy: if new_occupancy != base_occupancy:
TestRun.fail("Buffered writes were cached!\n" TestRun.fail(
f"Expected: {base_occupancy}, actual: {new_occupancy}") "Buffered writes were cached!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}"
)
with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"): with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"):
fio.direct() fio.direct()
@ -248,8 +258,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if direct writes are cached."): with TestRun.step("Check if direct writes are cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size: if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct writes was cached!\n" TestRun.fail(
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}") "Wrong number of direct writes was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
)
with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"): with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"):
fio.remove_param("readwrite").remove_param("direct") fio.remove_param("readwrite").remove_param("direct")
@ -260,8 +272,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if buffered reads caused reclassification."): with TestRun.step("Check if buffered reads caused reclassification."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy: if new_occupancy != base_occupancy:
TestRun.fail("Buffered reads did not cause reclassification!" TestRun.fail(
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}") "Buffered reads did not cause reclassification!"
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}"
)
with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"): with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"):
fio.direct() fio.direct()
@ -271,8 +285,10 @@ def test_ioclass_direct(filesystem):
with TestRun.step("Check if direct reads are cached."): with TestRun.step("Check if direct reads are cached."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size: if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct reads was cached!\n" TestRun.fail(
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}") "Wrong number of direct reads was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
)
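The test above reuses one fio command, toggling it between modes with the wrapper's parameter methods seen in the surrounding steps. A condensed sketch of that flow, with target standing in for the file or device path:

fio = Fio().create_command().io_engine(IoEngine.libaio).size(io_size).target(target)
fio.read_write(ReadWrite.write).run()               # buffered writes
fio.direct().run()                                  # direct writes
fio.remove_param("readwrite").remove_param("direct")
fio.read_write(ReadWrite.read).run()                # buffered reads
fio.direct().run()                                  # direct reads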
@pytest.mark.os_dependent @pytest.mark.os_dependent
@ -281,13 +297,13 @@ def test_ioclass_direct(filesystem):
@pytest.mark.parametrizex("filesystem", Filesystem) @pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_metadata(filesystem): def test_ioclass_metadata(filesystem):
""" """
title: Metadata IO classification. title: Metadata IO classification.
description: | description: |
Determine if every file operation that causes a metadata update results in increased Determine if every file operation that causes a metadata update results in increased
writes to cached metadata. writes to cached metadata.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- Metadata is classified properly. - Metadata is classified properly.
""" """
# Exact values may not be tested as each filesystem has a different metadata structure. # Exact values may not be tested as each filesystem has a different metadata structure.
test_dir_path = f"{mountpoint}/test_dir" test_dir_path = f"{mountpoint}/test_dir"
@ -308,31 +324,35 @@ def test_ioclass_metadata(filesystem):
) )
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} " with TestRun.step(
f"at {mountpoint}."): f"Prepare {filesystem.name} filesystem and mount {core.path} " f"at {mountpoint}."
):
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
sync() sync()
with TestRun.step("Create 20 test files."): with TestRun.step("Create 20 test files."):
requests_to_metadata_before = cache.get_io_class_statistics( requests_to_metadata_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write io_class_id=ioclass_id
).request_stats.write
files = [] files = []
for i in range(1, 21): for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}" file_path = f"{mountpoint}/test_file_{i}"
dd = ( (
Dd().input("/dev/urandom") Dd()
.output(file_path) .input("/dev/urandom")
.count(random.randint(5, 50)) .output(file_path)
.block_size(Size(1, Unit.MebiByte)) .count(random.randint(5, 50))
.oflag("sync") .block_size(Size(1, Unit.MebiByte))
.oflag("sync")
.run()
) )
dd.run()
files.append(File(file_path)) files.append(File(file_path))
with TestRun.step("Check requests to metadata."): with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics( requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before: if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while creating files!") TestRun.fail("No requests to metadata while creating files!")
@ -344,7 +364,8 @@ def test_ioclass_metadata(filesystem):
with TestRun.step("Check requests to metadata."): with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics( requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before: if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while renaming files!") TestRun.fail("No requests to metadata while renaming files!")
@ -359,7 +380,8 @@ def test_ioclass_metadata(filesystem):
with TestRun.step("Check requests to metadata."): with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics( requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before: if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while moving files!") TestRun.fail("No requests to metadata while moving files!")
@ -368,7 +390,8 @@ def test_ioclass_metadata(filesystem):
with TestRun.step("Check requests to metadata."): with TestRun.step("Check requests to metadata."):
requests_to_metadata_after = cache.get_io_class_statistics( requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write io_class_id=ioclass_id
).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before: if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while deleting directory with files!") TestRun.fail("No requests to metadata while deleting directory with files!")
@ -376,17 +399,16 @@ def test_ioclass_metadata(filesystem):
@pytest.mark.os_dependent @pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_id_as_condition():
def test_ioclass_id_as_condition(filesystem):
""" """
title: IO class as a condition. title: IO class as a condition.
description: | description: |
Load config in which IO class ids are used as conditions in other IO class definitions. Load config in which IO class ids are used as conditions in other IO class definitions.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- IO is classified properly as described in IO class config. - IO is classified properly as described in IO class config.
""" """
filesystem = Filesystem.xfs
base_dir_path = f"{mountpoint}/base_dir" base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte) ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte)) ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
@ -448,8 +470,9 @@ def test_ioclass_id_as_condition(filesystem):
# CAS needs some time to resolve directory to inode # CAS needs some time to resolve directory to inode
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds) time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
with TestRun.step(f"Prepare {filesystem.name} filesystem " with TestRun.step(
f"and mount {core.path} at {mountpoint}."): f"Prepare {filesystem.name} filesystem " f"and mount {core.path} at {mountpoint}."
):
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
fs_utils.create_directory(base_dir_path) fs_utils.create_directory(base_dir_path)
@ -457,91 +480,118 @@ def test_ioclass_id_as_condition(filesystem):
# CAS needs some time to resolve directory to inode # CAS needs some time to resolve directory to inode
time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds) time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)
with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if " with TestRun.step(
"it is classified properly."): "Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
"it is classified properly."
):
# Should be classified as IO class 4 # Should be classified as IO class 4
base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte) non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
(Fio().create_command() (
.io_engine(IoEngine.libaio) Fio()
.size(non_ioclass_file_size) .create_command()
.read_write(ReadWrite.write) .io_engine(IoEngine.libaio)
.target(f"{base_dir_path}/test_file_1") .size(non_ioclass_file_size)
.run()) .read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_1")
.run()
)
sync() sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
if new_occupancy != base_occupancy + non_ioclass_file_size: if new_occupancy != base_occupancy + non_ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n" TestRun.fail(
f"Expected: {base_occupancy + non_ioclass_file_size}, " "Writes were not properly cached!\n"
f"actual: {new_occupancy}") f"Expected: {base_occupancy + non_ioclass_file_size}, "
f"actual: {new_occupancy}"
)
with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if " with TestRun.step(
"it is classified properly."): "Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
"it is classified properly."
):
# Should be classified as IO class 5 # Should be classified as IO class 5
base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
(Fio().create_command() (
.io_engine(IoEngine.libaio) Fio()
.size(ioclass_file_size) .create_command()
.read_write(ReadWrite.write) .io_engine(IoEngine.libaio)
.target(f"{mountpoint}/test_file_2") .size(ioclass_file_size)
.run()) .read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file_2")
.run()
)
sync() sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size: if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n" TestRun.fail(
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") "Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
)
with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if " with TestRun.step(
"it is classified properly."): "Run IO fulfilling IO class 1 and 2 conditions and check if " "it is classified properly."
):
# Should be classified as IO class 5 # Should be classified as IO class 5
base_occupancy = new_occupancy base_occupancy = new_occupancy
(Fio().create_command() (
.io_engine(IoEngine.libaio) Fio()
.size(ioclass_file_size) .create_command()
.read_write(ReadWrite.write) .io_engine(IoEngine.libaio)
.target(f"{base_dir_path}/test_file_3") .size(ioclass_file_size)
.run()) .read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.run()
)
sync() sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size: if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n" TestRun.fail(
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") "Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
)
with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if " with TestRun.step(
"it is classified properly."): "Run direct IO fulfilling IO class 1 and 2 conditions and check if "
"it is classified properly."
):
# Should be classified as IO class 6 # Should be classified as IO class 6
base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
(Fio().create_command() (
.io_engine(IoEngine.libaio) Fio()
.size(ioclass_file_size) .create_command()
.read_write(ReadWrite.write) .io_engine(IoEngine.libaio)
.target(f"{base_dir_path}/test_file_3") .size(ioclass_file_size)
.direct() .read_write(ReadWrite.write)
.run()) .target(f"{base_dir_path}/test_file_3")
.direct()
.run()
)
sync() sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size: if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n" TestRun.fail(
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}") "Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
)
@pytest.mark.os_dependent @pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_conditions_or():
def test_ioclass_conditions_or(filesystem):
""" """
title: IO class condition 'or'. title: IO class condition 'or'.
description: | description: |
Load a config with an IO class combining 5 contradicting conditions connected by the OR operator. Load a config with an IO class combining 5 contradicting conditions connected by the OR operator.
pass_criteria: pass_criteria:
- No kernel bug. - No kernel bug.
- Every IO fulfilling one condition is classified properly. - Every IO fulfilling one condition is classified properly.
""" """
filesystem = Filesystem.xfs
with TestRun.step("Prepare cache and core. Disable udev."): with TestRun.step("Prepare cache and core. Disable udev."):
cache, core = prepare() cache, core = prepare()
@ -553,14 +603,17 @@ def test_ioclass_conditions_or(filesystem):
ioclass_id=1, ioclass_id=1,
eviction_priority=1, eviction_priority=1,
allocation="1.00", allocation="1.00",
rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:" rule=(
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5", f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5"
),
ioclass_config_path=ioclass_config_path, ioclass_config_path=ioclass_config_path,
) )
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem " with TestRun.step(
f"and mount {core.path} at {mountpoint}."): f"Prepare {filesystem.name} filesystem " f"and mount {core.path} at {mountpoint}."
):
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
for i in range(1, 6): for i in range(1, 6):
@ -571,35 +624,38 @@ def test_ioclass_conditions_or(filesystem):
for i in range(1, 6): for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte) file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
(Fio().create_command() (
.io_engine(IoEngine.libaio) Fio()
.size(file_size) .create_command()
.read_write(ReadWrite.write) .io_engine(IoEngine.libaio)
.target(f"{mountpoint}/dir{i}/test_file") .size(file_size)
.run()) .read_write(ReadWrite.write)
.target(f"{mountpoint}/dir{i}/test_file")
.run()
)
sync() sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size: if new_occupancy != base_occupancy + file_size:
TestRun.fail("Occupancy has not increased correctly!\n" TestRun.fail(
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}") "Occupancy has not increased correctly!\n"
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
)
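The OR rule above is five directory: conditions joined with '|'; it can also be built programmatically instead of as one long literal:

dirs = [f"{mountpoint}/dir{i}" for i in range(1, 6)]
rule = "|".join(f"directory:{d}" for d in dirs)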
@pytest.mark.os_dependent @pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem) def test_ioclass_conditions_and():
def test_ioclass_conditions_and(filesystem):
""" """
title: IO class condition 'and'. title: IO class condition 'and'.
description: | description: |
Load config with IO class combining 5 conditions contradicting Load config with IO class combining 5 conditions contradicting at least one other condition.
at least one other condition. pass_criteria:
pass_criteria: - No kernel bug.
- No kernel bug. - Every IO fulfilling one of the conditions is not classified.
- Every IO fulfilling one of the conditions is not classified.
""" """
filesystem = Filesystem.xfs
file_size = Size(random.randint(25, 50), Unit.MebiByte) file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte)) file_size_bytes = int(file_size.get_value(Unit.Byte))
@ -613,63 +669,79 @@ def test_ioclass_conditions_and(filesystem):
ioclass_id=1, ioclass_id=1,
eviction_priority=1, eviction_priority=1,
allocation="1.00", allocation="1.00",
rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&" rule=(
f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&" f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}", f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}"
),
ioclass_config_path=ioclass_config_path, ioclass_config_path=ioclass_config_path,
) )
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path) casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem " TestRun.LOGGER.info(
f"and mounting {core.path} at {mountpoint}") f"Preparing {filesystem.name} filesystem " f"and mounting {core.path} at {mountpoint}"
)
core.create_filesystem(filesystem) core.create_filesystem(filesystem)
core.mount(mountpoint) core.mount(mountpoint)
sync() sync()
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
# Perform IO # Perform IO
for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]: for size in [
(Fio().create_command() file_size,
.io_engine(IoEngine.libaio) file_size + Size(1, Unit.MebiByte),
.size(size) file_size - Size(1, Unit.MebiByte),
.read_write(ReadWrite.write) ]:
.target(f"{mountpoint}/test_file") (
.run()) Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file")
.run()
)
sync() sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
        if new_occupancy != base_occupancy:
            TestRun.fail(
                "Unexpected occupancy increase!\n"
                f"Expected: {base_occupancy}, actual: {new_occupancy}"
            )

@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_effective_ioclass():
""" """
title: Effective IO class with multiple non-exclusive conditions title: Effective IO class with multiple non-exclusive conditions
description: | description: |
Test CAS ability to properly classify IO fulfilling multiple conditions based on Test CAS ability to properly classify IO fulfilling multiple conditions based on
IO class ids and presence of '&done' annotation in IO class rules IO class ids and presence of '&done' annotation in IO class rules
pass_criteria: pass_criteria:
- In every iteration first IO is classified to the last in order IO class - In every iteration first IO is classified to the last in order IO class
- In every iteration second IO is classified to the IO class with '&done' annotation - In every iteration second IO is classified to the IO class with '&done' annotation
""" """
    filesystem = Filesystem.xfs

    with TestRun.LOGGER.step("Test prepare"):
        cache, core = prepare(default_allocation="1.00")
        Udev.disable()
        file_size = Size(10, Unit.Blocks4096)
        file_size_bytes = int(file_size.get_value(Unit.Byte))
        test_dir = f"{mountpoint}/test"
        rules = [
            "direct",  # rule contradicting other rules
            f"directory:{test_dir}",
            f"file_size:le:{2 * file_size_bytes}",
            f"file_size:ge:{file_size_bytes // 2}",
        ]
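        # The fio jobs below are buffered (no direct flag is set on the command), so
        # the 'direct' condition never matches and serves as the contradicting rule,
        # while the directory and both file_size conditions can match the same IO.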
    with TestRun.LOGGER.step(
        f"Preparing {filesystem.name} filesystem and mounting {core.path} at {mountpoint}"
    ):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        fs_utils.create_directory(test_dir)
@ -682,41 +754,54 @@ def test_ioclass_effective_ioclass(filesystem):
with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"): with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
base_occupancy = cache.get_io_class_statistics( base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy io_class_id=io_class_id
fio = (Fio().create_command() ).usage_stats.occupancy
.io_engine(IoEngine.libaio) fio = (
.size(file_size) Fio()
.read_write(ReadWrite.write) .create_command()
.target(f"{test_dir}/test_file{i}")) .io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{test_dir}/test_file{i}")
)
fio.run() fio.run()
sync() sync()
        with TestRun.LOGGER.step(
            "Check if IO was properly classified (to the last non-contradicting IO class)"
        ):
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id
            ).usage_stats.occupancy
            if new_occupancy != base_occupancy + file_size:
                TestRun.LOGGER.error(
                    "Wrong IO classification!\n"
                    f"Expected: {base_occupancy + file_size}, "
                    f"actual: {new_occupancy}"
                )
with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"): with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache) io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)
with TestRun.LOGGER.step("Repeat IO"): with TestRun.LOGGER.step("Repeat IO"):
base_occupancy = cache.get_io_class_statistics( base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy io_class_id=io_class_id
).usage_stats.occupancy
fio.run() fio.run()
sync() sync()
with TestRun.LOGGER.step("Check if IO was properly classified " with TestRun.LOGGER.step(
"(to the IO class with '&done' annotation)"): "Check if IO was properly classified " "(to the IO class with '&done' annotation)"
):
new_occupancy = cache.get_io_class_statistics( new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy io_class_id=io_class_id
).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size: if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n" TestRun.LOGGER.error(
f"Expected: {base_occupancy + file_size}, " "Wrong IO classification!\n"
f"actual: {new_occupancy}") f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}"
)
def load_io_classes_in_permutation_order(rules, permutation, cache):
@ -729,9 +814,9 @@ def load_io_classes_in_permutation_order(rules, permutation, cache):
    ioclass_list = [IoClass.default(allocation="0.0")]
    for n in range(len(rules)):
        ioclass_list.append(IoClass(class_id=permutation[n], rule=rules[n]))
    IoClass.save_list_to_config_file(
        ioclass_list, add_default_rule=False, ioclass_config_path=ioclass_config_path
    )
    casadm.load_io_classes(cache.cache_id, file=ioclass_config_path)
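    # A minimal usage sketch (hypothetical call site, assuming itertools is imported):
    #   for permutation in itertools.permutations(range(1, len(rules) + 1)):
    #       load_io_classes_in_permutation_order(rules, permutation, cache)
    # Each rule keeps its position in the list while its class id follows the
    # permutation, so the effective evaluation order differs between iterations.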
@ -745,8 +830,7 @@ def add_done_to_second_non_exclusive_condition(rules, permutation, cache):
        if non_exclusive_conditions == 2:
            break
        second_class_id += 1
    fs_utils.replace_first_pattern_occurrence(ioclass_config_path, rules[idx], f"{rules[idx]}&done")
    sync()
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
    return second_class_id
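
# Note: appending '&done' rewrites the selected rule in place, e.g. (illustrative)
# "directory:/tmp/..." becomes "directory:/tmp/...&done", so after the config is
# reloaded that IO class terminates rule evaluation for any IO it matches.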