Merge pull request #168 from Deixx/effective-ioclass
Test for determining effective IO class
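The new test targets how Open CAS resolves the effective IO class when a single request matches several non-exclusive classification rules. Per the test's pass criteria, such a request should be classified to the last matching IO class in id order, unless a matching rule carries the '&done' annotation, in which case classification stops at that class. The sketch below is not part of the change; it only illustrates the kind of IO class list the new helper builds, reusing the IoClass helper and the rule set from the diff. The mountpoint and the chosen permutation are made up for illustration, and the byte values assume Unit.Blocks4096 is 4 KiB.

    # Illustrative sketch only; mirrors load_io_classes_in_permutation_order() below.
    from api.cas.ioclass_config import IoClass

    test_dir = "/mnt/cas1-1/test"            # hypothetical mountpoint
    rules = ["direct",                       # contradicts the buffered IO issued first
             f"directory:{test_dir}",
             "file_size:le:81920",           # 2 * (10 blocks of 4 KiB)
             "file_size:ge:20480"]           # (10 blocks of 4 KiB) // 2
    permutation = (2, 1, 4, 3)               # one of the 24 orderings of class ids 1-4

    ioclass_list = [IoClass.default(allocation=False)]   # keep other IO in pass-through
    for rule, class_id in zip(rules, permutation):
        ioclass_list.append(IoClass(class_id=class_id, rule=rule))
    # Appending "&done" to a rule, e.g. f"directory:{test_dir}&done", makes that class
    # the effective one for matching IO even if a higher-id class also matches.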
commit 9286c0eae7
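For a given permutation, the class the first (buffered, non-direct) IO is expected to land in follows directly from the rule list: the "direct" rule can never match it, so the expected class is the highest class id assigned to any other rule. The test encodes this as io_class_id = 3 if rules[permutation.index(4)] == "direct" else 4; the function below restates the same selection for illustration only and is not part of the commit.

    def expected_class_for_buffered_io(rules, permutation):
        # Highest class id whose rule is not the contradicting "direct" rule;
        # equivalent to: 3 if rules[permutation.index(4)] == "direct" else 4
        return max(class_id for rule, class_id in zip(rules, permutation)
                   if rule != "direct")

The second check in each iteration then expects add_done_to_second_non_exclusive_condition() to shift classification to the second non-"direct" class in id order, since that class's rule now ends with "&done".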
@@ -4,9 +4,12 @@
 #
 
 import random
+from itertools import permutations
 
 import pytest
 
+from api.cas.ioclass_config import IoClass
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
+from test_tools import fs_utils
 from test_tools.dd import Dd
 from test_tools.disk_utils import Filesystem
@@ -14,7 +17,6 @@ from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 from test_utils.filesystem.file import File
 from test_utils.os_utils import sync, Udev
-from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from .io_class_common import *
 
 
@@ -63,9 +65,8 @@ def test_ioclass_lba():
         dirty_count += 1
 
         stats = cache.get_statistics_deprecated(io_class_id=ioclass_id)
-        assert (
-            stats["dirty"].get_value(Unit.Blocks4096) == dirty_count
-        ), f"LBA {lba} not cached"
+        if stats["dirty"].get_value(Unit.Blocks4096) != dirty_count:
+            TestRun.fail(f"LBA {lba} not cached")
 
     cache.flush_cache()
 
@@ -87,9 +88,8 @@ def test_ioclass_lba():
         sync()
 
         stats = cache.get_statistics_deprecated(io_class_id=ioclass_id)
-        assert (
-            stats["dirty"].get_value(Unit.Blocks4096) == 0
-        ), f"Inappropriately cached lba: {rand_lba}"
+        if stats["dirty"].get_value(Unit.Blocks4096) != 0:
+            TestRun.fail(f"Inappropriately cached lba: {rand_lba}")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -129,10 +129,8 @@ def test_ioclass_request_size():
         )
         dd.run()
         stats = cache.get_statistics_deprecated(io_class_id=ioclass_id)
-        assert (
-            stats["dirty"].get_value(Unit.Blocks4096)
-            == req_size.value / Unit.Blocks4096.value
-        )
+        if stats["dirty"].get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
+            TestRun.fail("Incorrect number of dirty blocks!")
 
         cache.flush_cache()
 
@@ -157,7 +155,8 @@ def test_ioclass_request_size():
         )
         dd.run()
         stats = cache.get_statistics_deprecated(io_class_id=ioclass_id)
-        assert stats["dirty"].get_value(Unit.Blocks4096) == 0
+        if stats["dirty"].get_value(Unit.Blocks4096) != 0:
+            TestRun.fail("Dirty data present!")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -212,18 +211,18 @@ def test_ioclass_direct(filesystem):
     fio.run()
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=ioclass_id)["occupancy"]
-    assert new_occupancy == base_occupancy, \
-        "Buffered writes were cached!\n" \
-        f"Expected: {base_occupancy}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy:
+        TestRun.fail("Buffered writes were cached!\n"
+                     f"Expected: {base_occupancy}, actual: {new_occupancy}")
 
     TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
     fio.direct()
     fio.run()
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=ioclass_id)["occupancy"]
-    assert new_occupancy == base_occupancy + io_size, \
-        "Wrong number of direct writes was cached!\n" \
-        f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy + io_size:
+        TestRun.fail("Wrong number of direct writes was cached!\n"
+                     f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
 
     TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
     fio.remove_param("readwrite").remove_param("direct")
@@ -231,18 +230,18 @@ def test_ioclass_direct(filesystem):
     fio.run()
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=ioclass_id)["occupancy"]
-    assert new_occupancy == base_occupancy, \
-        "Buffered reads did not cause reclassification!" \
-        f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy:
+        TestRun.fail("Buffered reads did not cause reclassification!"
+                     f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")
 
     TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
     fio.direct()
     fio.run()
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=ioclass_id)["occupancy"]
-    assert new_occupancy == base_occupancy + io_size, \
-        "Wrong number of direct reads was cached!\n" \
-        f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy + io_size:
+        TestRun.fail("Wrong number of direct reads was cached!\n"
+                     f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -420,9 +419,9 @@ def test_ioclass_id_as_condition(filesystem):
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=4)["occupancy"]
 
-    assert new_occupancy == base_occupancy + non_ioclass_file_size, \
-        "Writes were not properly cached!\n" \
-        f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy + non_ioclass_file_size:
+        TestRun.fail("Writes were not properly cached!\n"
+                     f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}")
 
     # IO fulfilling IO class 2 condition (and not IO class 1)
     # Should be classified as IO class 5
@@ -436,9 +435,9 @@ def test_ioclass_id_as_condition(filesystem):
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=5)["occupancy"]
 
-    assert new_occupancy == base_occupancy + ioclass_file_size, \
-        "Writes were not properly cached!\n" \
-        f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy + ioclass_file_size:
+        TestRun.fail("Writes were not properly cached!\n"
+                     f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
 
     # IO fulfilling IO class 1 and 2 conditions
     # Should be classified as IO class 5
@@ -452,9 +451,9 @@ def test_ioclass_id_as_condition(filesystem):
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=5)["occupancy"]
 
-    assert new_occupancy == base_occupancy + ioclass_file_size, \
-        "Writes were not properly cached!\n" \
-        f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy + ioclass_file_size:
+        TestRun.fail("Writes were not properly cached!\n"
+                     f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
 
     # Same IO but direct
     # Should be classified as IO class 6
@@ -469,9 +468,9 @@ def test_ioclass_id_as_condition(filesystem):
     sync()
     new_occupancy = cache.get_statistics_deprecated(io_class_id=6)["occupancy"]
 
-    assert new_occupancy == base_occupancy + ioclass_file_size, \
-        "Writes were not properly cached!\n" \
-        f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
+    if new_occupancy != base_occupancy + ioclass_file_size:
+        TestRun.fail("Writes were not properly cached!\n"
+                     f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -517,9 +516,9 @@ def test_ioclass_conditions_or(filesystem):
         sync()
         new_occupancy = cache.get_statistics_deprecated(io_class_id=1)["occupancy"]
 
-        assert new_occupancy == base_occupancy + file_size, \
-            "Occupancy has not increased correctly!\n" \
-            f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
+        if new_occupancy != base_occupancy + file_size:
+            TestRun.fail("Occupancy has not increased correctly!\n"
+                         f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -566,6 +565,114 @@ def test_ioclass_conditions_and(filesystem):
         sync()
         new_occupancy = cache.get_statistics_deprecated(io_class_id=1)["occupancy"]
 
-        assert new_occupancy == base_occupancy, \
-            "Unexpected occupancy increase!\n" \
-            f"Expected: {base_occupancy}, actual: {new_occupancy}"
+        if new_occupancy != base_occupancy:
+            TestRun.fail("Unexpected occupancy increase!\n"
+                         f"Expected: {base_occupancy}, actual: {new_occupancy}")
+
+
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+@pytest.mark.parametrize("filesystem", Filesystem)
+def test_ioclass_effective_ioclass(filesystem):
+    """
+        title: Effective IO class with multiple non-exclusive conditions
+        description: |
+          Test CAS ability to properly classify IO fulfilling multiple conditions based on
+          IO class ids and the presence of the '&done' annotation in IO class rules.
+        pass_criteria:
+          - In every iteration the first IO is classified to the last matching IO class in order
+          - In every iteration the second IO is classified to the IO class with the '&done' annotation
+    """
+    with TestRun.LOGGER.step(f"Test prepare"):
+        cache, core = prepare()
+        Udev.disable()
+        file_size = Size(10, Unit.Blocks4096)
+        file_size_bytes = int(file_size.get_value(Unit.Byte))
+        test_dir = f"{mountpoint}/test"
+        rules = ["direct",  # rule contradicting other rules
+                 f"directory:{test_dir}",
+                 f"file_size:le:{2 * file_size_bytes}",
+                 f"file_size:ge:{file_size_bytes // 2}"]
+
+    with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
+                             f"and mounting {core.system_path} at {mountpoint}"):
+        core.create_filesystem(filesystem)
+        core.mount(mountpoint)
+        fs_utils.create_directory(test_dir)
+        sync()
+
+    for i, permutation in TestRun.iteration(enumerate(permutations(range(1, 5)), start=1)):
+        with TestRun.LOGGER.step("Load IO classes in order specified by permutation"):
+            load_io_classes_in_permutation_order(rules, permutation, cache)
+            io_class_id = 3 if rules[permutation.index(4)] == "direct" else 4
+
+        with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
+            base_occupancy = cache.get_io_class_statistics(
+                io_class_id=io_class_id).usage_stats.occupancy
+            fio = (Fio().create_command()
+                   .io_engine(IoEngine.libaio)
+                   .size(file_size)
+                   .read_write(ReadWrite.write)
+                   .target(f"{test_dir}/test_file{i}"))
+            fio.run()
+            sync()
+
+        with TestRun.LOGGER.step("Check if IO was properly classified "
+                                 "(to the last non-contradicting IO class)"):
+            new_occupancy = cache.get_io_class_statistics(
+                io_class_id=io_class_id).usage_stats.occupancy
+            if new_occupancy != base_occupancy + file_size:
+                TestRun.LOGGER.error("Wrong IO classification!\n"
+                                     f"Expected: {base_occupancy + file_size}, "
+                                     f"actual: {new_occupancy}")
+
+        with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
+            io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)
+
+        with TestRun.LOGGER.step("Repeat IO"):
+            base_occupancy = cache.get_io_class_statistics(
+                io_class_id=io_class_id).usage_stats.occupancy
+            fio.run()
+            sync()
+
+        with TestRun.LOGGER.step("Check if IO was properly classified "
+                                 "(to the IO class with '&done' annotation)"):
+            new_occupancy = cache.get_io_class_statistics(
+                io_class_id=io_class_id).usage_stats.occupancy
+            if new_occupancy != base_occupancy + file_size:
+                TestRun.LOGGER.error("Wrong IO classification!\n"
+                                     f"Expected: {base_occupancy + file_size}, "
+                                     f"actual: {new_occupancy}")
+
+
+def load_io_classes_in_permutation_order(rules, permutation, cache):
+    ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
+    ioclass_config.create_ioclass_config(
+        add_default_rule=False, ioclass_config_path=ioclass_config_path
+    )
+    # To make the test more precise, all workloads except the tested IO class should be
+    # put in pass-through mode
+    ioclass_list = [IoClass.default(allocation=False)]
+    for n in range(len(rules)):
+        ioclass_list.append(IoClass(class_id=permutation[n], rule=rules[n]))
+    IoClass.save_list_to_config_file(ioclass_list,
+                                     add_default_rule=False,
+                                     ioclass_config_path=ioclass_config_path)
+    casadm.load_io_classes(cache.cache_id, file=ioclass_config_path)
+
+
+def add_done_to_second_non_exclusive_condition(rules, permutation, cache):
+    non_exclusive_conditions = 0
+    second_class_id = 1
+    while True:
+        idx = permutation.index(second_class_id)
+        if rules[idx] != "direct":
+            non_exclusive_conditions += 1
+            if non_exclusive_conditions == 2:
+                break
+        second_class_id += 1
+    fs_utils.replace_first_pattern_occurrence(ioclass_config_path,
+                                              rules[idx], f"{rules[idx]}&done")
+    sync()
+    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
+    return second_class_id