Move OCL tests from test-framework repository

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga
2019-10-17 17:15:38 +02:00
parent d2666b785a
commit 4fb82abeca
35 changed files with 4102 additions and 0 deletions


@@ -0,0 +1,31 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
import pytest
from api.cas import casadm
from tests.conftest import base_prepare
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize('prepare_and_cleanup',
[{"core_count": 0, "cache_count": 0}],
indirect=True)
def test_cli_help(prepare_and_cleanup, shortcut):
prepare()
LOGGER.info("Test run")
output = casadm.help(shortcut)
LOGGER.info(output.stdout)  # TODO: this is temporary; every ssh command shall be logged via the executor
assert output.stdout[0:33] == "Cache Acceleration Software Linux"
# TODO: create yml config for every help command and match the output with it
# TODO: for now the assert above is purely for testing flow in the casadm api
def prepare():
base_prepare()


@@ -0,0 +1,76 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
import pytest
from api.cas import casadm, casadm_parser
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_utils.size import Unit, Size
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize('prepare_and_cleanup',
[{"core_count": 0, "cache_count": 1, "cache_type": "optane"}, ],
indirect=True)
def test_cli_start_stop_default_value(prepare_and_cleanup, shortcut):
prepare()
cache_device = next(
disk for disk in TestRun.dut.disks if disk.disk_type == DiskType.optane)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
casadm.start_cache(cache_device, shortcut=shortcut, force=True)
caches = casadm_parser.get_caches()
assert len(caches) == 1
assert caches[0].cache_device.system_path == cache_device.system_path
casadm.stop_cache(cache_id=caches[0].cache_id, shortcut=shortcut)
output = casadm.list_caches(shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches) == 0
assert output.stdout == "No caches running"
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize('prepare_and_cleanup',
[{"core_count": 1, "cache_count": 1, "cache_type": "optane"}],
indirect=True)
def test_cli_add_remove_default_value(prepare_and_cleanup, shortcut):
prepare()
cache_device = next(
disk for disk in TestRun.dut.disks if disk.disk_type == DiskType.optane)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
core_device = next(
disk for disk in TestRun.dut.disks if disk.disk_type != DiskType.optane)
casadm.add_core(cache, core_device, shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches[0].get_core_devices()) == 1
assert caches[0].get_core_devices()[0].core_device.system_path == core_device.system_path
casadm.remove_core(cache.cache_id, 1, shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches) == 1
assert len(caches[0].get_core_devices()) == 0
casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)
output = casadm.list_caches(shortcut=shortcut)
caches = casadm_parser.get_caches()
assert len(caches) == 0
assert output.stdout == "No caches running"
def prepare():
base_prepare()


@@ -0,0 +1,151 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
import os
import sys
import yaml
from IPy import IP
sys.path.append(os.path.join(os.path.dirname(__file__), "../test-framework"))
from core.test_run_utils import TestRun
from api.cas import installer
from api.cas import casadm
from test_utils.os_utils import Udev
# TODO: Provide basic plugin subsystem
plugins_dir = os.path.join(os.path.dirname(__file__), "../plugins")
sys.path.append(plugins_dir)
try:
from test_wrapper import plugin as test_wrapper
except ImportError:
pass
pytest_options = {}
@pytest.fixture(scope="session", autouse=True)
def get_pytest_options(request):
pytest_options["remote"] = request.config.getoption("--remote")
pytest_options["branch"] = request.config.getoption("--repo-tag")
pytest_options["force_reinstall"] = request.config.getoption("--force-reinstall")
@pytest.fixture()
def prepare_and_cleanup(request):
"""
This fixture returns a dictionary which contains the DUT IP, IPMI, spider and the list of disks.
It also provides the executor used to run commands.
"""
# A DUT config file should be added to the config package and pytest should be
# executed with the option '--dut-config=conf_name'.
#
# The 'ip' field should be filled with a valid IP string to use the remote ssh executor,
# or it should be commented out when the user wants to execute tests on the local machine.
#
# The user can also provide their own test wrapper, which runs test prepare, cleanup, etc.
# The path to it should then be added in the config/configuration.py file:
# test_wrapper_dir = 'wrapper_path'
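# Example DUT config sketch (illustrative only - apart from 'ip', the only key referenced
# directly in this file, the exact schema depends on the test-framework and the optional
# test wrapper):
#
# ip: "192.168.0.10"
# disks:
#   - path: "/dev/nvme0n1"
#     disk_type: "optane"
#   - path: "/dev/sdb"
#     disk_type: "hdd"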
try:
with open(request.config.getoption('--dut-config')) as cfg:
dut_config = yaml.safe_load(cfg)
except Exception:
dut_config = {}
if 'test_wrapper' in sys.modules:
if 'ip' in dut_config:
try:
IP(dut_config['ip'])
except ValueError:
raise Exception("IP address from configuration file is in invalid format.")
dut_config = test_wrapper.prepare(request.param, dut_config)
TestRun.prepare(dut_config)
TestRun.plugins['opencas'] = {'already_updated': False}
TestRun.LOGGER.info(f"**********Test {request.node.name} started!**********")
yield
TestRun.LOGGER.info("Test cleanup")
Udev.enable()
unmount_cas_devices()
casadm.stop_all_caches()
if 'test_wrapper' in sys.modules:
test_wrapper.cleanup()
def pytest_addoption(parser):
parser.addoption("--dut-config", action="store", default="None")
parser.addoption("--remote", action="store", default="origin")
parser.addoption("--repo-tag", action="store", default="master")
parser.addoption("--force-reinstall", action="store", default="False")
# TODO: investigate whether it is possible to pass the last param as bool
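# One possible sketch for the TODO above (assuming nothing relies on the current string default):
# parser.addoption("--force-reinstall", action="store_true", default=False)
# get_force_param() would then return a real bool instead of the string "False".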
def get_remote():
return pytest_options["remote"]
def get_branch():
return pytest_options["branch"]
def get_force_param():
return pytest_options["force_reinstall"]
def unmount_cas_devices():
output = TestRun.executor.run("cat /proc/mounts | grep cas")
# If the exit code is 1 and stdout is empty, grep found no matches, i.e. there are no mounted cas devices
if output.exit_code == 1:
return
elif output.exit_code != 0:
raise Exception(
f"Failed to list mounted cas devices. \
stdout: {output.stdout} \n stderr :{output.stderr}"
)
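# Example /proc/mounts line matched by the grep above (device and mountpoint are hypothetical):
# /dev/cas1-1 /mnt/cas1-1 ext4 rw,relatime 0 0
# line.split()[0] below extracts the device path to be unmounted.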
for line in output.stdout.splitlines():
cas_device_path = line.split()[0]
TestRun.LOGGER.info(f"Unmounting {cas_device_path}")
output = TestRun.executor.run(f"umount {cas_device_path}")
if output.exit_code != 0:
raise Exception(
f"Failed to unmount {cas_device_path}. \
stdout: {output.stdout} \n stderr :{output.stderr}"
)
def kill_all_io():
TestRun.executor.run("pkill --signal SIGKILL dd")
TestRun.executor.run("kill -9 `ps aux | grep -i vdbench.* | awk '{ print $1 }'`")
TestRun.executor.run("pkill --signal SIGKILL fio*")
def base_prepare():
TestRun.LOGGER.info("Base test prepare")
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
Udev.enable()
kill_all_io()
if installer.check_if_installed():
try:
unmount_cas_devices()
casadm.stop_all_caches()
except Exception:
pass # TODO: Reboot DUT if test is executed remotely
if get_force_param() != "False" and not TestRun.plugins['opencas']['already_updated']:
installer.reinstall_opencas()
elif not installer.check_if_installed():
installer.install_opencas()
TestRun.plugins['opencas']['already_updated'] = True


@@ -0,0 +1,65 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from test_tools.disk_utils import Filesystem
from test_utils.size import Size, Unit
from core.test_run import TestRun
from tests.conftest import base_prepare
from test_utils.filesystem.file import File
from test_utils.filesystem.directory import Directory
from test_tools import fs_utils
def setup_module():
TestRun.LOGGER.warning("Entering setup method")
@pytest.mark.parametrize('prepare_and_cleanup',
[{"cache_type": "nand", "cache_count": 1}],
indirect=True)
def test_create_example_partitions(prepare_and_cleanup):
prepare()
TestRun.LOGGER.info("Test run")
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
test_disk = TestRun.dut.disks[0]
part_sizes = []
for i in range(1, 6):
part_sizes.append(Size(10 * i + 100, Unit.MebiByte))
test_disk.create_partitions(part_sizes)
TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
test_disk.partitions[0].create_filesystem(Filesystem.ext3)
@pytest.mark.parametrize('prepare_and_cleanup',
[{"cache_type": "nand", "cache_count": 1}],
indirect=True)
def test_create_example_files(prepare_and_cleanup):
prepare()
TestRun.LOGGER.info("Test run")
file1 = File.create_file("example_file")
file1.write("Test file\ncontent line\ncontent")
content_before_change = file1.read()
TestRun.LOGGER.info(f"File content: {content_before_change}")
fs_utils.replace_in_lines(file1, 'content line', 'replaced line')
content_after_change = file1.read()
assert content_before_change != content_after_change
file2 = file1.copy('/tmp', force=True)
assert file1.md5sum() == file2.md5sum()
file2.chmod_numerical(123)
fs_utils.remove(file2.full_path, True)
dir1 = Directory("~")
dir_content = dir1.ls()
file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'], fs_utils.PermissionsUsers(7))
for item in dir_content:
TestRun.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
fs_utils.remove(file1.full_path, True)
def prepare():
base_prepare()


@@ -0,0 +1,169 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from core.test_run import TestRun
from tests.conftest import base_prepare
from storage_devices.disk import DiskType
from test_utils.size import Size, Unit
from api.cas.cache_config import CacheMode
from api.cas import casadm
from test_tools.dd import Dd
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_core_inactive(prepare_and_cleanup):
"""
1. Start cache with 3 cores.
2. Stop cache.
3. Remove one of core devices.
4. Load cache.
5. Check if cache has appropriate number of valid and inactive core devices.
"""
cache, core_device = prepare()
cache_device = cache.cache_device
stats = cache.get_cache_statistics()
assert stats["core devices"] == 3
assert stats["inactive core devices"] == 0
TestRun.LOGGER.info("Stopping cache")
cache.stop()
TestRun.LOGGER.info("Removing one of core devices")
core_device.remove_partitions()
core_device.create_partitions([Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)])
TestRun.LOGGER.info("Loading cache with missing core device")
cache = casadm.start_cache(cache_device, load=True)
stats = cache.get_cache_statistics()
assert stats["core devices"] == 3
assert stats["inactive core devices"] == 1
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_core_inactive_stats(prepare_and_cleanup):
"""
1. Start cache with 3 cores.
2. Switch cache into WB mode.
3. Issue IO to each core.
4. Stop cache without flush.
5. Remove two core devices.
6. Load cache.
7. Check if cache stats are equal to the sum of valid and inactive cores' stats.
8. Check if percentage values are calculated properly.
"""
cache, core_device = prepare()
cache_device = cache.cache_device
TestRun.LOGGER.info(cache_device)
TestRun.LOGGER.info("Switching cache mode to WB")
cache.set_cache_mode(cache_mode=CacheMode.WB)
cores = cache.get_core_devices()
TestRun.LOGGER.info("Issue IO to each core")
for core in cores:
(
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1000)
.block_size(Size(4, Unit.KibiByte))
).run()
TestRun.LOGGER.info("Stopping cache with dirty data")
cores[2].flush_core()
cache.stop(no_data_flush=True)
TestRun.LOGGER.info("Removing two of core devices")
core_device.remove_partitions()
core_device.create_partitions([Size(1, Unit.GibiByte)])
TestRun.LOGGER.info("Loading cache with missing core device")
cache = casadm.start_cache(cache_device, load=True)
# Accumulate valid cores stats
cores_occupancy = 0
cores_clean = 0
cores_dirty = 0
cores = cache.get_core_devices()
for core in cores:
core_stats = core.get_core_statistics()
cores_occupancy += core_stats["occupancy"].value
cores_clean += core_stats["clean"].value
cores_dirty += core_stats["dirty"].value
cache_stats = cache.get_cache_statistics()
# Add inactive core stats
cores_occupancy += cache_stats["inactive occupancy"].value
cores_clean += cache_stats["inactive clean"].value
cores_dirty += cache_stats["inactive dirty"].value
assert cache_stats["occupancy"].value == cores_occupancy
assert cache_stats["dirty"].value == cores_dirty
assert cache_stats["clean"].value == cores_clean
cache_stats_percentage = cache.get_cache_statistics(percentage_val=True)
# Calculate expected percentage value of inactive core stats
inactive_occupancy_perc = (
cache_stats["inactive occupancy"].value / cache_stats["cache size"].value
)
inactive_clean_perc = (
cache_stats["inactive clean"].value / cache_stats["occupancy"].value
)
inactive_dirty_perc = (
cache_stats["inactive dirty"].value / cache_stats["occupancy"].value
)
inactive_occupancy_perc = round(100 * inactive_occupancy_perc, 1)
inactive_clean_perc = round(100 * inactive_clean_perc, 1)
inactive_dirty_perc = round(100 * inactive_dirty_perc, 1)
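# E.g. (hypothetical numbers) an inactive occupancy of 1 GiB on a cache of size 4 GiB gives
# inactive_occupancy_perc == round(100 * 1 / 4, 1) == 25.0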
TestRun.LOGGER.info(cache_stats_percentage)
assert inactive_occupancy_perc == cache_stats_percentage["inactive occupancy"]
assert inactive_clean_perc == cache_stats_percentage["inactive clean"]
assert inactive_dirty_perc == cache_stats_percentage["inactive dirty"]
def prepare():
base_prepare()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (
disk.disk_type.value > cache_device.disk_type.value and disk != cache_device
)
)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions(
[Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
)
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
core_device_2 = core_device.partitions[1]
core_device_3 = core_device.partitions[2]
TestRun.LOGGER.info("Staring cache")
cache = casadm.start_cache(cache_device, force=True)
TestRun.LOGGER.info("Adding core device")
core_1 = cache.add_core(core_dev=core_device_1)
core_2 = cache.add_core(core_dev=core_device_2)
core_3 = cache.add_core(core_dev=core_device_3)
return cache, core_device


@@ -0,0 +1,60 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from api.cas import casadm
from api.cas import ioclass_config
from api.cas.cache_config import CacheMode, CleaningPolicy
from storage_devices.disk import DiskType
from tests.conftest import base_prepare
from core.test_run import TestRun
from test_utils.size import Size, Unit
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
def prepare():
base_prepare()
ioclass_config.remove_ioclass_config()
cache_device = next(filter(
lambda disk: disk.disk_type in [DiskType.optane, DiskType.nand],
TestRun.dut.disks
))
core_device = next(filter(
lambda disk: disk.disk_type.value > cache_device.disk_type.value,
TestRun.dut.disks
))
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_device = cache_device.partitions[0]
core_device = core_device.partitions[0]
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(cache_device, cache_mode=CacheMode.WB, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core device")
core = casadm.add_core(cache, core_dev=core_device)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
# To make the test more precise, all workloads except the tested ioclass should be
# put in pass-through mode
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
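# The call above should append a config line similar to the following
# (column layout assumed to be "id,rule,eviction priority,allocation"):
# 0,unclassified,22,0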
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint {mountpoint}")
return cache, core


@@ -0,0 +1,392 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
from datetime import datetime
import pytest
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.directory import Directory
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync, Udev
from .io_class_common import *
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_directory_depth(prepare_and_cleanup, filesystem):
"""
Test if directory classification works properly for deeply nested directories for read and
write operations.
"""
cache, core = prepare()
Udev.disable()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_dir_path = f"{mountpoint}/base_dir"
TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
fs_utils.create_directory(base_dir_path)
nested_dir_path = base_dir_path
random_depth = random.randint(40, 80)
for i in range(random_depth):
nested_dir_path += f"/dir_{i}"
TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
fs_utils.create_directory(path=nested_dir_path, parents=True)
# Test classification in nested dir by reading a previously unclassified file
TestRun.LOGGER.info("Creating the first file in the nested directory")
test_file_1 = File(f"{nested_dir_path}/test_file_1")
dd = (
Dd()
.input("/dev/urandom")
.output(test_file_1.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_1.refresh_item()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
base_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
TestRun.LOGGER.info("Reading the file in the nested directory")
dd = (
Dd()
.input(test_file_1.full_path)
.output("/dev/null")
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + test_file_1.size, \
"Wrong occupancy after reading file!\n" \
f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"
# Test classification in nested dir by creating a file
base_occupancy = new_occupancy
TestRun.LOGGER.info("Creating the second file in the nested directory")
test_file_2 = File(f"{nested_dir_path}/test_file_2")
dd = (
Dd()
.input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
)
dd.run()
sync()
drop_caches(DropCachesMode.ALL)
test_file_2.refresh_item()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + test_file_2.size, \
"Wrong occupancy after creating file!\n" \
f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_directory_dir_operations(prepare_and_cleanup, filesystem):
"""
Test if directory classification works properly after directory operations like move or rename.
The operations themselves should not cause reclassification, but IO performed after those
operations should be reclassified to the proper IO class.
Directory classification may work with a delay after loading the IO class configuration or
after move/rename operations. The test checks that the maximum delay is not exceeded.
"""
def create_files_with_classification_delay_check(directory: Directory, ioclass_id: int):
start_time = datetime.now()
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
dd_blocks = 10
dd_size = Size(dd_blocks, Unit.Blocks4096)
file_counter = 0
unclassified_files = []
time_from_start = datetime.now() - start_time
while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
occupancy_before = occupancy_after
file_path = f"{directory.full_path}/test_file_{file_counter}"
file_counter += 1
time_from_start = datetime.now() - start_time
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
if occupancy_after - occupancy_before < dd_size:
unclassified_files.append(file_path)
if len(unclassified_files) == file_counter:
pytest.xfail("No files were properly classified within max delay time!")
if len(unclassified_files):
TestRun.LOGGER.info("Rewriting unclassified test files...")
for file_path in unclassified_files:
(Dd().input("/dev/zero").output(file_path).oflag("sync")
.block_size(Size(1, Unit.Blocks4096)).count(dd_blocks).run())
def read_files_with_reclassification_check(
target_ioclass_id: int, source_ioclass_id: int, directory: Directory, with_delay: bool):
start_time = datetime.now()
target_occupancy_after = cache.get_cache_statistics(
io_class_id=target_ioclass_id)["occupancy"]
source_occupancy_after = cache.get_cache_statistics(
io_class_id=source_ioclass_id)["occupancy"]
unclassified_files = []
for file in [item for item in directory.ls() if isinstance(item, File)]:
target_occupancy_before = target_occupancy_after
source_occupancy_before = source_occupancy_after
time_from_start = datetime.now() - start_time
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
target_occupancy_after = cache.get_cache_statistics(
io_class_id=target_ioclass_id)["occupancy"]
source_occupancy_after = cache.get_cache_statistics(
io_class_id=source_ioclass_id)["occupancy"]
if target_occupancy_after < target_occupancy_before:
pytest.xfail("Target IO class occupancy lowered!")
elif target_occupancy_after - target_occupancy_before < file.size:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Target IO class occupancy not changed properly!")
if source_occupancy_after >= source_occupancy_before:
if file not in unclassified_files:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Source IO class occupancy not changed properly!")
if len(unclassified_files):
TestRun.LOGGER.info("Rereading unclassified test files...")
sync()
drop_caches(DropCachesMode.ALL)
for file in unclassified_files:
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
cache, core = prepare()
Udev.disable()
proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
ioclass_id_1 = proper_ids[0]
classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
ioclass_id_2 = proper_ids[1]
classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
# directory IO classes
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
non_classified_dir_path = f"{mountpoint}/non_classified"
TestRun.LOGGER.info(
f"Creating a non-classified directory: {non_classified_dir_path}")
dir_1 = Directory.create_directory(path=non_classified_dir_path)
TestRun.LOGGER.info(f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
dir_1.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_1, ioclass_id=ioclass_id_1)
TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir", parents=True)
TestRun.LOGGER.info("Creating files with delay check")
create_files_with_classification_delay_check(directory=dir_2, ioclass_id=ioclass_id_2)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
dir_2.move(destination=classified_dir_path_1)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_1, source_ioclass_id=ioclass_id_2,
directory=dir_2, with_delay=False)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
dir_2.move(destination=mountpoint)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
dir_1.move(destination=classified_dir_path_2)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=ioclass_id_2, source_ioclass_id=ioclass_id_1,
directory=dir_1, with_delay=True)
TestRun.LOGGER.info(f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
dir_1.move(destination=non_classified_dir_path)
TestRun.LOGGER.info("Reading files with reclassification check")
read_files_with_reclassification_check(
target_ioclass_id=0, source_ioclass_id=ioclass_id_2,
directory=dir_1, with_delay=True)
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_directory_file_operations(prepare_and_cleanup, filesystem):
"""
Test if directory classification works properly after file operations like move or rename.
The operations themselves should not cause reclassification, but IO performed after those
operations should be reclassified to the proper IO class.
"""
def check_occupancy(expected: Size, actual: Size):
if expected != actual:
pytest.xfail("Occupancy check failed!\n"
f"Expected: {expected}, actual: {actual}")
cache, core = prepare()
Udev.disable()
test_dir_path = f"{mountpoint}/test_dir"
nested_dir_path = f"{test_dir_path}/nested_dir"
dd_blocks = random.randint(5, 50)
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
Directory.create_directory(path=nested_dir_path, parents=True)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Creating test file")
classified_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
file_path = f"{test_dir_path}/test_file"
(Dd().input("/dev/urandom").output(file_path).oflag("sync")
.block_size(Size(1, Unit.MebiByte)).count(dd_blocks).run())
sync()
drop_caches(DropCachesMode.ALL)
test_file = File(file_path).refresh_item()
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Moving test file out of classified directory")
classified_before = classified_after
non_classified_before = cache.get_cache_statistics(io_class_id=0)["occupancy"]
test_file.move(destination=mountpoint)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before - test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before + test_file.size, non_classified_after)
TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
classified_before = classified_after
non_classified_before = non_classified_after
test_file.move(destination=nested_dir_path)
sync()
drop_caches(DropCachesMode.ALL)
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before, non_classified_after)
TestRun.LOGGER.info("Reading test file")
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
TestRun.LOGGER.info("Checking classified occupancy")
classified_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
check_occupancy(classified_before + test_file.size, classified_after)
TestRun.LOGGER.info("Checking non-classified occupancy")
non_classified_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
check_occupancy(non_classified_before - test_file.size, non_classified_after)


@@ -0,0 +1,374 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
import pytest
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev, DropCachesMode, drop_caches
from .io_class_common import *
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_extension(prepare_and_cleanup):
cache, core = prepare()
iterations = 50
ioclass_id = 1
tested_extension = "tmp"
wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Check if file with proper extension is cached
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{tested_extension}")
.count(dd_count)
.block_size(dd_size)
)
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for i in range(iterations):
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == (i + 1) * dd_count
cache.flush_cache()
# Check if file with improper extension is not cached
TestRun.LOGGER.info(f"Writing to file with no cached extension.")
for ext in wrong_extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == 0
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_extension_preexisting_filesystem(prepare_and_cleanup):
"""Create files on filesystem, add device with filesystem as a core,
write data to files and check if they are cached properly"""
cache, core = prepare()
ioclass_id = 1
extensions = ["tmp", "tm", "out", "txt", "log", "123"]
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
TestRun.LOGGER.info(f"Preparing files on raw block device")
casadm.remove_core(cache.cache_id, core_id=core.core_id)
core.core_device.create_filesystem(Filesystem.ext3)
core.core_device.mount(mountpoint)
# Prepare files
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
core.core_device.unmount()
# Prepare ioclass config
rule = "|".join([f"extension:{ext}" for ext in extensions])
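# With the extensions defined above, 'rule' evaluates to:
# "extension:tmp|extension:tm|extension:out|extension:txt|extension:log|extension:123"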
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
TestRun.LOGGER.info(f"Adding device with preexisting data as a core")
core = casadm.add_core(cache, core_dev=core.core_device)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
core.mount(mountpoint)
cache.flush_cache()
# Check if files with proper extensions are cached
TestRun.LOGGER.info(f"Writing to file with cached extension.")
for ext in extensions:
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/test_file.{ext}")
.count(dd_count)
.block_size(dd_size)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096)
== (extensions.index(ext) + 1) * dd_count
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_offset(prepare_and_cleanup):
cache, core = prepare()
ioclass_id = 1
iterations = 100
dd_size = Size(4, Unit.KibiByte)
dd_count = 1
min_cached_offset = 16384
max_cached_offset = 65536
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(
f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
)
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
cache.flush_cache()
# Since the ioclass rule consists of strict inequalities, 'seek' can't be set to the first
# or the last cached block
min_seek = int((min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
max_seek = int(
(max_cached_offset - min_cached_offset - Unit.Blocks4096.value)
/ Unit.Blocks4096.value
)
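# With the values above: min_seek = (16384 + 4096) / 4096 = 5 and
# max_seek = (65536 - 16384 - 4096) / 4096 = 11, so 'seek' is drawn from blocks 5..10,
# i.e. byte offsets 20480..40960, strictly inside the (16384, 65536) range.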
TestRun.LOGGER.info(f"Writing to file within cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == 1
), f"Offset not cached: {file_offset}"
cache.flush_cache()
min_seek = 0
max_seek = int(min_cached_offset / Unit.Blocks4096.value)
TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
for i in range(iterations):
file_offset = random.choice(range(min_seek, max_seek))
dd = (
Dd()
.input("/dev/zero")
.output(f"{mountpoint}/tmp_file")
.count(dd_count)
.block_size(dd_size)
.seek(file_offset)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == 0
), f"Inappropriately cached offset: {file_offset}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_file_size(prepare_and_cleanup, filesystem):
"""
File size IO class rules are configured in a way that each tested file size is unambiguously
classified.
First, write operations (file creation) are tested, then read operations.
"""
def load_file_size_io_classes():
# IO class order intentional, do not change
base_size_bytes = int(base_size.get_value(Unit.Byte))
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:lt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule=f"file_size:le:{int(base_size_bytes / 2)}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"file_size:ge:{2 * base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
def create_files_and_check_classification():
TestRun.LOGGER.info("Creating files belonging to different IO classes "
"(classification by writes).")
for size, ioclass_id in size_to_class.items():
occupancy_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
file_path = f"{mountpoint}/test_file_{size.get_value()}"
Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
if occupancy_after != occupancy_before + size:
pytest.xfail("File not cached properly!\n"
f"Expected {occupancy_before + size}\n"
f"Actual {occupancy_after}")
test_files.append(File(file_path).refresh_item())
sync()
drop_caches(DropCachesMode.ALL)
def reclassify_files():
TestRun.LOGGER.info("Reading files belonging to different IO classes "
"(classification by reads).")
for file in test_files:
ioclass_id = size_to_class[file.size]
occupancy_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
if occupancy_after != occupancy_before + file.size:
pytest.xfail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
sync()
drop_caches(DropCachesMode.ALL)
def remove_files_classification():
TestRun.LOGGER.info("Moving all files to 'unclassified' IO class")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
occupancy_before = cache.get_cache_statistics(io_class_id=0)["occupancy"]
for file in test_files:
Dd().input(file.full_path).output("/dev/null").block_size(file.size).run()
occupancy_after = cache.get_cache_statistics(io_class_id=0)["occupancy"]
if occupancy_after != occupancy_before + file.size:
pytest.xfail("File not reclassified properly!\n"
f"Expected {occupancy_before + file.size}\n"
f"Actual {occupancy_after}")
occupancy_before = occupancy_after
sync()
drop_caches(DropCachesMode.ALL)
def restore_classification_config():
TestRun.LOGGER.info("Restoring IO class configuration")
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
load_file_size_io_classes()
cache, core = prepare()
Udev.disable()
base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
size_to_class = {
base_size: 1,
base_size - Unit.Blocks4096: 2,
base_size + Unit.Blocks4096: 3,
base_size / 2: 4,
base_size / 2 - Unit.Blocks4096: 4,
base_size / 2 + Unit.Blocks4096: 2,
base_size * 2: 5,
base_size * 2 - Unit.Blocks4096: 3,
base_size * 2 + Unit.Blocks4096: 5,
}
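# Note (assumption inferred from the mapping above): when several file_size rules match,
# the IO class with the higher id is expected to win, e.g. base_size / 2 satisfies both
# "lt" (class 2) and "le base/2" (class 4) and is expected to land in class 4.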
load_file_size_io_classes()
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
test_files = []
create_files_and_check_classification()
remove_files_classification()
restore_classification_config()
reclassify_files()


@@ -0,0 +1,116 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import time
import pytest
from test_tools.dd import Dd
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_process_name(prepare_and_cleanup):
"""Check if data generated by process with particular name is cached"""
cache, core = prepare()
ioclass_id = 1
dd_size = Size(4, Unit.KibiByte)
dd_count = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"process_name:dd&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache()
Udev.disable()
TestRun.LOGGER.info(f"Check if all data generated by dd process is cached.")
for i in range(iterations):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
)
dd.run()
sync()
time.sleep(0.1)
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == (i + 1) * dd_count
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_pid(prepare_and_cleanup):
cache, core = prepare()
ioclass_id = 1
iterations = 20
dd_count = 100
dd_size = Size(4, Unit.KibiByte)
Udev.disable()
# Since 'dd' has to be executed right after writing the pid to 'ns_last_pid',
# the 'dd' command is built up front and appended to the 'echo' command instead of being run directly
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(dd_count)
.block_size(dd_size)
)
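# The composed shell command executed below ends up roughly as (pid value illustrative,
# exact dd argument rendering depends on the Dd helper):
# echo 12344 > /proc/sys/kernel/ns_last_pid && dd if=/dev/zero of=<core device> count=100 bs=4096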
for i in range(iterations):
cache.flush_cache()
output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
if output.exit_code != 0:
raise Exception(
f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
)
# A few pids might be used by the system during test preparation, hence the offset
pid = int(output.stdout) + 50
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Running dd with pid {pid}")
# The pid written to 'ns_last_pid' has to be one less than the target dd pid
dd_and_pid_command = (
f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
)
output = TestRun.executor.run(dd_and_pid_command)
if output.exit_code != 0:
raise Exception(
f"Failed to run dd with target pid. "
f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == dd_count
ioclass_config.remove_ioclass(ioclass_id)


@@ -0,0 +1,577 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
import pytest
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_lba(prepare_and_cleanup):
"""Write data to random lba and check if it is cached according to range
defined in ioclass rule"""
cache, core = prepare()
ioclass_id = 1
min_cached_lba = 56
max_cached_lba = 200
iterations = 100
dd_size = Size(1, Unit.Blocks512)
dd_count = 1
# Prepare ioclass config
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache()
# Check if LBAs from the defined range are cached
dirty_count = 0
# The step of 8 prevents writing the same cache line more than once
# (8 x 512 B sectors = one 4 KiB cache line)
TestRun.LOGGER.info("Writing to one sector in each cache line from the defined range.")
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
)
dd.run()
sync()
dirty_count += 1
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == dirty_count
), f"LBA {lba} not cached"
cache.flush_cache()
# Check if LBAs outside of the defined range are not cached
TestRun.LOGGER.info("Writing to random sectors outside of the cached range.")
for i in range(iterations):
rand_lba = random.randrange(2000)
if min_cached_lba <= rand_lba <= max_cached_lba:
continue
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
)
dd.run()
sync()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096) == 0
), f"Inappropriately cached lba: {rand_lba}"
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_request_size(prepare_and_cleanup):
cache, core = prepare()
ioclass_id = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"request_size:ge:8192&request_size:le:16384&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
Udev.disable()
# Check if requests with appropriate size are cached
TestRun.LOGGER.info(
f"Check if requests with size within defined range are cached"
)
cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
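# 2 x 4 KiB = 8192 B and 4 x 4 KiB = 16384 B, i.e. exactly the inclusive bounds
# of the "request_size:ge:8192&request_size:le:16384" rule above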
for i in range(iterations):
cache.flush_cache()
req_size = random.choice(cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert (
stats["dirty"].get_value(Unit.Blocks4096)
== req_size.value / Unit.Blocks4096.value
)
cache.flush_cache()
# Check if requests with inappropriate size are not cached
TestRun.LOGGER.info(
f"Check if requests with size outside defined range are not cached"
)
not_cached_req_sizes = [
Size(1, Unit.Blocks4096),
Size(8, Unit.Blocks4096),
Size(16, Unit.Blocks4096),
]
for i in range(iterations):
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
stats = cache.get_cache_statistics(io_class_id=ioclass_id)
assert stats["dirty"].get_value(Unit.Blocks4096) == 0
@pytest.mark.parametrize("filesystem", list(Filesystem) + [False])
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_direct(prepare_and_cleanup, filesystem):
"""
Perform buffered/direct IO to/from files or a raw block device.
Data from direct IO should be cached.
Data from buffered IO should not be cached and, if performed to/from already cached data,
should cause reclassification to the unclassified IO class.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = 1
io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)
# direct IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
fio = (
Fio().create_command()
.io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
)
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
else:
TestRun.LOGGER.info("Testing on raw exported object")
base_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}")
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy, \
"Buffered writes were cached!\n" \
f"Expected: {base_occupancy}, actual: {new_occupancy}"
TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + io_size, \
"Wrong number of direct writes was cached!\n" \
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
fio.remove_param("readwrite").remove_param("direct")
fio.read_write(ReadWrite.read)
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy, \
"Buffered reads did not cause reclassification!" \
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}"
TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
assert new_occupancy == base_occupancy + io_size, \
"Wrong number of direct reads was cached!\n" \
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_metadata(prepare_and_cleanup, filesystem):
"""
Perform operations on files that cause metadata update.
Determine if every such operation results in increased writes to cached metadata.
Exact values may not be tested as each file system has different metadata structure.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# metadata IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="metadata&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
requests_to_metadata_before = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
TestRun.LOGGER.info("Creating 20 test files")
files = []
for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}"
dd = (
Dd()
.input("/dev/urandom")
.output(file_path)
.count(random.randint(5, 50))
.block_size(Size(1, Unit.MebiByte))
.oflag("sync")
)
dd.run()
files.append(File(file_path))
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while creating files!")
requests_to_metadata_before = requests_to_metadata_after
TestRun.LOGGER.info("Renaming all test files")
for file in files:
file.move(f"{file.full_path}_renamed")
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while renaming files!")
requests_to_metadata_before = requests_to_metadata_after
test_dir_path = f"{mountpoint}/test_dir"
TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
fs_utils.create_directory(path=test_dir_path)
TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
for file in files:
file.move(test_dir_path)
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while moving files!")
TestRun.LOGGER.info(f"Removing {test_dir_path}")
fs_utils.remove(path=test_dir_path, force=True, recursive=True)
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_cache_statistics(
io_class_id=ioclass_id)["write total"]
if requests_to_metadata_after == requests_to_metadata_before:
pytest.xfail("No requests to metadata while deleting directory with files!")
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_id_as_condition(prepare_and_cleanup, filesystem):
"""
Load config in which IO class ids are used as conditions in other IO class definitions.
Check if performed IO is properly classified.
"""
cache, core = prepare()
Udev.disable()
base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
# directory condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
# file size condition
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# direct condition
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
# IO class 1 OR 2 condition
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule="io_class:1|io_class:2",
ioclass_config_path=ioclass_config_path,
)
# IO class 4 AND file size condition (same as IO class 2)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# IO class 3 condition
ioclass_config.add_ioclass(
ioclass_id=6,
eviction_priority=1,
allocation=True,
rule="io_class:3",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
sync()
# IO fulfilling IO class 1 condition (and not IO class 2)
# Should be classified as IO class 4
base_occupancy = cache.get_cache_statistics(io_class_id=4)["occupancy"]
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(non_ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_1")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=4)["occupancy"]
assert new_occupancy == base_occupancy + non_ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}"
# IO fulfilling IO class 2 condition (and not IO class 1)
# Should be classified as IO class 5
base_occupancy = cache.get_cache_statistics(io_class_id=5)["occupancy"]
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file_2")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=5)["occupancy"]
assert new_occupancy == base_occupancy + ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
# IO fulfilling IO class 1 and 2 conditions
# Should be classified as IO class 5
base_occupancy = new_occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=5)["occupancy"]
assert new_occupancy == base_occupancy + ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
# Same IO but direct
# Should be classified as IO class 6
base_occupancy = cache.get_cache_statistics(io_class_id=6)["occupancy"]
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.direct()
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=6)["occupancy"]
assert new_occupancy == base_occupancy + ioclass_file_size, \
"Writes were not properly cached!\n" \
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_conditions_or(prepare_and_cleanup, filesystem):
"""
Load config with an IO class combining 5 mutually exclusive directory conditions connected by the OR operator.
Check if every IO fulfilling one of the conditions is classified properly.
"""
cache, core = prepare()
Udev.disable()
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
ioclass_config_path=ioclass_config_path,
)
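# A write under any single one of dir1..dir5 satisfies exactly one of the directory conditions, so each of them should be classified to IO class 1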
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
fs_utils.create_directory(f"{mountpoint}/dir{i}")
sync()
# Perform IO fulfilling each condition and check if occupancy rises
for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/dir{i}/test_file")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
assert new_occupancy == base_occupancy + file_size, \
"Occupancy has not increased correctly!\n" \
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
@pytest.mark.parametrize("filesystem", Filesystem)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_conditions_and(prepare_and_cleanup, filesystem):
"""
Load config with an IO class combining 5 file size conditions, each contradicting at least one of the others,
connected by the AND operator.
Check that IO fulfilling only some of the conditions is not classified to this IO class.
"""
cache, core = prepare()
Udev.disable()
file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte))
# contradicting file size conditions connected by AND
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
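# No single file size can be both greater than and less than the same value, so none of the writes below should be classified to this IO class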
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
# Perform IO
for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]:
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file")
.run())
sync()
new_occupancy = cache.get_cache_statistics(io_class_id=1)["occupancy"]
assert new_occupancy == base_occupancy, \
"Unexpected occupancy increase!\n" \
f"Expected: {base_occupancy}, actual: {new_occupancy}"

View File

@@ -0,0 +1,76 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas import casadm, casadm_parser
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_utils.size import Size, Unit
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_load_occupied_id(prepare_and_cleanup):
"""
1. Start new cache instance (don't specify cache id)
2. Add core to the newly created cache.
3. Stop cache instance.
4. Start new cache instance on another device (don't specify cache id).
5. Try to load metadata from first device.
* Load should fail.
"""
prepare()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (
disk.disk_type.value > cache_device.disk_type.value and disk != cache_device
)
)
TestRun.LOGGER.info("Creating partitons for test")
cache_device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_device_1 = cache_device.partitions[0]
cache_device_2 = cache_device.partitions[1]
core_device = core_device.partitions[0]
TestRun.LOGGER.info("Starting cache with default id and one core")
cache1 = casadm.start_cache(cache_device_1, force=True)
cache1.add_core(core_device)
TestRun.LOGGER.info("Stopping cache")
cache1.stop()
TestRun.LOGGER.info("Starting cache with default id on different device")
cache2 = casadm.start_cache(cache_device_2, force=True)
TestRun.LOGGER.info("Attempt to load metadata from first cache device")
try:
casadm.load_cache(cache_device_1)
except Exception:
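# Load is expected to fail because cache id 1 is already occupied - ignore the error and verify the state below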
pass
caches = casadm_parser.get_caches()
assert len(caches) == 1, "Inappropirate number of caches after load!"
assert caches[0].cache_device.system_path == cache_device_2.system_path
assert caches[0].cache_id == 1
cores = caches[0].get_core_devices()
assert len(cores) == 0
def prepare():
base_prepare()

View File

@@ -0,0 +1,333 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas.casadm import StatsFilter
from api.cas import casadm
from api.cas import ioclass_config
from test_tools.dd import Dd
from api.cas.cache_config import CacheMode, CleaningPolicy
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_utils.size import Size, Unit
from test_utils.os_utils import Udev
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
exported_obj_path_prefix = "/dev/cas1-"
cache_id = 1
# Lists of cache and core block stats that should have zero value for particular cache modes
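# e.g. for direct writes in write-back mode the data should go to the cache only, so core-related and all read-related counters are expected to stay at zero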
write_wb_zero_stats = [
"reads from core(s)",
"writes to core(s)",
"total to/from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"writes to core",
"total to/from core",
"reads from cache",
"reads from exported object",
]
write_wt_zero_stats = [
"reads from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_pt_zero_stats = [
"reads from core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_wa_zero_stats = [
"reads from core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_wo_zero_stats = [
"reads from core(s)",
"writes to core(s)",
"total to/from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"writes to core",
"total to/from core",
"reads from exported object",
]
@pytest.mark.parametrize(
"cache_mode,zero_stats",
[
(CacheMode.WB, write_wb_zero_stats),
(CacheMode.WT, write_wt_zero_stats),
(CacheMode.PT, write_pt_zero_stats),
(CacheMode.WA, write_wa_zero_stats),
(CacheMode.WO, write_wo_zero_stats),
],
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_block_stats_write(prepare_and_cleanup, cache_mode, zero_stats):
"""Perform read and write operations to cache instance in different cache modes
and check if block stats values are correct"""
cache, cores = prepare(cache_mode)
iterations = 10
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
flush(cache)
# Check stats for cache after performing write operation
for core in cores:
dd_seek = 0
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.oflag("direct")
)
# Since every IO has the same size, every stat should increase by the same step,
# so there is no need to keep the value of every stat in a separate variable
cache_stat = (
(dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
)
for i in range(iterations):
dd.seek(dd_seek)
dd.run()
cache_stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
core_stats = core.get_core_statistics(stat_filter=[StatsFilter.blk])
# Check cache stats
assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
for key, value in cache_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
# For each subsequently tested core, cache stats have to include
# the sum of all previously tested cores
assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
# Check single core stats
for key, value in core_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value of \n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
assert assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
)
dd_seek += dd_count
# Lists of cache and core block stats that should have zero value for particular cache modes
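# e.g. for direct reads in write-back mode on a freshly started (empty) cache the data comes from the core, so writes to the core and the exported object, as well as 'reads from cache', are expected to stay at zero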
read_wb_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wt_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_pt_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wa_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wo_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
@pytest.mark.parametrize(
"cache_mode,zero_stats",
[
(CacheMode.WB, read_wb_zero_stats),
(CacheMode.WT, read_wt_zero_stats),
(CacheMode.PT, read_pt_zero_stats),
(CacheMode.WA, read_wa_zero_stats),
(CacheMode.WO, read_wo_zero_stats),
],
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_block_stats_read(prepare_and_cleanup, cache_mode, zero_stats):
"""Perform read and write operations to cache instance in different cache modes
and check if block stats values are correct"""
cache, cores = prepare(cache_mode)
iterations = 10
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
flush(cache)
# Check stats for cache after performing read operation
for core in cores:
dd_skip = 0
dd = (
Dd()
.output("/dev/zero")
.input(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.iflag("direct")
)
# Since every IO has the same size, every stat should increase by the same step,
# so there is no need to keep the value of every stat in a separate variable
cache_stat = (
(dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
)
for i in range(iterations):
dd.skip(dd_skip)
dd.run()
cache_stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
core_stats = core.get_core_statistics(stat_filter=[StatsFilter.blk])
# Check cache stats
assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
for key, value in cache_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
# For each subsequently tested core, cache stats have to include
# the sum of all previously tested cores
assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}. Cache mode: {cache_mode}"
)
# Check single core stats
for key, value in core_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}. Cache mode: {cache_mode}"
)
else:
assert assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count} dd skip {dd_skip}. Cache mode: {cache_mode}"
)
dd_skip += dd_count
def flush(cache):
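# Flush dirty data and reset counters so that every block stat starts the measurement from zero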
cache.flush_cache()
cache.reset_counters()
stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
for key, value in stats.items():
assert value.get_value(Unit.Blocks4096) == 0
def prepare(cache_mode: CacheMode):
base_prepare()
ioclass_config.remove_ioclass_config()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions(
[Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
)
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
core_device_2 = core_device.partitions[1]
core_device_3 = core_device.partitions[2]
Udev.disable()
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core devices")
core_1 = cache.add_core(core_dev=core_device_1)
core_2 = cache.add_core(core_dev=core_device_2)
core_3 = cache.add_core(core_dev=core_device_3)
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, [core_1, core_2, core_3]

View File

@@ -0,0 +1,181 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from api.cas.casadm import StatsFilter
from api.cas import casadm
from api.cas import ioclass_config
from api.cas import casadm_parser
from api.cas.cache_config import CleaningPolicy
from tests.conftest import base_prepare
from core.test_run import TestRun
from storage_devices.disk import DiskType
from test_tools.disk_utils import Filesystem
from test_utils.size import Size, Unit
from test_utils.os_utils import sync, Udev
from test_utils.filesystem.file import File
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
cache_id = 1
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_stats_set(prepare_and_cleanup):
"""Try to retrieve stats for all set ioclasses"""
prepare()
min_ioclass_id = 1
max_ioclass_id = 11
ioclass_config.create_ioclass_config(
add_default_rule=True, ioclass_config_path=ioclass_config_path
)
TestRun.LOGGER.info("Preparing ioclass config file")
for i in range(min_ioclass_id, max_ioclass_id):
ioclass_config.add_ioclass(
ioclass_id=(i + 10),
eviction_priority=22,
allocation=True,
rule=f"file_size:le:{4096*i}&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id, file=ioclass_config_path)
TestRun.LOGGER.info("Preparing ioclass config file")
for i in range(32):
if i == 0 or i in range(min_ioclass_id + 10, max_ioclass_id + 10):
# Stats should be available for the default IO class and every configured id
casadm_parser.get_statistics(
cache_id=cache_id, io_class_id=i, filter=[StatsFilter.conf]
)
else:
# Retrieving stats for an IO class id that was not configured should fail
with pytest.raises(Exception):
casadm_parser.get_statistics(
cache_id=cache_id, io_class_id=i, filter=[StatsFilter.conf]
)
@pytest.mark.parametrize(
"prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_stats_sum(prepare_and_cleanup):
"""Check if stats for all set ioclasses sum up to cache stats"""
cache, core = prepare()
min_ioclass_id = 1
max_ioclass_id = 11
file_size_base = Unit.KibiByte.value * 4
TestRun.LOGGER.info("Preparing ioclass config file")
ioclass_config.create_ioclass_config(
add_default_rule=True, ioclass_config_path=ioclass_config_path
)
for i in range(min_ioclass_id, max_ioclass_id):
ioclass_config.add_ioclass(
ioclass_id=i,
eviction_priority=22,
allocation=True,
rule=f"file_size:le:{file_size_base*i}&done",
ioclass_config_path=ioclass_config_path,
)
cache.load_io_class(ioclass_config_path)
TestRun.LOGGER.info("Generating files with particular sizes")
files_list = []
for i in range(min_ioclass_id, max_ioclass_id):
path = f"/tmp/test_file_{file_size_base*i}"
File.create_file(path)
f = File(path)
f.padding(Size(file_size_base * i, Unit.Byte))
files_list.append(f)
core.create_filesystem(Filesystem.ext4)
cache.reset_counters()
# Names of stats which should not be compared
not_compare_stats = ["clean", "occupancy"]
ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
# Append default ioclass id
ioclass_id_list.append(0)
TestRun.LOGGER.info("Copying files to mounted core and stats check")
for f in files_list:
# To prevent stats pollution by filesystem requests, umount core device
# after file is copied
core.mount(mountpoint)
f.copy(mountpoint)
sync()
core.unmount()
sync()
cache_stats = cache.get_cache_statistics(
stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
)
for ioclass_id in ioclass_id_list:
ioclass_stats = cache.get_cache_statistics(
stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk],
io_class_id=ioclass_id,
)
for stat_name in cache_stats:
if stat_name in not_compare_stats:
continue
cache_stats[stat_name] -= ioclass_stats[stat_name]
for stat_name in cache_stats:
if stat_name in not_compare_stats:
continue
stat_val = (
cache_stats[stat_name].get_value()
if isinstance(cache_stats[stat_name], Size)
else cache_stats[stat_name]
)
assert stat_val == 0, f"{stat_name} diverged!\n"
# Test cleanup
for f in files_list:
f.remove()
def flush_cache(cache_id):
casadm.flush(cache_id=cache_id)
sync()
casadm.reset_counters(cache_id=cache_id)
stats = casadm_parser.get_statistics(cache_id=cache_id, filter=[StatsFilter.blk])
for key, value in stats.items():
assert value.get_value(Unit.Blocks4096) == 0
def prepare():
base_prepare()
ioclass_config.remove_ioclass_config()
cache_device = next(
disk
for disk in TestRun.dut.disks
if disk.disk_type in [DiskType.optane, DiskType.nand]
)
core_device = next(
disk
for disk in TestRun.dut.disks
if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
)
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(2, Unit.GibiByte)])
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
Udev.disable()
TestRun.LOGGER.info(f"Staring cache")
cache = casadm.start_cache(cache_device, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
cache.set_cleaning_policy(CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core devices")
core = cache.add_core(core_dev=core_device_1)
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, core