Move OCL tests from test-framework repository
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
333
test/functional/tests/stats/test_block_stats.py
Normal file
333
test/functional/tests/stats/test_block_stats.py
Normal file
@@ -0,0 +1,333 @@
|
||||
#
|
||||
# Copyright(c) 2019 Intel Corporation
|
||||
# SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
#
|
||||
|
||||
|
||||
import pytest
|
||||
from api.cas.casadm import StatsFilter
|
||||
from api.cas import casadm
|
||||
from api.cas import ioclass_config
|
||||
from test_tools.dd import Dd
|
||||
from api.cas.cache_config import CacheMode, CleaningPolicy
|
||||
from tests.conftest import base_prepare
|
||||
from core.test_run import TestRun
|
||||
from storage_devices.disk import DiskType
|
||||
from test_utils.size import Size, Unit
|
||||
from test_utils.os_utils import Udev
|
||||
|
||||
# Path where the generated io class configuration file is written.
ioclass_config_path = "/tmp/opencas_ioclass.conf"
# Mountpoint used by prepare() for the exported object of cache 1, core 1.
mountpoint = "/tmp/cas1-1"
# Prefix of exported object device paths for cache instance 1.
exported_obj_path_prefix = "/dev/cas1-"
# Id of the cache instance started by prepare().
cache_id = 1
||||
|
||||
# Lists of cache and core block stats that should have zero value after pure
# write workloads in a particular cache mode. Each list mixes cache-level stat
# names (with the "(s)" suffix) and per-core stat names; membership is tested
# with `key in zero_stats`, so duplicates are unnecessary.
write_wb_zero_stats = [
    "reads from core(s)",
    "writes to core(s)",
    "total to/from core(s)",
    # "reads from cache" is a stat name shared by cache- and core-level stats;
    # a single entry covers both (the original listed it twice).
    "reads from cache",
    "reads from exported object(s)",
    "reads from core",
    "writes to core",
    "total to/from core",
    "reads from exported object",
]
write_wt_zero_stats = [
    "reads from core(s)",
    "reads from cache",
    "reads from exported object(s)",
    "reads from core",
    "reads from exported object",
]
write_pt_zero_stats = [
    "reads from core(s)",
    "reads from cache",
    "writes to cache",
    "total to/from cache",
    "reads from exported object(s)",
    "reads from core",
    "reads from exported object",
]
write_wa_zero_stats = [
    "reads from core(s)",
    "reads from cache",
    "writes to cache",
    "total to/from cache",
    "reads from exported object(s)",
    "reads from core",
    "reads from exported object",
]
write_wo_zero_stats = [
    "reads from core(s)",
    "writes to core(s)",
    "total to/from core(s)",
    "reads from cache",
    "reads from exported object(s)",
    "reads from core",
    "writes to core",
    "total to/from core",
    "reads from exported object",
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "cache_mode,zero_stats",
    [
        (CacheMode.WB, write_wb_zero_stats),
        (CacheMode.WT, write_wt_zero_stats),
        (CacheMode.PT, write_pt_zero_stats),
        (CacheMode.WA, write_wa_zero_stats),
        (CacheMode.WO, write_wo_zero_stats),
    ],
)
@pytest.mark.parametrize(
    "prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_block_stats_write(prepare_and_cleanup, cache_mode, zero_stats):
    """Perform read and write operations to cache instance in different cache modes
    and check if block stats values are correct"""
    cache, cores = prepare(cache_mode)
    iterations = 10
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    # Start from a clean slate: flush dirty data and zero all counters.
    flush(cache)

    # Check stats for cache after performing write operation
    for core in cores:
        dd_seek = 0
        # Each dd run writes dd_count blocks of dd_size directly (O_DIRECT)
        # to the exported object, bypassing the page cache.
        dd = (
            Dd()
            .input("/dev/zero")
            .output(f"{core.system_path}")
            .count(dd_count)
            .block_size(dd_size)
            .oflag("direct")
        )
        # Since every IO has the same size, every stat should be increased with the same step.
        # So there is no need to keep value of every stat in separate variable
        # cache_stat is the contribution already accumulated by previously
        # tested cores (core_id is assumed to be 1-based here).
        cache_stat = (
            (dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
        )
        for i in range(iterations):
            # NOTE(review): dd's seek operand counts output blocks of
            # dd_size each, so each iteration writes a fresh region.
            dd.seek(dd_seek)
            dd.run()
            cache_stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
            core_stats = core.get_core_statistics(stat_filter=[StatsFilter.blk])

            # Check cache stats
            # Expected per-core value after i+1 runs, in 4 KiB blocks.
            assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
            for key, value in cache_stats.items():
                if key in zero_stats:
                    assert value.get_value(Unit.Blocks4096) == 0, (
                        f"{key} has invalid value\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}"
                    )
                else:
                    # For each next tested core, cache stats has to include
                    # sum of each previous core
                    assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}"
                    )

            # Check single core stats
            for key, value in core_stats.items():
                if key in zero_stats:
                    assert value.get_value(Unit.Blocks4096) == 0, (
                        f"{key} has invalid value of \n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}"
                    )
                else:
                    # Per-core stats only count this core's own IO, so no
                    # cache_stat offset is added here.
                    assert assumed_value == value.get_value(Unit.Blocks4096), (
                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
                    )
            # Advance so the next iteration writes a non-overlapping region.
            dd_seek += dd_count
|
||||
|
||||
|
||||
# lists of cache and core block stats, that should have zero value for particular cache modes
# (read workloads; names with the "(s)" suffix are cache-level stats, the rest
# are per-core stats)

# Stats expected to stay zero after pure reads in Write-Back mode.
read_wb_zero_stats = [
    "writes to core(s)",
    "reads from cache",
    "writes to exported object(s)",
    "writes to core",
    "writes to exported object",
]
# Stats expected to stay zero after pure reads in Write-Through mode.
read_wt_zero_stats = [
    "writes to core(s)",
    "reads from cache",
    "writes to exported object(s)",
    "writes to core",
    "writes to exported object",
]
# Stats expected to stay zero after pure reads in Pass-Through mode
# (no cache traffic at all is expected).
read_pt_zero_stats = [
    "writes to core(s)",
    "reads from cache",
    "writes to cache",
    "total to/from cache",
    "writes to exported object(s)",
    "writes to core",
    "writes to exported object",
]
# Stats expected to stay zero after pure reads in Write-Around mode.
read_wa_zero_stats = [
    "writes to core(s)",
    "reads from cache",
    "writes to exported object(s)",
    "writes to core",
    "writes to exported object",
]
# Stats expected to stay zero after pure reads in Write-Only mode
# (reads are not cached, so no cache traffic is expected).
read_wo_zero_stats = [
    "writes to core(s)",
    "reads from cache",
    "writes to cache",
    "total to/from cache",
    "writes to exported object(s)",
    "writes to core",
    "writes to exported object",
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "cache_mode,zero_stats",
    [
        (CacheMode.WB, read_wb_zero_stats),
        (CacheMode.WT, read_wt_zero_stats),
        (CacheMode.PT, read_pt_zero_stats),
        (CacheMode.WA, read_wa_zero_stats),
        (CacheMode.WO, read_wo_zero_stats),
    ],
)
@pytest.mark.parametrize(
    "prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_block_stats_read(prepare_and_cleanup, cache_mode, zero_stats):
    """Perform read and write operations to cache instance in different cache modes
    and check if block stats values are correct"""
    cache, cores = prepare(cache_mode)
    iterations = 10
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    # Start from a clean slate: flush dirty data and zero all counters.
    flush(cache)

    # Check stats for cache after performing read operation
    for core in cores:
        dd_skip = 0
        # Each dd run reads dd_count blocks of dd_size directly (O_DIRECT)
        # from the exported object, discarding the data.
        dd = (
            Dd()
            .output("/dev/zero")
            .input(f"{core.system_path}")
            .count(dd_count)
            .block_size(dd_size)
            .iflag("direct")
        )
        # Since every IO has the same size, every stat should be increased with the same step.
        # So there is no need to keep value of every stat in separate variable
        # cache_stat is the contribution already accumulated by previously
        # tested cores (core_id is assumed to be 1-based here).
        cache_stat = (
            (dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
        )
        for i in range(iterations):
            # NOTE(review): dd's skip operand counts input blocks of
            # dd_size each, so each iteration reads a fresh region.
            dd.skip(dd_skip)
            dd.run()
            cache_stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
            core_stats = core.get_core_statistics(stat_filter=[StatsFilter.blk])

            # Check cache stats
            # Expected per-core value after i+1 runs, in 4 KiB blocks.
            assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
            for key, value in cache_stats.items():
                if key in zero_stats:
                    assert value.get_value(Unit.Blocks4096) == 0, (
                        f"{key} has invalid value\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}"
                    )
                else:
                    # For each next tested core, cache stats has to include
                    # sum of each previous core
                    assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}. Cache mode: {cache_mode}"
                    )

            # Check single core stats
            for key, value in core_stats.items():
                if key in zero_stats:
                    assert value.get_value(Unit.Blocks4096) == 0, (
                        f"{key} has invalid value\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}. Cache mode: {cache_mode}"
                    )
                else:
                    # Per-core stats only count this core's own IO, so no
                    # cache_stat offset is added here.
                    assert assumed_value == value.get_value(Unit.Blocks4096), (
                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count} dd skip {dd_skip}. Cache mode: {cache_mode}"
                    )

            # Advance so the next iteration reads a non-overlapping region.
            dd_skip += dd_count
|
||||
|
||||
|
||||
def flush(cache):
    """Flush the cache, reset its counters and verify all block stats are zero.

    Used to give every test iteration a clean statistical baseline.
    """
    cache.flush_cache()
    cache.reset_counters()
    stats = cache.get_cache_statistics(stat_filter=[StatsFilter.blk])
    for key, value in stats.items():
        # A diagnostic message so a failure identifies the offending stat.
        assert value.get_value(Unit.Blocks4096) == 0, (
            f"{key} is not zero after flushing cache and resetting counters"
        )
|
||||
|
||||
|
||||
def prepare(cache_mode: CacheMode):
    """Start a cache in the given mode with three cores on separate partitions.

    Returns a tuple (cache, [core_1, core_2, core_3]).
    """
    base_prepare()
    ioclass_config.remove_ioclass_config()
    cache_device = next(
        disk
        for disk in TestRun.dut.disks
        if disk.disk_type in [DiskType.optane, DiskType.nand]
    )
    # Core device must be of a "slower" disk type than the cache device.
    core_device = next(
        disk
        for disk in TestRun.dut.disks
        if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
    )

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions(
        [Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
    )

    cache_device = cache_device.partitions[0]
    core_device_1 = core_device.partitions[0]
    core_device_2 = core_device.partitions[1]
    core_device_3 = core_device.partitions[2]

    # Prevent udev from auto-attaching devices while the test manages them.
    Udev.disable()

    TestRun.LOGGER.info("Starting cache")
    cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
    TestRun.LOGGER.info("Setting cleaning policy to NOP")
    # NOP cleaning keeps background flushing from polluting the block stats.
    # Use the cache object API (consistent with the ioclass stats tests)
    # instead of going through the module-level cache_id constant.
    cache.set_cleaning_policy(CleaningPolicy.nop)
    TestRun.LOGGER.info("Adding core devices")
    core_1 = cache.add_core(core_dev=core_device_1)
    core_2 = cache.add_core(core_dev=core_device_2)
    core_3 = cache.add_core(core_dev=core_device_3)

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception("Failed to create mountpoint")

    return cache, [core_1, core_2, core_3]
|
181
test/functional/tests/stats/test_ioclass_stats.py
Normal file
181
test/functional/tests/stats/test_ioclass_stats.py
Normal file
@@ -0,0 +1,181 @@
|
||||
#
|
||||
# Copyright(c) 2019 Intel Corporation
|
||||
# SPDX-License-Identifier: BSD-3-Clause-Clear
|
||||
#
|
||||
|
||||
|
||||
import pytest
|
||||
from api.cas.casadm import StatsFilter
|
||||
from api.cas import casadm
|
||||
from api.cas import ioclass_config
|
||||
from api.cas import casadm_parser
|
||||
from api.cas.cache_config import CleaningPolicy
|
||||
from tests.conftest import base_prepare
|
||||
from core.test_run import TestRun
|
||||
from storage_devices.disk import DiskType
|
||||
from test_tools.disk_utils import Filesystem
|
||||
from test_utils.size import Size, Unit
|
||||
from test_utils.os_utils import sync, Udev
|
||||
from test_utils.filesystem.file import File
|
||||
|
||||
# Path where the generated io class configuration file is written.
ioclass_config_path = "/tmp/opencas_ioclass.conf"
# Mountpoint used for the exported object of cache 1, core 1.
mountpoint = "/tmp/cas1-1"
# Id of the cache instance started by prepare().
cache_id = 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_stats_set(prepare_and_cleanup):
    """Try to retrieve stats for all set ioclasses"""
    prepare()
    min_ioclass_id = 1
    max_ioclass_id = 11

    ioclass_config.create_ioclass_config(
        add_default_rule=True, ioclass_config_path=ioclass_config_path
    )

    TestRun.LOGGER.info("Preparing ioclass config file")
    for i in range(min_ioclass_id, max_ioclass_id):
        ioclass_config.add_ioclass(
            ioclass_id=(i + 10),
            eviction_priority=22,
            allocation=True,
            rule=f"file_size:le:{4096*i}&done",
            ioclass_config_path=ioclass_config_path,
        )
    casadm.load_io_classes(cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info("Checking statistics for unconfigured io classes")
    # Configured io class ids are the default class (0) plus the ids loaded
    # above, i.e. min_ioclass_id + 10 .. max_ioclass_id + 9. Retrieving
    # statistics for any other id should fail.
    # (Original condition used `or` instead of `and`, did not account for the
    # +10 id shift, and passed io_class_id=True instead of the actual id.)
    loaded_ids = range(min_ioclass_id + 10, max_ioclass_id + 10)
    for i in range(32):
        if i != 0 and i not in loaded_ids:
            with pytest.raises(Exception):
                assert casadm_parser.get_statistics(
                    cache_id=cache_id, io_class_id=i, filter=[StatsFilter.conf]
                )
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "prepare_and_cleanup", [{"core_count": 1, "cache_count": 1}], indirect=True
)
def test_ioclass_stats_sum(prepare_and_cleanup):
    """Check if stats for all set ioclasses sum up to cache stats"""
    cache, core = prepare()
    min_ioclass_id = 1
    max_ioclass_id = 11
    # Base file size step: 4 KiB expressed in bytes.
    file_size_base = Unit.KibiByte.value * 4

    TestRun.LOGGER.info("Preparing ioclass config file")
    ioclass_config.create_ioclass_config(
        add_default_rule=True, ioclass_config_path=ioclass_config_path
    )
    # One io class per file-size bucket: class i matches files <= 4 KiB * i.
    for i in range(min_ioclass_id, max_ioclass_id):
        ioclass_config.add_ioclass(
            ioclass_id=i,
            eviction_priority=22,
            allocation=True,
            rule=f"file_size:le:{file_size_base*i}&done",
            ioclass_config_path=ioclass_config_path,
        )
    cache.load_io_class(ioclass_config_path)

    TestRun.LOGGER.info("Generating files with particular sizes")
    files_list = []
    for i in range(min_ioclass_id, max_ioclass_id):
        path = f"/tmp/test_file_{file_size_base*i}"
        File.create_file(path)
        f = File(path)
        # Pad the file so it lands exactly in io class i's size bucket.
        f.padding(Size(file_size_base * i, Unit.Byte))
        files_list.append(f)

    core.create_filesystem(Filesystem.ext4)

    cache.reset_counters()

    # Name of stats, which should not be compared
    not_compare_stats = ["clean", "occupancy"]
    ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
    # Append default ioclass id
    ioclass_id_list.append(0)
    TestRun.LOGGER.info("Copying files to mounted core and stats check")
    for f in files_list:
        # To prevent stats pollution by filesystem requests, umount core device
        # after file is copied
        core.mount(mountpoint)
        f.copy(mountpoint)
        sync()
        core.unmount()
        sync()

        cache_stats = cache.get_cache_statistics(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
        )
        # Subtract every io class' stats from the cache-wide totals; the
        # remainder must be zero if per-class stats sum up to the total.
        for ioclass_id in ioclass_id_list:
            ioclass_stats = cache.get_cache_statistics(
                stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk],
                io_class_id=ioclass_id,
            )
            for stat_name in cache_stats:
                if stat_name in not_compare_stats:
                    continue
                cache_stats[stat_name] -= ioclass_stats[stat_name]

        for stat_name in cache_stats:
            if stat_name in not_compare_stats:
                continue
            # Stats may be Size objects or plain numbers; normalize for compare.
            stat_val = (
                cache_stats[stat_name].get_value()
                if isinstance(cache_stats[stat_name], Size)
                else cache_stats[stat_name]
            )
            assert stat_val == 0, f"{stat_name} diverged!\n"

    # Test cleanup
    for f in files_list:
        f.remove()
|
||||
|
||||
|
||||
def flush_cache(cache_id):
    """Flush the given cache, reset its counters and verify block stats are zero."""
    casadm.flush(cache_id=cache_id)
    sync()
    casadm.reset_counters(cache_id=cache_id)
    stats = casadm_parser.get_statistics(cache_id=cache_id, filter=[StatsFilter.blk])
    for key, value in stats.items():
        # A diagnostic message so a failure identifies the offending stat.
        assert value.get_value(Unit.Blocks4096) == 0, (
            f"{key} is not zero after flushing cache and resetting counters"
        )
|
||||
|
||||
|
||||
def prepare():
    """Start a cache with one core on a separate device partition.

    Returns a tuple (cache, core).
    """
    base_prepare()
    ioclass_config.remove_ioclass_config()
    cache_device = next(
        disk
        for disk in TestRun.dut.disks
        if disk.disk_type in [DiskType.optane, DiskType.nand]
    )
    # Core device must be of a "slower" disk type than the cache device.
    core_device = next(
        disk
        for disk in TestRun.dut.disks
        if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
    )

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(2, Unit.GibiByte)])

    cache_device = cache_device.partitions[0]
    core_device_1 = core_device.partitions[0]

    # Prevent udev from auto-attaching devices while the test manages them.
    Udev.disable()

    # Fixed typo in the log message ("Staring" -> "Starting").
    TestRun.LOGGER.info("Starting cache")
    cache = casadm.start_cache(cache_device, force=True)
    TestRun.LOGGER.info("Setting cleaning policy to NOP")
    # NOP cleaning keeps background flushing from polluting the stats.
    cache.set_cleaning_policy(CleaningPolicy.nop)
    TestRun.LOGGER.info("Adding core devices")
    core = cache.add_core(core_dev=core_device_1)

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception("Failed to create mountpoint")

    return cache, core
|
Reference in New Issue
Block a user