Merge pull request #1547 from Deixx/block-stats

Update for stats tests
Robert Baldyga 2024-10-03 19:31:59 +02:00 committed by GitHub
commit 188e42b752
4 changed files with 344 additions and 468 deletions

View File

@@ -5,9 +5,8 @@
#
import csv
from enum import Enum
from datetime import timedelta
from enum import Enum
from typing import List
from api.cas import casadm
@@ -43,34 +42,10 @@ class CacheStats:
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
stats_dict = get_stats_dict(filter=filter, cache_id=cache_id)
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
StatsFilter.err,
]
else:
filters = filter
csv_stats = casadm.print_statistics(
cache_id=cache_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache:
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
for section in _get_section_filters(filter):
match section:
case StatsFilter.conf:
self.config_stats = CacheConfigStats(stats_dict)
case StatsFilter.usage:
@@ -102,30 +77,10 @@ class CoreStats:
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
stats_dict = get_stats_dict(filter=filter, cache_id=cache_id, core_id=core_id)
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
StatsFilter.err,
]
else:
filters = filter
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
for section in _get_section_filters(filter):
match section:
case StatsFilter.conf:
self.config_stats = CoreConfigStats(stats_dict)
case StatsFilter.usage:
@@ -158,34 +113,12 @@ class CoreIoClassStats:
filter: List[StatsFilter] = None,
percentage_val: bool = False,
):
if filter is None:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
]
else:
filters = filter
stats_dict = get_stats_dict(
filter=filter, cache_id=cache_id, core_id=core_id, io_class_id=io_class_id
)
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
io_class_id=io_class_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache:
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
for filter in filters:
match filter:
for section in _get_section_filters(filter):
match section:
case StatsFilter.conf:
self.config_stats = IoClassConfigStats(stats_dict)
case StatsFilter.usage:
@@ -243,7 +176,7 @@ class CacheConfigStats:
self.metadata_memory_footprint = parse_value(
value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type=UnitType.mebibyte
)
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type=UnitType.seconds)
self.status = stats_dict["Status"]
def __str__(self):
@@ -399,21 +332,6 @@ class UsageStats:
def __ne__(self, other):
return not self == other
def __add__(self, other):
return UsageStats(
self.occupancy + other.occupancy,
self.free + other.free,
self.clean + other.clean,
self.dirty + other.dirty,
)
def __iadd__(self, other):
self.occupancy += other.occupancy
self.free += other.free
self.clean += other.clean
self.dirty += other.dirty
return self
class IoClassUsageStats:
def __init__(self, stats_dict, percentage_val):
@@ -445,43 +363,6 @@ class IoClassUsageStats:
def __ne__(self, other):
return not self == other
def __add__(self, other):
return UsageStats(
self.occupancy + other.occupancy,
self.clean + other.clean,
self.dirty + other.dirty,
)
def __iadd__(self, other):
self.occupancy += other.occupancy
self.clean += other.clean
self.dirty += other.dirty
return self
class InactiveUsageStats:
def __init__(self, inactive_occupancy, inactive_clean, inactive_dirty):
self.inactive_occupancy = inactive_occupancy
self.inactive_clean = inactive_clean
self.inactive_dirty = inactive_dirty
def __str__(self):
return (
f"Inactive usage stats:\n"
f"Inactive occupancy: {self.inactive_occupancy}\n"
f"Inactive clean: {self.inactive_clean}\n"
f"Inactive dirty: {self.inactive_dirty}\n"
)
def __eq__(self, other):
if not other:
return False
return (
self.inactive_occupancy == other.inactive_occupancy
and self.inactive_clean == other.inactive_clean
and self.inactive_dirty == other.inactive_dirty
)
class RequestStats:
def __init__(self, stats_dict, percentage_val):
@@ -655,6 +536,12 @@ class BasicStatsChunkError:
)
def get_stat_value(stat_dict: dict, key: str):
idx = key.index("[")
unit = UnitType(key[idx:])
return parse_value(stat_dict[key], unit)
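# A hedged usage sketch for get_stat_value(): the stats key carries its
# unit token in square brackets, which is sliced out, mapped to a UnitType
# member, and handed to parse_value() to convert the raw CSV string.
# (The raw value below is illustrative, not taken from real casadm output.)
#
#   stats = {"Dirty for [s]": "3600"}
#   value = get_stat_value(stats, "Dirty for [s]")
#   # UnitType("[s]") -> UnitType.seconds, so the value is parsed as a duration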
def parse_value(value: str, unit_type: UnitType) -> int | float | Size | timedelta | str:
match unit_type:
case UnitType.requests:
@@ -674,3 +561,42 @@ def parse_value(value: str, unit_type: UnitType) -> int | float | Size | timedelta | str:
case _:
stat_unit = value
return stat_unit
def _get_section_filters(filter: List[StatsFilter], io_class_stats: bool = False):
if filter is None or StatsFilter.all in filter:
filters = [
StatsFilter.conf,
StatsFilter.usage,
StatsFilter.req,
StatsFilter.blk,
StatsFilter.err,
]
else:
filters = filter
if io_class_stats and StatsFilter.err in filters:
filters.remove(StatsFilter.err)
return filters
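# A hedged sketch of the normalization above (enum members assumed from
# api.cas.casadm.StatsFilter):
#
#   _get_section_filters(None)
#   # -> [conf, usage, req, blk, err]  (same for a list containing StatsFilter.all)
#
#   _get_section_filters([StatsFilter.blk, StatsFilter.err], io_class_stats=True)
#   # -> [StatsFilter.blk]  (casadm prints no error section for io classes)
#
# Note that in the explicit-filter case the helper mutates the caller's
# list when removing StatsFilter.err, since filters is not copied first.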
def get_stats_dict(
filter: List[StatsFilter],
cache_id: int,
core_id: int = None,
io_class_id: int = None
):
csv_stats = casadm.print_statistics(
cache_id=cache_id,
core_id=core_id,
io_class_id=io_class_id,
filter=filter,
output_format=casadm.OutputFormat.csv,
).stdout.splitlines()
stat_keys, stat_values = csv.reader(csv_stats)
# Unify names in block stats for core and cache to make it easier
# to compare cache vs core stats using a unified key
# cache stats: Reads from core(s)
# core stats: Reads from core
stat_keys = [x.replace("(s)", "") for x in stat_keys]
stats_dict = dict(zip(stat_keys, stat_values))
return stats_dict
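
The "(s)" unification is worth a minimal sketch (the keys below are illustrative examples of casadm CSV headers, assumed rather than taken from this diff):

csv_keys = ["Reads from core(s) [4KiB Blocks]", "Writes to cache [4KiB Blocks]"]
unified = [key.replace("(s)", "") for key in csv_keys]
# -> ["Reads from core [4KiB Blocks]", "Writes to cache [4KiB Blocks]"]
# A cache-level key now matches its per-core counterpart, so the same
# key can index both dictionaries when comparing cache and core stats.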

View File

@@ -1,322 +1,261 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
import json
import pytest
from api.cas import casadm
from api.cas import ioclass_config
from api.cas.cache_config import CacheMode, CleaningPolicy
from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait
from api.cas.casadm import StatsFilter
from api.cas.statistics import get_stats_dict, get_stat_value, OperationType
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.dd import Dd
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
exported_obj_path_prefix = "/dev/cas1-"
cache_id = 1
# Lists of cache and core block stats that should have zero values for particular cache modes
write_wb_zero_stats = [
"reads from core(s)",
"writes to core(s)",
"total to/from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"writes to core",
"total to/from core",
"reads from cache",
"reads from exported object",
]
write_wt_zero_stats = [
"reads from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_pt_zero_stats = [
"reads from core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_wa_zero_stats = [
"reads from core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"reads from exported object(s)",
"reads from core",
"reads from exported object",
]
write_wo_zero_stats = [
"reads from core(s)",
"writes to core(s)",
"total to/from core(s)",
"reads from cache",
"reads from exported object(s)",
"reads from core",
"writes to core",
"total to/from core",
"reads from exported object",
]
iterations = 10
dd_block_size = Size(1, Unit.Blocks4096)
dd_count = 10
cores_no = 3
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize(
"cache_mode,zero_stats",
[
(CacheMode.WB, write_wb_zero_stats),
(CacheMode.WT, write_wt_zero_stats),
(CacheMode.PT, write_pt_zero_stats),
(CacheMode.WA, write_wa_zero_stats),
(CacheMode.WO, write_wo_zero_stats),
],
)
def test_block_stats_write(cache_mode, zero_stats):
"""Perform read and write operations to cache instance in different cache modes
and check if block stats values are correct"""
cache, cores = prepare(cache_mode)
iterations = 10
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
@pytest.mark.parametrize("cache_mode", CacheMode)
def test_block_stats_write_miss(cache_mode: CacheMode):
"""
title: Block statistics after write miss operations
description: |
Perform write miss operations to the cached volume and check if block stats values are correct
for the configured cache mode.
pass_criteria:
- Correct block stats values
"""
flush(cache)
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_no)
cache_device = cache_device.partitions[0]
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start cache and set NOP cleaning policy"):
cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
cache.set_cleaning_policy(CleaningPolicy.nop)
with TestRun.step("Add core devices"):
cores = [cache.add_core(part) for part in core_device.partitions]
with TestRun.step("Reset cache stats"):
cache.reset_counters()
with TestRun.step("Write data in parts to exported objects and verify block statistics "
"after each part is done"):
expected_zero_stats = get_expected_zero_stats(cache_mode, OperationType.write)
# Check stats for cache after performing write operation
for core in cores:
dd_seek = 0
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.block_size(dd_block_size)
.oflag("direct")
)
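# The builder above corresponds roughly to a shell invocation like
#   dd if=/dev/zero of=<exported object> bs=4096 count=10 oflag=direct seek=<blocks>
# (a sketch: the output path and seek offset are set per core and per
# iteration in the loop below)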
# Since every IO has the same size, every stat should increase by the same step,
# so there is no need to keep each stat's value in a separate variable
cache_stat = (
(dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
)
for i in range(iterations):
core_stat_expected = dd_block_size * dd_count * (i + 1)
core_stat_expected.set_unit(Unit.Blocks4096)
dd.seek(dd_seek)
dd.run()
cache_stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
core_stats = core.get_statistics_flat(stat_filter=[StatsFilter.blk])
for j, core in enumerate(cores):
# expect previous iterations + already written data in this iteration
cache_stat_expected = dd_block_size * dd_count * (i * cores_no + j + 1)
cache_stat_expected.set_unit(Unit.Blocks4096)
dd.output(core.path)
dd.run()
cache_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=cache.cache_id)
core_stats = get_stats_dict(
filter=[StatsFilter.blk], cache_id=cache.cache_id, core_id=core.core_id
)
# Check cache stats
assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
for key, value in cache_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
# For each subsequently tested core, cache stats have to include
# the sum of all previous cores' stats
assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
# Check cache stats after write operation
fail = False
for key, value in cache_stats.items():
if key.endswith('[%]'):
continue
stat = get_stat_value(cache_stats, key)
if any(key.startswith(s) for s in expected_zero_stats):
if stat != Size.zero():
TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
fail = True
elif stat != cache_stat_expected:
TestRun.LOGGER.error(
f"{key} has invalid value of {stat}\n"
f"expected: {cache_stat_expected}"
)
fail = True
if fail:
TestRun.fail(
"Incorrect cache block stats\n"
f"iteration {i}, core id: {core.core_id}\n"
f"cache_stats:\n{json.dumps(cache_stats, indent=0)}"
)
# Check single core stats
for key, value in core_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value of \n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
# Check per-core stats
for key, value in core_stats.items():
if key.endswith('[%]'):
continue
stat = get_stat_value(core_stats, key)
if any(key.startswith(s) for s in expected_zero_stats):
if stat != Size.zero():
TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
fail = True
elif stat != core_stat_expected:
TestRun.LOGGER.error(
f"{key} has invalid value of {stat}\n"
f"expected: {core_stat_expected}"
)
if fail:
TestRun.fail(
"Incorrect core block stats\n"
f"iteration {i}, core id: {core.core_id}\n"
f"core_stats:\n{json.dumps(core_stats, indent=0)}"
)
else:
assert assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
)
dd_seek += dd_count
# Lists of cache and core block stats that should have zero values for particular cache modes
read_wb_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wt_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_pt_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wa_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
read_wo_zero_stats = [
"writes to core(s)",
"reads from cache",
"writes to cache",
"total to/from cache",
"writes to exported object(s)",
"writes to core",
"writes to exported object",
]
dd_seek += dd_count
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize(
"cache_mode,zero_stats",
[
(CacheMode.WB, read_wb_zero_stats),
(CacheMode.WT, read_wt_zero_stats),
(CacheMode.PT, read_pt_zero_stats),
(CacheMode.WA, read_wa_zero_stats),
(CacheMode.WO, read_wo_zero_stats),
],
)
def test_block_stats_read(cache_mode, zero_stats):
"""Perform read and write operations to cache instance in different cache modes
and check if block stats values are correct"""
cache, cores = prepare(cache_mode)
iterations = 10
dd_size = Size(4, Unit.KibiByte)
dd_count = 10
@pytest.mark.parametrize("cache_mode", CacheMode)
def test_block_stats_read_miss(cache_mode: CacheMode):
"""
title: Block statistics after read miss operations
description: |
Perform read miss operations from the cached volume and check if block stats values are correct
for the configured cache mode.
pass_criteria:
- Correct block stats values
"""
flush(cache)
with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_no)
cache_device = cache_device.partitions[0]
with TestRun.step("Disable udev"):
Udev.disable()
with TestRun.step("Start cache and set NOP cleaning policy"):
cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
cache.set_cleaning_policy(CleaningPolicy.nop)
with TestRun.step("Add core devices"):
cores = [cache.add_core(part) for part in core_device.partitions]
with TestRun.step("Reset cache stats"):
cache.reset_counters()
with TestRun.step("Read data in parts from exported objects and verify block statistics "
"after each part is done"):
expected_zero_stats = get_expected_zero_stats(cache_mode, OperationType.read)
# Check stats for cache after performing read operation
for core in cores:
dd_skip = 0
dd = (
Dd()
.output("/dev/zero")
.input(f"{core.path}")
.output("/dev/null")
.count(dd_count)
.block_size(dd_size)
.block_size(dd_block_size)
.iflag("direct")
)
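# Roughly equivalent to a shell invocation like
#   dd if=<exported object> of=/dev/null bs=4096 count=10 iflag=direct skip=<blocks>
# (a sketch: the input path and skip offset are set per core and per
# iteration in the loop below)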
# Since every IO has the same size, every stat should increase by the same step,
# so there is no need to keep each stat's value in a separate variable
cache_stat = (
(dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
)
for i in range(iterations):
core_stat_expected = dd_block_size * dd_count * (i + 1)
core_stat_expected.set_unit(Unit.Blocks4096)
dd.skip(dd_skip)
dd.run()
cache_stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
core_stats = core.get_statistics_flat(stat_filter=[StatsFilter.blk])
for j, core in enumerate(cores):
# expect previous iterations + already read data in this iteration
cache_stat_expected = dd_block_size * dd_count * (i * cores_no + j + 1)
cache_stat_expected.set_unit(Unit.Blocks4096)
dd.input(core.path)
dd.run()
cache_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=cache.cache_id)
core_stats = get_stats_dict(
filter=[StatsFilter.blk], cache_id=cache.cache_id, core_id=core.core_id
)
# Check cache stats
assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
for key, value in cache_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}, cache_stat {cache_stat}"
)
else:
# For each subsequently tested core, cache stats have to include
# the sum of all previous cores' stats
assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}. Cache mode: {cache_mode}"
# Check cache stats after read operation
fail = False
for key, value in cache_stats.items():
if key.endswith('[%]'):
continue
stat = get_stat_value(cache_stats, key)
if any(key.startswith(s) for s in expected_zero_stats):
if stat != Size.zero():
TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
fail = True
elif stat != cache_stat_expected:
TestRun.LOGGER.error(
f"{key} has invalid value of {stat}\n"
f"expected: {cache_stat_expected}"
)
fail = True
if fail:
TestRun.fail(
"Incorrect cache block stats\n"
f"iteration {i}, core id: {core.core_id}\n"
f"cache_stats:\n{json.dumps(cache_stats, indent=0)}"
)
# Check single core stats
for key, value in core_stats.items():
if key in zero_stats:
assert value.get_value(Unit.Blocks4096) == 0, (
f"{key} has invalid value\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count}. Cache mode: {cache_mode}"
# Check per-core stats
for key, value in core_stats.items():
if key.endswith('[%]'):
continue
stat = get_stat_value(core_stats, key)
if any(key.startswith(s) for s in expected_zero_stats):
if stat != Size.zero():
TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
fail = True
elif stat != core_stat_expected:
TestRun.LOGGER.error(
f"{key} has invalid value of {stat}\n"
f"expected: {core_stat_expected}"
)
if fail:
TestRun.fail(
"Incorrect core block stats\n"
f"iteration {i}, core id: {core.core_id}\n"
f"core_stats:\n{json.dumps(core_stats, indent=0)}"
)
else:
assert assumed_value == value.get_value(Unit.Blocks4096), (
f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
f"core id {core.core_id}, i: {i}, dd_size: "
f"{dd_size.get_value(Unit.Blocks4096)}\n"
f"dd count: {dd_count} dd skip {dd_skip}. Cache mode: {cache_mode}"
)
dd_skip += dd_count
def flush(cache):
cache.flush_cache()
cache.reset_counters()
stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
for key, value in stats.items():
assert value.get_value(Unit.Blocks4096) == 0
def get_expected_zero_stats(cache_mode: CacheMode, direction: OperationType):
traits = CacheMode.get_traits(cache_mode)
stat_list = ["Reads from cache"]
if direction == OperationType.write:
stat_list.append("Reads from core")
stat_list.append("Reads from exported object")
if direction == OperationType.read or CacheModeTrait.LazyWrites in traits:
stat_list.append("Writes to core")
if direction == OperationType.read:
stat_list.append("Writes to exported object")
if ((direction == OperationType.read and CacheModeTrait.InsertRead not in traits)
or (direction == OperationType.write and CacheModeTrait.InsertWrite not in traits)):
stat_list.append("Writes to cache")
stat_list.append("Total to/from cache")
if direction == OperationType.write and CacheModeTrait.LazyWrites in traits:
stat_list.append("Total to/from core")
def prepare(cache_mode: CacheMode):
ioclass_config.remove_ioclass_config()
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions(
[Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
)
cache_device = cache_device.partitions[0]
core_device_1 = core_device.partitions[0]
core_device_2 = core_device.partitions[1]
core_device_3 = core_device.partitions[2]
Udev.disable()
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core devices")
core_1 = cache.add_core(core_dev=core_device_1)
core_2 = cache.add_core(core_dev=core_device_2)
core_3 = cache.add_core(core_dev=core_device_3)
output = TestRun.executor.run(f"mkdir -p {mountpoint}")
if output.exit_code != 0:
raise Exception(f"Failed to create mountpoint")
return cache, [core_1, core_2, core_3]
return stat_list
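
A hedged usage sketch of the helper above (the trait-to-mode mapping comes from api.cas.cache_config and is not shown in this diff):

zero_prefixes = get_expected_zero_stats(CacheMode.WB, OperationType.write)
# For a lazy-write mode such as WB this should include "Writes to core"
# and "Total to/from core", since dirty data is not written to the core
# device until cleaning or flush runs.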

View File

@@ -1,22 +1,23 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from time import sleep
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode, CleaningPolicy
from api.cas.casadm import StatsFilter
from api.cas.statistics import get_stats_dict, get_stat_value
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
from time import sleep
cache_size = Size(1, Unit.GibiByte)
core_size = Size(2, Unit.GibiByte)
@@ -33,7 +34,7 @@ def test_stat_max_cache():
Check CAS ability to display correct values in statistics
for 16 cache devices per cache mode.
pass_criteria:
- Core's statistics matches cache's statistics.
- Cores' statistics match cache's statistics.
"""
caches_per_cache_mode = 16
@@ -82,20 +83,21 @@ def test_stat_max_cache():
fio.run()
sleep(3)
with TestRun.step("Check if cache's statistics matches core's statistics"):
with TestRun.step("Check if cache's statistics match cores' statistics"):
for i in range(caches_count):
cache_stats = caches[i].get_statistics_flat(stat_filter=stat_filter)
cache_stats = get_stats_dict(filter=stat_filter, cache_id=caches[i].cache_id)
cores_stats = [
cores[i][j].get_statistics_flat(stat_filter=stat_filter)
for j in range(cores_per_cache)
get_stats_dict(
filter=stat_filter, cache_id=caches[i].cache_id, core_id=cores[i][j].core_id
) for j in range(cores_per_cache)
]
fail_message = f"For cache ID {caches[i].cache_id} "
fail_message = f"For cache ID {caches[i].cache_id} ({caches[i].get_cache_mode()}) "
stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message)
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
def test_stat_max_core(cache_mode):
"""
title: CAS statistics values for maximum core devices.
@@ -103,7 +105,7 @@ def test_stat_max_core(cache_mode):
Check CAS ability to display correct values in statistics
for 62 core devices.
pass_criteria:
- Core's statistics matches cache's statistics.
- Cores' statistics match cache's statistics.
"""
cores_per_cache = 62
@@ -132,11 +134,12 @@ def test_stat_max_core(cache_mode):
fio.run()
sleep(3)
with TestRun.step("Check if cache's statistics matches core's statistics"):
cache_stats = cache.get_statistics_flat(stat_filter=stat_filter)
with TestRun.step("Check if cache's statistics match cores' statistics"):
cache_stats = get_stats_dict(filter=stat_filter, cache_id=cache.cache_id)
cores_stats = [
cores[j].get_statistics_flat(stat_filter=stat_filter)
for j in range(cores_per_cache)
get_stats_dict(
filter=stat_filter, cache_id=cache.cache_id, core_id=cores[j].core_id
) for j in range(cores_per_cache)
]
fail_message = f"In {cache_mode} cache mode "
stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message)
@@ -156,20 +159,20 @@ def fio_prepare():
def stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message):
for cache_stat_name in cache_stats.keys():
if cache_stat_name.lower() != "free":
core_stat_name = cache_stat_name.replace("(s)", "")
core_stat_sum = 0
try:
cache_stats[cache_stat_name] = cache_stats[cache_stat_name].value
for j in range(cores_per_cache):
cores_stats[j][core_stat_name] = cores_stats[j][core_stat_name].value
except AttributeError:
pass
for stat_name in cache_stats.keys():
if stat_name.startswith("Free ") or stat_name.endswith("[%]"):
continue
core_stat_sum = 0
try:
cache_stats[stat_name] = get_stat_value(cache_stats, stat_name)
for j in range(cores_per_cache):
core_stat_sum += cores_stats[j][core_stat_name]
if core_stat_sum != cache_stats[cache_stat_name]:
TestRun.LOGGER.error(fail_message + (
f"sum of core's '{core_stat_name}' values is "
f"{core_stat_sum}, should equal cache value: "
f"{cache_stats[cache_stat_name]}\n"))
cores_stats[j][stat_name] = get_stat_value(cores_stats[j], stat_name)
except AttributeError:
pass
for j in range(cores_per_cache):
core_stat_sum += cores_stats[j][stat_name]
if core_stat_sum != cache_stats[stat_name]:
TestRun.LOGGER.error(fail_message + (
f"sum of cores' '{stat_name}' values is "
f"{core_stat_sum}, should equal cache value: "
f"{cache_stats[stat_name]}\n"))

View File

@@ -1,22 +1,23 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#
from time import sleep
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode, CacheModeTrait
from api.cas.casadm import StatsFilter
from api.cas.statistics import get_stats_dict, get_stat_value
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
from time import sleep
# One cache instance per every cache mode:
caches_count = len(CacheMode)
@@ -27,7 +28,7 @@ io_value = 1000
io_size = Size(io_value, Unit.Blocks4096)
# Error stats are not included in 'stat_filter' because all of them
# should equal 0 and can be checked in an easier, shorter way.
stat_filter = [StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
default_stat_filter = [StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -38,10 +39,10 @@ def test_stats_values():
description: |
Check if CAS displays proper usage, request, block and error statistics values
for core devices in every cache mode - at the start, after IO and after cache
reload. Also check if core's statistics match cache's statistics.
reload. Also check if cores' statistics match cache's statistics.
pass_criteria:
- Usage, request, block and error statistics have proper values.
- Core's statistics match cache's statistics.
- Cores' statistics match cache's statistics.
"""
with TestRun.step("Partition cache and core devices"):
@@ -68,7 +69,7 @@ def test_stats_values():
with TestRun.step("Check statistics values after IO"):
check_stats_after_io(caches, cores)
with TestRun.step("Check if cache's statistics match core's statistics"):
with TestRun.step("Check if cache's statistics match cores' statistics"):
check_stats_sum(caches, cores)
with TestRun.step("Stop and load caches back"):
@@ -96,7 +97,7 @@ def cache_prepare(cache_dev, core_dev):
caches.append(
casadm.start_cache(cache_dev.partitions[i], cache_mode, force=True)
)
cores = [[] for i in range(caches_count)]
cores = [[] for _ in range(caches_count)]
for i in range(caches_count):
for j in range(cores_per_cache):
core_partition_number = i * cores_per_cache + j
@@ -126,34 +127,41 @@ def fio_prepare():
return fio
def get_stats_flat(cores, cache=None, stat_filter=stat_filter):
if cache:
cache_stats = cache.get_statistics_flat(stat_filter=stat_filter)
def get_stats(stat_filter, cores, cache=None):
cores_stats = [
cores[j].get_statistics_flat(stat_filter=stat_filter)
for j in range(cores_per_cache)
get_stats_dict(
filter=stat_filter, cache_id=cores[j].cache_id, core_id=cores[j].core_id
) for j in range(cores_per_cache)
]
cores_stats_perc = [
cores[j].get_statistics_flat(stat_filter=stat_filter, percentage_val=True)
{k: get_stat_value(cores_stats[j], k) for k in cores_stats[j] if k.endswith("[%]")}
for j in range(cores_per_cache)
]
cores_stats_values = [
{k: get_stat_value(cores_stats[j], k) for k in cores_stats[j] if not k.endswith("[%]")}
for j in range(cores_per_cache)
]
if cache:
return cores_stats, cores_stats_perc, cache_stats
cache_stats = get_stats_dict(filter=stat_filter, cache_id=cache.cache_id)
cache_stats_values = {
k: get_stat_value(cache_stats, k) for k in cache_stats if not k.endswith("[%]")
}
return cores_stats_values, cores_stats_perc, cache_stats_values
else:
return cores_stats, cores_stats_perc
return cores_stats_values, cores_stats_perc
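# A hedged usage sketch (fixture names as used elsewhere in this file):
#
#   cores_vals, cores_perc = get_stats(stat_filter=default_stat_filter, cores=cores[i])
#   # cores_vals[j] holds parsed absolute values (keys without "[%]"),
#   # cores_perc[j] holds percentage values (keys ending with "[%]")
#
#   cores_vals, cores_perc, cache_vals = get_stats(
#       stat_filter=default_stat_filter, cores=cores[i], cache=caches[i]
#   )
#   # passing a cache additionally returns the parsed cache-level values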
def check_stats_initial(caches, cores):
for i in range(caches_count):
cores_stats, cores_stats_perc = get_stats_flat(cores[i])
cores_stats, cores_stats_perc = get_stats(stat_filter=default_stat_filter, cores=cores[i])
for j in range(cores_per_cache):
for stat_name, stat_value in cores_stats[j].items():
try:
stat_value = stat_value.value
except AttributeError:
pass
if stat_name.lower() == "free":
if stat_name.startswith("Free"):
if stat_value != caches[i].size.value:
TestRun.LOGGER.error(
f"For core device {cores[i][j].path} "
@@ -164,7 +172,7 @@ def check_stats_initial(caches, cores):
f"For core device {cores[i][j].path} value for "
f"'{stat_name}' is {stat_value}, should equal 0\n")
for stat_name, stat_value in cores_stats_perc[j].items():
if stat_name.lower() == "free":
if stat_name.startswith("Free"):
if stat_value != 100:
TestRun.LOGGER.error(
f"For core device {cores[i][j].path} percentage value "
@@ -179,15 +187,15 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
for i in range(caches_count):
cache_mode = caches[i].get_cache_mode()
cores_stats = [
cores[i][j].get_statistics(stat_filter=stat_filter)
cores[i][j].get_statistics(stat_filter=default_stat_filter)
for j in range(cores_per_cache)
]
cores_stats_perc = [
cores[i][j].get_statistics(stat_filter=stat_filter, percentage_val=True)
cores[i][j].get_statistics(stat_filter=default_stat_filter, percentage_val=True)
for j in range(cores_per_cache)
]
cores_error_stats, cores_error_stats_perc = get_stats_flat(
cores[i], stat_filter=[StatsFilter.err]
cores_error_stats, cores_error_stats_perc = get_stats(
stat_filter=[StatsFilter.err], cores=cores[i]
)
for j in range(cores_per_cache):
fail_message = (
@@ -196,7 +204,7 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
validate_usage_stats(
cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)
validate_error_stats(
cores_error_stats[j], cores_error_stats_perc[j], cache_mode, fail_message)
cores_error_stats[j], cores_error_stats_perc[j], fail_message)
else:
validate_usage_stats(
cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)
@@ -205,31 +213,31 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
validate_block_stats(
cores_stats[j], cores_stats_perc[j], cache_mode, fail_message)
validate_error_stats(
cores_error_stats[j], cores_error_stats_perc[j], cache_mode, fail_message)
cores_error_stats[j], cores_error_stats_perc[j], fail_message)
def check_stats_sum(caches, cores):
for i in range(caches_count):
cores_stats, cores_stats_perc, cache_stats = (
get_stats_flat(cores[i], cache=caches[i])
get_stats(stat_filter=default_stat_filter, cores=cores[i], cache=caches[i])
)
for cache_stat_name in cache_stats.keys():
if cache_stat_name.lower() != "free":
core_stat_name = cache_stat_name.replace("(s)", "")
core_stat_sum = 0
try:
cache_stats[cache_stat_name] = cache_stats[cache_stat_name].value
for j in range(cores_per_cache):
cores_stats[j][core_stat_name] = cores_stats[j][core_stat_name].value
except AttributeError:
pass
for stat_name in cache_stats.keys():
if stat_name.startswith("Free"):
continue
core_stat_sum = 0
try:
cache_stats[stat_name] = cache_stats[stat_name].value
for j in range(cores_per_cache):
core_stat_sum += cores_stats[j][core_stat_name]
if core_stat_sum != cache_stats[cache_stat_name]:
TestRun.LOGGER.error(
f"For cache ID {caches[i].cache_id} sum of core's "
f"'{core_stat_name}' values is {core_stat_sum}, "
f"should equal {cache_stats[cache_stat_name]}\n")
cores_stats[j][stat_name] = cores_stats[j][stat_name].value
except AttributeError:
pass
for j in range(cores_per_cache):
core_stat_sum += cores_stats[j][stat_name]
if core_stat_sum != cache_stats[stat_name]:
TestRun.LOGGER.error(
f"For cache ID {caches[i].cache_id} sum of cores' "
f"'{stat_name}' values is {core_stat_sum}, "
f"should equal {cache_stats[stat_name]}\n")
def validate_usage_stats(stats, stats_perc, cache, cache_mode, fail_message):
@@ -692,7 +700,7 @@ def validate_block_stats(stats, stats_perc, cache_mode, fail_message):
f"should equal 100\n")
def validate_error_stats(stats, stats_perc, cache_mode, fail_message):
def validate_error_stats(stats, stats_perc, fail_message):
fail_message += f"in 'error' stats"
for stat_name, stat_value in stats.items():
if stat_value != 0: