Merge pull request #1547 from Deixx/block-stats

Update for stats tests
Robert Baldyga 2024-10-03 19:31:59 +02:00 committed by GitHub
commit 188e42b752
4 changed files with 344 additions and 468 deletions


@@ -5,9 +5,8 @@
 #
 import csv
-from enum import Enum
 from datetime import timedelta
+from enum import Enum
 from typing import List

 from api.cas import casadm
@@ -43,34 +42,10 @@ class CacheStats:
         filter: List[StatsFilter] = None,
         percentage_val: bool = False,
     ):
-        if filter is None:
-            filters = [
-                StatsFilter.conf,
-                StatsFilter.usage,
-                StatsFilter.req,
-                StatsFilter.blk,
-                StatsFilter.err,
-            ]
-        else:
-            filters = filter
-        csv_stats = casadm.print_statistics(
-            cache_id=cache_id,
-            filter=filter,
-            output_format=casadm.OutputFormat.csv,
-        ).stdout.splitlines()
-        stat_keys, stat_values = csv.reader(csv_stats)
-        # Unify names in block stats for core and cache:
-        # cache stats: Reads from core(s)
-        # core stats: Reads from core
-        stat_keys = [x.replace("(s)", "") for x in stat_keys]
-        stats_dict = dict(zip(stat_keys, stat_values))
-        for filter in filters:
-            match filter:
+        stats_dict = get_stats_dict(filter=filter, cache_id=cache_id)
+        for section in _get_section_filters(filter):
+            match section:
                 case StatsFilter.conf:
                     self.config_stats = CacheConfigStats(stats_dict)
                 case StatsFilter.usage:
@@ -102,30 +77,10 @@ class CoreStats:
         filter: List[StatsFilter] = None,
         percentage_val: bool = False,
     ):
-        if filter is None:
-            filters = [
-                StatsFilter.conf,
-                StatsFilter.usage,
-                StatsFilter.req,
-                StatsFilter.blk,
-                StatsFilter.err,
-            ]
-        else:
-            filters = filter
-        csv_stats = casadm.print_statistics(
-            cache_id=cache_id,
-            core_id=core_id,
-            filter=filter,
-            output_format=casadm.OutputFormat.csv,
-        ).stdout.splitlines()
-        stat_keys, stat_values = csv.reader(csv_stats)
-        stats_dict = dict(zip(stat_keys, stat_values))
-        for filter in filters:
-            match filter:
+        stats_dict = get_stats_dict(filter=filter, cache_id=cache_id, core_id=core_id)
+        for section in _get_section_filters(filter):
+            match section:
                 case StatsFilter.conf:
                     self.config_stats = CoreConfigStats(stats_dict)
                 case StatsFilter.usage:
@@ -158,34 +113,12 @@ class CoreIoClassStats:
         filter: List[StatsFilter] = None,
         percentage_val: bool = False,
     ):
-        if filter is None:
-            filters = [
-                StatsFilter.conf,
-                StatsFilter.usage,
-                StatsFilter.req,
-                StatsFilter.blk,
-            ]
-        else:
-            filters = filter
-        csv_stats = casadm.print_statistics(
-            cache_id=cache_id,
-            core_id=core_id,
-            io_class_id=io_class_id,
-            filter=filter,
-            output_format=casadm.OutputFormat.csv,
-        ).stdout.splitlines()
-        stat_keys, stat_values = csv.reader(csv_stats)
-        # Unify names in block stats for core and cache:
-        # cache stats: Reads from core(s)
-        # core stats: Reads from core
-        stat_keys = [x.replace("(s)", "") for x in stat_keys]
-        stats_dict = dict(zip(stat_keys, stat_values))
-        for filter in filters:
-            match filter:
+        stats_dict = get_stats_dict(
+            filter=filter, cache_id=cache_id, core_id=core_id, io_class_id=io_class_id
+        )
+        for section in _get_section_filters(filter):
+            match section:
                 case StatsFilter.conf:
                     self.config_stats = IoClassConfigStats(stats_dict)
                 case StatsFilter.usage:
@@ -243,7 +176,7 @@ class CacheConfigStats:
         self.metadata_memory_footprint = parse_value(
             value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type=UnitType.mebibyte
         )
-        self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type="[s]")
+        self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type=UnitType.seconds)
         self.status = stats_dict["Status"]

     def __str__(self):
@@ -399,21 +332,6 @@ class UsageStats:
     def __ne__(self, other):
         return not self == other

-    def __add__(self, other):
-        return UsageStats(
-            self.occupancy + other.occupancy,
-            self.free + other.free,
-            self.clean + other.clean,
-            self.dirty + other.dirty,
-        )
-
-    def __iadd__(self, other):
-        self.occupancy += other.occupancy
-        self.free += other.free
-        self.clean += other.clean
-        self.dirty += other.dirty
-        return self


 class IoClassUsageStats:
     def __init__(self, stats_dict, percentage_val):
@@ -445,43 +363,6 @@ class IoClassUsageStats:
     def __ne__(self, other):
         return not self == other

-    def __add__(self, other):
-        return UsageStats(
-            self.occupancy + other.occupancy,
-            self.clean + other.clean,
-            self.dirty + other.dirty,
-        )
-
-    def __iadd__(self, other):
-        self.occupancy += other.occupancy
-        self.clean += other.clean
-        self.dirty += other.dirty
-        return self
-
-
-class InactiveUsageStats:
-    def __init__(self, inactive_occupancy, inactive_clean, inactive_dirty):
-        self.inactive_occupancy = inactive_occupancy
-        self.inactive_clean = inactive_clean
-        self.inactive_dirty = inactive_dirty
-
-    def __str__(self):
-        return (
-            f"Inactive usage stats:\n"
-            f"Inactive occupancy: {self.inactive_occupancy}\n"
-            f"Inactive clean: {self.inactive_clean}\n"
-            f"Inactive dirty: {self.inactive_dirty}\n"
-        )
-
-    def __eq__(self, other):
-        if not other:
-            return False
-        return (
-            self.inactive_occupancy == other.inactive_occupancy
-            and self.inactive_clean == other.inactive_clean
-            and self.inactive_dirty == other.inactive_dirty
-        )


 class RequestStats:
     def __init__(self, stats_dict, percentage_val):
@@ -655,6 +536,12 @@ class BasicStatsChunkError:
         )


+def get_stat_value(stat_dict: dict, key: str):
+    idx = key.index("[")
+    unit = UnitType(key[idx:])
+    return parse_value(stat_dict[key], unit)
+
+
 def parse_value(value: str, unit_type: UnitType) -> int | float | Size | timedelta | str:
     match unit_type:
         case UnitType.requests:
@@ -674,3 +561,42 @@ def parse_value(value: str, unit_type: UnitType) -> int | float | Size | timedelta | str:
         case _:
             stat_unit = value
     return stat_unit
+
+
+def _get_section_filters(filter: List[StatsFilter], io_class_stats: bool = False):
+    if filter is None or StatsFilter.all in filter:
+        filters = [
+            StatsFilter.conf,
+            StatsFilter.usage,
+            StatsFilter.req,
+            StatsFilter.blk,
+            StatsFilter.err,
+        ]
+    else:
+        filters = filter
+    if io_class_stats and StatsFilter.err in filters:
+        filters.remove(StatsFilter.err)
+    return filters
+
+
+def get_stats_dict(
+    filter: List[StatsFilter],
+    cache_id: int,
+    core_id: int = None,
+    io_class_id: int = None
+):
+    csv_stats = casadm.print_statistics(
+        cache_id=cache_id,
+        core_id=core_id,
+        io_class_id=io_class_id,
+        filter=filter,
+        output_format=casadm.OutputFormat.csv,
+    ).stdout.splitlines()
+    stat_keys, stat_values = csv.reader(csv_stats)
+    # Unify names in block stats for core and cache to make it easier to compare
+    # cache vs core stats using a unified key:
+    # cache stats: Reads from core(s)
+    # core stats: Reads from core
+    stat_keys = [x.replace("(s)", "") for x in stat_keys]
+    stats_dict = dict(zip(stat_keys, stat_values))
+    return stats_dict
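For orientation, here is a minimal usage sketch (not part of this commit) showing how the new helpers compose. The cache_id value is an assumed example and the printed keys depend on the casadm output:

```python
# Sketch only: dump parsed, non-percentage block statistics for an assumed cache_id=1.
from api.cas.casadm import StatsFilter
from api.cas.statistics import get_stats_dict, get_stat_value

blk_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=1)
for key in blk_stats:
    if key.endswith("[%]"):
        continue  # percentage rows are handled separately by the tests
    print(key, get_stat_value(blk_stats, key))
```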


@@ -1,322 +1,261 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

+import json
 import pytest

 from api.cas import casadm
-from api.cas import ioclass_config
-from api.cas.cache_config import CacheMode, CleaningPolicy
+from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait
 from api.cas.casadm import StatsFilter
+from api.cas.statistics import get_stats_dict, get_stat_value, OperationType
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
 from test_utils.os_utils import Udev
 from test_utils.size import Size, Unit

-ioclass_config_path = "/tmp/opencas_ioclass.conf"
-mountpoint = "/tmp/cas1-1"
-exported_obj_path_prefix = "/dev/cas1-"
-cache_id = 1
+iterations = 10
+dd_block_size = Size(1, Unit.Blocks4096)
+dd_count = 10
+cores_no = 3
-# lists of cache and core block stats, that should have zero value for particular cache modes
-write_wb_zero_stats = [
-    "reads from core(s)",
-    "writes to core(s)",
-    "total to/from core(s)",
-    "reads from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "writes to core",
-    "total to/from core",
-    "reads from cache",
-    "reads from exported object",
-]
-write_wt_zero_stats = [
-    "reads from core(s)",
-    "reads from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "reads from exported object",
-]
-write_pt_zero_stats = [
-    "reads from core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "reads from exported object",
-]
-write_wa_zero_stats = [
-    "reads from core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "reads from exported object",
-]
-write_wo_zero_stats = [
-    "reads from core(s)",
-    "writes to core(s)",
-    "total to/from core(s)",
-    "reads from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "writes to core",
-    "total to/from core",
-    "reads from exported object",
-]
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrize(
-    "cache_mode,zero_stats",
-    [
-        (CacheMode.WB, write_wb_zero_stats),
-        (CacheMode.WT, write_wt_zero_stats),
-        (CacheMode.PT, write_pt_zero_stats),
-        (CacheMode.WA, write_wa_zero_stats),
-        (CacheMode.WO, write_wo_zero_stats),
-    ],
-)
-def test_block_stats_write(cache_mode, zero_stats):
-    """Perform read and write operations to cache instance in different cache modes
-    and check if block stats values are correct"""
-    cache, cores = prepare(cache_mode)
-    iterations = 10
-    dd_size = Size(4, Unit.KibiByte)
-    dd_count = 10
-
-    flush(cache)
-
-    # Check stats for cache after performing write operation
-    for core in cores:
-        dd_seek = 0
-        dd = (
-            Dd()
-            .input("/dev/zero")
-            .output(f"{core.path}")
-            .count(dd_count)
-            .block_size(dd_size)
-            .oflag("direct")
-        )
-        # Since every IO has the same size, every stat should be increased with the same step.
-        # So there is no need to keep value of every stat in separate variable
-        cache_stat = (
-            (dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
-        )
-        for i in range(iterations):
-            dd.seek(dd_seek)
-            dd.run()
-            cache_stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
-            core_stats = core.get_statistics_flat(stat_filter=[StatsFilter.blk])
-            # Check cache stats
-            assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
-            for key, value in cache_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-                else:
-                    # For each next tested core, cache stats has to include
-                    # sum of each previous core
-                    assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-            # Check single core stats
-            for key, value in core_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value of \n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-                else:
-                    assert assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
-                    )
-            dd_seek += dd_count
+@pytest.mark.parametrize("cache_mode", CacheMode)
+def test_block_stats_write_miss(cache_mode: CacheMode):
+    """
+    title: Block statistics after write miss operations
+    description: |
+        Perform write miss operations to cached volume and check if block stats values are correct
+        for configured cache mode.
+    pass_criteria:
+      - Correct block stats values
+    """
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_no)
+
+        cache_device = cache_device.partitions[0]
+
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
+    with TestRun.step("Start cache and set NOP cleaning policy"):
+        cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
+        cache.set_cleaning_policy(CleaningPolicy.nop)
+
+    with TestRun.step("Add core devices"):
+        cores = [cache.add_core(part) for part in core_device.partitions]
+
+    with TestRun.step("Reset cache stats"):
+        cache.reset_counters()
+
+    with TestRun.step("Write data in parts to exported objects and verify block statistics "
+                      "after each part is done"):
+        expected_zero_stats = get_expected_zero_stats(cache_mode, OperationType.write)
+        dd_seek = 0
+        dd = (
+            Dd()
+            .input("/dev/zero")
+            .count(dd_count)
+            .block_size(dd_block_size)
+            .oflag("direct")
+        )
+        for i in range(iterations):
+            core_stat_expected = dd_block_size * dd_count * (i + 1)
+            core_stat_expected.set_unit(Unit.Blocks4096)
+            dd.seek(dd_seek)
+            for j, core in enumerate(cores):
+                # expect previous iterations + already written data in this iteration
+                cache_stat_expected = dd_block_size * dd_count * (i * cores_no + j + 1)
+                cache_stat_expected.set_unit(Unit.Blocks4096)
+                dd.output(core.path)
+                dd.run()
+                cache_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=cache.cache_id)
+                core_stats = get_stats_dict(
+                    filter=[StatsFilter.blk], cache_id=cache.cache_id, core_id=core.core_id
+                )
+                # Check cache stats after write operation
+                fail = False
+                for key, value in cache_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(cache_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != cache_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {cache_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect cache block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"cache_stats:\n{json.dumps(cache_stats, indent=0)}"
+                    )
+                # Check per-core stats
+                for key, value in core_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(core_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != core_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {core_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect core block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"core_stats:\n{json.dumps(core_stats, indent=0)}"
+                    )
+            dd_seek += dd_count
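The expected-counter arithmetic above can be sanity-checked in isolation; a plain-integer sketch using the module constants (iterations = 10, dd_count = 10, cores_no = 3), with block counts standing in for the framework's Size objects:

```python
# Standalone check of the expected-value formulas used in the loop above
# (plain ints instead of Size objects).
iterations, dd_count, cores_no = 10, 10, 3

for i in range(iterations):
    core_expected = dd_count * (i + 1)  # blocks written to a single core so far
    for j in range(cores_no):
        # blocks observed by the cache: all previous iterations plus the cores
        # already written in the current iteration
        cache_expected = dd_count * (i * cores_no + j + 1)

assert core_expected == 100 and cache_expected == 300
```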
-# lists of cache and core block stats, that should have zero value for particular cache modes
-read_wb_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_wt_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_pt_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_wa_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_wo_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrize(
-    "cache_mode,zero_stats",
-    [
-        (CacheMode.WB, read_wb_zero_stats),
-        (CacheMode.WT, read_wt_zero_stats),
-        (CacheMode.PT, read_pt_zero_stats),
-        (CacheMode.WA, read_wa_zero_stats),
-        (CacheMode.WO, read_wo_zero_stats),
-    ],
-)
-def test_block_stats_read(cache_mode, zero_stats):
-    """Perform read and write operations to cache instance in different cache modes
-    and check if block stats values are correct"""
-    cache, cores = prepare(cache_mode)
-    iterations = 10
-    dd_size = Size(4, Unit.KibiByte)
-    dd_count = 10
-
-    flush(cache)
-
-    # Check stats for cache after performing read operation
-    for core in cores:
-        dd_skip = 0
-        dd = (
-            Dd()
-            .output("/dev/zero")
-            .input(f"{core.path}")
-            .count(dd_count)
-            .block_size(dd_size)
-            .iflag("direct")
-        )
-        # Since every IO has the same size, every stat should be increased with the same step.
-        # So there is no need to keep value of every stat in separate variable
-        cache_stat = (
-            (dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
-        )
-        for i in range(iterations):
-            dd.skip(dd_skip)
-            dd.run()
-            cache_stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
-            core_stats = core.get_statistics_flat(stat_filter=[StatsFilter.blk])
-            # Check cache stats
-            assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
-            for key, value in cache_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-                else:
-                    # For each next tested core, cache stats has to include
-                    # sum of each previous core
-                    assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}. Cache mode: {cache_mode}"
-                    )
-            # Check single core stats
-            for key, value in core_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}. Cache mode: {cache_mode}"
-                    )
-                else:
-                    assert assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count} dd skip {dd_skip}. Cache mode: {cache_mode}"
-                    )
-            dd_skip += dd_count
+@pytest.mark.parametrize("cache_mode", CacheMode)
+def test_block_stats_read_miss(cache_mode: CacheMode):
+    """
+    title: Block statistics after read miss operations
+    description: |
+        Perform read miss operations from cached volume and check if block stats values are correct
+        for configured cache mode.
+    pass_criteria:
+      - Correct block stats values
+    """
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_no)
+
+        cache_device = cache_device.partitions[0]
+
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
+    with TestRun.step("Start cache and set NOP cleaning policy"):
+        cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
+        cache.set_cleaning_policy(CleaningPolicy.nop)
+
+    with TestRun.step("Add core devices"):
+        cores = [cache.add_core(part) for part in core_device.partitions]
+
+    with TestRun.step("Reset cache stats"):
+        cache.reset_counters()
+
+    with TestRun.step("Read data in parts from exported objects and verify block statistics "
+                      "after each part is done"):
+        expected_zero_stats = get_expected_zero_stats(cache_mode, OperationType.read)
+        dd_skip = 0
+        dd = (
+            Dd()
+            .output("/dev/null")
+            .count(dd_count)
+            .block_size(dd_block_size)
+            .iflag("direct")
+        )
+        for i in range(iterations):
+            core_stat_expected = dd_block_size * dd_count * (i + 1)
+            core_stat_expected.set_unit(Unit.Blocks4096)
+            dd.skip(dd_skip)
+            for j, core in enumerate(cores):
+                # expect previous iterations + already read data in this iteration
+                cache_stat_expected = dd_block_size * dd_count * (i * cores_no + j + 1)
+                cache_stat_expected.set_unit(Unit.Blocks4096)
+                dd.input(core.path)
+                dd.run()
+                cache_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=cache.cache_id)
+                core_stats = get_stats_dict(
+                    filter=[StatsFilter.blk], cache_id=cache.cache_id, core_id=core.core_id
+                )
+                # Check cache stats after read operation
+                fail = False
+                for key, value in cache_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(cache_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != cache_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {cache_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect cache block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"cache_stats:\n{json.dumps(cache_stats, indent=0)}"
+                    )
+                # Check per-core stats
+                for key, value in core_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(core_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != core_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {core_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect core block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"core_stats:\n{json.dumps(core_stats, indent=0)}"
+                    )
+            dd_skip += dd_count
-def flush(cache):
-    cache.flush_cache()
-    cache.reset_counters()
-    stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
-    for key, value in stats.items():
-        assert value.get_value(Unit.Blocks4096) == 0
-
-
-def prepare(cache_mode: CacheMode):
-    ioclass_config.remove_ioclass_config()
-    cache_device = TestRun.disks['cache']
-    core_device = TestRun.disks['core']
-
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
-    core_device.create_partitions(
-        [Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
-    )
-
-    cache_device = cache_device.partitions[0]
-    core_device_1 = core_device.partitions[0]
-    core_device_2 = core_device.partitions[1]
-    core_device_3 = core_device.partitions[2]
-
-    Udev.disable()
-
-    TestRun.LOGGER.info(f"Starting cache")
-    cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
-    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
-    casadm.set_param_cleaning(cache_id=cache_id, policy=CleaningPolicy.nop)
-    TestRun.LOGGER.info(f"Adding core devices")
-    core_1 = cache.add_core(core_dev=core_device_1)
-    core_2 = cache.add_core(core_dev=core_device_2)
-    core_3 = cache.add_core(core_dev=core_device_3)
-
-    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
-    if output.exit_code != 0:
-        raise Exception(f"Failed to create mountpoint")
-
-    return cache, [core_1, core_2, core_3]
+def get_expected_zero_stats(cache_mode: CacheMode, direction: OperationType):
+    traits = CacheMode.get_traits(cache_mode)
+
+    stat_list = ["Reads from cache"]
+    if direction == OperationType.write:
+        stat_list.append("Reads from core")
+        stat_list.append("Reads from exported object")
+    if direction == OperationType.read or CacheModeTrait.LazyWrites in traits:
+        stat_list.append("Writes to core")
+    if direction == OperationType.read:
+        stat_list.append("Writes to exported object")
+    if ((direction == OperationType.read and CacheModeTrait.InsertRead not in traits)
+            or (direction == OperationType.write and CacheModeTrait.InsertWrite not in traits)):
+        stat_list.append("Writes to cache")
+        stat_list.append("Total to/from cache")
+    if direction == OperationType.write and CacheModeTrait.LazyWrites in traits:
+        stat_list.append("Total to/from core")
+
+    return stat_list
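To illustrate the new helper (illustrative only, not part of the commit): for Write-Back write misses, whose cache mode carries the LazyWrites and InsertWrite traits, the returned list matches the entries of the removed write_wb_zero_stats list:

```python
# Sketch: expected zero-valued stat prefixes for a WB write-miss run.
from api.cas.cache_config import CacheMode
from api.cas.statistics import OperationType

zero_prefixes = get_expected_zero_stats(CacheMode.WB, OperationType.write)
# Per the logic above: "Reads from cache", "Reads from core",
# "Reads from exported object", "Writes to core", "Total to/from core"
print(zero_prefixes)
```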


@@ -1,22 +1,23 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

+from time import sleep
+
 import pytest

 from api.cas import casadm
 from api.cas.cache_config import CacheMode, CleaningPolicy
 from api.cas.casadm import StatsFilter
+from api.cas.statistics import get_stats_dict, get_stat_value
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 from test_utils.os_utils import Udev
 from test_utils.size import Size, Unit
-from time import sleep

 cache_size = Size(1, Unit.GibiByte)
 core_size = Size(2, Unit.GibiByte)
@@ -33,7 +34,7 @@ def test_stat_max_cache():
         Check CAS ability to display correct values in statistics
         for 16 cache devices per cache mode.
     pass_criteria:
-      - Core's statistics matches cache's statistics.
+      - Cores' statistics match cache's statistics.
     """
     caches_per_cache_mode = 16
@@ -82,20 +83,21 @@ def test_stat_max_cache():
            fio.run()
     sleep(3)

-    with TestRun.step("Check if cache's statistics matches core's statistics"):
+    with TestRun.step("Check if cache's statistics match cores' statistics"):
         for i in range(caches_count):
-            cache_stats = caches[i].get_statistics_flat(stat_filter=stat_filter)
+            cache_stats = get_stats_dict(filter=stat_filter, cache_id=caches[i].cache_id)
             cores_stats = [
-                cores[i][j].get_statistics_flat(stat_filter=stat_filter)
-                for j in range(cores_per_cache)
+                get_stats_dict(
+                    filter=stat_filter, cache_id=caches[i].cache_id, core_id=cores[i][j].core_id
+                ) for j in range(cores_per_cache)
             ]
-            fail_message = f"For cache ID {caches[i].cache_id} "
+            fail_message = f"For cache ID {caches[i].cache_id} ({caches[i].get_cache_mode()}) "
             stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message)


+@pytest.mark.parametrizex("cache_mode", CacheMode)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrizex("cache_mode", CacheMode)
 def test_stat_max_core(cache_mode):
     """
     title: CAS statistics values for maximum core devices.
@@ -103,7 +105,7 @@ def test_stat_max_core(cache_mode):
         Check CAS ability to display correct values in statistics
         for 62 core devices.
     pass_criteria:
-      - Core's statistics matches cache's statistics.
+      - Cores' statistics match cache's statistics.
     """
     cores_per_cache = 62
@@ -132,11 +134,12 @@ def test_stat_max_core(cache_mode):
        fio.run()
     sleep(3)

-    with TestRun.step("Check if cache's statistics matches core's statistics"):
-        cache_stats = cache.get_statistics_flat(stat_filter=stat_filter)
+    with TestRun.step("Check if cache's statistics match cores' statistics"):
+        cache_stats = get_stats_dict(filter=stat_filter, cache_id=cache.cache_id)
         cores_stats = [
-            cores[j].get_statistics_flat(stat_filter=stat_filter)
-            for j in range(cores_per_cache)
+            get_stats_dict(
+                filter=stat_filter, cache_id=cache.cache_id, core_id=cores[j].core_id
+            ) for j in range(cores_per_cache)
         ]
         fail_message = f"In {cache_mode} cache mode "
         stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message)
@@ -156,20 +159,20 @@ def fio_prepare():
     return fio


 def stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message):
-    for cache_stat_name in cache_stats.keys():
-        if cache_stat_name.lower() != "free":
-            core_stat_name = cache_stat_name.replace("(s)", "")
-            core_stat_sum = 0
-            try:
-                cache_stats[cache_stat_name] = cache_stats[cache_stat_name].value
-                for j in range(cores_per_cache):
-                    cores_stats[j][core_stat_name] = cores_stats[j][core_stat_name].value
-            except AttributeError:
-                pass
-            for j in range(cores_per_cache):
-                core_stat_sum += cores_stats[j][core_stat_name]
-            if core_stat_sum != cache_stats[cache_stat_name]:
-                TestRun.LOGGER.error(fail_message + (
-                    f"sum of core's '{core_stat_name}' values is "
-                    f"{core_stat_sum}, should equal cache value: "
-                    f"{cache_stats[cache_stat_name]}\n"))
+    for stat_name in cache_stats.keys():
+        if stat_name.startswith("Free ") or stat_name.endswith("[%]"):
+            continue
+        core_stat_sum = 0
+        try:
+            cache_stats[stat_name] = get_stat_value(cache_stats, stat_name)
+            for j in range(cores_per_cache):
+                cores_stats[j][stat_name] = get_stat_value(cores_stats[j], stat_name)
+        except AttributeError:
+            pass
+        for j in range(cores_per_cache):
+            core_stat_sum += cores_stats[j][stat_name]
+        if core_stat_sum != cache_stats[stat_name]:
+            TestRun.LOGGER.error(fail_message + (
+                f"sum of cores' '{stat_name}' values is "
+                f"{core_stat_sum}, should equal cache value: "
+                f"{cache_stats[stat_name]}\n"))
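The comparison rule enforced above, illustrated with hypothetical numbers (the stat key is only an example, not a guaranteed casadm key name):

```python
# Hypothetical data: every non-"Free", non-"[%]" cache stat should equal the
# sum of that stat over the cache's cores.
key = "Reads from core [4KiB Blocks]"
cache_stats = {key: 300}
cores_stats = [{key: 100}, {key: 200}]
assert sum(core[key] for core in cores_stats) == cache_stats[key]
```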


@@ -1,22 +1,23 @@
 #
 # Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

+from time import sleep
+
 import pytest

 from api.cas import casadm
 from api.cas.cache_config import CacheMode, CacheModeTrait
 from api.cas.casadm import StatsFilter
+from api.cas.statistics import get_stats_dict, get_stat_value
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.fio.fio import Fio
 from test_tools.fio.fio_param import ReadWrite, IoEngine
 from test_utils.os_utils import Udev
 from test_utils.size import Size, Unit
-from time import sleep

 # One cache instance per every cache mode:
 caches_count = len(CacheMode)
@@ -27,7 +28,7 @@ io_value = 1000
 io_size = Size(io_value, Unit.Blocks4096)
 # Error stats not included in 'stat_filter' because all of them
 # should equal 0 and can be checked easier, shorter way.
-stat_filter = [StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
+default_stat_filter = [StatsFilter.usage, StatsFilter.req, StatsFilter.blk]


 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@@ -38,10 +39,10 @@ def test_stats_values():
     description: |
       Check if CAS displays proper usage, request, block and error statistics values
       for core devices in every cache mode - at the start, after IO and after cache
-      reload. Also check if core's statistics match cache's statistics.
+      reload. Also check if cores' statistics match cache's statistics.
     pass_criteria:
       - Usage, request, block and error statistics have proper values.
-      - Core's statistics match cache's statistics.
+      - Cores' statistics match cache's statistics.
     """

     with TestRun.step("Partition cache and core devices"):
@@ -68,7 +69,7 @@ def test_stats_values():
     with TestRun.step("Check statistics values after IO"):
         check_stats_after_io(caches, cores)

-    with TestRun.step("Check if cache's statistics match core's statistics"):
+    with TestRun.step("Check if cache's statistics match cores' statistics"):
         check_stats_sum(caches, cores)

     with TestRun.step("Stop and load caches back"):
@@ -96,7 +97,7 @@ def cache_prepare(cache_dev, core_dev):
         caches.append(
             casadm.start_cache(cache_dev.partitions[i], cache_mode, force=True)
         )
-    cores = [[] for i in range(caches_count)]
+    cores = [[] for _ in range(caches_count)]
     for i in range(caches_count):
         for j in range(cores_per_cache):
             core_partition_number = i * cores_per_cache + j
@@ -126,34 +127,41 @@ def fio_prepare():
     return fio


-def get_stats_flat(cores, cache=None, stat_filter=stat_filter):
-    if cache:
-        cache_stats = cache.get_statistics_flat(stat_filter=stat_filter)
+def get_stats(stat_filter, cores, cache=None):
     cores_stats = [
-        cores[j].get_statistics_flat(stat_filter=stat_filter)
-        for j in range(cores_per_cache)
+        get_stats_dict(
+            filter=stat_filter, cache_id=cores[j].cache_id, core_id=cores[j].core_id
+        ) for j in range(cores_per_cache)
     ]
     cores_stats_perc = [
-        cores[j].get_statistics_flat(stat_filter=stat_filter, percentage_val=True)
+        {k: get_stat_value(cores_stats[j], k) for k in cores_stats[j] if k.endswith("[%]")}
+        for j in range(cores_per_cache)
+    ]
+    cores_stats_values = [
+        {k: get_stat_value(cores_stats[j], k) for k in cores_stats[j] if not k.endswith("[%]")}
         for j in range(cores_per_cache)
     ]

     if cache:
-        return cores_stats, cores_stats_perc, cache_stats
+        cache_stats = get_stats_dict(filter=stat_filter, cache_id=cache.cache_id)
+        cache_stats_values = {
+            k: get_stat_value(cache_stats, k) for k in cache_stats if not k.endswith("[%]")
+        }
+        return cores_stats_values, cores_stats_perc, cache_stats_values
     else:
-        return cores_stats, cores_stats_perc
+        return cores_stats_values, cores_stats_perc


 def check_stats_initial(caches, cores):
     for i in range(caches_count):
-        cores_stats, cores_stats_perc = get_stats_flat(cores[i])
+        cores_stats, cores_stats_perc = get_stats(stat_filter=default_stat_filter, cores=cores[i])
         for j in range(cores_per_cache):
             for stat_name, stat_value in cores_stats[j].items():
                 try:
                     stat_value = stat_value.value
                 except AttributeError:
                     pass
-                if stat_name.lower() == "free":
+                if stat_name.startswith("Free"):
                     if stat_value != caches[i].size.value:
                         TestRun.LOGGER.error(
                             f"For core device {cores[i][j].path} "
@@ -164,7 +172,7 @@ def check_stats_initial(caches, cores):
                             f"For core device {cores[i][j].path} value for "
                             f"'{stat_name}' is {stat_value}, should equal 0\n")
                 for stat_name, stat_value in cores_stats_perc[j].items():
-                    if stat_name.lower() == "free":
+                    if stat_name.startswith("Free"):
                         if stat_value != 100:
                             TestRun.LOGGER.error(
                                 f"For core device {cores[i][j].path} percentage value "
@@ -179,15 +187,15 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
     for i in range(caches_count):
         cache_mode = caches[i].get_cache_mode()
         cores_stats = [
-            cores[i][j].get_statistics(stat_filter=stat_filter)
+            cores[i][j].get_statistics(stat_filter=default_stat_filter)
             for j in range(cores_per_cache)
         ]
         cores_stats_perc = [
-            cores[i][j].get_statistics(stat_filter=stat_filter, percentage_val=True)
+            cores[i][j].get_statistics(stat_filter=default_stat_filter, percentage_val=True)
             for j in range(cores_per_cache)
         ]
-        cores_error_stats, cores_error_stats_perc = get_stats_flat(
-            cores[i], stat_filter=[StatsFilter.err]
+        cores_error_stats, cores_error_stats_perc = get_stats(
+            stat_filter=[StatsFilter.err], cores=cores[i]
         )
         for j in range(cores_per_cache):
             fail_message = (
@@ -196,7 +204,7 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
                 validate_usage_stats(
                     cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)
                 validate_error_stats(
-                    cores_error_stats[j], cores_error_stats_perc[j], cache_mode, fail_message)
+                    cores_error_stats[j], cores_error_stats_perc[j], fail_message)
             else:
                 validate_usage_stats(
                     cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)
@@ -205,31 +213,31 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
                 validate_block_stats(
                     cores_stats[j], cores_stats_perc[j], cache_mode, fail_message)
                 validate_error_stats(
-                    cores_error_stats[j], cores_error_stats_perc[j], cache_mode, fail_message)
+                    cores_error_stats[j], cores_error_stats_perc[j], fail_message)


 def check_stats_sum(caches, cores):
     for i in range(caches_count):
         cores_stats, cores_stats_perc, cache_stats = (
-            get_stats_flat(cores[i], cache=caches[i])
+            get_stats(stat_filter=default_stat_filter, cores=cores[i], cache=caches[i])
         )
-        for cache_stat_name in cache_stats.keys():
-            if cache_stat_name.lower() != "free":
-                core_stat_name = cache_stat_name.replace("(s)", "")
-                core_stat_sum = 0
-                try:
-                    cache_stats[cache_stat_name] = cache_stats[cache_stat_name].value
-                    for j in range(cores_per_cache):
-                        cores_stats[j][core_stat_name] = cores_stats[j][core_stat_name].value
-                except AttributeError:
-                    pass
-                for j in range(cores_per_cache):
-                    core_stat_sum += cores_stats[j][core_stat_name]
-                if core_stat_sum != cache_stats[cache_stat_name]:
-                    TestRun.LOGGER.error(
-                        f"For cache ID {caches[i].cache_id} sum of core's "
-                        f"'{core_stat_name}' values is {core_stat_sum}, "
-                        f"should equal {cache_stats[cache_stat_name]}\n")
+        for stat_name in cache_stats.keys():
+            if stat_name.startswith("Free"):
+                continue
+            core_stat_sum = 0
+            try:
+                cache_stats[stat_name] = cache_stats[stat_name].value
+                for j in range(cores_per_cache):
+                    cores_stats[j][stat_name] = cores_stats[j][stat_name].value
+            except AttributeError:
+                pass
+            for j in range(cores_per_cache):
+                core_stat_sum += cores_stats[j][stat_name]
+            if core_stat_sum != cache_stats[stat_name]:
+                TestRun.LOGGER.error(
+                    f"For cache ID {caches[i].cache_id} sum of cores' "
+                    f"'{stat_name}' values is {core_stat_sum}, "
+                    f"should equal {cache_stats[stat_name]}\n")


 def validate_usage_stats(stats, stats_perc, cache, cache_mode, fail_message):
@@ -692,7 +700,7 @@ def validate_block_stats(stats, stats_perc, cache_mode, fail_message):
                     f"should equal 100\n")


-def validate_error_stats(stats, stats_perc, cache_mode, fail_message):
+def validate_error_stats(stats, stats_perc, fail_message):
     fail_message += f"in 'error' stats"
     for stat_name, stat_value in stats.items():
         if stat_value != 0:
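For completeness, a small hypothetical of what the error-section check expects after healthy IO (the stat names below are illustrative, not taken from real casadm output):

```python
# Hypothetical error-section dictionaries: every raw counter and percentage
# entry is expected to be zero, which is what validate_error_stats asserts.
error_stats = {"Cache read errors": 0, "Cache write errors": 0, "Total errors": 0}
error_stats_perc = {"Cache read errors [%]": 0, "Cache write errors [%]": 0}
assert all(value == 0 for value in error_stats.values())
assert all(value == 0 for value in error_stats_perc.values())
```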