Thorough update of test_block_stats.py
Signed-off-by: Daniel Madej <daniel.madej@huawei.com>
parent 205af2ab99
commit aa0dc4d7ee

@@ -1,322 +1,261 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+import json
 
 import pytest
 
 from api.cas import casadm
-from api.cas import ioclass_config
-from api.cas.cache_config import CacheMode, CleaningPolicy
+from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait
 from api.cas.casadm import StatsFilter
+from api.cas.statistics import get_stats_dict, get_stat_value, OperationType
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
 from test_utils.os_utils import Udev
 from test_utils.size import Size, Unit
 
-ioclass_config_path = "/tmp/opencas_ioclass.conf"
-mountpoint = "/tmp/cas1-1"
-exported_obj_path_prefix = "/dev/cas1-"
-cache_id = 1
+iterations = 10
+dd_block_size = Size(1, Unit.Blocks4096)
+dd_count = 10
+cores_no = 3
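For context on the new statistics helpers: the rewrite replaces cache.get_statistics_flat() with get_stats_dict() / get_stat_value() from api.cas.statistics. A minimal sketch of how the tests below consume the returned dict; the exact "[unit]" key suffix shown here is an assumption for illustration, but the '[%]' skip and the Size comparison follow the diff:

    # Stand-in for a dict returned by
    # get_stats_dict(filter=[StatsFilter.blk], cache_id=..., core_id=...).
    stats = {
        "Writes to cache [4KiB Blocks]": "80",  # absolute row (assumed key format)
        "Writes to cache [%]": "100.0",         # relative row, skipped by the tests
    }

    # Percentage rows are ignored; every absolute row is fetched via
    # get_stat_value(stats, key), which must be comparable with Size objects.
    absolute_keys = [key for key in stats if not key.endswith('[%]')]
    print(absolute_keys)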
 
-# lists of cache and core block stats, that should have zero value for particular cache modes
-write_wb_zero_stats = [
-    "reads from core(s)",
-    "writes to core(s)",
-    "total to/from core(s)",
-    "reads from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "writes to core",
-    "total to/from core",
-    "reads from cache",
-    "reads from exported object",
-]
-write_wt_zero_stats = [
-    "reads from core(s)",
-    "reads from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "reads from exported object",
-]
-write_pt_zero_stats = [
-    "reads from core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "reads from exported object",
-]
-write_wa_zero_stats = [
-    "reads from core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "reads from exported object",
-]
-write_wo_zero_stats = [
-    "reads from core(s)",
-    "writes to core(s)",
-    "total to/from core(s)",
-    "reads from cache",
-    "reads from exported object(s)",
-    "reads from core",
-    "writes to core",
-    "total to/from core",
-    "reads from exported object",
-]
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrize(
-    "cache_mode,zero_stats",
-    [
-        (CacheMode.WB, write_wb_zero_stats),
-        (CacheMode.WT, write_wt_zero_stats),
-        (CacheMode.PT, write_pt_zero_stats),
-        (CacheMode.WA, write_wa_zero_stats),
-        (CacheMode.WO, write_wo_zero_stats),
-    ],
-)
-def test_block_stats_write(cache_mode, zero_stats):
-    """Perform read and write operations to cache instance in different cache modes
-    and check if block stats values are correct"""
-    cache, cores = prepare(cache_mode)
-    iterations = 10
-    dd_size = Size(4, Unit.KibiByte)
-    dd_count = 10
+@pytest.mark.parametrize("cache_mode", CacheMode)
+def test_block_stats_write_miss(cache_mode: CacheMode):
+    """
+    title: Block statistics after write miss operations
+    description: |
+        Perform write miss operations to cached volume and check if block stats values are correct
+        for configured cache mode.
+    pass_criteria:
+      - Correct block stats values
+    """
 
-    flush(cache)
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_no)
+
+        cache_device = cache_device.partitions[0]
+
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
+    with TestRun.step("Start cache and set NOP cleaning policy"):
+        cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
+        cache.set_cleaning_policy(CleaningPolicy.nop)
+
+    with TestRun.step("Add core devices"):
+        cores = [cache.add_core(part) for part in core_device.partitions]
+
+    with TestRun.step("Reset cache stats"):
+        cache.reset_counters()
+
+    with TestRun.step("Write data in parts to exported objects and verify block statistics "
+                      "after each part is done"):
+        expected_zero_stats = get_expected_zero_stats(cache_mode, OperationType.write)
 
-    # Check stats for cache after performing write operation
-    for core in cores:
         dd_seek = 0
         dd = (
             Dd()
             .input("/dev/zero")
-            .output(f"{core.path}")
             .count(dd_count)
-            .block_size(dd_size)
+            .block_size(dd_block_size)
             .oflag("direct")
         )
-        # Since every IO has the same size, every stat should be increased with the same step.
-        # So there is no need to keep value of every stat in separate variable
-        cache_stat = (
-            (dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
-        )
         for i in range(iterations):
+            core_stat_expected = dd_block_size * dd_count * (i + 1)
+            core_stat_expected.set_unit(Unit.Blocks4096)
             dd.seek(dd_seek)
-            dd.run()
-            cache_stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
-            core_stats = core.get_statistics_flat(stat_filter=[StatsFilter.blk])
+            for j, core in enumerate(cores):
+                # expect previous iterations + already written data in this iteration
+                cache_stat_expected = dd_block_size * dd_count * (i * cores_no + j + 1)
+                cache_stat_expected.set_unit(Unit.Blocks4096)
+                dd.output(core.path)
+                dd.run()
+                cache_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=cache.cache_id)
+                core_stats = get_stats_dict(
+                    filter=[StatsFilter.blk], cache_id=cache.cache_id, core_id=core.core_id
+                )
 
-            # Check cache stats
-            assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
-            for key, value in cache_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-                else:
-                    # For each next tested core, cache stats has to include
-                    # sum of each previous core
-                    assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
+                # Check cache stats after write operation
+                fail = False
+                for key, value in cache_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(cache_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != cache_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {cache_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect cache block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"cache_stats:\n{json.dumps(cache_stats, indent=0)}"
+                    )
 
-            # Check single core stats
-            for key, value in core_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value of \n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-                else:
-                    assert assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
-                    )
-            dd_seek += dd_count
+                # Check per-core stats
+                for key, value in core_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(core_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != core_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {core_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect core block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"core_stats:\n{json.dumps(core_stats, indent=0)}"
+                    )
+            dd_seek += dd_count
 
 
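A sanity check of the counter arithmetic used above (standalone sketch with plain integers instead of Size objects): the rewritten loop is iteration-major and core-minor, so after the dd call for core j in iteration i the cache-wide counters have absorbed i * cores_no + j + 1 chunks of dd_block_size * dd_count, while the per-core counters of that core hold i + 1 chunks:

    iterations, cores_no = 2, 3
    for i in range(iterations):
        for j in range(cores_no):
            chunks_on_cache = i * cores_no + j + 1  # every dd call so far
            chunks_on_core = i + 1                  # calls that hit this core
            print(f"i={i} j={j}: cache={chunks_on_cache}, core={chunks_on_core}")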
-# lists of cache and core block stats, that should have zero value for particular cache modes
-read_wb_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_wt_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_pt_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_wa_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
-read_wo_zero_stats = [
-    "writes to core(s)",
-    "reads from cache",
-    "writes to cache",
-    "total to/from cache",
-    "writes to exported object(s)",
-    "writes to core",
-    "writes to exported object",
-]
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-@pytest.mark.parametrize(
-    "cache_mode,zero_stats",
-    [
-        (CacheMode.WB, read_wb_zero_stats),
-        (CacheMode.WT, read_wt_zero_stats),
-        (CacheMode.PT, read_pt_zero_stats),
-        (CacheMode.WA, read_wa_zero_stats),
-        (CacheMode.WO, read_wo_zero_stats),
-    ],
-)
-def test_block_stats_read(cache_mode, zero_stats):
-    """Perform read and write operations to cache instance in different cache modes
-    and check if block stats values are correct"""
-    cache, cores = prepare(cache_mode)
-    iterations = 10
-    dd_size = Size(4, Unit.KibiByte)
-    dd_count = 10
+@pytest.mark.parametrize("cache_mode", CacheMode)
+def test_block_stats_read_miss(cache_mode: CacheMode):
+    """
+    title: Block statistics after read miss operations
+    description: |
+        Perform read miss operations from cached volume and check if block stats values are correct
+        for configured cache mode.
+    pass_criteria:
+      - Correct block stats values
+    """
 
-    flush(cache)
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks['cache']
+        core_device = TestRun.disks['core']
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * cores_no)
+
+        cache_device = cache_device.partitions[0]
+
+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
+    with TestRun.step("Start cache and set NOP cleaning policy"):
+        cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
+        cache.set_cleaning_policy(CleaningPolicy.nop)
+
+    with TestRun.step("Add core devices"):
+        cores = [cache.add_core(part) for part in core_device.partitions]
+
+    with TestRun.step("Reset cache stats"):
+        cache.reset_counters()
+
+    with TestRun.step("Read data in parts from exported objects and verify block statistics "
+                      "after each part is done"):
+        expected_zero_stats = get_expected_zero_stats(cache_mode, OperationType.read)
 
-    # Check stats for cache after performing read operation
-    for core in cores:
         dd_skip = 0
         dd = (
             Dd()
-            .output("/dev/zero")
-            .input(f"{core.path}")
+            .output("/dev/null")
             .count(dd_count)
-            .block_size(dd_size)
+            .block_size(dd_block_size)
             .iflag("direct")
         )
-        # Since every IO has the same size, every stat should be increased with the same step.
-        # So there is no need to keep value of every stat in separate variable
-        cache_stat = (
-            (dd_size.get_value(Unit.Blocks4096) * dd_count) * (core.core_id - 1) * iterations
-        )
         for i in range(iterations):
+            core_stat_expected = dd_block_size * dd_count * (i + 1)
+            core_stat_expected.set_unit(Unit.Blocks4096)
             dd.skip(dd_skip)
-            dd.run()
-            cache_stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
-            core_stats = core.get_statistics_flat(stat_filter=[StatsFilter.blk])
+            for j, core in enumerate(cores):
+                # expect previous iterations + already read data in this iteration
+                cache_stat_expected = dd_block_size * dd_count * (i * cores_no + j + 1)
+                cache_stat_expected.set_unit(Unit.Blocks4096)
+                dd.input(core.path)
+                dd.run()
+                cache_stats = get_stats_dict(filter=[StatsFilter.blk], cache_id=cache.cache_id)
+                core_stats = get_stats_dict(
+                    filter=[StatsFilter.blk], cache_id=cache.cache_id, core_id=core.core_id
+                )
 
-            # Check cache stats
-            assumed_value = (dd_size.get_value(Unit.Blocks4096) * dd_count) * (i + 1)
-            for key, value in cache_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}, cache_stat {cache_stat}"
-                    )
-                else:
-                    # For each next tested core, cache stats has to include
-                    # sum of each previous core
-                    assert cache_stat + assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}. Cache mode: {cache_mode}"
-                    )
+                # Check cache stats after read operation
+                fail = False
+                for key, value in cache_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(cache_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != cache_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {cache_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect cache block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"cache_stats:\n{json.dumps(cache_stats, indent=0)}"
+                    )
 
-            # Check single core stats
-            for key, value in core_stats.items():
-                if key in zero_stats:
-                    assert value.get_value(Unit.Blocks4096) == 0, (
-                        f"{key} has invalid value\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count}. Cache mode: {cache_mode}"
-                    )
-                else:
-                    assert assumed_value == value.get_value(Unit.Blocks4096), (
-                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
-                        f"core id {core.core_id}, i: {i}, dd_size: "
-                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
-                        f"dd count: {dd_count} dd skip {dd_skip}. Cache mode: {cache_mode}"
-                    )
+                # Check per-core stats
+                for key, value in core_stats.items():
+                    if key.endswith('[%]'):
+                        continue
+                    stat = get_stat_value(core_stats, key)
+                    if any(key.startswith(s) for s in expected_zero_stats):
+                        if stat != Size.zero():
+                            TestRun.LOGGER.error(f"{key} has non-zero value of {stat}")
+                            fail = True
+                    elif stat != core_stat_expected:
+                        TestRun.LOGGER.error(
+                            f"{key} has invalid value of {stat}\n"
+                            f"expected: {core_stat_expected}"
+                        )
+                        fail = True
+                if fail:
+                    TestRun.fail(
+                        "Incorrect core block stats\n"
+                        f"iteration {i}, core id: {core.core_id}\n"
+                        f"core_stats:\n{json.dumps(core_stats, indent=0)}"
+                    )
 
             dd_skip += dd_count
 
 
-def flush(cache):
-    cache.flush_cache()
-    cache.reset_counters()
-    stats = cache.get_statistics_flat(stat_filter=[StatsFilter.blk])
-    for key, value in stats.items():
-        assert value.get_value(Unit.Blocks4096) == 0
-
-
-def prepare(cache_mode: CacheMode):
-    ioclass_config.remove_ioclass_config()
-    cache_device = TestRun.disks['cache']
-    core_device = TestRun.disks['core']
-
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
-    core_device.create_partitions(
-        [Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
-    )
-
-    cache_device = cache_device.partitions[0]
-    core_device_1 = core_device.partitions[0]
-    core_device_2 = core_device.partitions[1]
-    core_device_3 = core_device.partitions[2]
-
-    Udev.disable()
-
-    TestRun.LOGGER.info(f"Starting cache")
-    cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
-    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
-    casadm.set_param_cleaning(cache_id=cache_id, policy=CleaningPolicy.nop)
-    TestRun.LOGGER.info(f"Adding core devices")
-    core_1 = cache.add_core(core_dev=core_device_1)
-    core_2 = cache.add_core(core_dev=core_device_2)
-    core_3 = cache.add_core(core_dev=core_device_3)
-
-    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
-    if output.exit_code != 0:
-        raise Exception(f"Failed to create mountpoint")
-
-    return cache, [core_1, core_2, core_3]
+def get_expected_zero_stats(cache_mode: CacheMode, direction: OperationType):
+    traits = CacheMode.get_traits(cache_mode)
+
+    stat_list = ["Reads from cache"]
+    if direction == OperationType.write:
+        stat_list.append("Reads from core")
+        stat_list.append("Reads from exported object")
+    if direction == OperationType.read or CacheModeTrait.LazyWrites in traits:
+        stat_list.append("Writes to core")
+    if direction == OperationType.read:
+        stat_list.append("Writes to exported object")
+    if ((direction == OperationType.read and CacheModeTrait.InsertRead not in traits)
+            or (direction == OperationType.write and CacheModeTrait.InsertWrite not in traits)):
+        stat_list.append("Writes to cache")
+        stat_list.append("Total to/from cache")
+    if direction == OperationType.write and CacheModeTrait.LazyWrites in traits:
+        stat_list.append("Total to/from core")
+
+    return stat_list
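To make get_expected_zero_stats() concrete, a hand-run for one case; the import path is hypothetical and the trait assignment is an assumption (Write-Back is expected to carry InsertWrite and LazyWrites in CacheMode.get_traits()):

    from api.cas.cache_config import CacheMode
    from api.cas.statistics import OperationType
    from test_block_stats import get_expected_zero_stats  # hypothetical import

    # Write misses under WB should leave the core untouched and read nothing back:
    print(get_expected_zero_stats(CacheMode.WB, OperationType.write))
    # assuming WB traits = InsertWrite | LazyWrites, this prints:
    # ['Reads from cache', 'Reads from core', 'Reads from exported object',
    #  'Writes to core', 'Total to/from core']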