Merge pull request #1538 from Kamoppl/kamilg/fix_scope_bugs_v4

Kamilg/fix scope bugs v4
Katarzyna Treder 2024-10-11 11:26:58 +02:00 committed by GitHub
commit e7f14f7d00
13 changed files with 334 additions and 174 deletions

View File

@@ -285,7 +285,7 @@ standby_help = [
 ]
 zero_metadata_help = [
-    r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option\.\.\.\]]",
+    r"Usage: casadm --zero-metadata --device \<DEVICE\> \[option\.\.\.\]",
     r"Clear metadata from caching device",
    r"Options that are valid with --zero-metadata are:",
    r"-d --device \<DEVICE\> Path to device on which metadata would be cleared",

View File

@@ -173,8 +173,11 @@ class CacheConfigStats:
         self.cache_line_size = parse_value(
             value=stats_dict["Cache line size [KiB]"], unit_type=UnitType.kibibyte
         )
+        footprint_prefix = "Metadata Memory Footprint "
+        footprint_key = next(k for k in stats_dict if k.startswith(footprint_prefix))
         self.metadata_memory_footprint = parse_value(
-            value=stats_dict["Metadata Memory Footprint [MiB]"], unit_type=UnitType.mebibyte
+            value=stats_dict[footprint_key],
+            unit_type=UnitType(footprint_key[len(footprint_prefix) :]),
         )
         self.dirty_for = parse_value(value=stats_dict["Dirty for [s]"], unit_type=UnitType.seconds)
         self.status = stats_dict["Status"]
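The footprint key is no longer hard-coded to MiB: the parser now locates whichever "Metadata Memory Footprint [...]" key casadm printed and derives the unit from the key suffix. A minimal standalone sketch of the same idea, using a hypothetical parse_footprint helper and a simplified unit map instead of the framework's parse_value/UnitType:

# Illustrative only: parse_footprint and UNIT_MULTIPLIERS are not framework API.
UNIT_MULTIPLIERS = {"[KiB]": 2**10, "[MiB]": 2**20, "[GiB]": 2**30}


def parse_footprint(stats_dict: dict) -> int:
    """Return the metadata footprint in bytes, whatever unit was printed."""
    prefix = "Metadata Memory Footprint "
    key = next(k for k in stats_dict if k.startswith(prefix))
    unit = key[len(prefix):]  # e.g. "[MiB]" or "[GiB]"
    return int(float(stats_dict[key]) * UNIT_MULTIPLIERS[unit])


print(parse_footprint({"Metadata Memory Footprint [GiB]": "1.2"}))  # 1288490188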

View File

@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -317,6 +318,9 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
     with TestRun.step("Prepare cache and core."):
         cache_part, core_part = prepare()

+    with TestRun.step("Disable udev"):
+        Udev.disable()
+
     for _ in TestRun.iteration(
         range(iterations_per_config), f"Reload cache configuration {iterations_per_config} times."
     ):
@@ -331,16 +335,21 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
             core = cache.add_core(core_part)

         with TestRun.step(f"Create test file in mount point of exported object."):
-            test_file_size = Size(1024, Unit.MebiByte)
+            test_file_size = Size(4, Unit.GibiByte)
             test_file = fs_utils.create_random_test_file(test_file_path, test_file_size)

         with TestRun.step("Check md5 sum of test file."):
             test_file_md5_before = test_file.md5sum()

         with TestRun.step("Export file to CAS"):
-            Dd().block_size(test_file_size).input(test_file.full_path).output(core.path).oflag(
-                "direct"
-            ).run()
+            dd = (
+                Dd()
+                .block_size(test_file_size)
+                .input(test_file.full_path)
+                .output(core.path)
+                .oflag("direct")
+            )
+            dd.run()

         with TestRun.step("Get number of dirty data on exported object before interruption."):
             os_utils.sync()
@@ -378,7 +387,7 @@ def test_interrupt_cache_mode_switch_parametrized(cache_mode, stop_percentage):
             if cache.get_cache_mode() != cache_mode:
                 TestRun.LOGGER.error("Cache mode should remain the same.")

-        with TestRun.step("Unmount core and stop cache."):
+        with TestRun.step("Stop cache."):
             cache.stop()

         with TestRun.step("Check md5 sum of test file again."):

View File

@@ -1,13 +1,15 @@
 #
 # Copyright(c) 2019-2021 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #

 import pytest

 from api.cas import casadm
 from api.cas.cache_config import CacheMode
+from api.cas.casadm_parser import get_cas_devices_dict
+from api.cas.core import Core, CoreStatus
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.dd import Dd
@@ -16,152 +18,238 @@ from test_utils.size import Size, Unit
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_core_inactive():
+def test_core_inactive_stats_conf():
     """
-    1. Start cache with 3 cores.
-    2. Stop cache.
-    3. Remove one of core devices.
-    4. Load cache.
-    5. Check if cache has appropriate number of valid and inactive core devices.
+    title: Test for inactive core configuration statistics.
+    description: |
+        Test the cache inactive core configuration statistics after removing one of core devices
+        and loading cache.
+    pass_criteria:
+      - Cache can be loaded with inactive core device.
+      - CAS correctly reports inactive core statistics in cache configuration statistics after
+        loading cache.
     """
-    cache, core_device = prepare()
-    cache_device = cache.cache_device
-
-    stats = cache.get_statistics()
-
-    assert stats.config_stats.core_dev == 3
-    assert stats.config_stats.inactive_core_dev == 0
-
-    TestRun.LOGGER.info("Stopping cache")
-    cache.stop()
-
-    TestRun.LOGGER.info("Removing one of core devices")
-    core_device.remove_partitions()
-    core_device.create_partitions([Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)])
-
-    TestRun.LOGGER.info("Loading cache with missing core device")
-    cache = casadm.start_cache(cache_device, load=True)
-    stats = cache.get_statistics()
-
-    assert stats.config_stats.core_dev == 3
-    assert stats.config_stats.inactive_core_dev == 1
+    core_number = 3
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks["cache"]
+        core_device = TestRun.disks["core"]
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * core_number)
+
+        cache_device = cache_device.partitions[0]
+        core_device_partitions = core_device.partitions
+
+    with TestRun.step("Start cache"):
+        cache = casadm.start_cache(cache_device, force=True)
+
+    with TestRun.step("Add cores to the cache"):
+        for core_device_part in core_device_partitions:
+            cache.add_core(core_dev=core_device_part)
+
+    with TestRun.step("Check if correct number of inactive cores is displayed in cache statistics"):
+        stats = cache.get_statistics()
+        if stats.config_stats.inactive_core_devices != 0:
+            TestRun.fail("Inactive core in statistics after starting cache")
+
+    with TestRun.step("Stop cache"):
+        cache.stop()
+
+    with TestRun.step("Remove last core device"):
+        core_device.remove_partition(part=core_device_partitions[-1])
+
+    with TestRun.step("Load cache with missing core device"):
+        cache = casadm.start_cache(cache_device, load=True)
+
+    with TestRun.step(
+        "Check if correct number of cores and inactive cores is displayed in cache statistics"
+    ):
+        stats = cache.get_statistics()
+        if stats.config_stats.core_dev != core_number:
+            TestRun.fail(
+                "Wrong number of cores after loading the cache\n"
+                f"Actual number of cores: {stats.config_stats.core_dev}\n"
+                f"Expected number of cores: {core_number}"
+            )
+        if stats.config_stats.inactive_core_devices != 1:
+            TestRun.fail(
+                "Wrong number of inactive cores after loading the cache\n"
+                f"Actual number of inactive cores: {stats.config_stats.inactive_core_devices}\n"
+                "Expected number of inactive cores: 1"
+            )


 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_core_inactive_stats():
+def test_core_inactive_stats_usage():
     """
-    1. Start cache with 3 cores.
-    2. Switch cache into WB mode.
-    3. Issue IO to each core.
-    4. Stop cache without flush.
-    5. Remove two core devices.
-    6. Load cache.
-    7. Check if cache stats are equal to sum of valid and inactive cores stats.
-    8. Check if percentage values are calculated properly.
+    title: Test for inactive core usage statistics.
+    description: |
+        Test the cache inactive core usage statistics after removing one of core devices and loading
+        cache.
+    pass_criteria:
+      - Cache can be loaded with inactive core device.
+      - CAS correctly reports inactive core statistics in cache usage statistics after loading
+        cache.
     """
-    cache, core_device = prepare()
-    cache_device = cache.cache_device
-
-    TestRun.LOGGER.info("Switching cache mode to WB")
-    cache.set_cache_mode(cache_mode=CacheMode.WB)
-    cores = cache.get_core_devices()
-
-    TestRun.LOGGER.info("Issue IO to each core")
-    for core in cores:
-        dd = (
-            Dd()
-            .input("/dev/zero")
-            .output(core.path)
-            .count(1000)
-            .block_size(Size(4, Unit.KibiByte))
-        ).run()
-
-    TestRun.LOGGER.info("Stopping cache with dirty data")
-    cores[2].flush_core()
-    cache.stop(no_data_flush=True)
-
-    TestRun.LOGGER.info("Removing two of core devices")
-    core_device.remove_partitions()
-    core_device.create_partitions([Size(1, Unit.GibiByte)])
-
-    TestRun.LOGGER.info("Loading cache with missing core device")
-    cache = casadm.start_cache(cache_device, load=True)
-
-    # Accumulate valid cores stats
-    cores_occupancy = 0
-    cores_clean = 0
-    cores_dirty = 0
-    cores = cache.get_core_devices()
-    for core in cores:
-        core_stats = core.get_statistics()
-        cores_occupancy += core_stats.usage_stats.occupancy.value
-        cores_clean += core_stats.usage_stats.clean.value
-        cores_dirty += core_stats.usage_stats.dirty.value
-
-    cache_stats = cache.get_statistics()
-    # Add inactive core stats
-    cores_occupancy += cache_stats.inactive_usage_stats.inactive_occupancy.value
-    cores_clean += cache_stats.inactive_usage_stats.inactive_clean.value
-    cores_dirty += cache_stats.inactive_usage_stats.inactive_dirty.value
-
-    assert cache_stats.usage_stats.occupancy.value == cores_occupancy
-    assert cache_stats.usage_stats.dirty.value == cores_dirty
-    assert cache_stats.usage_stats.clean.value == cores_clean
-
-    cache_stats_percentage = cache.get_statistics(percentage_val=True)
-    # Calculate expected percentage value of inactive core stats
-    inactive_occupancy_perc = (
-        cache_stats.inactive_usage_stats.inactive_occupancy.value
-        / cache_stats.config_stats.cache_size.value
-    )
-    inactive_clean_perc = (
-        cache_stats.inactive_usage_stats.inactive_clean.value
-        / cache_stats.usage_stats.occupancy.value
-    )
-    inactive_dirty_perc = (
-        cache_stats.inactive_usage_stats.inactive_dirty.value
-        / cache_stats.usage_stats.occupancy.value
-    )
-
-    inactive_occupancy_perc = round(100 * inactive_occupancy_perc, 1)
-    inactive_clean_perc = round(100 * inactive_clean_perc, 1)
-    inactive_dirty_perc = round(100 * inactive_dirty_perc, 1)
-
-    TestRun.LOGGER.info(str(cache_stats_percentage))
-    assert (
-        inactive_occupancy_perc
-        == cache_stats_percentage.inactive_usage_stats.inactive_occupancy
-    )
-    assert (
-        inactive_clean_perc
-        == cache_stats_percentage.inactive_usage_stats.inactive_clean
-    )
-    assert (
-        inactive_dirty_perc
-        == cache_stats_percentage.inactive_usage_stats.inactive_dirty
-    )
+    core_number = 3
+
+    with TestRun.step("Prepare cache and core devices"):
+        cache_device = TestRun.disks["cache"]
+        core_device = TestRun.disks["core"]
+
+        cache_device.create_partitions([Size(500, Unit.MebiByte)])
+        core_device.create_partitions([Size(1, Unit.GibiByte)] * core_number)
+
+        cache_device = cache_device.partitions[0]
+        core_device_partitions = core_device.partitions
+
+    with TestRun.step("Start cache"):
+        cache = casadm.start_cache(cache_device, force=True, cache_mode=CacheMode.WB)
+
+    with TestRun.step("Add cores to the cache"):
+        core_list = [
+            cache.add_core(core_dev=core_device_part) for core_device_part in core_device_partitions
+        ]
+
+    with TestRun.step("Run I/O to each core"):
+        for core in core_list:
+            dd = (
+                Dd()
+                .input("/dev/zero")
+                .output(core.path)
+                .count(1000)
+                .block_size(Size(4, Unit.KibiByte))
+            )
+            dd.run()
+
+    with TestRun.step("Flush last core"):
+        core_list[-1].flush_core()
+
+    with TestRun.step("Stop cache with dirty data"):
+        cache.stop(no_data_flush=True)
+
+    with TestRun.step("Removing two of core devices"):
+        core_device.remove_partition(part=core_device_partitions[0])
+        core_device.remove_partition(part=core_device_partitions[1])
+
+    with TestRun.step("Load cache with missing core devices"):
+        cache = casadm.start_cache(cache_device, load=True)
+
+    with TestRun.step("Check cores statistics"):
+        active_cores_occupancy_stats = 0
+        active_cores_clean_stats = 0
+        active_cores_dirty_stats = 0
+
+        active_cores = cache.get_core_devices()
+        for core in active_cores:
+            core_stats = core.get_statistics()
+            active_cores_occupancy_stats += core_stats.usage_stats.occupancy
+            active_cores_clean_stats += core_stats.usage_stats.clean
+            active_cores_dirty_stats += core_stats.usage_stats.dirty

+        inactive_cores_occupancy_stats = 0
+        inactive_cores_clean_stats = 0
+        inactive_cores_dirty_stats = 0
+
+        inactive_cores = get_inactive_cores(cache_id=cache.cache_id)
+        for core in inactive_cores:
+            core_stats = core.get_statistics()
+            inactive_cores_occupancy_stats += core_stats.usage_stats.occupancy
+            inactive_cores_clean_stats += core_stats.usage_stats.clean
+            inactive_cores_dirty_stats += core_stats.usage_stats.dirty
+
+        cache_stats = cache.get_statistics()
+        cache_usage_stats = cache_stats.usage_stats
+
+        total_cores_occupancy_stats = active_cores_occupancy_stats + inactive_cores_occupancy_stats
+        total_cores_dirty_stats = active_cores_dirty_stats + inactive_cores_dirty_stats
+        total_cores_clean_stats = active_cores_clean_stats + inactive_cores_clean_stats
+
+        if cache_usage_stats.occupancy != total_cores_occupancy_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of occupancy blocks in cache usage stats\n"
+                f"Actual number of occupancy blocks: {cache_usage_stats.occupancy}\n"
+                f"Expected number of occupancy blocks: {total_cores_occupancy_stats}"
+            )
+        if cache_usage_stats.dirty != total_cores_dirty_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of dirty blocks in cache usage stats\n"
+                f"Actual number of dirty blocks: {cache_usage_stats.dirty}\n"
+                f"Expected number of dirty blocks: {total_cores_dirty_stats}"
+            )
+        if cache_usage_stats.clean != total_cores_clean_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of clean blocks in cache usage stats\n"
+                f"Actual number of clean blocks: {cache_usage_stats.clean}\n"
+                f"Expected number of clean blocks: {total_cores_clean_stats}"
+            )
+        if cache_usage_stats.inactive_occupancy != inactive_cores_occupancy_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of occupancy blocks in inactive cache usage stats\n"
+                f"Actual number of occupancy blocks: {cache_usage_stats.inactive_occupancy}\n"
+                f"Expected number of occupancy blocks: {inactive_cores_occupancy_stats}"
+            )
+        if cache_usage_stats.inactive_dirty != inactive_cores_dirty_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of dirty blocks in cache inactive usage stats\n"
+                f"Actual number of dirty blocks: {cache_usage_stats.inactive_dirty}\n"
+                f"Expected number of dirty blocks: {inactive_cores_dirty_stats}"
+            )
+        if cache_usage_stats.inactive_clean != inactive_cores_clean_stats:
+            TestRun.LOGGER.error(
+                "Wrong number of clean blocks in cache inactive usage stats\n"
+                f"Actual number of clean blocks: {cache_usage_stats.inactive_clean}\n"
+                f"Expected number of clean blocks: {inactive_cores_clean_stats}"
+            )
+
+        cache_usage_stats_percentage = cache.get_statistics(percentage_val=True).usage_stats
+
+        # Calculate expected percentage value of inactive core stats
+        inactive_occupancy_perc = round(
+            100 * (cache_usage_stats.inactive_occupancy / cache_stats.config_stats.cache_size), 1
+        )
+        inactive_dirty_perc = round(
+            100 * (cache_usage_stats.inactive_dirty / cache_stats.usage_stats.occupancy), 1
+        )
+        inactive_clean_perc = round(
+            100 * (cache_usage_stats.inactive_clean / cache_stats.usage_stats.occupancy), 1
+        )
+
+        if cache_usage_stats_percentage.inactive_occupancy != inactive_occupancy_perc:
+            TestRun.LOGGER.error(
+                "Wrong occupancy blocks percentage in usage stats\n"
+                f"Actual number of occupancy blocks percentage:"
+                f" {cache_usage_stats_percentage.inactive_occupancy}\n"
+                f"Expected number of occupancy blocks percentage: {inactive_occupancy_perc}"
+            )
+        if cache_usage_stats_percentage.inactive_dirty != inactive_dirty_perc:
+            TestRun.LOGGER.error(
+                "Wrong dirty blocks percentage in usage stats\n "
+                "Actual number of dirty blocks percentage: "
+                f"{cache_usage_stats_percentage.inactive_dirty}\n"
+                f"Expected number of dirty blocks percentage: {inactive_dirty_perc}"
+            )
+        if cache_usage_stats_percentage.inactive_clean != inactive_clean_perc:
+            TestRun.LOGGER.error(
+                "Wrong clean blocks percentage in usage stats\n"
+                "Actual number of clean blocks percentage: "
+                f"{cache_usage_stats.inactive_clean}\n"
+                f"Expected number of clean blocks percentage: {inactive_cores_clean_stats}"
+            )


-def prepare():
-    cache_device = TestRun.disks["cache"]
-    core_device = TestRun.disks["core"]
-
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
-    core_device.create_partitions(
-        [Size(1, Unit.GibiByte), Size(1, Unit.GibiByte), Size(1, Unit.GibiByte)]
-    )
-
-    cache_device = cache_device.partitions[0]
-    core_device_1 = core_device.partitions[0]
-    core_device_2 = core_device.partitions[1]
-    core_device_3 = core_device.partitions[2]
-
-    TestRun.LOGGER.info("Staring cache")
-    cache = casadm.start_cache(cache_device, force=True)
-    TestRun.LOGGER.info("Adding core device")
-    core_1 = cache.add_core(core_dev=core_device_1)
-    core_2 = cache.add_core(core_dev=core_device_2)
-    core_3 = cache.add_core(core_dev=core_device_3)
-    return cache, core_device
+def get_inactive_cores(cache_id: int) -> list:
+    cores_dict = get_cas_devices_dict()["cores"].values()
+
+    def is_active(core):
+        return CoreStatus[core["status"].lower()] == CoreStatus.inactive
+
+    return [
+        Core(core["device_path"], core["cache_id"])
+        for core in cores_dict
+        if is_active(core) and core["cache_id"] == cache_id
+    ]
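The usage check in the rewritten test boils down to a simple accounting invariant: cache-level occupancy, clean and dirty counters must equal the sums over active cores plus the inactive (detached) cores, and the inactive_* counters must equal the inactive sums alone. A rough standalone sketch of that invariant with made-up block counts (the real test pulls these values from get_statistics()):

# Hypothetical per-core block counts; in the test they come from core/cache statistics.
active = [{"occupancy": 1000, "clean": 1000, "dirty": 0}]            # flushed, still attached
inactive = [{"occupancy": 1000, "clean": 0, "dirty": 1000},          # removed with dirty data
            {"occupancy": 1000, "clean": 0, "dirty": 1000}]
cache_totals = {"occupancy": 3000, "clean": 1000, "dirty": 2000,
                "inactive_occupancy": 2000, "inactive_clean": 0, "inactive_dirty": 2000}

for field in ("occupancy", "clean", "dirty"):
    expected = sum(c[field] for c in active) + sum(c[field] for c in inactive)
    assert cache_totals[field] == expected, field

assert cache_totals["inactive_occupancy"] == sum(c["occupancy"] for c in inactive)
assert cache_totals["inactive_clean"] == sum(c["clean"] for c in inactive)
assert cache_totals["inactive_dirty"] == sum(c["dirty"] for c in inactive)
print("cache usage stats consistent with per-core sums")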

View File

@@ -87,7 +87,7 @@ def test_recover_cache_verify_core_device(filesystem):
             TestRun.fail(f"Wrong error message during cache stop")

     with TestRun.step("Plug cache device"):
-        cache_device.plug()
+        cache_device.plug_all()

     with TestRun.step("Load cache"):
         cache = casadm.load_cache(cache_dev)
@@ -172,7 +172,7 @@ def test_recover_cache_verify_exp_obj(filesystem):
             TestRun.fail(f"Wrong error message during cache stop")

     with TestRun.step("Plug cache device"):
-        cache_device.plug()
+        cache_device.plug_all()

     with TestRun.step("Load cache"):
         casadm.load_cache(cache_dev)

View File

@@ -54,7 +54,10 @@ def test_simulation_startup_from_config():
         TestRun.executor.run_expect_success(ctl_init())

     with TestRun.step("Verify if cache is working"):
-        cache = casadm_parser.get_caches()[0]
+        caches = casadm_parser.get_caches()
+        if not caches:
+            TestRun.fail("Cache is not working")
+        cache = caches[0]
         if cache.get_status() is not CacheStatus.running:
             TestRun.fail(
                 f"Cache {cache.cache_id} should be running but is in {cache.get_status()} "
@@ -75,7 +78,10 @@ def test_simulation_startup_from_config():
         TestRun.executor.run_expect_success(f"udevadm trigger")

     with TestRun.step("Verify if cache is working"):
-        cache = casadm_parser.get_caches()[0]
+        caches = casadm_parser.get_caches()
+        if not caches:
+            TestRun.fail("Cache is not working")
+        cache = caches[0]
         if cache.get_status() is not CacheStatus.running:
             TestRun.fail(
                 f"Cache {cache.cache_id} should be running but is in {cache.get_status()} "
@@ -83,7 +89,10 @@ def test_simulation_startup_from_config():
             )

     with TestRun.step("Verify if core is working"):
-        core = cache.get_core_devices()[0]
+        cores = cache.get_core_devices()
+        if not cores:
+            TestRun.fail("Core is not working")
+        core = cores[0]
         if core.get_status() is not CoreStatus.active:
             TestRun.fail(
                 f"Core {core.core_id} should be active but is in {core.get_status()} " f"state."

View File

@@ -215,11 +215,11 @@ def test_cas_startup_lazy():
         power_control.power_cycle()

     with TestRun.step("Verify if all the devices are initialized properly"):
-        core_pool_list = get_cas_devices_dict()["core_pool"]
+        core_pool_list = get_cas_devices_dict()["core_pool"].values()
         caches_list = get_cas_devices_dict()["caches"].values()
         cores_list = get_cas_devices_dict()["cores"].values()

-        core_pool_paths = {c["device"] for c in core_pool_list}
+        core_pool_paths = {c["device_path"] for c in core_pool_list}
         if core_pool_paths != expected_core_pool_paths:
             TestRun.error(
                 f"Expected the following devices in core pool "
@@ -228,7 +228,7 @@ def test_cas_startup_lazy():
         else:
             TestRun.LOGGER.info("Core pool is ok")

-        caches_paths = {c["device"] for c in caches_list}
+        caches_paths = {c["device_path"] for c in caches_list}
         if caches_paths != expected_caches_paths:
             TestRun.error(
                 f"Expected the following devices as caches "
@@ -237,7 +237,7 @@ def test_cas_startup_lazy():
         else:
             TestRun.LOGGER.info("Caches are ok")

-        cores_paths = {c["device"] for c in cores_list}
+        cores_paths = {c["device_path"] for c in cores_list}
         if cores_paths != expected_cores_paths:
             TestRun.error(
                 f"Expected the following devices as cores "
@@ -246,8 +246,7 @@ def test_cas_startup_lazy():
         else:
             TestRun.LOGGER.info("Core devices are ok")

-        cores_paths = {c["device"] for c in cores_list}
-        cores_states = {c["device"]: c["status"] for c in cores_list}
+        cores_states = {c["device_path"]: c["status"] for c in cores_list}
         if cores_states[active_core_path] != "Active":
             TestRun.LOGGER.error(
                 f"Core {active_core_path} should be Active "

View File

@@ -4,6 +4,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #

 import os
+import posixpath
 import time

 import pytest
@@ -59,9 +60,8 @@ def test_trim_start_discard():
     with TestRun.step("Starting cache"):
         cache = casadm.start_cache(cas_part, force=True)

-        dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
-        metadata_size = get_metadata_size_on_device(dmesg_out)
+        metadata_size = get_metadata_size_on_device(cache_id=cache.cache_id)

     with TestRun.step("Stop blktrace and check if discard requests were issued"):
         cache_reqs = blktrace.stop_monitoring()
@@ -236,7 +236,7 @@ def test_trim_device_discard_support(
     with TestRun.step("Create random file."):
         test_file = fs_utils.create_random_test_file(
-            os.path.join(mount_point, "test_file"), core_dev.size * 0.2
+            posixpath.join(mount_point, "test_file"), core_dev.size * 0.2
         )
         occupancy_before = core.get_occupancy()
         TestRun.LOGGER.info(str(core.get_statistics()))
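The switch from os.path.join to posixpath.join matters because the resulting path is used on the Linux device under test, while os.path.join follows the separator conventions of the machine running pytest. A short illustration of the difference (ntpath stands in for os.path on a Windows controller; on Linux both calls give the same result):

import ntpath      # os.path implementation used on Windows
import posixpath   # os.path implementation used on POSIX systems

mount_point = "/mnt/cas"
print(ntpath.join(mount_point, "test_file"))     # /mnt/cas\test_file  (wrong for the DUT)
print(posixpath.join(mount_point, "test_file"))  # /mnt/cas/test_file  (always POSIX)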

View File

@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -353,23 +354,38 @@ def test_ioclass_occupancy_sum_cache():
         cache.purge_cache()

     with TestRun.step("Verify stats before IO"):
-        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
-        for i in io_classes:
-            usage_stats_sum += get_io_class_usage(cache, i.id)
-        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)
-
-        cache_stats = cache.get_statistics().usage_stats
-        cache_stats.free = Size(0)
+        usage_stats_occupancy_sum = Size.zero()
+        usage_stats_clean_sum = Size.zero()
+        usage_stats_dirty_sum = Size.zero()
+
+        all_io_class_usage_stats = []
+        for i in io_classes:
+            io_class_usage_stats = get_io_class_usage(cache, i.id)
+            usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+            usage_stats_clean_sum += io_class_usage_stats.clean
+            usage_stats_dirty_sum += io_class_usage_stats.dirty
+            all_io_class_usage_stats.append(io_class_usage_stats)
+
+        io_class_usage_stats = get_io_class_usage(cache, default_ioclass_id)
+        usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+        usage_stats_clean_sum += io_class_usage_stats.clean
+        usage_stats_dirty_sum += io_class_usage_stats.dirty
+
+        cache_usage_stats = cache.get_statistics().usage_stats
+        cache_usage_stats.free = Size.zero()

         if (
-            cache_stats.occupancy != usage_stats_sum.occupancy
-            or cache_stats.clean != usage_stats_sum.clean
-            or cache_stats.dirty != usage_stats_sum.dirty
+            cache_usage_stats.occupancy != usage_stats_occupancy_sum
+            or cache_usage_stats.clean != usage_stats_clean_sum
+            or cache_usage_stats.dirty != usage_stats_dirty_sum
         ):
             TestRun.LOGGER.error(
                 "Initial cache usage stats doesn't match sum of io classes stats\n"
-                f"cache stats: {cache_stats}, sumed up stats {usage_stats_sum}\n"
-                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
+                f"Cache usage stats: {cache_usage_stats}\n"
+                f"Usage stats occupancy sum: {usage_stats_occupancy_sum}\n"
+                f"Usage stats clean sum: {usage_stats_clean_sum}\n"
+                f"Usage stats dirty sum: {usage_stats_dirty_sum}\n"
+                f"Particular stats {all_io_class_usage_stats}"
             )

     with TestRun.step(f"Trigger IO to each directory"):
@@ -380,23 +396,38 @@ def test_ioclass_occupancy_sum_cache():
         )

     with TestRun.step("Verify stats after IO"):
-        usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
-        for i in io_classes:
-            usage_stats_sum += get_io_class_usage(cache, i.id)
-        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)
-
-        cache_stats = cache.get_statistics().usage_stats
-        cache_stats.free = Size(0)
+        usage_stats_occupancy_sum = Size.zero()
+        usage_stats_clean_sum = Size.zero()
+        usage_stats_dirty_sum = Size.zero()
+
+        all_io_class_usage_stats = []
+        for i in io_classes:
+            io_class_usage_stats = get_io_class_usage(cache, i.id)
+            usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+            usage_stats_clean_sum += io_class_usage_stats.clean
+            usage_stats_dirty_sum += io_class_usage_stats.dirty
+            all_io_class_usage_stats.append(io_class_usage_stats)
+
+        io_class_usage_stats = get_io_class_usage(cache, default_ioclass_id)
+        usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+        usage_stats_clean_sum += io_class_usage_stats.clean
+        usage_stats_dirty_sum += io_class_usage_stats.dirty
+
+        cache_usage_stats = cache.get_statistics().usage_stats
+        cache_usage_stats.free = Size.zero()

         if (
-            cache_stats.occupancy != usage_stats_sum.occupancy
-            or cache_stats.clean != usage_stats_sum.clean
-            or cache_stats.dirty != usage_stats_sum.dirty
+            cache_usage_stats.occupancy != usage_stats_occupancy_sum
+            or cache_usage_stats.clean != usage_stats_clean_sum
+            or cache_usage_stats.dirty != usage_stats_dirty_sum
        ):
             TestRun.LOGGER.error(
-                "Cache usage stats doesn't match sum of ioclasses stats\n"
-                f"cache stats: {cache_stats}, sumed up stats {usage_stats_sum}\n"
-                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
+                "Initial cache usage stats doesn't match sum of io classes stats\n"
+                f"Cache usage stats: {cache_usage_stats}\n"
+                f"Usage stats occupancy sum: {usage_stats_occupancy_sum}\n"
+                f"Usage stats clean sum: {usage_stats_clean_sum}\n"
+                f"Usage stats dirty sum: {usage_stats_dirty_sum}\n"
+                f"particular stats {all_io_class_usage_stats}"
             )

View File

@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -103,18 +104,32 @@ def get_io_class_usage(cache, io_class_id):
 def verify_ioclass_usage_stats(cache, ioclasses_ids):
     cache_size = cache.get_statistics().config_stats.cache_size

-    usage_stats_sum = IoClassUsageStats(Size(0), Size(0), Size(0))
+    usage_stats_occupancy_sum = Size.zero()
+    usage_stats_clean_sum = Size.zero()
+    usage_stats_dirty_sum = Size.zero()
+
+    all_io_class_usage_stats = []
     for i in ioclasses_ids:
-        usage_stats_sum += get_io_class_usage(cache, i)
+        io_class_usage_stats = get_io_class_usage(cache, i)
+        usage_stats_occupancy_sum += io_class_usage_stats.occupancy
+        usage_stats_clean_sum += io_class_usage_stats.clean
+        usage_stats_dirty_sum += io_class_usage_stats.dirty
+        all_io_class_usage_stats.append(io_class_usage_stats)

     cache_usage_stats = cache.get_statistics().usage_stats

-    if usage_stats_sum != cache_usage_stats:
+    if (
+        cache_usage_stats.occupancy != usage_stats_occupancy_sum
+        or cache_usage_stats.clean != usage_stats_clean_sum
+        or cache_usage_stats.dirty != usage_stats_dirty_sum
+    ):
         TestRun.LOGGER.error(
-            "Sum of io classes usage stats doesn't match cache usage stats!"
-            f" Cache stats:\n{cache_usage_stats} io classes sum:\n{usage_stats_sum}"
-            f" Stats of particular io classes:\n"
-            f"{[get_io_class_usage(cache, i) for i in ioclasses_ids]}"
+            "Sum of io classes usage stats doesn't match cache usage stats!\n"
+            f"Cache usage stats: {cache_usage_stats}\n"
+            f"Usage stats occupancy sum: {usage_stats_occupancy_sum}\n"
+            f"Usage stats clean sum: {usage_stats_clean_sum}\n"
+            f"Usage stats dirty sum: {usage_stats_dirty_sum}\n"
+            f"{all_io_class_usage_stats}"
        )

     if cache_usage_stats.occupancy + cache_usage_stats.free > cache_size:
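The rewritten helper accumulates the three usage counters field by field, so the check only needs plain Size addition and comparison rather than whole-object arithmetic on the stats class. A standalone sketch of the same field-wise accumulation, with a hypothetical Usage dataclass standing in for the framework's stats objects:

from dataclasses import dataclass


@dataclass
class Usage:  # stand-in for an io class usage stats object
    occupancy: int
    clean: int
    dirty: int


io_class_stats = [Usage(100, 80, 20), Usage(50, 50, 0), Usage(10, 0, 10)]

occupancy_sum = sum(s.occupancy for s in io_class_stats)
clean_sum = sum(s.clean for s in io_class_stats)
dirty_sum = sum(s.dirty for s in io_class_stats)

cache_usage = Usage(occupancy=160, clean=130, dirty=30)  # hypothetical cache-level stats
assert (cache_usage.occupancy, cache_usage.clean, cache_usage.dirty) == (
    occupancy_sum,
    clean_sum,
    dirty_sum,
)
print("io class sums match cache usage stats")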

View File

@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2020-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -233,6 +234,9 @@ def test_acp_param_flush_max_buffers(cache_line_size, cache_mode):
         fio = get_fio_cmd(core, core_size)
         fio.run_in_background()
         time.sleep(10)
+        cache_stats = cache.get_statistics()
+        if cache_stats.usage_stats.dirty == Size.zero():
+            TestRun.fail("There are no dirty data on cache")

     with TestRun.step("Set cleaning policy to ACP."):
         cache.set_cleaning_policy(CleaningPolicy.acp)

View File

@@ -1,5 +1,6 @@
 #
 # Copyright(c) 2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies Co., Ltd.
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -7,6 +8,7 @@ import pytest

 from api.cas import casadm
 from api.cas.cache_config import CacheLineSize, CacheMode, SeqCutOffPolicy
+from api.cas.dmesg import get_metadata_size_on_device
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.fio.fio import Fio
@@ -178,8 +180,7 @@ def validate_memory_consumption(cache_device, expected_maximum, actual_size):
     """Checks memory consumption"""
     stats = cache_device.get_statistics()
     TestRun.LOGGER.info(f"Statistics: {stats}")

-    stat_footprint = Size(int(cache_device.get_metadata_size()))
+    stat_footprint = get_metadata_size_on_device(cache_device.cache_id)
     TestRun.LOGGER.info(f"Allowed limit for current configuration is: {expected_maximum}")
     is_memory_within_limit("Reported Metadata Memory Footprint", expected_maximum, stat_footprint)
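The validation now takes the metadata footprint from get_metadata_size_on_device (the size CAS itself reports for the cache) instead of recomputing it from cache_device.get_metadata_size(), and checks it against the allowed maximum; is_memory_within_limit performs the real comparison in the framework. A rough sketch of that kind of limit check in plain Python, with made-up byte values and a hypothetical check_within_limit helper:

def check_within_limit(name: str, limit_bytes: int, actual_bytes: int) -> bool:
    """Log whether a reported memory figure stays within its allowed maximum."""
    used_percent = 100.0 * actual_bytes / limit_bytes
    ok = actual_bytes <= limit_bytes
    status = "OK" if ok else "EXCEEDED"
    print(f"{name}: {actual_bytes} B of {limit_bytes} B allowed ({used_percent:.1f}%) -> {status}")
    return ok


# Hypothetical numbers for illustration only.
check_within_limit("Reported Metadata Memory Footprint",
                   limit_bytes=512 * 1024**2, actual_bytes=384 * 1024**2)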

View File

@@ -70,6 +70,7 @@ repo_files_perms_exceptions = {
     "ocf/tests/unit/framework/prepare_sources_for_testing.py": 755,
     "ocf/tests/unit/framework/run_unit_tests.py": 755,
     "ocf/tests/unit/tests/add_new_test_file.py": 755,
+    "test/functional/test-framework/test_tools/checksec.sh": 744,
 }

 build_files_perms_exceptions = {