Merge pull request #309 from Deixx/ioclass-stats
IO class stats - refactor and test for statistics sections
Commit c7f3e52411
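Summary of the helper refactor in the first hunks below: the old `check_msg()` (which searched stdout and stderr together) is split into `check_stderr_msg()` and `check_stdout_msg()`, both delegating to a private `__check_string_msg()` that now returns a boolean instead of only logging. A minimal usage sketch based on the calls visible in this diff; it assumes the repository's test framework is importable, and `cache_part` stands in for a partition prepared earlier in a test:

```python
# Sketch only - framework setup and device preparation omitted.
from api.cas import cli, cli_messages
from core.test_run import TestRun

# cache_part: a cache partition with existing metadata, prepared earlier in the test.
output = TestRun.executor.run_expect_fail(
    cli.start_cmd(cache_dev=str(cache_part.system_path), force=False, load=False))

# New helper: match the expected error patterns against stderr only and get a bool back.
if not cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata):
    TestRun.LOGGER.error("Unexpected error message on cache start")
```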
@@ -33,6 +33,14 @@ stop_cache_incomplete = [
     r"Cache is in incomplete state - at least one core is inactive"
 ]
 
+get_stats_ioclass_id_not_configured = [
+    r"IO class \d+ is not configured\."
+]
+
+get_stats_ioclass_id_out_of_range = [
+    r"Invalid IO class id, must be in the range 0-32\."
+]
+
 remove_multilevel_core = [
     r"Error while removing core device \d+ from cache instance \d+",
     r"Device opens or mount are pending to this cache"
@@ -52,9 +60,19 @@ stop_cache_mounted_core = [
 ]
 
 
-def check_msg(output: Output, expected_messages):
-    result = '\n'.join([output.stdout, output.stderr])
+def check_stderr_msg(output: Output, expected_messages):
+    return __check_string_msg(output.stderr, expected_messages)
+
+
+def check_stdout_msg(output: Output, expected_messages):
+    return __check_string_msg(output.stdout, expected_messages)
+
+
+def __check_string_msg(text: str, expected_messages):
+    msg_ok = True
     for msg in expected_messages:
-        matches = re.search(msg, result)
+        matches = re.search(msg, text)
         if not matches:
-            TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {result}.")
+            TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.")
+            msg_ok = False
+    return msg_ok
@@ -2,14 +2,13 @@
 # Copyright(c) 2020 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
-from time import sleep
 
 import pytest
 
 from api.cas import casadm, casadm_parser, cli, cli_messages
 from api.cas.cache_config import CacheMode, CleaningPolicy, CacheModeTrait
-from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from core.test_run import TestRun
+from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.dd import Dd
 from test_tools.disk_utils import Filesystem
@@ -69,7 +68,7 @@ def test_stop_no_flush_load_cache(cache_mode, filesystem):
         output = TestRun.executor.run_expect_fail(cli.start_cmd(
             cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
             force=False, load=False))
-        cli_messages.check_msg(output, cli_messages.start_cache_with_existing_metadata)
+        cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)
 
     with TestRun.step("Load cache."):
         cache = casadm.load_cache(cache.cache_device)
@@ -146,7 +145,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
         output = TestRun.executor.run_expect_fail(cli.start_cmd(
             cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
             force=False, load=False))
-        cli_messages.check_msg(output, cli_messages.start_cache_with_existing_metadata)
+        cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)
 
     with TestRun.step("Load cache."):
         cache = casadm.load_cache(cache.cache_device)
@@ -7,8 +7,8 @@ import pytest
 
 from api.cas import casadm, casadm_parser, cli, cli_messages
 from api.cas.cache_config import CacheMode
-from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from core.test_run import TestRun
+from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools import fs_utils
 from test_tools.disk_utils import Filesystem
 from test_utils.size import Size, Unit
@@ -109,11 +109,11 @@ def test_stop_cache_with_mounted_partition(cache_mode):
     with TestRun.step("Try to remove core from cache."):
         output = TestRun.executor.run_expect_fail(cli.remove_core_cmd(cache_id=str(cache.cache_id),
                                                                       core_id=str(core.core_id)))
-        cli_messages.check_msg(output, cli_messages.remove_mounted_core)
+        cli_messages.check_stderr_msg(output, cli_messages.remove_mounted_core)
 
     with TestRun.step("Try to stop CAS."):
         output = TestRun.executor.run_expect_fail(cli.stop_cmd(cache_id=str(cache.cache_id)))
-        cli_messages.check_msg(output, cli_messages.stop_cache_mounted_core)
+        cli_messages.check_stderr_msg(output, cli_messages.stop_cache_mounted_core)
 
     with TestRun.step("Unmount core device."):
         core.unmount()
@@ -158,12 +158,12 @@ def test_add_cached_core(cache_mode):
         output = TestRun.executor.run_expect_fail(
             cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.system_path),
                              core_id=str(core.core_id)))
-        cli_messages.check_msg(output, cli_messages.add_cached_core)
+        cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
 
     with TestRun.step("Try adding the same core device to the same cache for the second time."):
         output = TestRun.executor.run_expect_fail(
             cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.system_path)))
-        cli_messages.check_msg(output, cli_messages.add_cached_core)
+        cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
 
     with TestRun.step("Stop caches."):
         casadm.stop_all_caches()
@@ -6,8 +6,8 @@
 import pytest
 
 from api.cas import casadm, cli, cli_messages
-from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from core.test_run import TestRun
+from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_utils.size import Size, Unit
 
 
@@ -49,7 +49,7 @@ def test_remove_multilevel_core():
         output = TestRun.executor.run_expect_fail(cli.remove_core_cmd(cache_id=str(cache1.cache_id),
                                                                       core_id=str(core1.core_id),
                                                                       force=True))
-        cli_messages.check_msg(output, cli_messages.remove_multilevel_core)
+        cli_messages.check_stderr_msg(output, cli_messages.remove_multilevel_core)
 
     with TestRun.step("Stop cache."):
         casadm.stop_all_caches()
@@ -1,16 +1,18 @@
 #
-# Copyright(c) 2019 Intel Corporation
+# Copyright(c) 2019-2020 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
 
+import time
 from datetime import timedelta
 from random import shuffle
 
 import pytest
-import time
 
-from api.cas import casadm, init_config, cli, cli_messages
+from api.cas import casadm, cli, cli_messages
 from api.cas.cache_config import CacheStatus, SeqCutOffPolicy
 from api.cas.core import CoreStatus, CacheMode, CleaningPolicy, FlushParametersAlru, File
+from api.cas.init_config import InitConfig
 from api.cas.statistics import CacheStats
 from core.test_run import TestRun
 from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
@@ -47,7 +49,7 @@ def test_attach_core_to_incomplete_cache_volume():
         cache = casadm.start_cache(cache_dev, force=True)
         core = cache.add_core(core_dev)
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Stop cache."):
         cache.stop()
     with TestRun.step("Load cache."):
@@ -106,7 +108,7 @@ def test_flush_inactive_devices():
         first_core = cache.add_core(first_core_dev)
         second_core = cache.add_core(second_core_dev)
     with TestRun.step("Create init config file using running CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Run random writes to CAS device."):
         run_fio([first_core.system_path, second_core.system_path])
     with TestRun.step("Stop cache without flushing dirty data."):
@@ -180,7 +182,7 @@ def test_list_cache_and_cache_volumes():
         if cache_status != CacheStatus.running:
             TestRun.fail(f"Cache should be in running state. Actual state: {cache_status}")
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Stop cache."):
         cache.stop()
     with TestRun.step("Unplug core device."):
@@ -226,14 +228,14 @@ def test_load_cache_with_inactive_core():
         cache = casadm.start_cache(cache_dev, force=True)
         core = cache.add_core(core_dev)
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Stop cache."):
         cache.stop()
     with TestRun.step("Unplug core device."):
         plug_device.unplug()
     with TestRun.step("Load cache."):
         output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
-        cli_messages.check_msg(output, cli_messages.load_inactive_core_missing)
+        cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)
     with TestRun.step("Plug missing device and stop cache."):
         plug_device.plug()
         core.wait_for_status_change(CoreStatus.active)
@@ -267,7 +269,7 @@ def test_preserve_data_for_inactive_device():
         cache.set_cleaning_policy(CleaningPolicy.nop)
         core = cache.add_core(core_dev)
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Create filesystem on CAS device and mount it."):
         core.create_filesystem(Filesystem.ext3)
         core.mount(mount_dir)
@@ -361,7 +363,7 @@ def test_print_statistics_inactive(cache_mode):
         first_core = cache.add_core(first_core_dev)
         second_core = cache.add_core(second_core_dev)
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Run IO."):
         run_fio([first_core.system_path, second_core.system_path])
     with TestRun.step("Print statistics and check if there is no inactive usage section."):
@@ -448,7 +450,7 @@ def test_remove_detached_cores():
         for d in core_devs:
             cores.append(cache.add_core(d))
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Run random writes to all CAS devices."):
         run_fio([c.system_path for c in cores])
     with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
@@ -507,7 +509,7 @@ def test_remove_inactive_devices():
         for d in core_devs:
             cores.append(cache.add_core(d))
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Run random writes to all CAS devices."):
         run_fio([c.system_path for c in cores])
     with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
@@ -549,7 +551,7 @@ def test_remove_inactive_devices():
                                 "command returned an error.")
             TestRun.LOGGER.info("Remove operation without force option is blocked for "
                                 "dirty CAS device as expected.")
-            cli_messages.check_msg(e.output, cli_messages.remove_inactive_core)
+            cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_core)
             output = casadm.list_caches().stdout
             if core.system_path not in output:
                 TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
@@ -582,7 +584,7 @@ def test_stop_cache_with_inactive_devices():
         cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB, force=True)
         core = cache.add_core(core_dev)
     with TestRun.step("Create init config file using current CAS configuration."):
-        init_config.create_init_config_from_running_configuration()
+        InitConfig.create_init_config_from_running_configuration()
     with TestRun.step("Run random writes and verify that CAS device contains dirty data."):
         run_fio([core.system_path])
         if core.get_dirty_blocks() == Size.zero():
@@ -629,7 +631,7 @@ def try_stop_incomplete_cache(cache):
         cache.stop()
     except CmdException as e:
         TestRun.LOGGER.info("Stopping cache without 'no data flush' option is blocked as expected.")
-        cli_messages.check_msg(e.output, cli_messages.stop_cache_incomplete)
+        cli_messages.check_stderr_msg(e.output, cli_messages.stop_cache_incomplete)
 
 
 def check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_zero):
@@ -1,111 +1,157 @@
 #
-# Copyright(c) 2019 Intel Corporation
+# Copyright(c) 2019-2020 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
+import random
 
 import pytest
 
 from api.cas import casadm
-from api.cas import casadm_parser
 from api.cas import ioclass_config
-from api.cas.cache_config import CleaningPolicy
+from api.cas.cache_config import CleaningPolicy, CacheMode, CacheLineSize
 from api.cas.casadm import StatsFilter
+from api.cas.cli_messages import (
+    check_stderr_msg,
+    get_stats_ioclass_id_not_configured,
+    get_stats_ioclass_id_out_of_range
+)
+from api.cas.statistics import (
+    config_stats_ioclass, usage_stats, request_stats, block_stats_core, block_stats_cache
+)
 from core.test_run import TestRun
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from test_tools.disk_utils import Filesystem
 from test_utils.filesystem.file import File
 from test_utils.os_utils import sync, Udev
+from test_utils.output import CmdException
 from test_utils.size import Size, Unit
 
-ioclass_config_path = "/tmp/opencas_ioclass.conf"
+IoClass = ioclass_config.IoClass
+
 mountpoint = "/tmp/cas1-1"
 cache_id = 1
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_ioclass_stats_set():
-    """Try to retrieve stats for all set ioclasses"""
-    prepare()
-    min_ioclass_id = 1
-    max_ioclass_id = 11
-
-    ioclass_config.create_ioclass_config(
-        add_default_rule=True, ioclass_config_path=ioclass_config_path
-    )
-
-    TestRun.LOGGER.info("Preparing ioclass config file")
-    for i in range(min_ioclass_id, max_ioclass_id):
-        ioclass_config.add_ioclass(
-            ioclass_id=(i + 10),
-            eviction_priority=22,
-            allocation=True,
-            rule=f"file_size:le:{4096*i}&done",
-            ioclass_config_path=ioclass_config_path,
-        )
-    casadm.load_io_classes(cache_id, file=ioclass_config_path)
-
-    TestRun.LOGGER.info("Preparing ioclass config file")
-    for i in range(32):
-        if i != 0 or i not in range(min_ioclass_id, max_ioclass_id):
-            with pytest.raises(Exception):
-                assert casadm_parser.get_statistics(
-                    cache_id=cache_id, io_class_id=True, filter=[StatsFilter.conf]
-                )
+@pytest.mark.parametrize("random_cls", [random.choice(list(CacheLineSize))])
+def test_ioclass_stats_basic(random_cls):
+    """
+        title: Basic test for retrieving IO class statistics.
+        description: |
+          Check if statistics are retrieved only for configured IO classes.
+        pass_criteria:
+          - Statistics are retrieved for configured IO classes.
+          - Error is displayed when retrieving statistics for non-configured IO class.
+          - Error is displayed when retrieving statistics for out of range IO class id.
+    """
+
+    min_ioclass_id = 11
+    max_ioclass_id = 21
+
+    with TestRun.step("Test prepare"):
+        prepare(random_cls)
+
+    with TestRun.step("Prepare IO class config file"):
+        ioclass_list = []
+        for class_id in range(min_ioclass_id, max_ioclass_id):
+            ioclass_list.append(IoClass(
+                class_id=class_id,
+                rule=f"file_size:le:{4096 * class_id}&done",
+                priority=22
+            ))
+        IoClass.save_list_to_config_file(ioclass_list, True)
+
+    with TestRun.step("Load IO class config file"):
+        casadm.load_io_classes(cache_id, file=ioclass_config.default_config_file_path)
+
+    with TestRun.step("Try retrieving IO class stats for all allowed id values "
+                      "and one out of range id"):
+        for class_id in range(ioclass_config.MAX_IO_CLASS_ID + 2):
+            out_of_range = " out of range" if class_id > ioclass_config.MAX_IO_CLASS_ID else ""
+            with TestRun.group(f"Checking{out_of_range} IO class id {class_id}..."):
+                expected = class_id == 0 or class_id in range(min_ioclass_id, max_ioclass_id)
+                try:
+                    casadm.print_statistics(
+                        cache_id=cache_id,
+                        io_class_id=class_id,
+                        per_io_class=True)
+                    if not expected:
+                        TestRun.LOGGER.error(
+                            f"Stats retrieved for not configured IO class {class_id}")
+                except CmdException as e:
+                    if expected:
+                        TestRun.LOGGER.error(f"Stats not retrieved for IO class id: {class_id}")
+                    elif class_id <= ioclass_config.MAX_IO_CLASS_ID:
+                        if not check_stderr_msg(e.output, get_stats_ioclass_id_not_configured):
+                            TestRun.LOGGER.error(
+                                f"Wrong message for unused IO class id: {class_id}")
+                    elif not check_stderr_msg(e.output, get_stats_ioclass_id_out_of_range):
+                        TestRun.LOGGER.error(
+                            f"Wrong message for out of range IO class id: {class_id}")
 
 
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
-def test_ioclass_stats_sum():
-    """Check if stats for all set ioclasses sum up to cache stats"""
-    cache, core = prepare()
+@pytest.mark.parametrize("random_cls", [random.choice(list(CacheLineSize))])
+def test_ioclass_stats_sum(random_cls):
+    """
+        title: Test for sum of IO class statistics.
+        description: |
+          Check if statistics for configured IO classes sum up to cache/core statistics.
+        pass_criteria:
+          - Per class cache IO class statistics sum up to cache statistics.
+          - Per class core IO class statistics sum up to core statistics.
+    """
+
     min_ioclass_id = 1
     max_ioclass_id = 11
-    file_size_base = Unit.KibiByte.value * 4
-
-    TestRun.LOGGER.info("Preparing ioclass config file")
-    ioclass_config.create_ioclass_config(
-        add_default_rule=True, ioclass_config_path=ioclass_config_path
-    )
-    for i in range(min_ioclass_id, max_ioclass_id):
-        ioclass_config.add_ioclass(
-            ioclass_id=i,
-            eviction_priority=22,
-            allocation=True,
-            rule=f"file_size:le:{file_size_base*i}&done",
-            ioclass_config_path=ioclass_config_path,
-        )
-    cache.load_io_class(ioclass_config_path)
-
-    TestRun.LOGGER.info("Generating files with particular sizes")
-    files_list = []
-    for i in range(min_ioclass_id, max_ioclass_id):
-        path = f"/tmp/test_file_{file_size_base*i}"
-        File.create_file(path)
-        f = File(path)
-        f.padding(Size(file_size_base * i, Unit.Byte))
-        files_list.append(f)
-
-    core.create_filesystem(Filesystem.ext4)
-
-    cache.reset_counters()
-
-    # Name of stats, which should not be compared
-    not_compare_stats = ["clean", "occupancy"]
-    ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
-    # Append default ioclass id
-    ioclass_id_list.append(0)
-
-    TestRun.LOGGER.info("Copying files to mounted core and stats check")
-    for f in files_list:
-        # To prevent stats pollution by filesystem requests, umount core device
-        # after file is copied
-        core.mount(mountpoint)
-        f.copy(mountpoint)
-        sync()
-        core.unmount()
-        sync()
-
-    cache_stats = cache.get_statistics_flat(
-        stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
-    )
+    file_size_base = Unit.Blocks4096.value
+
+    with TestRun.step("Test prepare"):
+        caches, cores = prepare(random_cls)
+        cache, core = caches[0], cores[0]
+
+    with TestRun.step("Prepare IO class config file"):
+        ioclass_list = []
+        for class_id in range(min_ioclass_id, max_ioclass_id):
+            ioclass_list.append(IoClass(
+                class_id=class_id,
+                rule=f"file_size:le:{file_size_base * class_id}&done",
+                priority=22
+            ))
+        IoClass.save_list_to_config_file(ioclass_list, True)
+
+    with TestRun.step("Load IO class config file"):
+        cache.load_io_class(ioclass_config.default_config_file_path)
+
+    with TestRun.step("Generate files with particular sizes in temporary folder"):
+        files_list = []
+        for class_id in range(min_ioclass_id, max_ioclass_id):
+            path = f"/tmp/test_file_{file_size_base * class_id}"
+            File.create_file(path)
+            f = File(path)
+            f.padding(Size(file_size_base * class_id, Unit.Byte))
+            files_list.append(f)
+
+    with TestRun.step("Copy files to mounted core"):
+        core.mount(mountpoint)
+        for f in files_list:
+            TestRun.LOGGER.info(f"Copying file {f.name} to mounted core")
+            f.copy(mountpoint)
+            sync()
+        # To prevent stats pollution by filesystem requests, umount core device
+        # after files are copied
+        core.unmount()
+        sync()
+
+    with TestRun.step("Check if per class cache IO class statistics sum up to cache statistics"):
+        # Name of stats, which should not be compared
+        not_compare_stats = ["clean", "occupancy"]
+        ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
+        # Append default IO class id
+        ioclass_id_list.append(0)
+
+        cache_stats = cache.get_statistics_flat(
+            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
+        )
@@ -127,44 +173,158 @@ def test_ioclass_stats_sum():
                 if isinstance(cache_stats[stat_name], Size)
                 else cache_stats[stat_name]
             )
-        assert stat_val == 0, f"{stat_name} diverged!\n"
+            if stat_val != 0:
+                TestRun.LOGGER.error(f"{stat_name} diverged for cache!\n")
 
-    # Test cleanup
-    for f in files_list:
-        f.remove()
+    with TestRun.step("Check if per class core IO class statistics sum up to core statistics"):
+        core_stats = core.get_statistics_flat(
+            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
+        )
+        for ioclass_id in ioclass_id_list:
+            ioclass_stats = core.get_statistics_flat(
+                stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk],
+                io_class_id=ioclass_id,
+            )
+            for stat_name in core_stats:
+                if stat_name in not_compare_stats:
+                    continue
+                core_stats[stat_name] -= ioclass_stats[stat_name]
+
+        for stat_name in core_stats:
+            if stat_name in not_compare_stats:
+                continue
+            stat_val = (
+                core_stats[stat_name].get_value()
+                if isinstance(core_stats[stat_name], Size)
+                else core_stats[stat_name]
+            )
+            if stat_val != 0:
+                TestRun.LOGGER.error(f"{stat_name} diverged for core!\n")
+
+    with TestRun.step("Test cleanup"):
+        for f in files_list:
+            f.remove()
 
 
-def flush_cache(cache_id):
-    casadm.flush(cache_id=cache_id)
-    sync()
-    casadm.reset_counters(cache_id=cache_id)
-    stats = casadm_parser.get_statistics(cache_id=cache_id, filter=[StatsFilter.blk])
-    for key, value in stats.items():
-        assert value.get_value(Unit.Blocks4096) == 0
+@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
+@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
+@pytest.mark.parametrize("stat_filter", [StatsFilter.req, StatsFilter.usage, StatsFilter.conf,
+                                         StatsFilter.blk])
+@pytest.mark.parametrize("per_core", [True, False])
+@pytest.mark.parametrize("random_cls", [random.choice(list(CacheLineSize))])
+def test_ioclass_stats_sections(stat_filter, per_core, random_cls):
+    """
+        title: Test for cache/core IO class statistics sections.
+        description: |
+          Check if IO class statistics sections for cache/core print all required entries and
+          no additional ones.
+        pass_criteria:
+          - Section statistics contain all required entries.
+          - Section statistics do not contain any additional entries.
+    """
+    with TestRun.step("Test prepare"):
+        caches, cores = prepare(random_cls, cache_count=4, cores_per_cache=3)
+
+    with TestRun.step(f"Validate displayed {stat_filter.name} statistics for default IO class for "
+                      f"{'cores' if per_core else 'caches'}"):
+        for cache in caches:
+            with TestRun.group(f"Cache {cache.cache_id}"):
+                for core in cache.get_core_devices():
+                    if per_core:
+                        TestRun.LOGGER.info(f"Core {core.cache_id}-{core.core_id}")
+                    statistics = (
+                        core.get_statistics_flat(
+                            io_class_id=0, stat_filter=[stat_filter]) if per_core
+                        else cache.get_statistics_flat(
+                            io_class_id=0, stat_filter=[stat_filter]))
+                    validate_statistics(statistics, stat_filter, per_core)
+                    if not per_core:
+                        break
+
+    with TestRun.step("Load random IO class configuration for each cache"):
+        for cache in caches:
+            random_list = IoClass.generate_random_ioclass_list(ioclass_config.MAX_IO_CLASS_ID + 1)
+            IoClass.save_list_to_config_file(random_list, add_default_rule=False)
+            cache.load_io_class(ioclass_config.default_config_file_path)
+
+    with TestRun.step(f"Validate displayed {stat_filter.name} statistics for every configured IO "
+                      f"class for all {'cores' if per_core else 'caches'}"):
+        for cache in caches:
+            with TestRun.group(f"Cache {cache.cache_id}"):
+                for core in cache.get_core_devices():
+                    core_info = f"Core {core.cache_id}-{core.core_id} ," if per_core else ""
+                    for class_id in range(ioclass_config.MAX_IO_CLASS_ID + 1):
+                        with TestRun.group(core_info + f"IO class id {class_id}"):
+                            statistics = (
+                                core.get_statistics_flat(class_id, [stat_filter]) if per_core
+                                else cache.get_statistics_flat(class_id, [stat_filter]))
+                            validate_statistics(statistics, stat_filter, per_core)
+                            if stat_filter == StatsFilter.conf:  # no percentage statistics for conf
+                                continue
+                            statistics_percents = (
+                                core.get_statistics_flat(
+                                    class_id, [stat_filter], percentage_val=True) if per_core
+                                else cache.get_statistics_flat(
+                                    class_id, [stat_filter], percentage_val=True))
+                            validate_statistics(statistics_percents, stat_filter, per_core)
+                    if not per_core:
+                        break
 
 
-def prepare():
+def get_checked_statistics(stat_filter: StatsFilter, per_core: bool):
+    if stat_filter == StatsFilter.conf:
+        return config_stats_ioclass
+    if stat_filter == StatsFilter.usage:
+        return usage_stats
+    if stat_filter == StatsFilter.blk:
+        return block_stats_core if per_core else block_stats_cache
+    if stat_filter == StatsFilter.req:
+        return request_stats
+
+
+def validate_statistics(statistics: dict, stat_filter: StatsFilter, per_core: bool):
+    for stat_name in get_checked_statistics(stat_filter, per_core):
+        if stat_name not in statistics.keys():
+            TestRun.LOGGER.error(f"Value for {stat_name} not displayed in output")
+        else:
+            del statistics[stat_name]
+    if len(statistics.keys()):
+        TestRun.LOGGER.error(f"Additional statistics found: {', '.join(statistics.keys())}")
+
+
+def prepare(random_cls, cache_count=1, cores_per_cache=1):
+    cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
     ioclass_config.remove_ioclass_config()
     cache_device = TestRun.disks['cache']
     core_device = TestRun.disks['core']
 
-    cache_device.create_partitions([Size(500, Unit.MebiByte)])
-    core_device.create_partitions([Size(2, Unit.GibiByte)])
+    cache_device.create_partitions([Size(500, Unit.MebiByte)] * cache_count)
+    core_device.create_partitions([Size(2, Unit.GibiByte)] * cache_count * cores_per_cache)
 
-    cache_device = cache_device.partitions[0]
-    core_device_1 = core_device.partitions[0]
+    cache_devices = cache_device.partitions
+    core_devices = core_device.partitions
+    for core_device in core_devices:
+        core_device.create_filesystem(Filesystem.ext4)
 
     Udev.disable()
+    caches, cores = [], []
+    for i, cache_device in enumerate(cache_devices):
+        TestRun.LOGGER.info(f"Starting cache on {cache_device.system_path}")
+        cache = casadm.start_cache(cache_device,
+                                   force=True,
+                                   cache_mode=cache_modes[i],
+                                   cache_line_size=random_cls)
+        caches.append(cache)
+        TestRun.LOGGER.info("Setting cleaning policy to NOP")
+        cache.set_cleaning_policy(CleaningPolicy.nop)
+        for core_device in core_devices[i * cores_per_cache:(i + 1) * cores_per_cache]:
+            TestRun.LOGGER.info(
+                f"Adding core device {core_device.system_path} to cache {cache.cache_id}")
+            core = cache.add_core(core_dev=core_device)
+            core.reset_counters()
+            cores.append(core)
+    TestRun.executor.run_expect_success(f"mkdir -p {mountpoint}")
 
-    TestRun.LOGGER.info(f"Staring cache")
-    cache = casadm.start_cache(cache_device, force=True)
-    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
-    cache.set_cleaning_policy(CleaningPolicy.nop)
-    TestRun.LOGGER.info(f"Adding core devices")
-    core = cache.add_core(core_dev=core_device_1)
-
-    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
-    if output.exit_code != 0:
-        raise Exception(f"Failed to create mountpoint")
-
-    return cache, core
+    return caches, cores
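For context on the sum check introduced in `test_ioclass_stats_sum`: per-IO-class statistics (including the default class 0) are subtracted from the aggregate cache/core statistics, and every counter outside the skipped `clean`/`occupancy` entries is expected to end up at zero. A small self-contained illustration of that bookkeeping, using plain dicts in place of the framework's statistics objects:

```python
# Illustration only - plain numbers stand in for the framework's statistics objects.
aggregate = {"reads": 100, "writes": 40, "occupancy": 7}
per_ioclass = [
    {"reads": 60, "writes": 25, "occupancy": 4},   # configured IO class
    {"reads": 40, "writes": 15, "occupancy": 2},   # default IO class 0
]
skipped = {"clean", "occupancy"}  # stats excluded from the comparison

# Subtract every per-class counter from the aggregate.
for ioclass_stats in per_ioclass:
    for name in aggregate:
        if name not in skipped:
            aggregate[name] -= ioclass_stats[name]

# Any non-zero remainder means the per-class stats do not sum up to the aggregate.
diverged = [name for name, value in aggregate.items()
            if name not in skipped and value != 0]
print(diverged or "per-IO-class statistics sum up to the aggregate")
```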