Merge pull request #773 from katlapinka/test-fixes

Scope: minor test fixes
Katarzyna Łapińska 2021-05-10 13:56:13 +02:00 committed by GitHub
commit c9938d1e4c
10 changed files with 67 additions and 39 deletions


@@ -12,6 +12,7 @@ from typing import List
from api.cas import casadm
from api.cas.cache_config import *
from api.cas.casadm_params import *
from api.cas.ioclass_config import IoClass
from api.cas.version import CasVersion
from storage_devices.device import Device
from test_utils.size import parse_unit
@@ -281,11 +282,6 @@ def get_io_class_list(cache_id: int):
casadm_output.pop(0) # Remove header
for line in casadm_output:
values = line.split(",")
ioclass = {
"id": int(values[0]),
"rule": values[1],
"eviction_priority": int(values[2]),
"allocation": float(values[3]),
}
ioclass = IoClass(int(values[0]), values[1], int(values[2]), values[3])
ret.append(ioclass)
return ret
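The hunk above replaces the ad-hoc dict with the IoClass type imported at the top of this file. A minimal standalone sketch of that parsing step, assuming the constructor order (id, rule, eviction priority, allocation) implied by the new line; the dataclass below is only a stand-in for api.cas.ioclass_config.IoClass:

    # Stand-in for api.cas.ioclass_config.IoClass (constructor order assumed from the diff).
    from dataclasses import dataclass

    @dataclass
    class IoClass:
        id: int
        rule: str
        eviction_priority: int
        allocation: str

    def parse_io_class_line(line: str) -> IoClass:
        # One csv-formatted row of the casadm IO class listing: id, rule, priority, allocation.
        values = line.split(",")
        return IoClass(int(values[0]), values[1], int(values[2]), values[3])

    print(parse_io_class_line("1,metadata&done,1,1.00"))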


@@ -13,7 +13,7 @@ config_stats_core = [
"core id", "core device", "exported object", "core size", "dirty for", "status",
"seq cutoff threshold", "seq cutoff policy"
]
config_stats_ioclass = ["io class id", "io class name", "eviction priority", "selective allocation"]
config_stats_ioclass = ["io class id", "io class name", "eviction priority", "max size"]
usage_stats = ["occupancy", "free", "clean", "dirty"]
usage_stats_ioclass = ["occupancy", "clean", "dirty"]
inactive_usage_stats = ["inactive occupancy", "inactive clean", "inactive dirty"]


@@ -18,10 +18,10 @@ from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
random_thresholds = random.sample(range(1028, 1024 ** 2, 4), 3)
random_stream_numbers = random.sample(range(2, 256), 3)
random_stream_numbers = random.sample(range(2, 128), 3)
@pytest.mark.parametrizex("streams_number", [1, 256] + random_stream_numbers)
@pytest.mark.parametrizex("streams_number", [1, 128] + random_stream_numbers)
@pytest.mark.parametrizex("threshold",
[Size(1, Unit.MebiByte), Size(1, Unit.GibiByte)]
+ [Size(x, Unit.KibiByte) for x in random_thresholds])
@@ -50,6 +50,7 @@ def test_multistream_seq_cutoff_functional(threshold, streams_number):
f"and reset statistics counters."):
core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
core.set_seq_cutoff_threshold(threshold)
core.set_seq_cutoff_promotion_count(1)
core.reset_counters()
with TestRun.step(f"Run {streams_number} I/O streams with amount of sequential writes equal to "


@@ -18,7 +18,7 @@ from test_utils.size import Size, Unit
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.require_disk("core", DiskTypeSet([DiskType.hdd, DiskType.sata]))
def test_udev_core_partition():
"""
title: |


@@ -47,7 +47,7 @@ def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
cache_block_size = disk_utils.get_block_size(cache_disk)
with TestRun.step("Start cache on device supporting trim and add core."):
cache = casadm.start_cache(cache_dev, cache_mode, cache_line_size)
cache = casadm.start_cache(cache_dev, cache_mode, cache_line_size, force=True)
cache.set_cleaning_policy(cleaning)
Udev.disable()
core = cache.add_core(core_dev)


@@ -145,7 +145,12 @@ def test_ioclass_directory_file_operations(filesystem):
Udev.disable()
with TestRun.step("Create and load IO class config file."):
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
ioclass_id = random.randint(2, ioclass_config.MAX_IO_CLASS_ID)
ioclass_config.add_ioclass(ioclass_id=1,
eviction_priority=1,
allocation="1.00",
rule="metadata",
ioclass_config_path=ioclass_config_path)
# directory IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
@@ -441,4 +446,4 @@ def check_occupancy(expected: Size, actual: Size):
def ioclass_is_enabled(cache, ioclass_id: int):
return [i["allocation"] for i in cache.list_io_classes() if i["id"] == ioclass_id].pop() > 0.00
return [float(i.allocation) for i in cache.list_io_classes() if i.id == ioclass_id].pop() > 0.00


@@ -155,13 +155,13 @@ def test_ioclass_occuppancy_load(cache_line_size):
f"{len(ioclass_list_after_load)}"
)
original_sorted = sorted(original_ioclass_list, key=lambda k: k["id"])
loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k["id"])
original_sorted = sorted(original_ioclass_list, key=lambda k: k.id)
loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k.id)
for original, loaded in zip(original_sorted, loaded_sorted):
original_allocation = original["allocation"]
loaded_allocation = loaded["allocation"]
ioclass_id = original["id"]
original_allocation = original.allocation
loaded_allocation = loaded.allocation
ioclass_id = original.id
if original_allocation != loaded_allocation:
TestRun.LOGGER.error(
f"Occupancy limit doesn't match for ioclass {ioclass_id}: "


@@ -68,6 +68,13 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
with TestRun.step("Add default ioclasses"):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="1.00")).split(","))
ioclass_config.add_ioclass(
ioclass_id=5,
rule="metadata",
eviction_priority=1,
allocation="1.00",
ioclass_config_path=ioclass_config_path
)
with TestRun.step("Add ioclasses for all dirs"):
for io_class in io_classes:
@@ -116,7 +123,7 @@ def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior
actuall_occupancy = get_io_class_occupancy(cache, io_class.id)
occupancy_limit = (
(io_class.max_occupancy * cache_size * ioclass_size_multiplicatior)
(io_class.max_occupancy * cache_size)
.align_down(Unit.Blocks4096.get_value())
.set_unit(Unit.Blocks4096)
)
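The add_ioclass calls in this and the surrounding hunks build the IO class configuration CSV that is later loaded into the cache. Roughly, the rows produced would look like the sketch below, assuming the usual Open CAS column layout (IO class id, name/rule, eviction priority, allocation); all concrete values here are illustrative, not taken from the tests:

    # Illustrative only: approximate CSV rows built by the add_ioclass calls above.
    rows = [
        "IO class id,IO class name,Eviction priority,Allocation",  # header (layout assumed)
        "0,unclassified,22,1.00",               # default class via IoClass.default(...)
        "5,metadata,1,1.00",                    # explicit metadata class added by this change
        "1,directory:/mnt/test/A&done,3,0.30",  # per-directory class (values made up)
    ]
    print("\n".join(rows))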


@@ -21,7 +21,7 @@ from recordclass import recordclass
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrize("new_occupancy", [0, 20, 70, 100])
@pytest.mark.parametrize("new_occupancy", [25, 50, 70, 100])
def test_ioclass_resize(cache_line_size, new_occupancy):
"""
title: Resize ioclass
@@ -46,17 +46,25 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
with TestRun.step("Prepare test dirs"):
IoclassConfig = recordclass("IoclassConfig", "id eviction_prio max_occupancy dir_path")
io_class = IoclassConfig(1, 3, 0.50, f"{mountpoint}/A")
io_class = IoclassConfig(2, 3, 0.10, f"{mountpoint}/A")
fs_utils.create_directory(io_class.dir_path, parents=True)
fs_utils.create_directory(io_class.dir_path, parents=True)
with TestRun.step("Remove old ioclass config"):
ioclass_config.remove_ioclass_config()
ioclass_config.create_ioclass_config(False)
with TestRun.step("Add default ioclasses"):
ioclass_config.add_ioclass(
ioclass_id=1,
rule="metadata&done",
eviction_priority=1,
allocation="1.00",
ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
with TestRun.step("Add directory for ioclass"):
ioclass_config.add_ioclass(
io_class.id,
@@ -91,7 +99,8 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
.set_unit(Unit.Blocks4096)
)
if actuall_occupancy > occupancy_limit:
# Divergence may be caused by rounding max occupancy
if actuall_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
TestRun.LOGGER.error(
f"Occupancy for ioclass id exceeded: {io_class.id}. "
f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
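The relaxed check above lets the measured occupancy exceed the computed limit by up to 100 blocks of 4 KiB, since the per-class limit is derived from a rounded occupancy fraction. A plain-integer sketch of that comparison (the framework's Size/Unit arithmetic is modeled with simple integer math, which is an assumption):

    # Hypothetical standalone model of the occupancy check with rounding slack.
    BLOCK_4K = 4096

    def occupancy_exceeded(actual_bytes: int, cache_size_bytes: int,
                           max_occupancy: float, slack_blocks: int = 100) -> bool:
        # Align the expected limit down to whole 4 KiB blocks, as the test does.
        limit_blocks = int(cache_size_bytes * max_occupancy) // BLOCK_4K
        actual_blocks = actual_bytes // BLOCK_4K
        return actual_blocks > limit_blocks + slack_blocks

    # 1 GiB cache, 10% class limit (26214 blocks):
    print(occupancy_exceeded(26_300 * 4096, 1024 ** 3, 0.10))  # False: over the limit, within the slack
    print(occupancy_exceeded(26_400 * 4096, 1024 ** 3, 0.10))  # True: beyond limit + slack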
@@ -106,6 +115,13 @@ def test_ioclass_resize(cache_line_size, new_occupancy):
ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
ioclass_config.add_ioclass(
ioclass_id=1,
rule="metadata&done",
eviction_priority=1,
allocation="1.00",
ioclass_config_path=ioclass_config_path
)
ioclass_config.add_ioclass(
io_class.id,
f"directory:{io_class.dir_path}&done",


@@ -7,8 +7,9 @@ import math
import pytest
import os
from api.cas import casadm, cli_messages
from api.cas.cache_config import CacheLineSize
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from storage_devices.disk import DiskTypeSet, DiskType
from storage_devices.partition import Partition
from test_tools import disk_utils, fs_utils
from test_utils.output import CmdException
@@ -16,8 +17,8 @@ from test_utils.size import Size, Unit
@pytest.mark.os_dependent
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane]))
@pytest.mark.require_disk("core", DiskTypeSet([DiskType.nand]))
@pytest.mark.require_plugin("scsi_debug")
def test_device_capabilities():
"""
@@ -35,19 +36,21 @@ def test_device_capabilities():
'queue/max_sectors_kb')
default_max_io_size = fs_utils.read_file(max_io_size_path)
iteration_settings = [{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 1024},
{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 256},
{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 128},
{"device": "SCSI-debug module",
"dev_size_mb": 2048, "logical_block_size": 2048, "max_sectors_kb": 1024},
{"device": "standard core device",
"max_sectors_kb": int(default_max_io_size)},
{"device": "standard core device", "max_sectors_kb": 128}]
iteration_settings = [
{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 1024},
{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 256},
{"device": "SCSI-debug module",
"dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 128},
{"device": "SCSI-debug module",
"dev_size_mb": 2048, "logical_block_size": 2048, "max_sectors_kb": 1024},
{"device": "standard core device",
"max_sectors_kb": int(default_max_io_size)},
{"device": "standard core device", "max_sectors_kb": 128}
]
for i in range(0, 6):
for i in range(0, len(iteration_settings)):
device = iteration_settings[i]["device"]
group_title = f"{device} | "
if device == "SCSI-debug module":
@@ -99,7 +102,7 @@ def create_scsi_debug_device(sector_size: int, physblk_exp: int, dev_size_mb=102
def prepare_cas_device(cache_device, core_device):
cache = casadm.start_cache(cache_device, force=True)
cache = casadm.start_cache(cache_device, cache_line_size=CacheLineSize.LINE_64KiB, force=True)
try:
cache_dev_bs = disk_utils.get_block_size(cache_device.get_device_id())
core_dev_bs = disk_utils.get_block_size(core_device.get_device_id())