Merge pull request #1277 from pdebski21/test_fixes3

Test fixes
Robert Baldyga 2022-09-13 09:41:45 +02:00 committed by GitHub
commit a9260524e0
2 changed files with 51 additions and 39 deletions

View File

@@ -1,5 +1,5 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -11,14 +11,15 @@ from api.cas.core import Core
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from core.test_run import TestRun
from test_tools.dd import Dd
from test_utils.os_utils import Udev
from test_utils.os_utils import Udev, sync
from test_utils.size import Size, Unit
block_size = Size(1, Unit.Blocks4096)
@pytest.mark.parametrize("cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead
| CacheModeTrait.InsertWrite))
@pytest.mark.parametrize(
"cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite)
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_one_core_remove(cache_mode):
@@ -35,10 +36,10 @@ def test_one_core_remove(cache_mode):
- Removing core frees cache blocks occupied by this core.
"""
with TestRun.step("Prepare one device for cache and two for core."):
cache_dev = TestRun.disks['cache']
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(512, Unit.MebiByte)])
cache_dev = cache_dev.partitions[0]
core_dev = TestRun.disks['core']
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
core_part1 = core_dev.partitions[0]
core_part2 = core_dev.partitions[1]
@@ -74,8 +75,10 @@ def test_one_core_remove(cache_mode):
with TestRun.step("Check if occupancy from the first core is removed from cache."):
# Blocks occupied by the first core should be completely released.
if cache.get_occupancy() != occupied_blocks_before - core1_occupied_blocks:
TestRun.LOGGER.error("Blocks previously occupied by the first core "
"aren't released by removing this core.")
TestRun.LOGGER.error(
"Blocks previously occupied by the first core "
"aren't released by removing this core."
)
with TestRun.step("Check if the remaining core is able to use cache."):
dd_builder(cache_mode, core2, Size(100, Unit.MebiByte)).run()
@@ -86,8 +89,9 @@ def test_one_core_remove(cache_mode):
casadm.stop_all_caches()
@pytest.mark.parametrize("cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead
| CacheModeTrait.InsertWrite))
@pytest.mark.parametrize(
"cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite)
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_one_core_release(cache_mode):
@@ -103,10 +107,10 @@ def test_one_core_release(cache_mode):
- OpenCAS frees blocks occupied by unused core and allocates it to the remaining core.
"""
with TestRun.step("Prepare two cache and one core devices."):
cache_dev = TestRun.disks['cache']
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(512, Unit.MebiByte)])
cache_part = cache_dev.partitions[0]
core_dev = TestRun.disks['core']
core_dev = TestRun.disks["core"]
core_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
core_part1 = core_dev.partitions[0]
core_part2 = core_dev.partitions[1]
@@ -140,45 +144,45 @@ def test_one_core_release(cache_mode):
# The first core's occupancy should be lower than cache's occupancy
# by the value of the remaining core's occupancy because cache
# should reallocate blocks from unused core to used core.
if core1_occupied_blocks_after >= core1_occupied_blocks_before \
or cache.get_occupancy() <= core1_occupied_blocks_after \
or not float(core2.get_occupancy().get_value()) > 0:
if (
core1_occupied_blocks_after >= core1_occupied_blocks_before
or cache.get_occupancy() <= core1_occupied_blocks_after
or not float(core2.get_occupancy().get_value()) > 0
):
TestRun.LOGGER.error("Blocks previously occupied by the first core aren't released.")
with TestRun.step("Stop cache."):
casadm.stop_all_caches()
@pytest.mark.parametrize("cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead
| CacheModeTrait.InsertWrite))
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core1", DiskTypeLowerThan("cache"))
@pytest.mark.require_disk("core2", DiskTypeLowerThan("cache"))
def test_one_core_fail(cache_mode):
def test_one_core_fail():
"""
title: Test if OpenCAS correctly handles failure of one of multiple core devices.
description: |
When one core device fails in a single cache instance all blocks previously occupied
When one core device fails in a single cache instance all clean blocks previously occupied
should be available to other core devices.
Test is without pass through mode.
pass_criteria:
- No system crash.
- Second core is able to use OpenCAS.
"""
with TestRun.step("Prepare one cache and two core devices."):
cache_dev = TestRun.disks['cache']
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
cache_part = cache_dev.partitions[0]
core_dev1 = TestRun.disks['core1'] # This device would be unplugged.
core_dev1 = TestRun.disks["core1"] # This device would be unplugged.
core_dev1.create_partitions([Size(2, Unit.GibiByte)])
core_part1 = core_dev1.partitions[0]
core_dev2 = TestRun.disks['core2']
core_dev2 = TestRun.disks["core2"]
core_dev2.create_partitions([Size(2, Unit.GibiByte)])
core_part2 = core_dev2.partitions[0]
Udev.disable()
with TestRun.step("Start cache"):
cache = casadm.start_cache(cache_part, cache_mode, force=True)
cache_mode = CacheMode.WT
cache = casadm.start_cache(cache_part, cache_mode=cache_mode, force=True)
caches_count = len(casadm_parser.get_caches())
if caches_count != 1:
TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")
@@ -214,10 +218,14 @@ def test_one_core_fail(cache_mode):
# Cache occupancy cannot be lower than before the first core fails and after that
# should be equal to the sum of occupancy of the first and the remaining core
cache_occupied_blocks_after = cache.get_occupancy()
if cache_occupied_blocks_before > cache_occupied_blocks_after \
or cache_occupied_blocks_after != core2.get_occupancy() + core1.get_occupancy():
TestRun.fail("Blocks previously occupied by the first core "
"aren't released after this core failure.")
if (
cache_occupied_blocks_before > cache_occupied_blocks_after
or cache_occupied_blocks_after != core2.get_occupancy() + core1.get_occupancy()
):
TestRun.fail(
"Blocks previously occupied by the first core "
"aren't released after this core failure."
)
with TestRun.step("Stop cache."):
casadm.stop_all_caches()
@@ -228,11 +236,9 @@ def test_one_core_fail(cache_mode):
def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
blocks = int(size.value / block_size.value)
dd = (Dd()
.block_size(block_size)
.count(blocks))
dd = Dd().block_size(block_size).count(blocks)
if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
dd.input(dev.path).output("/dev/null")
dd.input(dev.path).output("/dev/null").iflag("direct")
else:
dd.input("/dev/urandom").output(dev.path)
dd.input("/dev/urandom").output(dev.path).oflag("direct")
return dd
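For reference, a rough stand-alone sketch of the direct-I/O transfers the updated dd_builder() now issues, written with plain dd via subprocess instead of the framework's Dd wrapper (the function names and the 4 KiB block size below are illustrative, not part of the commit):

import subprocess

BLOCK_SIZE = 4096  # mirrors block_size = Size(1, Unit.Blocks4096) above

def read_direct(dev_path: str, blocks: int):
    # InsertRead modes: read the CAS device into /dev/null, bypassing the page cache.
    subprocess.run(
        ["dd", f"if={dev_path}", "of=/dev/null",
         f"bs={BLOCK_SIZE}", f"count={blocks}", "iflag=direct"],
        check=True,
    )

def write_direct(dev_path: str, blocks: int):
    # InsertWrite modes: write random data to the CAS device with O_DIRECT.
    subprocess.run(
        ["dd", "if=/dev/urandom", f"of={dev_path}",
         f"bs={BLOCK_SIZE}", f"count={blocks}", "oflag=direct"],
        check=True,
    )

With iflag/oflag set to direct, the page cache stays out of the picture, so the occupancy values the tests read back from casadm reflect only requests that actually reached the cache engine.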

View File

@@ -1,7 +1,8 @@
#
# Copyright(c) 2020-2021 Intel Corporation
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import pytest
import uuid
@@ -11,6 +12,7 @@ from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskTypeLowerThan, DiskType
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils.filesystem.symlink import Symlink
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
@@ -18,7 +20,7 @@ from test_utils.size import Size, Unit
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", [mode for mode in CacheMode if
CacheModeTrait.InsertWrite & mode.get_traits(mode)])
CacheModeTrait.InsertWrite & mode.get_traits(mode)])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_write_fetch_full_misses(cache_mode, cache_line_size):
"""
@@ -35,7 +37,8 @@ def test_write_fetch_full_misses(cache_mode, cache_line_size):
with TestRun.step("Start cache and add core."):
cache_disk = TestRun.disks['cache']
core_disk = TestRun.disks['core']
cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
cache = casadm.start_cache(cache_dev=cache_disk, cache_mode=cache_mode,
cache_line_size=cache_line_size, force=True)
Udev.disable()
core = cache.add_core(core_disk)
with TestRun.step("Run writes to CAS device using fio."):
@@ -62,7 +65,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", [mode for mode in CacheMode if
CacheModeTrait.InsertWrite & CacheMode.get_traits(mode)])
CacheModeTrait.InsertWrite & CacheMode.get_traits(mode)])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_write_fetch_partial_misses(cache_mode, cache_line_size):
"""
@@ -97,7 +100,8 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
TestRun.LOGGER.info(f"Skipped for {cache_mode} cache mode.")
with TestRun.step("Start cache and add core."):
cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
cache = casadm.start_cache(cache_dev=cache_disk, cache_mode=cache_mode,
cache_line_size=cache_line_size, force=True)
Udev.disable()
core = cache.add_core(core_part)
with TestRun.step("Cache half of file."):
@@ -142,8 +146,10 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
# Methods used in tests:
def check_io_stats(cache_disk, cache, io_stats_before, io_size, blocksize, skip_size):
io_stats_after = cache_disk.get_io_stats()
target = Symlink(cache_disk.path).get_target()
device_name = os.path.basename(target)
logical_block_size = int(TestRun.executor.run(
f"cat /sys/block/{cache_disk.device_name}/queue/logical_block_size").stdout)
f"cat /sys/block/{device_name}/queue/logical_block_size").stdout)
diff = io_stats_after.sectors_written - io_stats_before.sectors_written
written_sector_size = Size(logical_block_size) * diff
TestRun.LOGGER.info(f"Sectors written: "