fix for test test_one_core_fail
Occupancy does not change when the cache data is dirty and dd is executed for a cache in write-only mode. The fix is to prefill the cache with clean data.

Signed-off-by: Piotr Debski <piotr.debski@intel.com>
This commit is contained in:
parent 98e2cb02c5
commit 5aba9efda1
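As a rough illustration of the prefill idea from the message above: reading a core's exported object inserts clean blocks into the cache (reads never create dirty data), so a read pass before the write-only dd leaves the cache filled with clean data. The sketch below assumes only the test-framework API already visible in the diff (Dd, sync, Size/Unit, block_size, a core's path attribute); the helper name prefill_with_clean_data is hypothetical, and the commit's actual prefill step is outside the hunks shown here.

from test_tools.dd import Dd
from test_utils.os_utils import sync
from test_utils.size import Size, Unit

block_size = Size(1, Unit.Blocks4096)


def prefill_with_clean_data(core, size):
    # Hypothetical helper (not the commit's code): read `size` bytes of the
    # core's exported object with direct I/O so the cache is populated with
    # clean, read-inserted blocks instead of dirty ones.
    blocks = int(size.value / block_size.value)
    dd = (
        Dd()
        .input(core.path)
        .output("/dev/null")
        .block_size(block_size)
        .count(blocks)
        .iflag("direct")
    )
    dd.run()
    # sync() mirrors the helper the commit imports; it flushes any buffered
    # writes before occupancy statistics are sampled.
    sync()

In the fixed test, a prefill of this kind would run before the write-only dd from dd_builder, so the occupancy counters have clean blocks to work with.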
@@ -1,5 +1,5 @@
 #
-# Copyright(c) 2020-2021 Intel Corporation
+# Copyright(c) 2020-2022 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -11,14 +11,15 @@ from api.cas.core import Core
 from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
 from core.test_run import TestRun
 from test_tools.dd import Dd
-from test_utils.os_utils import Udev
+from test_utils.os_utils import Udev, sync
 from test_utils.size import Size, Unit

 block_size = Size(1, Unit.Blocks4096)


-@pytest.mark.parametrize("cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead
-                                                                | CacheModeTrait.InsertWrite))
+@pytest.mark.parametrize(
+    "cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite)
+)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_one_core_remove(cache_mode):
@@ -35,10 +36,10 @@ def test_one_core_remove(cache_mode):
           - Removing core frees cache blocks occupied by this core.
     """
     with TestRun.step("Prepare one device for cache and two for core."):
-        cache_dev = TestRun.disks['cache']
+        cache_dev = TestRun.disks["cache"]
         cache_dev.create_partitions([Size(512, Unit.MebiByte)])
         cache_dev = cache_dev.partitions[0]
-        core_dev = TestRun.disks['core']
+        core_dev = TestRun.disks["core"]
         core_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
         core_part1 = core_dev.partitions[0]
         core_part2 = core_dev.partitions[1]
@@ -74,8 +75,10 @@ def test_one_core_remove(cache_mode):
     with TestRun.step("Check if occupancy from the first core is removed from cache."):
         # Blocks occupied by the first core should be completely released.
         if cache.get_occupancy() != occupied_blocks_before - core1_occupied_blocks:
-            TestRun.LOGGER.error("Blocks previously occupied by the first core "
-                                 "aren't released by removing this core.")
+            TestRun.LOGGER.error(
+                "Blocks previously occupied by the first core "
+                "aren't released by removing this core."
+            )

     with TestRun.step("Check if the remaining core is able to use cache."):
         dd_builder(cache_mode, core2, Size(100, Unit.MebiByte)).run()
@@ -86,8 +89,9 @@ def test_one_core_remove(cache_mode):
         casadm.stop_all_caches()


-@pytest.mark.parametrize("cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead
-                                                                | CacheModeTrait.InsertWrite))
+@pytest.mark.parametrize(
+    "cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead | CacheModeTrait.InsertWrite)
+)
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
 def test_one_core_release(cache_mode):
@@ -103,10 +107,10 @@ def test_one_core_release(cache_mode):
           - OpenCAS frees blocks occupied by unused core and allocates it to the remaining core.
     """
     with TestRun.step("Prepare two cache and one core devices."):
-        cache_dev = TestRun.disks['cache']
+        cache_dev = TestRun.disks["cache"]
         cache_dev.create_partitions([Size(512, Unit.MebiByte)])
         cache_part = cache_dev.partitions[0]
-        core_dev = TestRun.disks['core']
+        core_dev = TestRun.disks["core"]
         core_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
         core_part1 = core_dev.partitions[0]
         core_part2 = core_dev.partitions[1]
@@ -140,45 +144,45 @@ def test_one_core_release(cache_mode):
         # The first core's occupancy should be lower than cache's occupancy
         # by the value of the remaining core's occupancy because cache
        # should reallocate blocks from unused core to used core.
-        if core1_occupied_blocks_after >= core1_occupied_blocks_before \
-                or cache.get_occupancy() <= core1_occupied_blocks_after \
-                or not float(core2.get_occupancy().get_value()) > 0:
+        if (
+            core1_occupied_blocks_after >= core1_occupied_blocks_before
+            or cache.get_occupancy() <= core1_occupied_blocks_after
+            or not float(core2.get_occupancy().get_value()) > 0
+        ):
             TestRun.LOGGER.error("Blocks previously occupied by the first core aren't released.")

     with TestRun.step("Stop cache."):
         casadm.stop_all_caches()


-@pytest.mark.parametrize("cache_mode", CacheMode.with_any_trait(CacheModeTrait.InsertRead
-                                                                | CacheModeTrait.InsertWrite))
 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
 @pytest.mark.require_disk("core1", DiskTypeLowerThan("cache"))
 @pytest.mark.require_disk("core2", DiskTypeLowerThan("cache"))
-def test_one_core_fail(cache_mode):
+def test_one_core_fail():
     """
         title: Test if OpenCAS correctly handles failure of one of multiple core devices.
         description: |
-          When one core device fails in a single cache instance all blocks previously occupied
+          When one core device fails in a single cache instance all clean blocks previously occupied
           should be available to other core devices.
-          Test is without pass through mode.
         pass_criteria:
           - No system crash.
           - Second core is able to use OpenCAS.
     """
     with TestRun.step("Prepare one cache and two core devices."):
-        cache_dev = TestRun.disks['cache']
+        cache_dev = TestRun.disks["cache"]
         cache_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
         cache_part = cache_dev.partitions[0]
-        core_dev1 = TestRun.disks['core1']  # This device would be unplugged.
+        core_dev1 = TestRun.disks["core1"]  # This device would be unplugged.
         core_dev1.create_partitions([Size(2, Unit.GibiByte)])
         core_part1 = core_dev1.partitions[0]
-        core_dev2 = TestRun.disks['core2']
+        core_dev2 = TestRun.disks["core2"]
         core_dev2.create_partitions([Size(2, Unit.GibiByte)])
         core_part2 = core_dev2.partitions[0]
         Udev.disable()

     with TestRun.step("Start cache"):
-        cache = casadm.start_cache(cache_part, cache_mode, force=True)
+        cache_mode = CacheMode.WT
+        cache = casadm.start_cache(cache_part, cache_mode=cache_mode, force=True)
         caches_count = len(casadm_parser.get_caches())
         if caches_count != 1:
             TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")
@@ -214,10 +218,14 @@ def test_one_core_fail(cache_mode):
         # Cache occupancy cannot be lower than before the first core fails and after that
         # should be equal to the sum of occupancy of the first and the remaining core
         cache_occupied_blocks_after = cache.get_occupancy()
-        if cache_occupied_blocks_before > cache_occupied_blocks_after \
-                or cache_occupied_blocks_after != core2.get_occupancy() + core1.get_occupancy():
-            TestRun.fail("Blocks previously occupied by the first core "
-                         "aren't released after this core failure.")
+        if (
+            cache_occupied_blocks_before > cache_occupied_blocks_after
+            or cache_occupied_blocks_after != core2.get_occupancy() + core1.get_occupancy()
+        ):
+            TestRun.fail(
+                "Blocks previously occupied by the first core "
+                "aren't released after this core failure."
+            )

     with TestRun.step("Stop cache."):
         casadm.stop_all_caches()
@@ -228,11 +236,9 @@ def test_one_core_fail(cache_mode):

 def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
     blocks = int(size.value / block_size.value)
-    dd = (Dd()
-          .block_size(block_size)
-          .count(blocks))
+    dd = Dd().block_size(block_size).count(blocks)
     if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
-        dd.input(dev.path).output("/dev/null")
+        dd.input(dev.path).output("/dev/null").iflag("direct")
     else:
-        dd.input("/dev/urandom").output(dev.path)
+        dd.input("/dev/urandom").output(dev.path).oflag("direct")
     return dd