Merge pull request #1349 from DocentSzachista/test-multilevel-cache
tests: add multilevel cache test
This commit is contained in:
commit
0ff4698f29
42
test/functional/tests/volumes/common.py
Normal file
42
test/functional/tests/volumes/common.py
Normal file
@ -0,0 +1,42 @@
|
||||
#
|
||||
# Copyright(c) 2022 Intel Corporation
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
from test_tools import fs_utils
|
||||
from core.test_run import TestRun
|
||||
from test_utils.size import Size, Unit
|
||||
|
||||
test_file_size = Size(500, Unit.KiloByte)
|
||||
|
||||
|
||||
def create_files_with_md5sums(destination_path, files_count):
    """Create random test files, copy them to *destination_path* and return their md5 sums.

    :param destination_path: directory the files are copied into
    :param files_count: number of files to create (named ``file0`` .. ``file{n-1}``)
    :return: list of md5 sums, index-aligned with the file numbers
    """
    md5sums = []
    for i in range(files_count):
        temp_file = f"/tmp/file{i}"
        destination_file = f"{destination_path}/file{i}"

        # Create the source file in /tmp first, then copy, so the original
        # stays available as the checksum reference.
        test_file = fs_utils.create_random_test_file(temp_file, test_file_size)
        test_file.copy(destination_file, force=True)

        md5sums.append(test_file.md5sum())

    TestRun.LOGGER.info("Files created and copied to core successfully.")
    return md5sums
||||
|
||||
|
||||
def compare_md5sums(md5_sums_source, files_to_check_path, copy_to_tmp=False):
    """Verify files under *files_to_check_path* against the expected md5 sums.

    Fails the test run on the first mismatching checksum.

    :param md5_sums_source: md5 sums of the original files, index-aligned
        with files named ``file{i}``
    :param files_to_check_path: directory containing the files to verify
    :param copy_to_tmp: when True, copy each file aside (``filetmp``) and
        checksum the copy instead, exercising the read path once more
    """
    for i, source_md5 in enumerate(md5_sums_source):
        file_to_check_path = f"{files_to_check_path}/file{i}"
        file_to_check = fs_utils.parse_ls_output(fs_utils.ls_item(file_to_check_path))[0]

        if copy_to_tmp:
            # Re-read through a copy to force another pass through the cache stack.
            file_to_check_path = f"{files_to_check_path}/filetmp"
            file_to_check.copy(file_to_check_path, force=True)
            file_to_check = fs_utils.parse_ls_output(fs_utils.ls_item(file_to_check_path))[0]

        if source_md5 != file_to_check.md5sum():
            TestRun.fail(f"Source and target files {file_to_check_path} checksums are different.")

    TestRun.LOGGER.info("Successful verification, md5sums match.")
74
test/functional/tests/volumes/test_multilevel_cache_3.py
Normal file
74
test/functional/tests/volumes/test_multilevel_cache_3.py
Normal file
@ -0,0 +1,74 @@
|
||||
#
|
||||
# Copyright(c) 2022 Intel Corporation
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
|
||||
import pytest
|
||||
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
|
||||
from core.test_run import TestRun
|
||||
from api.cas.cache import CacheMode, casadm
|
||||
from test_utils.size import Size, Unit
|
||||
from test_tools.disk_utils import Filesystem
|
||||
from .common import create_files_with_md5sums, compare_md5sums
|
||||
|
||||
mount_point = "/mnt/test"
|
||||
|
||||
|
||||
@pytest.mark.require_disk("cache_1", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("cache_2", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core_1", DiskTypeLowerThan("cache_1"))
@pytest.mark.require_disk("cache_3", DiskTypeSet([DiskType.hdd]))
def test_multilevel_cache_3():
    """
    title:
        Test multilevel cache.
    description:
        CAS Linux is able to use a 3-level cache in different cache modes
        and remove it gracefully.
    pass_criteria:
        - Successfully created 3-level cache.
        - Successfully mounted filesystem on CAS device.
        - md5 sums are correct.
    """
    with TestRun.step("Prepare devices"):
        core_dev_1 = TestRun.disks["core_1"]

        # HDD-backed partition serves as the slowest (third) cache level.
        cache_hdd = TestRun.disks["cache_3"]
        cache_hdd.create_partitions([Size(3.2, Unit.GibiByte)])
        cache_hdd = cache_hdd.partitions[0]

        cache_dev_1 = TestRun.disks["cache_1"]
        cache_dev_1.create_partitions([Size(3.2, Unit.GibiByte)])
        cache_dev_1 = cache_dev_1.partitions[0]

        cache_dev_2 = TestRun.disks["cache_2"]
        cache_dev_2.create_partitions([Size(3.2, Unit.GibiByte)])
        cache_dev_2 = cache_dev_2.partitions[0]

    with TestRun.step("Create cache in WT mode and add core to it"):
        cache_WT = casadm.start_cache(cache_dev=cache_dev_1, cache_mode=CacheMode.WT)
        core_WT = cache_WT.add_core(core_dev=core_dev_1)

    with TestRun.step("Create second layer cache in WB mode"):
        cache_WB = casadm.start_cache(cache_dev=cache_dev_2, cache_mode=CacheMode.WB)

    with TestRun.step("Add first CAS device by setting exported object as core to second layer"):
        core_WB = cache_WB.add_core(core_WT)

    with TestRun.step("Create third layer cache in WA mode"):
        cache_WA = casadm.start_cache(cache_dev=cache_hdd, cache_mode=CacheMode.WA)

    with TestRun.step("Add second CAS device by setting exported object as core to third layer"):
        # 'core' is the top of the stack: WA(hdd) -> WB(cache_2) -> WT(cache_1) -> core_dev_1
        core = cache_WA.add_core(core_WB)

    with TestRun.step("Create and mount filesystem on third CAS device"):
        core.create_filesystem(Filesystem.ext3, blocksize=int(Size(1, Unit.Blocks4096)))
        core.mount(mount_point)

    with TestRun.step("Create files and copy them to mounted directory"):
        md5_sums = create_files_with_md5sums(mount_point, 100)

    with TestRun.step(
        """Compare md5 sums between original files, files on Intel CAS device
        and files copied from Intel CAS device"""
    ):
        compare_md5sums(md5_sums, mount_point, copy_to_tmp=True)
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright(c) 2020-2021 Intel Corporation
|
||||
# Copyright(c) 2020-2022 Intel Corporation
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
#
|
||||
|
||||
@ -7,6 +7,7 @@ import pytest
|
||||
|
||||
from api.cas import casadm
|
||||
from api.cas.cache_config import CacheMode
|
||||
from .common import compare_md5sums, create_files_with_md5sums
|
||||
from core.test_run import TestRun
|
||||
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
|
||||
from storage_devices.raid import Raid, RaidConfiguration, MetadataVariant, Level
|
||||
@ -37,23 +38,22 @@ def test_raid_as_cache(cache_mode):
|
||||
- Files copied successfully, the md5sum match the origin one.
|
||||
"""
|
||||
with TestRun.step("Create RAID1."):
|
||||
raid_disk = TestRun.disks['cache1']
|
||||
raid_disk = TestRun.disks["cache1"]
|
||||
raid_disk.create_partitions([Size(2, Unit.GibiByte)])
|
||||
raid_disk_1 = raid_disk.partitions[0]
|
||||
raid_disk2 = TestRun.disks['cache2']
|
||||
raid_disk2 = TestRun.disks["cache2"]
|
||||
raid_disk2.create_partitions([Size(2, Unit.GibiByte)])
|
||||
raid_disk_2 = raid_disk2.partitions[0]
|
||||
|
||||
config = RaidConfiguration(
|
||||
level=Level.Raid1,
|
||||
metadata=MetadataVariant.Legacy,
|
||||
number_of_devices=2)
|
||||
level=Level.Raid1, metadata=MetadataVariant.Legacy, number_of_devices=2
|
||||
)
|
||||
|
||||
raid_volume = Raid.create(config, [raid_disk_1, raid_disk_2])
|
||||
TestRun.LOGGER.info(f"RAID created successfully.")
|
||||
|
||||
with TestRun.step("Prepare core device."):
|
||||
core_disk = TestRun.disks['core']
|
||||
core_disk = TestRun.disks["core"]
|
||||
core_disk.create_partitions([Size(2, Unit.GibiByte)])
|
||||
core_dev = core_disk.partitions[0]
|
||||
|
||||
@ -96,10 +96,10 @@ def test_many_cores_raid_as_cache(cache_mode):
|
||||
- Successful creation and copy files to each core and verification of theirs md5sum.
|
||||
"""
|
||||
with TestRun.step("Create cache with RAID0 as caching device."):
|
||||
raid_disk = TestRun.disks['cache1']
|
||||
raid_disk = TestRun.disks["cache1"]
|
||||
raid_disk.create_partitions([Size(2, Unit.GibiByte)])
|
||||
raid_disk_1 = raid_disk.partitions[0]
|
||||
raid_disk2 = TestRun.disks['cache2']
|
||||
raid_disk2 = TestRun.disks["cache2"]
|
||||
raid_disk2.create_partitions([Size(2, Unit.GibiByte)])
|
||||
raid_disk_2 = raid_disk2.partitions[0]
|
||||
|
||||
@ -107,7 +107,8 @@ def test_many_cores_raid_as_cache(cache_mode):
|
||||
level=Level.Raid0,
|
||||
metadata=MetadataVariant.Legacy,
|
||||
number_of_devices=2,
|
||||
size=Size(1, Unit.GiB))
|
||||
size=Size(1, Unit.GiB),
|
||||
)
|
||||
|
||||
raid_volume = Raid.create(config, [raid_disk_1, raid_disk_2])
|
||||
TestRun.LOGGER.info(f"RAID created successfully.")
|
||||
@ -115,7 +116,7 @@ def test_many_cores_raid_as_cache(cache_mode):
|
||||
cache = casadm.start_cache(raid_volume, cache_mode, force=True)
|
||||
|
||||
with TestRun.step("Add core device to cache, create filesystem and mount it."):
|
||||
core_disk1 = TestRun.disks['core1']
|
||||
core_disk1 = TestRun.disks["core1"]
|
||||
core_disk1.create_partitions([Size(2, Unit.GibiByte)])
|
||||
core_dev1 = core_disk1.partitions[0]
|
||||
|
||||
@ -124,7 +125,7 @@ def test_many_cores_raid_as_cache(cache_mode):
|
||||
core1.mount(mount_point)
|
||||
|
||||
with TestRun.step("Add second core device to cache, create filesystem and mount it."):
|
||||
core_disk2 = TestRun.disks['core2']
|
||||
core_disk2 = TestRun.disks["core2"]
|
||||
core_disk2.create_partitions([Size(2, Unit.GibiByte)])
|
||||
core_dev2 = core_disk2.partitions[0]
|
||||
|
||||
@ -143,31 +144,3 @@ def test_many_cores_raid_as_cache(cache_mode):
|
||||
|
||||
with TestRun.step("Compare checksum on second core."):
|
||||
compare_md5sums(core2_md5sums, mount_point2)
|
||||
|
||||
|
||||
def create_files_with_md5sums(destination_path, files_count):
    """Generate random test files, copy each into *destination_path*, and
    return the list of their md5 sums (index matches the file number)."""
    checksums = []
    for index in range(0, files_count):
        source_path = f"/tmp/file{index}"
        target_path = f"{destination_path}/file{index}"

        # Build the file in /tmp, then push a copy to the destination.
        created_file = fs_utils.create_random_test_file(source_path, test_file_size)
        created_file.copy(target_path, force=True)

        checksums.append(created_file.md5sum())

    TestRun.LOGGER.info(f"Files created and copied to core successfully.")
    return checksums
|
||||
|
||||
|
||||
def compare_md5sums(md5_sums_source, files_to_check_path):
    """Check every ``file{i}`` under *files_to_check_path* against the
    expected md5 sums; fail the test run on the first mismatch."""
    for index, expected_sum in enumerate(md5_sums_source):
        file_to_check_path = f"{files_to_check_path}/file{index}"
        file_entry = fs_utils.parse_ls_output(fs_utils.ls_item(file_to_check_path))[0]

        if expected_sum != file_entry.md5sum():
            TestRun.fail(f"Source and target files {file_to_check_path} checksums are different.")

    TestRun.LOGGER.info(f"Successful verification, md5sums match.")
|
||||
|
Loading…
Reference in New Issue
Block a user