Merge pull request #535 from Ostrokrzew/by-id

Disallow to use other than by-id path to core or cache device
Robert Baldyga committed via GitHub on 2020-12-23 14:38:07 +01:00
99 changed files with 740 additions and 724 deletions
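
This PR renames Device.system_path to Device.path across the test framework: path now holds a persistent /dev/disk/by-id link rather than a short kernel name such as /dev/sdb, and a separate short_path keeps the old form where tests still need it. As a rough sketch of the idea (an assumption for illustration, not the framework's actual implementation), resolving a by-id link for a short device path could look like this:

import os

def resolve_by_id_path(short_path: str) -> str:
    # Return a persistent /dev/disk/by-id link pointing at short_path,
    # falling back to the short kernel name if no such link exists.
    by_id_dir = "/dev/disk/by-id"
    target = os.path.realpath(short_path)
    for name in sorted(os.listdir(by_id_dir)):
        link = os.path.join(by_id_dir, name)
        if os.path.realpath(link) == target:
            return link
    return short_path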

@@ -19,12 +19,12 @@ class Cache:
self.__metadata_size = None
def __get_cache_id(self):
cmd = f"{list_cmd()} | grep {self.cache_device.system_path}"
cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.short_path}"
output = TestRun.executor.run(cmd)
if output.exit_code == 0 and output.stdout.strip():
return output.stdout.split()[1]
else:
raise Exception(f"There is no cache started on {self.cache_device.system_path}.")
raise Exception(f"There is no cache started on {self.cache_device.path}.")
def get_core_devices(self):
return get_cores(self.cache_id)

@@ -35,7 +35,7 @@ def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
_cache_id = None if cache_id is None else str(cache_id)
_cache_mode = None if cache_mode is None else cache_mode.name.lower()
output = TestRun.executor.run(start_cmd(
cache_dev=cache_dev.system_path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to start cache.", output)
@@ -53,11 +53,11 @@ def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = False):
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run(
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.system_path,
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path,
core_id=_core_id, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to add core.", output)
core = Core(core_dev.system_path, cache.cache_id)
core = Core(core_dev.path, cache.cache_id)
return core
@@ -71,18 +71,18 @@ def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool
def remove_detached(core_device: Device, shortcut: bool = False):
output = TestRun.executor.run(
remove_detached_cmd(core_device=core_device.system_path, shortcut=shortcut))
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to remove detached core.", output)
return output
def try_add(core_device: Device, cache_id: int, core_id: int = None):
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.system_path,
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path,
str(core_id) if core_id is not None else None))
if output.exit_code != 0:
raise CmdException("Failed to execute try add script command.", output)
return Core(core_device.system_path, cache_id)
return Core(core_device.path, cache_id)
def purge_cache(cache_id: int):
@@ -128,16 +128,17 @@ def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
def load_cache(device: Device, shortcut: bool = False):
output = TestRun.executor.run(
load_cmd(cache_dev=device.system_path, shortcut=shortcut))
load_cmd(cache_dev=device.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to load cache.", output)
return Cache(device)
def list_caches(output_format: OutputFormat = None, shortcut: bool = False):
def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
list_cmd(output_format=_output_format, shortcut=shortcut))
list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to list caches.", output)
return output
@@ -154,7 +155,7 @@ def print_version(output_format: OutputFormat = None, shortcut: bool = False):
def zero_metadata(cache_dev: Device, shortcut: bool = False):
output = TestRun.executor.run(
zero_metadata_cmd(cache_dev=cache_dev.system_path, shortcut=shortcut))
zero_metadata_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to wipe metadata.", output)
return output
@@ -179,7 +180,8 @@ def remove_all_detached_cores():
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
io_class_id: int = None, filter: List[StatsFilter] = None,
output_format: OutputFormat = None, shortcut: bool = False):
output_format: OutputFormat = None, by_id_path: bool = True,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
_core_id = None if core_id is None else str(core_id)
_io_class_id = None if io_class_id is None else str(io_class_id)
@@ -192,7 +194,8 @@ def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
print_statistics_cmd(
cache_id=str(cache_id), core_id=_core_id,
per_io_class=per_io_class, io_class_id=_io_class_id,
filter=_filter, output_format=_output_format, shortcut=shortcut))
filter=_filter, output_format=_output_format,
by_id_path=by_id_path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Printing statistics failed.", output)
return output
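
Both wrappers now take a by_id_path flag defaulting to True, so listings and statistics report persistent by-id links unless the caller opts out. A hypothetical call site (surrounding test scaffolding assumed):

# Full /dev/disk/by-id links in the output (the new default).
casadm.list_caches(output_format=OutputFormat.csv)
# Short kernel names, e.g. for matching against /dev/sdb1.
casadm.list_caches(output_format=OutputFormat.csv, by_id_path=False)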

@@ -2,24 +2,20 @@
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import csv
import json
import re
from api.cas import casadm
from test_utils.output import CmdException
from test_utils.size import parse_unit
from storage_devices.device import Device
from api.cas.cache_config import *
from api.cas.casadm_params import *
from api.cas.version import CasVersion
from datetime import timedelta
from typing import List
from api.cas import casadm
from api.cas.cache_config import *
from api.cas.casadm_params import *
from api.cas.version import CasVersion
from storage_devices.device import Device
from test_utils.size import parse_unit
from test_utils.output import CmdException
class Stats(dict):

@@ -95,7 +95,8 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
io_class_id: str = None, filter: str = None,
output_format: str = None, shortcut: bool = False):
output_format: str = None, by_id_path: bool = True,
shortcut: bool = False):
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
@@ -109,6 +110,8 @@ def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool
command += (" -f " if shortcut else " --filter ") + filter
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path:
command += (" -b " if shortcut else " --by-id-path ")
return casadm_bin + command
@@ -126,10 +129,12 @@ def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False)
return casadm_bin + command
def list_cmd(output_format: str = None, shortcut: bool = False):
def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False):
command = " -L" if shortcut else " --list-caches"
if output_format == "table" or output_format == "csv":
command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path:
command += (" -b " if shortcut else " --by-id-path ")
return casadm_bin + command
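
For reference, the command strings these builders now produce, assuming casadm_bin is simply "casadm":

list_cmd()                                -> casadm --list-caches --by-id-path
list_cmd(shortcut=True)                   -> casadm -L -b
list_cmd(by_id_path=False, shortcut=True) -> casadm -L
list_cmd(output_format="csv")             -> casadm --list-caches --output-format csv --by-id-path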

@@ -26,7 +26,7 @@ SEQ_CUT_OFF_THRESHOLD_DEFAULT = Size(1, Unit.MebiByte)
class Core(Device):
def __init__(self, core_device: str, cache_id: int):
self.core_device = Device(core_device)
self.system_path = None
self.path = None
core_info = self.__get_core_info()
if core_info["core_id"] != "-":
self.core_id = int(core_info["core_id"])
@@ -38,14 +38,14 @@ class Core(Device):
def __get_core_info(self):
output = TestRun.executor.run(
list_cmd(OutputFormat.csv.name))
list_cmd(OutputFormat.csv.name, by_id_path=False))
if output.exit_code != 0:
raise Exception("Failed to execute list caches command.")
output_lines = output.stdout.splitlines()
for line in output_lines:
split_line = line.split(',')
if split_line[0] == "core" and (split_line[2] == self.core_device.system_path
or split_line[5] == self.system_path):
if split_line[0] == "core" and (split_line[2] == self.core_device.short_path
or split_line[5] == self.path):
return {"core_id": split_line[1],
"core_device": split_line[2],
"status": split_line[3],
@@ -132,7 +132,7 @@ class Core(Device):
def check_if_is_present_in_os(self, should_be_visible=True):
device_in_system_message = "CAS device exists in OS."
device_not_in_system_message = "CAS device does not exist in OS."
item = fs_utils.ls_item(f"{self.system_path}")
item = fs_utils.ls_item(f"{self.path}")
if item is not None:
if should_be_visible:
TestRun.LOGGER.info(device_in_system_message)
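
The csv layout assumed by __get_core_info puts the row type in column 0, the core id in column 1, the backing core device in column 2, the status in column 3 and the exported object path in column 5. A standalone sketch of the same parsing, with a hypothetical sample row:

sample = "core,1,/dev/sdb1,Active,-,/dev/cas1-1"
fields = sample.split(',')
if fields[0] == "core":
    # Key names beyond those shown in the diff are assumptions.
    core_info = {"core_id": fields[1], "core_device": fields[2],
                 "status": fields[3], "exp_obj": fields[5]}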

@@ -70,11 +70,8 @@ class CacheConfigLine:
self.extra_flags = extra_flags
def __str__(self):
cache_symlink = self.cache_device.get_device_link("/dev/disk/by-id")
cache_device_path = (
cache_symlink.full_path if cache_symlink is not None else self.cache_device.system_path
)
params = [str(self.cache_id), cache_device_path, self.cache_mode.name, self.extra_flags]
params = [str(self.cache_id), self.cache_device.path,
self.cache_mode.name, self.extra_flags]
return '\t'.join(params)
@@ -88,9 +85,6 @@ class CoreConfigLine:
self.extra_flags = extra_flags
def __str__(self):
core_symlink = self.core_device.get_device_link("/dev/disk/by-id")
core_device_path = (
core_symlink.full_path if core_symlink is not None else self.core_device.system_path
)
params = [str(self.cache_id), str(self.core_id), core_device_path, self.extra_flags]
params = [str(self.cache_id), str(self.core_id),
self.core_device.path, self.extra_flags]
return '\t'.join(params)
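
With path already holding the by-id link, the explicit get_device_link("/dev/disk/by-id") lookup becomes redundant and each config line reduces to a plain tab-separated join. A hypothetical rendered core line (identifier shortened, flag value invented for illustration):

1	1	/dev/disk/by-id/wwn-0x5000cca2...-part1	lazy_startup=True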

@@ -73,7 +73,7 @@ def test_cleaning_policies_in_write_back(cleaning_policy):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for i in range(cores_count):
fio.add_job().target(core[i].system_path)
fio.add_job().target(core[i].path)
fio.run()
time.sleep(3)
core_writes_before_wait_for_cleaning = (
@@ -138,7 +138,7 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for i in range(cores_count):
fio.add_job().target(core[i].system_path)
fio.add_job().target(core[i].path)
fio.run()
time.sleep(3)

@@ -57,13 +57,13 @@ def test_concurrent_cores_flush(cache_mode):
block_size = Size(4, Unit.MebiByte)
count = int(cache_size.value / 2 / block_size.value)
dd_pid = Dd().output(core1.system_path) \
dd_pid = Dd().output(core1.path) \
.input("/dev/urandom") \
.block_size(block_size) \
.count(count) \
.run_in_background()
Dd().output(core2.system_path) \
Dd().output(core2.path) \
.input("/dev/urandom") \
.block_size(block_size) \
.count(count) \
@@ -160,7 +160,7 @@ def test_concurrent_caches_flush(cache_mode):
count = int(cache_size.value / block_size.value)
total_saturation = block_size * count
for core in cores:
Dd().output(core.system_path) \
Dd().output(core.path) \
.input("/dev/urandom") \
.block_size(block_size) \
.count(count) \

@@ -363,7 +363,7 @@ def fio_prepare(core, io_mode, io_size=io_size):
.io_engine(IoEngine.libaio)
.size(io_size)
.read_write(io_mode)
.target(core.system_path)
.target(core.path)
.direct(1)
)
return fio

@@ -84,14 +84,14 @@ def test_seq_cutoff_multi_core(thresholds_list, cache_mode, io_type, io_type_last):
fio_job = fio.add_job(job_name=f"core_{core.core_id}")
fio_job.size(io_sizes[i])
fio_job.read_write(io_type)
fio_job.target(core.system_path)
fio_job.target(core.path)
writes_before.append(core.get_statistics().block_stats.cache.writes)
# Run random IO against the last core
fio_job = fio.add_job(job_name=f"core_{cores[-1].core_id}")
fio_job.size(io_sizes[-1])
fio_job.read_write(io_type_last)
fio_job.target(cores[-1].system_path)
fio_job.target(cores[-1].path)
writes_before.append(cores[-1].get_statistics().block_stats.cache.writes)
with TestRun.step("Running IO against all cores"):
@@ -150,7 +150,7 @@ def test_seq_cutoff_thresh(threshold_param, cls, io_dir, policy, verify_type):
.io_engine(IoEngine.libaio)
.size(io_size)
.read_write(io_dir)
.target(f"{cores[0].system_path}")
.target(f"{cores[0].path}")
.direct()
).run()
@@ -194,7 +194,7 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
.io_engine(IoEngine.libaio)
.size(cache.cache_device.size)
.read_write(io_dir)
.target(f"{cores[0].system_path}")
.target(f"{cores[0].path}")
.direct()
).run()
@@ -218,7 +218,7 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
.io_engine(IoEngine.libaio)
.size(io_size)
.read_write(io_dir)
.target(f"{cores[0].system_path}")
.target(f"{cores[0].path}")
.direct()
).run()

@@ -44,7 +44,7 @@ def test_purge(purge_target):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(100)
.block_size(Size(1, Unit.Blocks512))
.oflag("direct")

@@ -40,10 +40,10 @@ def test_cli_start_stop_default_id(shortcut):
if len(caches) != 1:
TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}. "
f"Should be only 1.")
if cache.cache_device.system_path != cache_device.system_path:
if cache.cache_device.path != cache_device.path:
TestRun.fail(f"The cache has started using a wrong device:"
f" {cache.cache_device.system_path}."
f"\nShould use {cache_device.system_path}.")
f" {cache.cache_device.path}."
f"\nShould use {cache_device.path}.")
with TestRun.step("Stop the cache."):
casadm.stop_cache(cache.cache_id, shortcut=shortcut)
@@ -83,10 +83,10 @@ def test_cli_start_stop_custom_id(shortcut):
if len(caches) != 1:
TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}. "
f"Should be only 1.")
if cache.cache_device.system_path != cache_device.system_path:
if cache.cache_device.path != cache_device.path:
TestRun.fail(f"The cache has started using a wrong device:"
f" {cache.cache_device.system_path}."
f"\nShould use {cache_device.system_path}.")
f" {cache.cache_device.path}."
f"\nShould use {cache_device.path}.")
with TestRun.step("Stop the cache."):
casadm.stop_cache(cache.cache_id, shortcut=shortcut)
@@ -127,7 +127,7 @@ def test_cli_add_remove_default_id(shortcut):
caches = casadm_parser.get_caches()
if len(caches[0].get_core_devices()) != 1:
TestRun.fail("One core should be present in the cache.")
if caches[0].get_core_devices()[0].system_path != core.system_path:
if caches[0].get_core_devices()[0].path != core.path:
TestRun.fail("The core path should be equal to the path of the core added.")
with TestRun.step("Remove the core from the cache."):
@@ -180,7 +180,7 @@ def test_cli_add_remove_custom_id(shortcut):
caches = casadm_parser.get_caches()
if len(caches[0].get_core_devices()) != 1:
TestRun.fail("One core should be present in the cache.")
if caches[0].get_core_devices()[0].system_path != core.system_path:
if caches[0].get_core_devices()[0].path != core.path:
TestRun.fail("The core path should be equal to the path of the core added.")
with TestRun.step("Remove the core from the cache."):
@@ -227,7 +227,7 @@ def test_cli_load_and_force(shortcut):
with TestRun.step("Try to load cache with 'force'."):
output = TestRun.executor.run(
start_cmd(cache_dev=cache_device.system_path, force=True, load=True, shortcut=shortcut)
start_cmd(cache_dev=cache_device.path, force=True, load=True, shortcut=shortcut)
)
if output.exit_code == 0:
TestRun.fail("Loading cache with 'force' option should fail.")

@@ -292,7 +292,7 @@ def check_seqcutoff_parameters(core, seqcutoff_params):
if failed_params:
TestRun.LOGGER.error(
f"Sequential cut-off parameters are not correct "
f"for {core.system_path}:\n{failed_params}"
f"for {core.path}:\n{failed_params}"
)

@@ -188,18 +188,18 @@ def base_prepare(item):
# stop only those RAIDs, which are comprised of test disks
if all(map(lambda device:
any(map(lambda disk_path:
disk_path in device.system_path,
[bd.system_path for bd in TestRun.dut.disks])),
disk_path in device.path,
[bd.path for bd in TestRun.dut.disks])),
raid.array_devices)):
raid.umount_all_partitions()
raid.remove_partitions()
raid.stop()
for device in raid.array_devices:
Mdadm.zero_superblock(device.system_path)
Mdadm.zero_superblock(device.path)
for disk in TestRun.dut.disks:
disk.umount_all_partitions()
Mdadm.zero_superblock(disk.system_path)
Mdadm.zero_superblock(disk.path)
TestRun.executor.run_expect_success("udevadm settle")
disk.remove_partitions()
create_partition_table(disk, PartitionTable.gpt)

@@ -40,10 +40,10 @@ def test_data_integrity_12h(cache_mode):
cache, core = prepare(cache_mode)
with TestRun.step("Fill cache"):
fill_cache(core.system_path)
fill_cache(core.path)
with TestRun.step("Run test workloads with verification"):
run_workload(core.system_path)
run_workload(core.path)
def prepare(cache_mode):

@@ -71,12 +71,12 @@ def test_data_integrity_5d_dss(filesystems):
with TestRun.step("Create filesystems and mount cores"):
for i, core in enumerate(cores):
mount_point = core.system_path.replace('/dev/', '/mnt/')
mount_point = core.path.replace('/dev/', '/mnt/')
if not fs_utils.check_if_directory_exists(mount_point):
fs_utils.create_directory(mount_point)
TestRun.LOGGER.info(f"Create filesystem {filesystems[i].name} on {core.system_path}")
TestRun.LOGGER.info(f"Create filesystem {filesystems[i].name} on {core.path}")
core.create_filesystem(filesystems[i])
TestRun.LOGGER.info(f"Mount filesystem {filesystems[i].name} on {core.system_path} to "
TestRun.LOGGER.info(f"Mount filesystem {filesystems[i].name} on {core.path} to "
f"{mount_point}")
core.mount(mount_point)
sync()

@@ -29,7 +29,7 @@ def test_another_cache_with_same_id():
cache_dev_1.create_partitions([Size(2, Unit.GibiByte)])
TestRun.executor.run_expect_success(
cli.start_cmd(
cache_dev_1.partitions[0].system_path, cache_id="1", force=True
cache_dev_1.partitions[0].path, cache_id="1", force=True
)
)
@@ -38,7 +38,7 @@ def test_another_cache_with_same_id():
cache_dev_2.create_partitions([Size(2, Unit.GibiByte)])
TestRun.executor.run_expect_fail(
cli.start_cmd(
cache_dev_2.partitions[0].system_path, cache_id="1", force=True
cache_dev_2.partitions[0].path, cache_id="1", force=True
)
)
@@ -69,7 +69,7 @@ def test_another_core_with_same_id():
TestRun.executor.run_expect_success(
cli.add_core_cmd(
cache_id=f"{cache.cache_id}",
core_dev=f"{core_dev_1.partitions[0].system_path}",
core_dev=f"{core_dev_1.partitions[0].path}",
core_id="1",
)
)
@@ -80,7 +80,7 @@ def test_another_core_with_same_id():
TestRun.executor.run_expect_fail(
cli.add_core_cmd(
cache_id=f"{cache.cache_id}",
core_dev=f"{core_dev_2.partitions[0].system_path}",
core_dev=f"{core_dev_2.partitions[0].path}",
core_id="1",
)
)

@@ -202,7 +202,7 @@ def test_one_core_fail(cache_mode):
with TestRun.step("Check if core device is really out of cache."):
output = str(casadm.list_caches().stdout.splitlines())
if core_part1.system_path in output:
if core_part1.path in output:
TestRun.fail("The first core device should be unplugged!")
with TestRun.step("Check if the remaining core is able to use cache."):
@@ -232,7 +232,7 @@ def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
.block_size(block_size)
.count(blocks))
if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
dd.input(dev.system_path).output("/dev/null")
dd.input(dev.path).output("/dev/null")
else:
dd.input("/dev/urandom").output(dev.system_path)
dd.input("/dev/urandom").output(dev.path)
return dd
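
dd_builder picks the transfer direction from the cache mode's traits: modes that insert on reads stream data out of the exported object into /dev/null, while the rest write random data into it. A hypothetical invocation with a prepared core:

dd = dd_builder(CacheMode.WB, dev=core, size=Size(100, Unit.MebiByte))
dd.run()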

@@ -66,7 +66,7 @@ def test_stop_no_flush_load_cache(cache_mode, filesystem):
with TestRun.step("Try to start cache without loading metadata."):
output = TestRun.executor.run_expect_fail(cli.start_cmd(
cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()),
force=False, load=False))
cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)
@@ -127,7 +127,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
with TestRun.step("Fill exported object with data."):
dd = (Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.block_size(Size(1, Unit.Blocks4096))
.oflag("direct"))
dd.run()
@@ -143,7 +143,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
with TestRun.step("Try to start cache without loading metadata."):
output = TestRun.executor.run_expect_fail(cli.start_cmd(
cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()),
force=False, load=False))
cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)

@@ -156,13 +156,13 @@ def test_add_cached_core(cache_mode):
with TestRun.step("Try adding the same core device to the second cache instance."):
output = TestRun.executor.run_expect_fail(
cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.system_path),
cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.path),
core_id=str(core.core_id)))
cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
with TestRun.step("Try adding the same core device to the same cache for the second time."):
output = TestRun.executor.run_expect_fail(
cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.system_path)))
cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.path)))
cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
with TestRun.step("Stop caches."):

@@ -37,7 +37,7 @@ def test_remove_core_during_io():
.io_engine(IoEngine.libaio)
.block_size(Size(4, Unit.KibiByte))
.read_write(ReadWrite.randrw)
.target(f"{core.system_path}")
.target(f"{core.path}")
.direct(1)
.run_time(timedelta(minutes=4))
.time_based()
@@ -78,7 +78,7 @@ def test_stop_cache_during_io():
.io_engine(IoEngine.libaio)
.block_size(Size(4, Unit.KibiByte))
.read_write(ReadWrite.randrw)
.target(f"{core.system_path}")
.target(f"{core.path}")
.direct(1)
.run_time(timedelta(minutes=4))
.time_based()

@@ -205,7 +205,7 @@ def fio_prepare(core):
.create_command()
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.randrw)
.target(core.system_path)
.target(core.path)
.continue_on_error(ErrorFilter.io)
.direct(1)
.run_time(timedelta(seconds=30))

@@ -72,7 +72,7 @@ def test_core_inactive_stats():
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(1000)
.block_size(Size(4, Unit.KibiByte))
).run()

@@ -126,7 +126,7 @@ def test_flush_inactive_devices():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to CAS device."):
run_fio([first_core.system_path, second_core.system_path])
run_fio([first_core.path, second_core.path])
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
@@ -273,7 +273,7 @@ def test_load_cache_with_inactive_core():
plug_device.unplug()
with TestRun.step("Load cache."):
output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
output = TestRun.executor.run(cli.load_cmd(cache_dev.path))
cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)
with TestRun.step("Plug missing device and stop cache."):
@@ -429,7 +429,7 @@ def test_print_statistics_inactive(cache_mode):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run IO."):
run_fio([first_core.system_path, second_core.system_path])
run_fio([first_core.path, second_core.path])
with TestRun.step("Print statistics and check if there is no inactive usage section."):
active_stats = cache.get_statistics()
@@ -460,7 +460,7 @@ def test_print_statistics_inactive(cache_mode):
time.sleep(1)
first_core_status = first_core.get_status()
if first_core_status != CoreStatus.active:
TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. "
TestRun.fail(f"Core {first_core.path} should be in active state but it is not. "
f"Actual state: {first_core_status}.")
with TestRun.step("Check cache statistics section of inactive devices."):
@@ -543,7 +543,7 @@ def test_remove_detached_cores():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
run_fio([c.path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
"dirty data."):
@@ -577,7 +577,7 @@ def test_remove_detached_cores():
with TestRun.step("Verify that cores are no longer listed."):
output = casadm.list_caches().stdout
for dev in core_devs:
if dev.system_path in output:
if dev.path in output:
TestRun.fail(f"CAS device is still listed in casadm list output:\n{output}")
@@ -612,7 +612,7 @@ def test_remove_inactive_devices():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
run_fio([c.path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
"contain dirty data."):
@@ -657,7 +657,7 @@ def test_remove_inactive_devices():
"dirty CAS device as expected.")
cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_core)
output = casadm.list_caches().stdout
if core.system_path not in output:
if core.path not in output:
TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
f"\n{output}")
core.remove_core(force=True)
@@ -695,7 +695,7 @@ def test_stop_cache_with_inactive_devices():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes and verify that CAS device contains dirty data."):
run_fio([core.system_path])
run_fio([core.path])
if core.get_dirty_blocks() == Size.zero():
TestRun.fail("There is no dirty data on core device.")

@@ -203,10 +203,10 @@ def test_udev_cache_load(cache_mode):
elif len(caches) > 1:
caches_list = '\n'.join(caches)
TestRun.fail(f"There is more than 1 cache loaded:\n{caches_list}")
elif caches[0].cache_device.system_path != cache_dev.system_path:
elif caches[0].cache_device.path != cache_dev.path:
TestRun.fail(f"Cache loaded on wrong device. "
f"Actual: {caches[0].cache_device.system_path}, "
f"expected: {cache_dev.system_path}")
f"Actual: {caches[0].cache_device.path}, "
f"expected: {cache_dev.path}")
elif caches[0].get_cache_mode() != cache_mode:
TestRun.fail(f"Cache did load with different cache mode. "
f"Actual: {caches[0].get_cache_mode()}, expected: {cache_mode}")
@@ -268,7 +268,7 @@ def test_neg_udev_cache_load():
if len(cas_devices["caches"]) != 1:
TestRun.LOGGER.error(f"There is wrong number of caches. Expected: 1, actual: "
f"{len(cas_devices['caches'])}")
elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].system_path or \
elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].path or \
CacheStatus[(cas_devices["caches"][1]["status"]).lower()] != CacheStatus.running:
TestRun.LOGGER.error(f"Cache did not load properly: {cas_devices['caches'][1]}")
if len(cas_devices["cores"]) != 2:
@@ -277,7 +277,7 @@ def test_neg_udev_cache_load():
correct_core_devices = []
for i in first_cache_core_numbers:
correct_core_devices.append(core_disk.partitions[i].system_path)
correct_core_devices.append(core_disk.partitions[i].path)
for core in cas_devices["cores"].values():
if core["device"] not in correct_core_devices or \
CoreStatus[core["status"].lower()] != CoreStatus.active or \
@@ -297,7 +297,7 @@ def test_neg_udev_cache_load():
core_pool_expected_devices = []
for i in range(0, cores_count):
if i not in first_cache_core_numbers:
core_pool_expected_devices.append(core_disk.partitions[i].system_path)
core_pool_expected_devices.append(core_disk.partitions[i].path)
for c in cas_devices["core_pool"]:
if c["device"] not in core_pool_expected_devices:
TestRun.LOGGER.error(f"Wrong core device added to core pool: {c}.")
@@ -305,11 +305,11 @@ def test_neg_udev_cache_load():
def check_if_dev_in_core_pool(dev, should_be_in_core_pool=True):
cas_devices_dict = casadm_parser.get_cas_devices_dict()
is_in_core_pool = any(dev.system_path == d["device"] for d in cas_devices_dict["core_pool"])
is_in_core_pool = any(dev.path == d["device"] for d in cas_devices_dict["core_pool"])
if not (should_be_in_core_pool ^ is_in_core_pool):
TestRun.LOGGER.info(f"Core device {dev.system_path} is"
TestRun.LOGGER.info(f"Core device {dev.path} is"
f"{'' if should_be_in_core_pool else ' not'} listed in core pool "
f"as expected.")
else:
TestRun.fail(f"Core device {dev.system_path} is{' not' if should_be_in_core_pool else ''} "
TestRun.fail(f"Core device {dev.path} is{' not' if should_be_in_core_pool else ''} "
f"listed in core pool.")

@@ -64,7 +64,7 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
else:
power_control = TestRun.plugin_manager.get_plugin('power_control')
power_control.power_cycle()
cache_dev.system_path = cache_dev_link.get_target()
cache_dev.path = cache_dev_link.get_target()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)
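
Short kernel names can migrate across reboots (what was /dev/sdb may come back as /dev/sdc), so after the power cycle the test re-resolves the stored by-id link instead of trusting the remembered path. A minimal stand-in for the link helper, assuming get_target() re-reads the symlink on every call:

import os

class DiskLink:
    def __init__(self, link_path: str):
        self.link_path = link_path  # e.g. a /dev/disk/by-id/wwn-... symlink

    def get_target(self) -> str:
        # Resolve at call time: the short name may differ after reboot.
        return os.path.realpath(self.link_path)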

@@ -86,7 +86,7 @@ def test_load_x_to_one_without_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Stop cache."):
@@ -134,7 +134,7 @@ def test_load_x_to_one_without_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Check if there are no error statistics."):
@@ -213,7 +213,7 @@ def test_load_x_to_one_with_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Stop cache."):
@@ -261,7 +261,7 @@ def test_load_x_to_one_with_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Check if there are no error statistics."):
@@ -347,7 +347,7 @@ def test_load_x_to_one_diff_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Stop cache."):
@@ -403,7 +403,7 @@ def test_load_x_to_one_diff_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Check if there are no error statistics."):

@@ -51,7 +51,7 @@ def test_load_occupied_id():
caches = casadm_parser.get_caches()
if len(caches) != 1:
TestRun.LOGGER.error("Inappropriate number of caches after load!")
if caches[0].cache_device.system_path != cache_device_2.system_path:
if caches[0].cache_device.path != cache_device_2.path:
TestRun.LOGGER.error("Wrong cache device system path!")
if caches[0].cache_id != 1:
TestRun.LOGGER.error("Wrong cache id.")

View File

@@ -42,7 +42,7 @@ def test_write_fetch_full_misses(cache_mode, cache_line_size):
io_stats_before_io = cache_disk.get_io_stats()
blocksize = cache_line_size.value / 2
skip_size = cache_line_size.value / 2
run_fio(target=core.system_path,
run_fio(target=core.path,
operation_type=ReadWrite.write,
skip=skip_size,
blocksize=blocksize,
@@ -87,7 +87,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
with TestRun.step("Fill core partition with pattern."):
cache_mode_traits = CacheMode.get_traits(cache_mode)
if CacheModeTrait.InsertRead in cache_mode_traits:
run_fio(target=core_part.system_path,
run_fio(target=core_part.path,
operation_type=ReadWrite.write,
blocksize=Size(4, Unit.KibiByte),
io_size=io_size,
@@ -103,7 +103,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
with TestRun.step("Cache half of file."):
operation_type = ReadWrite.read if CacheModeTrait.InsertRead in cache_mode_traits \
else ReadWrite.write
run_fio(target=core.system_path,
run_fio(target=core.path,
operation_type=operation_type,
skip=cache_line_size.value,
blocksize=cache_line_size.value,
@@ -117,7 +117,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
io_stats_before_io = cache_disk.get_io_stats()
blocksize = cache_line_size.value / 2 * 3
skip_size = cache_line_size.value / 2
run_fio(target=core.system_path,
run_fio(target=core.path,
operation_type=ReadWrite.write,
skip=skip_size,
blocksize=blocksize,

@@ -41,8 +41,8 @@ def test_trim_start_discard():
non_cas_part = dev.partitions[1]
with TestRun.step("Writing different pattern on partitions"):
cas_fio = write_pattern(cas_part.system_path)
non_cas_fio = write_pattern(non_cas_part.system_path)
cas_fio = write_pattern(cas_part.path)
non_cas_fio = write_pattern(non_cas_part.path)
cas_fio.run()
non_cas_fio.run()
@@ -206,15 +206,15 @@ def check_discards(discards_count, device, discards_expected):
if discards_expected:
if discards_count > 0:
TestRun.LOGGER.info(
f"{discards_count} TRIM instructions generated for {device.system_path}")
f"{discards_count} TRIM instructions generated for {device.path}")
else:
TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.system_path}")
TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.path}")
else:
if discards_count > 0:
TestRun.LOGGER.error(
f"{discards_count} TRIM instructions generated for {device.system_path}")
f"{discards_count} TRIM instructions generated for {device.path}")
else:
TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.system_path}")
TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.path}")
def start_monitoring(core_dev, cache_dev, cas_dev):

@@ -81,7 +81,7 @@ def test_ioclass_core_id(filesystem):
if filesystem:
dd_dst_paths = [cached_mountpoint + "/test_file", not_cached_mountpoint + "/test_file"]
else:
dd_dst_paths = [core_1.system_path, core_2.system_path]
dd_dst_paths = [core_1.path, core_2.path]
for path in dd_dst_paths:
dd = (

@@ -39,7 +39,7 @@ def test_ioclass_directory_depth(filesystem):
cache, core = prepare()
Udev.disable()
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
@@ -157,7 +157,7 @@ def test_ioclass_directory_file_operations(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}."):
f"and mounting {core.path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
@@ -290,7 +290,7 @@ def test_ioclass_directory_dir_operations(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()

@@ -53,7 +53,7 @@ def test_ioclass_file_extension():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
@@ -128,7 +128,7 @@ def test_ioclass_file_name_prefix():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
previous_occupancy = cache.get_occupancy()
core.create_filesystem(Filesystem.ext3)
@@ -285,7 +285,7 @@ def test_ioclass_file_offset():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
@@ -374,7 +374,7 @@ def test_ioclass_file_size(filesystem):
with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)

@@ -53,7 +53,7 @@ def test_ioclass_process_name():
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
@@ -91,7 +91,7 @@ def test_ioclass_pid():
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(dd_count)
.block_size(dd_size)
)

@@ -39,7 +39,7 @@ def test_ioclass_usage_sum():
Udev.disable()
with TestRun.step(
f"Prepare filesystem and mount {core.system_path} at {mountpoint}"
f"Prepare filesystem and mount {core.path} at {mountpoint}"
):
filesystem = Filesystem.xfs
core.create_filesystem(filesystem)

@@ -66,7 +66,7 @@ def test_ioclass_lba():
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd().input("/dev/zero")
.output(f"{core.system_path}")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
@@ -90,7 +90,7 @@ def test_ioclass_lba():
continue
dd = (
Dd().input("/dev/zero")
.output(f"{core.system_path}")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
@@ -140,7 +140,7 @@ def test_ioclass_request_size():
req_size = random.choice(cached_req_sizes)
dd = (
Dd().input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
@@ -163,7 +163,7 @@ def test_ioclass_request_size():
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd().input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
@@ -212,12 +212,12 @@ def test_ioclass_direct(filesystem):
.io_engine(IoEngine.libaio) \
.size(io_size).offset(io_size) \
.read_write(ReadWrite.write) \
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
.target(f"{mountpoint}/tmp_file" if filesystem else core.path)
with TestRun.step("Prepare filesystem."):
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
f"Preparing {filesystem.name} filesystem and mounting {core.path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
@@ -305,7 +305,7 @@ def test_ioclass_metadata(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
@@ -444,7 +444,7 @@ def test_ioclass_id_as_condition(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
@@ -553,7 +553,7 @@ def test_ioclass_conditions_or(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
@@ -614,7 +614,7 @@ def test_ioclass_conditions_and(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
f"and mounting {core.path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
@@ -662,7 +662,7 @@ def test_ioclass_effective_ioclass(filesystem):
f"file_size:ge:{file_size_bytes // 2}"]
with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}"):
f"and mounting {core.path} at {mountpoint}"):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(test_dir)

@@ -72,7 +72,7 @@ def test_acp_functional(cache_mode):
.direct()
.size(chunk_size)
.block_size(Size(1, Unit.Blocks4096))
.target(f"{core.system_path}"))
.target(f"{core.path}"))
for chunk in chunk_list:
fio.add_job().offset(chunk.offset).io_size(chunk.writes_size)
fio.run()

@@ -88,8 +88,8 @@ def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy, filesystem):
core.unmount()
TestRun.LOGGER.info(f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}")
power_cycle_dut()
cache_device.system_path = cache_device_link.get_target()
core_device.system_path = core_device_link.get_target()
cache_device.path = cache_device_link.get_target()
core_device.path = core_device_link.get_target()
with TestRun.step("Try to start cache without load and force option."):
try:

@@ -56,12 +56,12 @@ def test_recovery_flush_reset_raw(cache_mode):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
with TestRun.step("Copy file to CAS."):
copy_file(source=source_file.full_path, target=core.system_path, size=test_file_size,
copy_file(source=source_file.full_path, target=core.path, size=test_file_size,
direct="oflag")
with TestRun.step("Sync and flush buffers."):
os_utils.sync()
output = TestRun.executor.run(f"hdparm -f {core.system_path}")
output = TestRun.executor.run(f"hdparm -f {core.path}")
if output.exit_code != 0:
raise CmdException("Error during hdparm", output)
@@ -70,8 +70,8 @@ def test_recovery_flush_reset_raw(cache_mode):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(wait_for_flush_begin=True, core_device=core_device)
cache_device.system_path = cache_device_link.get_target()
core_device.system_path = core_device_link.get_target()
cache_device.path = cache_device_link.get_target()
core_device.path = core_device_link.get_target()
with TestRun.step("Copy file from core and check if current md5sum is different than "
"before restart."):
@@ -155,8 +155,8 @@ def test_recovery_flush_reset_fs(cache_mode, fs):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(True, core_device)
cache_device.system_path = cache_device_link.get_target()
core_device.system_path = core_device_link.get_target()
cache_device.path = cache_device_link.get_target()
core_device.path = core_device_link.get_target()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_device)

@@ -131,7 +131,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls):
core = cache.add_core(core_device)
with TestRun.step("Copy file to CAS."):
copy_file(source=source_file.full_path, target=core.system_path,
copy_file(source=source_file.full_path, target=core.path,
size=test_file_size, direct="oflag")
TestRun.LOGGER.info(str(core.get_statistics()))
@@ -156,7 +156,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls):
cache.stop()
with TestRun.step("Copy file from core device and check md5sum."):
copy_file(source=core_device.system_path, target=target_file.full_path,
copy_file(source=core_device.path, target=target_file.full_path,
size=test_file_size, direct="iflag")
compare_files(source_file, target_file)

@@ -170,4 +170,4 @@ def test_flush_over_640_gibibytes_raw_device(cache_mode):
def check_disk_size(device: Device):
if device.size < required_disk_size:
pytest.skip(f"Not enough space on device {device.system_path}.")
pytest.skip(f"Not enough space on device {device.path}.")

@@ -60,7 +60,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Write data to the exported object."):
test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
dd = Dd().output(core.system_path) \
dd = Dd().output(core.path) \
.input(test_file_main.full_path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
@@ -85,7 +85,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the exported object."):
test_file_1 = File.create_file("/tmp/test_file_1")
dd = Dd().output(test_file_1.full_path) \
.input(core.system_path) \
.input(core.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -100,7 +100,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_2 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_2.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -133,7 +133,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_3 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_3.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -277,7 +277,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Write data to exported object."):
test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
dd = Dd().output(core.system_path) \
dd = Dd().output(core.path) \
.input(test_file_main.full_path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
@@ -302,7 +302,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from the exported object."):
test_file_1 = File.create_file("/tmp/test_file_1")
dd = Dd().output(test_file_1.full_path) \
.input(core.system_path) \
.input(core.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -317,7 +317,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_2 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_2.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@@ -350,7 +350,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from core device again."):
test_file_3 = File.create_file("/tmp/test_file_3")
dd = Dd().output(test_file_3.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")

@@ -76,7 +76,7 @@ def test_user_cli():
with TestRun.step("Try to start cache."):
try:
output = run_as_other_user(cli.start_cmd(cache_dev.system_path), user_name)
output = run_as_other_user(cli.start_cmd(cache_dev.path), user_name)
if output.exit_code == 0:
TestRun.LOGGER.error("Starting cache should fail!")
except CmdException:
@@ -105,7 +105,7 @@ def test_user_cli():
with TestRun.step("Try to add core to cache."):
try:
output = run_as_other_user(cli.add_core_cmd(str(cache.cache_id),
core_part2.system_path), user_name)
core_part2.path), user_name)
if output.exit_code == 0:
TestRun.LOGGER.error("Adding core to cache should fail!")
except CmdException:
@@ -244,7 +244,7 @@ def test_user_cli():
with TestRun.step("Try to start cache with 'sudo'."):
try:
run_as_other_user(cli.start_cmd(cache_dev.system_path, force=True), user_name, True)
run_as_other_user(cli.start_cmd(cache_dev.path, force=True), user_name, True)
except CmdException:
TestRun.LOGGER.error("Non-root sudoer user should be able to start cache.")
@@ -259,7 +259,7 @@ def test_user_cli():
with TestRun.step("Try to add core to cache with 'sudo'."):
try:
run_as_other_user(cli.add_core_cmd(str(cache.cache_id),
core_part1.system_path), user_name, True)
core_part1.path), user_name, True)
except CmdException:
TestRun.LOGGER.error("Non-root sudoer user should be able to add core to cache.")

@@ -100,7 +100,7 @@ def test_block_stats_write(cache_mode, zero_stats):
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.oflag("direct")
@@ -225,7 +225,7 @@ def test_block_stats_read(cache_mode, zero_stats):
dd = (
Dd()
.output("/dev/zero")
.input(f"{core.system_path}")
.input(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.iflag("direct")

@@ -271,8 +271,8 @@ def dd_builder(cache_mode, cache_line_size, count, device):
.count(count))
if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
dd.input(device.system_path).output("/dev/null").iflag("direct")
dd.input(device.path).output("/dev/null").iflag("direct")
else:
dd.input("/dev/urandom").output(device.system_path).oflag("direct")
dd.input("/dev/urandom").output(device.path).oflag("direct")
return dd

@@ -66,7 +66,7 @@ def test_cache_config_stats():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio_pid = fio.run_in_background()
with TestRun.step(f"Wait {time_to_wait} seconds"):
@@ -107,7 +107,7 @@ def test_core_config_stats():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio_pid = fio.run_in_background()
with TestRun.step(f"Wait {time_to_wait} seconds"):
@@ -255,11 +255,11 @@ def validate_cache_config_statistics(caches, after_io: bool = False):
failed_stats += (
f"For cache number {caches[i].cache_id} cache ID is "
f"{caches_stats[i].config_stats.cache_id}\n")
if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.system_path:
if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.path:
failed_stats += (
f"For cache number {caches[i].cache_id} cache device "
f"is {caches_stats[i].config_stats.cache_dev}, "
f"should be {caches[i].cache_device.system_path}\n")
f"should be {caches[i].cache_device.path}\n")
if caches_stats[i].config_stats.cache_size.value != caches[i].size.value:
failed_stats += (
f"For cache number {caches[i].cache_id} cache size is "
@@ -344,23 +344,23 @@ def validate_core_config_statistics(cores, caches=None):
for j in range(cores_per_cache)
]
for j in range(cores_per_cache):
if cores_stats[j].config_stats.exp_obj != cores[i][j].system_path:
if cores_stats[j].config_stats.exp_obj != cores[i][j].path:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"value in stats is {cores_stats[j].config_stats.exp_obj}\n")
if cores_stats[j].config_stats.core_id != cores[i][j].core_id:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"core ID is {cores_stats[j].config_stats.core_id}, "
f"should be {cores[i][j].core_id}\n")
if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.system_path:
if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.path:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"core device is {cores_stats[j].config_stats.core_dev}, "
f"should be {cores[i][j].core_device.system_path}\n")
f"should be {cores[i][j].core_device.path}\n")
if cores_stats[j].config_stats.core_size.value != cores[i][j].size.value:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"core size is {cores_stats[j].config_stats.core_size.value}, "
f"should be {cores[i][j].size.value}\n")
if (
@@ -368,16 +368,16 @@ def validate_core_config_statistics(cores, caches=None):
!= cores[i][j].get_status()
):
failed_stats += (
f"For exported object {cores[i][j].system_path} core "
f"For exported object {cores[i][j].path} core "
f"status is {cores_stats[j].config_stats.status}, should be "
f"{str(cores[i][j].get_status()).split('.')[1].capitalize()}\n")
if cores_stats[j].config_stats.seq_cutoff_policy is None:
failed_stats += (
f"For exported object {cores[i][j].system_path} value of "
f"For exported object {cores[i][j].path} value of "
f"Sequential cut-off policy should not be empty\n")
if cores_stats[j].config_stats.seq_cutoff_threshold.value <= 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} value of "
f"For exported object {cores[i][j].path} value of "
f"Sequential cut-off threshold should be greater then 0\n")
if caches:
cache_mode = CacheMode[
@@ -386,21 +386,21 @@ def validate_core_config_statistics(cores, caches=None):
if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
if cores_stats[j].config_stats.dirty_for.total_seconds() <= 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} in "
f"For exported object {cores[i][j].path} in "
f"{cache_mode} cache mode, value of 'Dirty for' "
f"after IO is {cores_stats[j].config_stats.dirty_for}, "
f"should be greater then 0\n")
else:
if cores_stats[j].config_stats.dirty_for.total_seconds() != 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} in "
f"For exported object {cores[i][j].path} in "
f"{cache_mode} cache mode, value of 'Dirty for' "
f"after IO is {cores_stats[j].config_stats.dirty_for}, "
f"should equal 0\n")
else:
if cores_stats[j].config_stats.dirty_for.total_seconds() < 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} value of "
f"For exported object {cores[i][j].path} value of "
f"'Dirty for' is {cores_stats[j].config_stats.dirty_for}, "
f"should be greater or equal 0\n")
@@ -412,7 +412,7 @@ def validate_core_config_statistics(cores, caches=None):
def validate_statistics_flat(device, stats, stat_filter, per_core: bool):
device_name = (
f"core device {device.system_path}" if per_core else
f"core device {device.path}" if per_core else
f"cache number {device.cache_id}")
failed_stats = ""
if stat_filter == StatsFilter.usage:

@@ -310,7 +310,7 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1):
Udev.disable()
caches, cores = [], []
for i, cache_device in enumerate(cache_devices):
TestRun.LOGGER.info(f"Starting cache on {cache_device.system_path}")
TestRun.LOGGER.info(f"Starting cache on {cache_device.path}")
cache = casadm.start_cache(cache_device,
force=True,
cache_mode=cache_modes[i],
@@ -320,7 +320,7 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1):
cache.set_cleaning_policy(CleaningPolicy.nop)
for core_device in core_devices[i * cores_per_cache:(i + 1) * cores_per_cache]:
TestRun.LOGGER.info(
f"Adding core device {core_device.system_path} to cache {cache.cache_id}")
f"Adding core device {core_device.path} to cache {cache.cache_id}")
core = cache.add_core(core_dev=core_device)
core.reset_counters()
cores.append(core)

@@ -78,7 +78,7 @@ def test_stat_max_cache():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio.run()
sleep(3)
@@ -128,7 +128,7 @@ def test_stat_max_core(cache_mode):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for j in range(cores_per_cache):
fio.add_job().target(cores[j].system_path)
fio.add_job().target(cores[j].path)
fio.run()
sleep(3)

@@ -61,7 +61,7 @@ def test_stats_values():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio.run()
sleep(3)
@@ -156,22 +156,22 @@ def check_stats_initial(caches, cores):
if stat_name.lower() == "free":
if stat_value != caches[i].size.value:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} "
f"For core device {cores[i][j].path} "
f"value for '{stat_name}' is {stat_value}, "
f"should equal cache size: {caches[i].size.value}\n")
elif stat_value != 0:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} value for "
f"For core device {cores[i][j].path} value for "
f"'{stat_name}' is {stat_value}, should equal 0\n")
for stat_name, stat_value in cores_stats_perc[j].items():
if stat_name.lower() == "free":
if stat_value != 100:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} percentage value "
f"For core device {cores[i][j].path} percentage value "
f"for '{stat_name}' is {stat_value}, should equal 100\n")
elif stat_value != 0:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} percentage value "
f"For core device {cores[i][j].path} percentage value "
f"for '{stat_name}' is {stat_value}, should equal 0\n")
@@ -191,7 +191,7 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
)
for j in range(cores_per_cache):
fail_message = (
f"For core device {cores[i][j].system_path} in {cache_mode} cache mode ")
f"For core device {cores[i][j].path} in {cache_mode} cache mode ")
if after_reload:
validate_usage_stats(
cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)

View File

@@ -194,7 +194,7 @@ def test_kedr_basic_io_raw(module, unload_modules, install_kedr):
.run_time(timedelta(minutes=4))
.time_based()
.read_write(ReadWrite.randrw)
.target(f"{core.system_path}")
.target(f"{core.path}")
.direct()
).run()

View File

@@ -79,7 +79,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
.num_jobs(cores_number)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
output = fio.run()[0]
TestRun.LOGGER.info(f"Total read I/O [KiB]: {str(output.read_io())}\n"
f"Total write I/O [KiB]: {str(output.write_io())}")
@@ -88,7 +88,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
md5sum_core = []
for core in cores:
md5sum_core.append(TestRun.executor.run(
f"md5sum -b {core.system_path}").stdout.split(" ")[0])
f"md5sum -b {core.path}").stdout.split(" ")[0])
with TestRun.step("Stop cache."):
cache.stop()
@@ -97,7 +97,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
md5sum_core_dev = []
for core_dev in core_dev.partitions:
md5sum_core_dev.append(TestRun.executor.run(
f"md5sum -b {core_dev.system_path}").stdout.split(" ")[0])
f"md5sum -b {core_dev.path}").stdout.split(" ")[0])
with TestRun.step("Compare md5 sum of exported objects and cores."):
if md5sum_core_dev != md5sum_core:

View File

@@ -105,5 +105,5 @@ def run_io(exported_objects):
.io_depth(32) \
.run_time(timedelta(minutes=5)) \
.num_jobs(5) \
.target(exported_objects[i].system_path)
.target(exported_objects[i].path)
fio.run_in_background()

View File

@@ -39,8 +39,8 @@ def test_trim_start_discard():
non_cas_part = dev.partitions[1]
with TestRun.step("Writing different pattern on partitions"):
cas_fio = write_pattern(cas_part.system_path)
non_cas_fio = write_pattern(non_cas_part.system_path)
cas_fio = write_pattern(cas_part.path)
non_cas_fio = write_pattern(non_cas_part.path)
cas_fio.run()
non_cas_fio.run()

View File

@@ -44,7 +44,7 @@ def test_discard_on_huge_core():
# RCU-sched type stall sometimes appears in dmesg log after more
# than one execution of blkdiscard.
for _ in range(8):
TestRun.executor.run_expect_success(f"blkdiscard {core.system_path}")
TestRun.executor.run_expect_success(f"blkdiscard {core.path}")
with TestRun.step("Check dmesg for RCU-sched stall."):
check_for_rcu_sched_type_stall()

View File

@@ -28,13 +28,13 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start cache with ID=1 on device ${CACHE_DEVICE}1 (/dev/sda1, for instance)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Use the same device but a different ID - negative test
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" NEGATIVE_TEST_OPTION="1" start_cache
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" NEGATIVE_TEST_OPTION="1" start_cache
# Use the same ID but a different device - another negative test
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" NEGATIVE_TEST_OPTION="1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" NEGATIVE_TEST_OPTION="1" start_cache
# Clear up after test
CACHE_ID_OPTION="1" stop_cache

View File

@@ -31,22 +31,22 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Try to add an already-taken CORE device and a non-existing core to cache 1
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Try to add an already-taken CORE device and a non-existing core to cache 2
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Remove the core device from cache
CACHE_ID_OPTION="1" CORE_ID_OPTION="1" remove_core

View File

@@ -28,21 +28,21 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Create 3 primary partitions on CORE_DEVICE, each of 4000M size
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
run_cmd dd if=/dev/zero of="${CORE_DEVICE}1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}2" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}3" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part2" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part3" bs=1M count=1 oflag=direct
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@@ -68,7 +68,7 @@ done
for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" stop_cache
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices

View File

@@ -31,7 +31,7 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
# Try to start caches with each of the positive (valid) IDs in a loop and stop them later - if
# any of those operations fails, it means a valid cache ID was rejected
for ID in $POSITIVE_IDS ; do
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="$ID" stop_cache
done
@@ -41,7 +41,7 @@ done
# automatically.
for ID in $NEGATIVE_IDS ; do
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="$ID"
CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="yes" start_cache
CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="yes" start_cache
done
# Always return 0 at the end of the test - if at any point something has failed

View File

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
#DESCRIPTION --set-param option fuzzing.
# DESCRIPTION --set-param option fuzzing.
# This test checks whether the CLI accepts correct data and rejects incorrect
# data for "--flush-parameters" option. It tries to invoke CLI using different
@@ -22,15 +22,9 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# create cache in WT mode and try to change flush-parameters
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" start_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" start_cache
# changing flush parameters should be prohibited while a core is added to the cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="$CORE_DEVICE" add_core
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
# remove core
sleep 1
CACHE_ID_OPTION="1" CORE_ID_OPTION="1" remove_core
printf "\n============Running negative tests============\n"
@@ -45,8 +39,8 @@ CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STA
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="-1" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="-1" NEGATIVE_TEST_OPTION="1" set_flush_params
# test for 0 wake_up_time and 0 flush buffers
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="0" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
# test for 0 staleness-time and 0 flush buffers
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="0" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="0" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
printf "\n============Running positive tests============\n"
@@ -58,9 +52,10 @@ CACHE_ID_OPTION="1" CLEAN_POL_OPTION="acp" set_cleaning_policy
CACHE_ID_OPTION="1" CLEAN_POL_OPTION="alru" set_cleaning_policy
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="1" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="0" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="0" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="1" ACTIVITY_THRESH_OPTION="500" set_flush_params
# stop cache now
CACHE_ID_OPTION="1" stop_cache

View File

@@ -42,14 +42,14 @@ do
echo "------Start CAS Linux in $mode mode"
# This is where the real test starts
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"1 CACHE_MODE_OPTION="$mode"
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"-part1 CACHE_MODE_OPTION="$mode"
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"1 add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"-part1 add_core
sleep 2
# iostat read
TMP_CACHE_DEVICE=$(echo "${CACHE_DEVICE}" | cut -c6-)
TMP_CACHE_DEVICE=$(echo "$(realpath ${CACHE_DEVICE})" | cut -c6-)
run_cmd "dd if=/dev/cas1-1 of=$TMP_DIR/file001.bin bs=$BLOCK_SIZE count=$COUNT skip=10000 iflag=direct"
READ_CACHE_1=$(iostat "${CACHE_DEVICE}" | grep $TMP_CACHE_DEVICE | awk 'NR==1 {print $5}')
run_cmd "dd if=/dev/cas1-1 of=$TMP_DIR/file001.bin bs=$BLOCK_SIZE count=$COUNT skip=10000 iflag=direct"
@@ -74,14 +74,14 @@ do
echo "------Start CAS Linux in $mode mode"
# This is where the real test starts
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"1 CACHE_MODE_OPTION="$mode"
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"-part1 CACHE_MODE_OPTION="$mode"
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"1 add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"-part1 add_core
sleep 2
# iostat write
TMP_CORE_DEVICE=$(echo "${CORE_DEVICE}" | cut -c6-)
TMP_CORE_DEVICE=$(echo "$(realpath ${CORE_DEVICE})" | cut -c6-)
WRITE_CORE_0=$(iostat "${CORE_DEVICE}" | grep $TMP_CORE_DEVICE | awk 'NR==1 {print $6}')
WRITE_CACHE_0=$(iostat "${CACHE_DEVICE}" | grep $TMP_CACHE_DEVICE | awk 'NR==1 {print $6}')
run_cmd "dd if=$TMP_DIR/file001.bin of=/dev/cas1-1 bs=$BLOCK_SIZE count=$COUNT seek=20000 oflag=direct"

View File

@@ -28,12 +28,12 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start caches
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" start_cache
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" start_cache
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" start_cache
# Assemble multi-level cache (add cores)
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${DEVICE_NAME}1-1" add_core
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${DEVICE_NAME}2-1" add_core

View File

@@ -31,16 +31,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@@ -67,8 +67,8 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" flush_cache
CACHE_ID_OPTION="$ID" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}-part${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices

View File

@@ -31,16 +31,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@@ -67,8 +67,8 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" flush_cache
CACHE_ID_OPTION="$ID" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}-part${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices

View File

@@ -23,14 +23,14 @@ TEST_DEVICE=${DEVICE_NAME}1-1
#param device
get_stat_sectors_read() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $6 }')
echo $L_STAT
}
#param device
get_stat_sectors_written() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $10 }')
echo $L_STAT
}
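# A minimal sketch of why realpath is needed above (hypothetical by-id link):
# /proc/diskstats lists kernel device names (sda1, nvme0n1p1), never the
# by-id symlinks, so the link has to be dereferenced before grepping:
#   basename "$(realpath /dev/disk/by-id/ata-EXAMPLE_DISK-part1)"   # -> sda1
#   awk '$3 == "sda1" { print $6, $10 }' /proc/diskstats   # sectors read/written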
@@ -46,13 +46,13 @@ cache_suspend_init() {
# Create 1 primary partitions on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
run_cmd dd if=/dev/zero of="${CORE_DEVICE}1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part1" bs=1M count=1 oflag=direct
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
cache_suspend_deinit() {
@@ -80,7 +80,7 @@ cache_suspend_test() {
CACHE_MODE_FLUSH_OPTION="yes" CACHE_ID_OPTION="1" CACHE_MODE_OPTION="pt" set_cache_mode
# Get read cache statistics before.
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
# Read file
test_log_trace "Read 4k, Read has to be performed from core"
@@ -89,7 +89,7 @@ cache_suspend_test() {
# Sync
sync && echo 3 > /proc/sys/vm/drop_caches
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
test_log_trace "Cache reads before : $L_CACHE_READS_BEFORE"
test_log_trace "Cache reads after : $L_CACHE_READS_AFTER"
@@ -107,9 +107,9 @@ cache_suspend_test() {
sync && echo 3 > /proc/sys/vm/drop_caches
# Get statistics
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
# Read file
test_log_trace "Read 4k, read form core only"
@@ -119,9 +119,9 @@ cache_suspend_test() {
sync && echo 3 > /proc/sys/vm/drop_caches
# Get statistics
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
test_log_trace "Core reads before : $L_CORE_READS_BEFORE"
test_log_trace "Core reads after : $L_CORE_READS_AFTER"
@@ -146,8 +146,8 @@ cache_suspend_test() {
# Resume the cache
CACHE_ID_OPTION="1" CACHE_MODE_OPTION="wt" set_cache_mode
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
test_log_trace "Read 4k, read form core, write on cache"
# Read file
@@ -156,8 +156,8 @@ cache_suspend_test() {
# Sync
sync && echo 3 > /proc/sys/vm/drop_caches
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
test_log_trace "Core reads before : $L_CORE_READS_BEFORE"
test_log_trace "Core reads after : $L_CORE_READS_AFTER"

View File

@@ -27,10 +27,10 @@ TEST_COUNT_HALF=16384
#param device
get_stat_sectors_read() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
if [[ ${L_DEVICE} =~ "nvme" ]]
then
L_DEVICE="${L_DEVICE:0:${#L_DEVICE}-1}p${L_DEVICE: -1}"
L_DEVICE="${L_DEVICE:0:${#L_DEVICE}-1}-part${L_DEVICE: -1}"
fi
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $6 }')
echo $L_STAT
@@ -38,7 +38,7 @@ get_stat_sectors_read() {
#param device
get_stat_sectors_written() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $10 }')
echo $L_STAT
}
@@ -54,13 +54,13 @@ cache_suspend_init() {
# Create 1 primary partitions on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
run_cmd dd if=/dev/zero of="${CORE_DEVICE}1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part1" bs=1M count=1 oflag=direct
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
cache_suspend_deinit() {
@@ -107,8 +107,8 @@ cache_suspend_test() {
CACHE_ID_OPTION="1" CACHE_MODE_OPTION="wt" set_cache_mode
# Get statistics before
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
# Read from CAS
test_log_trace "Read from CAS"
@@ -117,8 +117,8 @@ cache_suspend_test() {
sync && echo 3 > /proc/sys/vm/drop_caches
# Get statistics after
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
test_log_trace "Cache reads before : $L_CACHE_READS_BEFORE"
test_log_trace "Cache reads after : $L_CACHE_READS_AFTER"

View File

@@ -74,12 +74,12 @@ check_config() {
fi
# Check if core and cache devices are free for us or not
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CORE_DEVICE}[0-9]* 2> /dev/null)" ] ; then
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CORE_DEVICE}-part[0-9]* 2> /dev/null)" ] ; then
warning "The core device $CORE_DEVICE is partitioned! Some tests may remove partitions from this device"
warning "Use --ignore | -i flag to force using this core device"
exit 1
fi
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CACHE_DEVICE}[0-9]* 2> /dev/null)" ] ; then
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CACHE_DEVICE}-part[0-9]* 2> /dev/null)" ] ; then
warning "The cache device $CACHE_DEVICE is partitioned!"
warning "Use --ignore | -i flag to force using this cache device"
exit 1
@@ -93,11 +93,15 @@ check_config() {
umount $DEVICE_TO_UMOUNT
done
if [ -n "$(mount | grep $CACHE_DEVICE)" ] ; then
SHORT_CACHE_LINK=$(realpath $CACHE_DEVICE)
OUTPUT_MOUNT_CACHE=$(mount | grep -E "$CACHE_DEVICE|$SHORT_CACHE_LINK")
if [ -n "$OUTPUT_MOUNT_CACHE" ] ; then
error "The cache device $CACHE_DEVICE or one of its partitions is mounted!"
exit 1
fi
if [ -n "$(mount | grep $CORE_DEVICE)" ] ; then
SHORT_CORE_LINK=$(realpath $CORE_DEVICE)
OUTPUT_MOUNT_CORE=$(mount | grep -E "$CORE_DEVICE|$SHORT_CORE_LINK")
if [ -n "$OUTPUT_MOUNT_CORE" ] ; then
error "The core device $CORE_DEVICE or one of its partitions is mounted!"
exit 1
fi
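# Sketch of the intended check (hypothetical by-id path); the alternation is
# quoted so the shell does not parse `|` as a pipe:
#   DEV="/dev/disk/by-id/nvme-EXAMPLE_SSD"
#   LINK=$(realpath "$DEV")        # e.g. /dev/nvme0n1
#   mount | grep -E "$DEV|$LINK"   # matches a mount under either path form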

View File

@@ -16,6 +16,7 @@ OCF
RHEL
SLES
SSD
SSDP
SUSE
TERM
TTY
@@ -48,7 +49,10 @@ reseller
resizing
runtime
sdb
sdx
utf
wa
wb
wt
wo
wwn

View File

@@ -30,18 +30,6 @@ restore_config() {
start_cache() {
check_options ${FUNCNAME[0]}
CACHE_DEVICE_NVME_REGEX="(/dev/nvme[0-9]n[0-9])([0-9]*)"
if [ ! -b ${CACHE_DEVICE_OPTION} ]
then
if [[ "${CACHE_DEVICE_OPTION}" =~ ${CACHE_DEVICE_NVME_REGEX} ]]
then
if [ -b ${BASH_REMATCH[1]}p${BASH_REMATCH[2]} ]
then
CACHE_DEVICE_OPTION=${BASH_REMATCH[1]}p${BASH_REMATCH[2]}
fi
fi
fi
local COMMAND="$CAS --start-cache --cache-device $CACHE_DEVICE_OPTION --cache-id $CACHE_ID_OPTION"
if [ -n "$CACHE_FORCE_OPTION" ] ; then
@@ -113,7 +101,8 @@ add_core() {
return 0
fi
local i=0
local CAS_DEV=` casadm -L | egrep "^.core +[0-9]+ +$CORE_DEVICE_OPTION" | awk '{print $NF}'`
local SHORT_LINK=$(realpath $CORE_DEVICE_OPTION)
local CAS_DEV=` casadm -L | egrep "^.core +[0-9]+ +$SHORT_LINK" | awk '{print $NF}'`
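# Note: `casadm -L` prints the resolved short path (/dev/sdX) of each core,
# hence the realpath before matching; a hypothetical listing row looks like:
#   +core   1   /dev/sde1   Active   -   /dev/cas1-1
# and `awk '{print $NF}'` picks the exported object from the last column.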
clear_options
while [ ! -e $CAS_DEV ]; do
sleep 2
@@ -202,19 +191,6 @@ get_stats_value() {
clear_options
}
format_nvme() {
check_options ${FUNCNAME[0]}
local COMMAND="$CAS --nvme --format $NVME_FORMAT_MODE_OPTION --device $DEVICE_OPTION"
if [ -n "$NVME_FORMAT_FORCE_OPTION" ] ; then
COMMAND="$COMMAND --force"
fi
run_cmd $COMMAND
clear_options
}
init() {
check_options ${FUNCNAME[0]}
@@ -304,10 +280,10 @@ init() {
CACHE_LINE_SIZE="$L_CACHE_LINE_SIZE"
fi
CACHE_ID_OPTION="$i" CACHE_DEVICE_OPTION="${CACHE_DEVICE}$i" start_cache
CACHE_ID_OPTION="$i" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part$i" start_cache
for ((j = 1; j <= L_NUMBER_OF_CORE_PARTITIONS && j <= MAX_NUMBER_OF_CORE_PARTITIONS; j++)); do
CACHE_ID_OPTION="$i" CORE_DEVICE_OPTION="${CORE_DEVICE}$k" add_core
CACHE_ID_OPTION="$i" CORE_DEVICE_OPTION="${CORE_DEVICE}-part$k" add_core
k=`expr $k \+ 1`
done
done
@@ -717,7 +693,6 @@ export -f try_add_core
export -f remove_core
export -f check_device_state
export -f get_stats_value
export -f format_nvme
export -f init
export -f iteration
export -f deinit
@@ -822,9 +797,9 @@ make_primary_partitions() {
for ID in `seq 1 $PART_NUM`; do
local i=0
local TEST_DEV="${TARGET_DEVICE_OPTION}${ID}"
local TEST_DEV_P="${TARGET_DEVICE_OPTION}p${ID}"
while ! [[ -b $TEST_DEV || -b $TEST_DEV_P ]] ; do
local TEST_DEV="${DEV_NAME}-part${ID}"
local TEST_DEV_P="${DEV_NAME}-part${ID}"
while ! [[ -L $TEST_DEV || -L $TEST_DEV_P ]] ; do
# make sure that partition is detected if it was created
partprobe
sleep 1
@@ -843,7 +818,7 @@ make_primary_partitions() {
# erase all filesystem/cas cache metadata that may have existed on it
# before.
if [ -b $TEST_DEV ]
if [ -L $TEST_DEV ]
then
run_cmd dd if=/dev/zero of="${TEST_DEV}" bs=1M count=1 oflag=direct
else
@@ -927,6 +902,7 @@ remove_caches() {
}
turn_on_device() {
# Use the realpath that was resolved before the device was turned off
check_options ${FUNCNAME[0]}
if [[ $CACHE_DEVICE_OPTION == "/dev/nvme"* ]] ; then
turn_on_nvme_device
@@ -939,17 +915,19 @@ turn_on_device() {
turn_off_device() {
check_options ${FUNCNAME[0]}
if [[ $CACHE_DEVICE_OPTION == "/dev/nvme"* ]] ; then
SHORT_LINK=$(realpath $CACHE_DEVICE_OPTION)
if [[ $SHORT_LINK == "/dev/nvme"* ]] ; then
turn_off_nvme_device
else
local COMMAND="echo 'offline' > /sys/block/${CACHE_DEVICE_OPTION:4}/device/state"
local COMMAND="echo 'offline' > /sys/block/${SHORT_LINK:4}/device/state"
run_cmd $COMMAND
fi
}
turn_off_nvme_device() {
check_options ${FUNCNAME[0]}
COMMAND="echo '1' > /sys/block/${CACHE_DEVICE_OPTION:4}/device/device/remove"
SHORT_LINK=$(realpath $CACHE_DEVICE_OPTION)
COMMAND="echo '1' > /sys/block/${SHORT_LINK:4}/device/device/remove"
run_cmd $COMMAND
clear_options
}
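# Both branches strip the "/dev" prefix from the resolved path to build the
# sysfs path (hypothetical disks):
#   SHORT_LINK=/dev/sdb     ->  echo 'offline' > /sys/block/sdb/device/state
#   SHORT_LINK=/dev/nvme0n1 ->  echo '1' > /sys/block/nvme0n1/device/device/remove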

View File

@@ -30,6 +30,21 @@ check_if_root_or_exit() {
fi
}
resolve_path() {
local BY_ID_DIR="/dev/disk/by-id"
local BY_ID_LINKS=$(ls $BY_ID_DIR)
for BY_ID_PATH in $BY_ID_LINKS
do
FULL_PATH="${BY_ID_DIR}/${BY_ID_PATH}"
if [[ "$(realpath $FULL_PATH)" -ef "$(realpath $DEVICE)" ]]
then
DEVICE=$FULL_PATH
break
fi
done
}
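# A sketch of what resolve_path does (hypothetical names): given
# DEVICE=/dev/sdb it scans /dev/disk/by-id and keeps the first link whose
# realpath points at the same block node, e.g.
#   realpath /dev/disk/by-id/ata-EXAMPLE_DISK_SN123   # -> /dev/sdb
# so DEVICE is rewritten to /dev/disk/by-id/ata-EXAMPLE_DISK_SN123.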
parse_args() {
while [ -n "$1" ] ; do
@@ -51,10 +66,14 @@ parse_args() {
-i | --ignore ) export IGNORE_WARNINGS="1"
;;
-c | --cache ) shift
CACHE_DEVICE="$1"
DEVICE="$1"
resolve_path
CACHE_DEVICE=$DEVICE
;;
-d | --core ) shift
CORE_DEVICE="$1"
DEVICE="$1"
resolve_path
CORE_DEVICE=$DEVICE
;;
* ) echo "Unrecognized option"
usage

View File

@@ -9,8 +9,8 @@
# If you want to use this file, rename it to "cas_local_config".
# Default core and cache devices - note that we require whole devices, not partitions
export CORE_DEVICE="/dev/sdd"
export CACHE_DEVICE="/dev/sdf"
export CORE_DEVICE="/dev/disk/by-id/ata-SUPER_SPEED_DISK_SSD"
export CACHE_DEVICE="/dev/disk/by-id/nvme-BETTER_SSD_KINGOFSSDS"
# Default size of partition for cache/core device. This is used only for
# the DEFAULT_* API functions
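# A by-id link for a given disk can be looked up like this (hypothetical
# output):
#   ls -l /dev/disk/by-id/ | grep -w sdd
#   lrwxrwxrwx 1 root root 9 ... ata-SUPER_SPEED_DISK_SSD -> ../../sdd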

View File

@@ -20,7 +20,7 @@ export ALL_OPTIONS="
PROMO_POL_NS_OPTION PROMO_POL_VALUE THRESHOLD_OPTION TRIGGER_OPTION THRESHOLD_VALUE TRIGGER_VALUE THRESHOLD_VALUE_ERROR TRIGGER_VALUE_ERROR
TARGET_DEVICE_OPTION FILESYSTEM_TYPE
IO_CLASS_ID IO_CLASS_PRIORITY IO_CLASS_SIZE_MIN IO_CLASS_SIZE_MAX IO_CLASS_NAME IO_CLASS_CACHE_MODE
FORMAT_NVME_REQUIRED_OPTIONS CHECK_IS_NVME_ATOMIC TURN_OFF_NVME_DEVICE TURN_ON_NVME_DEVICE
CHECK_IS_NVME_ATOMIC TURN_OFF_NVME_DEVICE TURN_ON_NVME_DEVICE
DEVICE_ID_OPTION DEMANDED_STATE_OPTION
STAT_UNIT_OPTION STAT_NAME_OPTION
STORE_CONFIG_OPTION
@@ -50,7 +50,6 @@ export SET_FLUSH_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION CLEAN_POL_NS_OPTION"
export GET_FLUSH_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION CLEAN_POL_NS_OPTION"
export SET_PROMOTION_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION PROMO_POL_NS_OPTION"
export CHECK_PROMOTION_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION PROMO_POL_NS_OPTION"
export FORMAT_NVME_REQUIRED_OPTIONS="NVME_FORMAT_MODE_OPTION DEVICE_OPTION"
export CHECK_IS_NVME_ATOMIC_REQUIRED_OPTIONS="DEVICE_OPTION"
export CREATE_PARTITION_REQUIRED_OPTIONS="CACHE_ID_OPTION PARTITION_ID_OPTION PRIORITY_OPTION MIN_SIZE_OPTION MAX_SIZE_OPTION CLEANING_POLICY_OPTION"

View File

@@ -48,10 +48,10 @@ eviction_policy_init() {
fi
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
eviction_policy_flush() {

View File

@@ -34,7 +34,7 @@ eviction_policy_init() {
# Create 1 primary partitions on CACHE_DEVICE
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION=$CACHE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
# Make empty cache device, clear previous content, clear previous metadata
dd if=/dev/zero of="${CACHE_DEVICE}1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
dd if=/dev/zero of="${CACHE_DEVICE}-part1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
# Create 1 primary partitions on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
@@ -50,10 +50,10 @@ eviction_policy_init() {
fi
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
eviction_policy_flush() {
@@ -322,7 +322,7 @@ test_log_start
run_cmd eviction_policy_init
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}-part1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_deinit

View File

@@ -34,7 +34,7 @@ eviction_policy_init() {
# Create 1 primary partitions on CACHE_DEVICE
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION=$CACHE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
# Make empty cache device, clear previous content, clear previous metadata
dd if=/dev/zero of="${CACHE_DEVICE}1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
dd if=/dev/zero of="${CACHE_DEVICE}-part1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
# Create 1 primary partitions on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
@@ -50,10 +50,10 @@ eviction_policy_init() {
fi
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
eviction_policy_flush() {
@@ -315,7 +315,7 @@ test_log_start
run_cmd eviction_policy_init "YES"
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}-part1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_deinit

View File

@@ -36,10 +36,10 @@ CACHE_LINE_SIZES="4 8 16 32 64"
for mode in $CACHE_MODES; do
for line_size in $CACHE_LINE_SIZES; do
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" \
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" \
CACHE_MODE_OPTION="$mode" CACHE_LINE_SIZE="$line_size" \
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
for engine in $IO_ENGINES; do
run_cmd "fio --ioengine=${engine} --direct=1 --name=test \

View File

@@ -31,12 +31,12 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Prepare cache on CACHE_DEVICE1 (/dev/sdd1, e.g.) and add core devices using CORE_DEVICE1, CORE_DEVICE2 and CORE_DEVICE3 (e.g. /dev/sde1, /dev/sde2, /dev/sde3)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state
@@ -46,19 +46,19 @@ CACHE_ID_OPTION="1" stop_cache
# Add cores to pool, then load cache and check if cache is running
# Try to add core devices and check their states
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" CORE_ID_OPTION="1" try_add_core
DEVICE_ID_OPTION=${CORE_DEVICE}1 DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" CORE_ID_OPTION="1" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part1" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" CORE_ID_OPTION="2" try_add_core
DEVICE_ID_OPTION=${CORE_DEVICE}2 DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" CORE_ID_OPTION="2" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part2" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION=${CORE_DEVICE}3 DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part3" DEMANDED_STATE_OPTION="Detached" check_device_state
# Try to load the cache device, check if it is running and if all cores' status is appropriate
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state

View File

@@ -31,12 +31,12 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Prepare cache on CACHE_DEVICE1 (/dev/sdd1, e.g.) and add core devices using CORE_DEVICE1, CORE_DEVICE2 and CORE_DEVICE3 (e.g. /dev/sde1, /dev/sde2, /dev/sde3)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state
@@ -48,8 +48,8 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
sleep 1
# Load cache, then add cores and check if cache is running
# Try to load cache device, check its state and cores state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Incomplete" check_device_state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Incomplete" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Inactive" check_device_state
@@ -59,16 +59,16 @@ CACHE_ID_OPTION="1" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
sleep 1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" CORE_ID_OPTION="1" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" CORE_ID_OPTION="2" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}1" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}2" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}3" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" CORE_ID_OPTION="1" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" CORE_ID_OPTION="2" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part1" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}-part2" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}-part3" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="y" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state

View File

@@ -31,18 +31,18 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
cat > $CAS_CONFIG_PATH <<- EOM
${CAS_CONFIG_VERSION_TAG}
[caches]
1 ${CACHE_DEVICE}1 WT
1 ${CACHE_DEVICE}-part1 WT
[cores]
1 1 ${CORE_DEVICE}1
1 2 ${CORE_DEVICE}2
1 3 ${CORE_DEVICE}3
1 1 ${CORE_DEVICE}-part1
1 2 ${CORE_DEVICE}-part2
1 3 ${CORE_DEVICE}-part3
EOM
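# Config rows are "<cache id> <device> <mode>" under [caches] and
# "<cache id> <core id> <device>" under [cores]; after `casctl init` the
# `casadm -L` listing should show cache 1 with its three cores active.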
run_cmd "casctl init"
run_cmd "udevadm settle"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state
@@ -53,9 +53,7 @@ check_no_cache_running
run_cmd "casctl start"
run_cmd "udevadm settle"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state

View File

@@ -35,16 +35,16 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
cat > $CAS_CONFIG_PATH <<- EOM
${CAS_CONFIG_VERSION_TAG}
[caches]
16384 ${CACHE_DEVICE}1 WB cleaning_policy=acp
16384 ${CACHE_DEVICE}-part1 WB cleaning_policy=acp
[cores]
16384 4095 ${CORE_DEVICE}1
16384 4094 ${CORE_DEVICE}2
16384 0 ${CORE_DEVICE}3
16384 4095 ${CORE_DEVICE}-part1
16384 4094 ${CORE_DEVICE}-part2
16384 0 ${CORE_DEVICE}-part3
EOM
run_cmd "casctl init"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-0" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4095" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4094" DEMANDED_STATE_OPTION="Active" check_device_state
@@ -57,7 +57,7 @@ check_no_cache_running
run_cmd "casctl start"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-0" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4095" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4094" DEMANDED_STATE_OPTION="Active" check_device_state

View File

@@ -30,11 +30,11 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
cat > $CAS_CONFIG_PATH <<- EOM
${CAS_CONFIG_VERSION_TAG}
[caches]
1 ${CACHE_DEVICE}1 WB cleaning_policy=nop
1 ${CACHE_DEVICE}-part1 WB cleaning_policy=nop
[cores]
1 1 ${CORE_DEVICE}1
1 2 ${CORE_DEVICE}2
1 3 ${CORE_DEVICE}3
1 1 ${CORE_DEVICE}-part1
1 2 ${CORE_DEVICE}-part2
1 3 ${CORE_DEVICE}-part3
EOM
run_cmd "casctl init"
@@ -46,7 +46,7 @@ run_cmd "udevadm settle"
run_cmd "casctl stop"
# We shouldn't be able to start cache on this device, it contains dirty data
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION=${CACHE_DEVICE}1 CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 run_cmd "casctl init"
clear_options
@@ -56,7 +56,7 @@ run_cmd "casctl start"
run_cmd "casctl stop"
# We still shouldn't be able to start
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION=${CACHE_DEVICE}1 CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 run_cmd "casctl init"
clear_options
@@ -69,7 +69,7 @@ run_cmd "udevadm settle"
run_cmd "casctl stop --flush"
run_cmd "casadm -S -d ${CACHE_DEVICE}1 --force"
run_cmd "casadm -S -d ${CACHE_DEVICE}-part1 --force"
run_cmd "casctl stop"

View File

@@ -46,10 +46,10 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="200M" PARTITION_IDS_
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="400M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Prepare IO class config with wlth specified
io_class_config_wlth

View File

@@ -48,10 +48,10 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="200M" PARTITION_IDS_
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="400M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Prepare IO class config with wlth specified
io_class_config_wlth

View File

@@ -24,13 +24,13 @@ test_log_start
# Corrupts a random bit of the byte at the offset given by $1 on ${CACHE_DEVICE}-part1
corrupt_byte(){
OFFSET=$1
READ_VALUE=`od -N 1 -j $OFFSET -A n -t x1 ${CACHE_DEVICE}1`
READ_VALUE=`od -N 1 -j $OFFSET -A n -t x1 ${CACHE_DEVICE}-part1`
READ_VALUE="0x`echo $READ_VALUE`"
RANDOM_UINT32=`od -An -tu4 -N4 /dev/urandom`
MASK=$(( 1 << $(( $RANDOM_UINT32 % 8 )) ))
echo -e "\x$(( ${READ_VALUE} ^ ${MASK} ))"| dd bs=1 count=1 seek=$OFFSET conv=notrunc of=${CACHE_DEVICE}1 1>&2 2>/dev/null
echo -e "\x$(( ${READ_VALUE} ^ ${MASK} ))"| dd bs=1 count=1 seek=$OFFSET conv=notrunc of=${CACHE_DEVICE}-part1 1>&2 2>/dev/null
}
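# Worked example (hypothetical): if the byte at OFFSET reads back as "a7" and
# RANDOM_UINT32 % 8 picks bit 2, then MASK=0x04 and 0xa7 ^ 0x04 = 0xa3 is
# written back, flipping exactly one bit of the metadata.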
# Use CACHE_DEVICE and CORE_DEVICE provided by configuration file and remove partitions from those devices
@@ -52,7 +52,7 @@ do
fi
# Start cache on CACHE_DEVICE to repair it and also to create a log.
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Stop cache.
CACHE_ID_OPTION="1" stop_cache
@@ -88,7 +88,7 @@ do
corrupt_byte ${CORRUPT_ADDRESS}
# Start again with the load option; this should fail because the metadata is corrupted.
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
done

View File

@@ -40,7 +40,7 @@ do
fi
# Start cache on CACHE_DEVICE to repair it and also to create a log.
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Stop cache.
CACHE_ID_OPTION="1" stop_cache
@@ -59,10 +59,10 @@ do
test_log_trace "Corrupting 64K bytes in ${SECTION}"
# Corrupt cache metadata
run_cmd "dd if=/dev/urandom of="${CACHE_DEVICE}1" bs=1 count=64K conv=notrunc seek=${METADATA_SECTION_OFFSET}K "
run_cmd "dd if=/dev/urandom of=${CACHE_DEVICE}-part1 bs=1 count=64K conv=notrunc seek=${METADATA_SECTION_OFFSET}K "
# Start again with the load option; this should fail because the metadata is corrupted.
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
done
test_log_stop

View File

@@ -26,7 +26,7 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# create cache in WT mode and try to change promotion parameters
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" start_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" start_cache
# changing promotion parameters should not be prohibited while a core is added to the cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="$CORE_DEVICE" add_core
@@ -108,7 +108,7 @@ CACHE_ID_OPTION="1" PROMO_POL_NS_OPTION="promotion-nhit" THRESHOLD_OPTION="451"
CACHE_ID_OPTION="1" PROMO_POL_NS_OPTION="promotion-nhit" THRESHOLD_OPTION="812" TRIGGER_OPTION="49" set_promotion_params
CACHE_ID_OPTION="1" PROMO_POL_OPTION="nhit" set_promotion_policy
CACHE_ID_OPTION="1" stop_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="1" start_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="1" start_cache
CACHE_ID_OPTION="1" PROMO_POL_OPTION="nhit" check_promotion_policy
CACHE_ID_OPTION="1" PROMO_POL_NS_OPTION="promotion-nhit" THRESHOLD_OPTION="812" TRIGGER_OPTION="49" check_promotion_params

View File

@@ -28,16 +28,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@@ -64,16 +64,17 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
done
SHORT_LINK=$(realpath ${CACHE_DEVICE})
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_off_device
for ID in 1 2 3 ; do
DONT_FAIL_ON_ERROR_OPTION="YES" CACHE_ID_OPTION="$ID" stop_cache
done
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_on_device
CACHE_DEVICE_OPTION="${SHORT_LINK}" turn_on_device
for ID in 1 2 3 ; do
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}$ID" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part${ID}" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" stop_cache
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices
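Note the SHORT_LINK=$(realpath ${CACHE_DEVICE}) addition above: presumably the by-id symlink disappears while the device is turned off, since udev drops the link once the device is deleted, so the test resolves the stable link to its kernel node up front and hands that to turn_on_device. The same idea in Python (a sketch, assuming the device is still present when the call is made):

import os

# Capture the kernel node (e.g. /dev/sdd) while the by-id link still
# exists; after the device is turned off the link is gone, and the
# resolved path is what is needed to bring the device back.
short_link = os.path.realpath("/dev/disk/by-id/wwn-1337deadbeef-x0x0")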

View File

@@ -28,16 +28,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@@ -64,14 +64,15 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
done
SHORT_LINK=$(realpath ${CACHE_DEVICE})
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_off_device
for ID in 1 2 3 ; do
DONT_FAIL_ON_ERROR_OPTION="YES" CACHE_ID_OPTION="$ID" stop_cache
done
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_on_device
CACHE_DEVICE_OPTION="${SHORT_LINK}" turn_on_device
for ID in 1 2 3 ; do
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}$ID" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part${ID}" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" CORE_ID_OPTION="1" mount_cache
done

View File

@@ -36,13 +36,13 @@ wb_init() {
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
# Make ext3 file system
TARGET_DEVICE_OPTION="${CORE_DEVICE}1" FILESYSTEM_TYPE="ext3" make_filesystem
TARGET_DEVICE_OPTION="${CORE_DEVICE}-part1" FILESYSTEM_TYPE="ext3" make_filesystem
# Start cache on CACHE_DEVICE1
CACHE_MODE_OPTION="wb" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_MODE_OPTION="wb" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Mount file system
CACHE_ID_OPTION="1" CORE_ID_OPTION="1" mount_cache

View File

@@ -272,25 +272,6 @@ def test_cas_config_add_same_core_symlinked_02(mock_realpath):
config.insert_core(core_symlinked)
@patch("os.path.realpath")
@patch("os.listdir")
def test_cas_config_get_by_id_path(mock_listdir, mock_realpath):
mock_listdir.return_value = [
"wwn-1337deadbeef-x0x0",
"wwn-1337deadbeef-x0x0-part1",
"nvme-INTEL_SSDAAAABBBBBCCC_0984547ASDDJHHHFH",
]
mock_realpath.side_effect = (
lambda x: "/dev/dummy1"
if x == "/dev/disk/by-id/wwn-1337deadbeef-x0x0-part1"
else x
)
path = opencas.cas_config.get_by_id_path("/dev/dummy1")
assert path == "/dev/disk/by-id/wwn-1337deadbeef-x0x0-part1"
@patch("os.path.realpath")
@patch("os.listdir")
def test_cas_config_get_by_id_path_not_found(mock_listdir, mock_realpath):
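The removed tests pin down the contract of opencas.cas_config.get_by_id_path: scan /dev/disk/by-id and return the link whose target resolves to the given kernel path. A minimal reconstruction from the mocks alone; the real implementation and its error type may differ:

import os

def get_by_id_path(path: str) -> str:
    by_id_dir = "/dev/disk/by-id"
    for name in os.listdir(by_id_dir):
        link = os.path.join(by_id_dir, name)
        # The mocks compare the resolved link against the input path,
        # e.g. .../wwn-1337deadbeef-x0x0-part1 -> /dev/dummy1.
        if os.path.realpath(link) == path:
            return link
    # The companion not-found test expects a failure here; the exact
    # exception type is an assumption.
    raise ValueError(f"no by-id link found for {path}")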