Merge pull request #568 from mmichal10/occupancy-per-ioclass

Occupancy per ioclass
This commit is contained in:
Robert Baldyga 2020-12-23 14:36:02 +01:00 committed by GitHub
commit 2753bad018
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 1209 additions and 105 deletions

View File

@ -2032,11 +2032,10 @@ void partition_list_line(FILE *out, struct kcas_io_class *cls, bool csv)
{
char buffer[128];
const char *prio;
const char *allocation;
if (cls->info.cache_mode != ocf_cache_mode_pt)
allocation = csv ? "1" : "YES";
else
allocation = csv ? "0" : "NO";
char allocation_str[MAX_STR_LEN];
snprintf(allocation_str, sizeof(allocation_str), "%d.%02d",
cls->info.max_size/100, cls->info.max_size%100);
if (OCF_IO_CLASS_PRIO_PINNED == cls->info.priority) {
prio = csv ? "" : "Pinned";
@ -2046,7 +2045,7 @@ void partition_list_line(FILE *out, struct kcas_io_class *cls, bool csv)
}
fprintf(out, TAG(TABLE_ROW)"%u,%s,%s,%s\n",
cls->class_id, cls->info.name, prio, allocation);
cls->class_id, cls->info.name, prio, allocation_str);
}
@ -2062,6 +2061,7 @@ int partition_list(unsigned int cache_id, unsigned int output_format)
if (fd == -1 )
return FAILURE;
if (create_pipe_pair(intermediate_file)) {
cas_printf(LOG_ERR,"Failed to create unidirectional pipe.\n");
close(fd);
@ -2119,8 +2119,11 @@ int partition_list(unsigned int cache_id, unsigned int output_format)
}
enum {
part_csv_coll_id = 0, part_csv_coll_name, part_csv_coll_prio,
part_csv_coll_alloc, part_csv_coll_max
part_csv_coll_id = 0,
part_csv_coll_name,
part_csv_coll_prio,
part_csv_coll_alloc,
part_csv_coll_max
};
int partition_is_name_valid(const char *name)
@ -2158,6 +2161,28 @@ static inline const char *partition_get_csv_col(CSVFILE *csv, int col,
return val;
}
/*
 * Parse an IO class allocation factor string into an integer percentage.
 *
 * Accepts values in the range "0".."1" with at most two decimal places
 * (e.g. "0.25", "1", "0.5"); the string may be at most 4 characters long.
 *
 * @param cache_id   cache id (currently unused; kept for interface stability)
 * @param allocation NUL-terminated, user-supplied allocation factor
 * @param part_size  output: allocation expressed as an integer percent 0-100
 * @return SUCCESS when the whole string parsed as a valid factor,
 *         FAILURE otherwise
 */
static int calculate_max_allocation(uint16_t cache_id, const char *allocation,
		uint32_t *part_size)
{
	float alloc = 0;
	char *end;

	/* Longest legal input is "X.YY" - four characters */
	if (strnlen(allocation, MAX_STR_LEN) > 4)
		return FAILURE;

	alloc = strtof(allocation, &end);
	if (alloc > 1 || alloc < 0)
		return FAILURE;

	/* Reject trailing garbage: strtof must have consumed the whole string */
	if (allocation + strnlen(allocation, MAX_STR_LEN) != end)
		return FAILURE;

	/*
	 * Round to the nearest percent instead of truncating. Decimal
	 * fractions are not exactly representable in binary floating point,
	 * e.g. "0.29" parses as 0.28999..., which plain truncation would
	 * silently turn into 28%.
	 */
	*part_size = (uint32_t)(alloc * 100 + 0.5f);

	return SUCCESS;
}
static inline int partition_get_line(CSVFILE *csv,
struct kcas_io_classes *cnfg,
int *error_col)
@ -2228,20 +2253,13 @@ static inline int partition_get_line(CSVFILE *csv,
if (strempty(alloc)) {
return FAILURE;
}
if (validate_str_num(alloc, "alloc", 0, 1)) {
return FAILURE;
}
value = strtoul(alloc, NULL, 10);
if (0 == value) {
cnfg->info[part_id].cache_mode = ocf_cache_mode_pt;
} else if (1 == value) {
cnfg->info[part_id].cache_mode = ocf_cache_mode_max;
} else {
return FAILURE;
}
if (calculate_max_allocation(cnfg->cache_id, alloc, &value) == FAILURE)
return FAILURE;
cnfg->info[part_id].cache_mode = ocf_cache_mode_max;
cnfg->info[part_id].min_size = 0;
cnfg->info[part_id].max_size = UINT32_MAX;
cnfg->info[part_id].max_size = value;
return 0;
}

View File

@ -403,9 +403,7 @@ static void print_stats_ioclass_conf(const struct kcas_io_class* io_class,
print_kv_pair(outfile, "Eviction priority", "%d",
io_class->info.priority);
}
print_kv_pair(outfile, "Selective allocation", "%s",
io_class->info.cache_mode != ocf_cache_mode_pt ?
"Yes" : "No");
print_kv_pair(outfile, "Max size", "%u%%", io_class->info.max_size);
}

View File

@ -533,8 +533,10 @@ static cas_cls_eval_t _cas_cls_extension_test(
return cas_cls_eval_no;
/* First character of @extension is '.', which we don't want to compare */
len = strnlen(extension + 1, dentry->d_name.len);
len = min(ctx->len, len);
len = dentry->d_name.len - (extension - (char*)dentry->d_name.name) - 1;
if (len != ctx->len)
return cas_cls_eval_no;
if (strncmp(ctx->string, extension + 1, len) == 0)
return cas_cls_eval_yes;
@ -615,7 +617,9 @@ static cas_cls_eval_t _cas_cls_process_name_test(
get_task_comm(comm, ti);
len = strnlen(comm, TASK_COMM_LEN);
len = min(ctx->len, len);
if (len != ctx->len)
return cas_cls_eval_no;
if (strncmp(ctx->string, comm, len) == 0)
return cas_cls_eval_yes;

View File

@ -625,6 +625,7 @@ static inline uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)
/* *** LOGGING *** */
#define ENV_PRIu64 "llu"
#define ENV_PRId64 "lld"
#define ENV_WARN(cond, fmt...) WARN(cond, fmt)
#define ENV_WARN_ON(cond) WARN_ON(cond)

2
ocf

@ -1 +1 @@
Subproject commit 0964e0e9df6419cf80f063e20f5bcb074e01caaf
Subproject commit 7f60d735110750fd90047cd47b4d47f087671c40

View File

@ -139,8 +139,8 @@ class Cache:
def load_io_class(self, file_path: str):
return casadm.load_io_classes(self.cache_id, file_path)
def list_io_classes(self, output_format: OutputFormat):
return casadm.list_io_classes(self.cache_id, output_format)
def list_io_classes(self):
return get_io_class_list(self.cache_id)
def set_seq_cutoff_parameters(self, seq_cutoff_param: SeqCutOffParameters):
return casadm.set_param_cutoff(self.cache_id,

View File

@ -275,3 +275,19 @@ def get_casadm_version():
casadm_output = casadm.print_version(OutputFormat.csv).stdout.split('\n')
version_str = casadm_output[1].split(',')[-1]
return CasVersion.from_version_string(version_str)
def get_io_class_list(cache_id: int):
    # Query casadm for the cache's IO class list in CSV form and parse
    # each data row into a dict with id/rule/eviction_priority/allocation.
    output_rows = casadm.list_io_classes(cache_id, OutputFormat.csv).stdout.splitlines()
    output_rows.pop(0)  # Remove header

    parsed = []
    for row in output_rows:
        fields = row.split(",")
        parsed.append(
            {
                "id": int(fields[0]),
                "rule": fields[1],
                "eviction_priority": int(fields[2]),
                "allocation": float(fields[3]),
            }
        )

    return parsed

View File

@ -21,6 +21,9 @@ default_config_file_path = "/tmp/opencas_ioclass.conf"
MAX_IO_CLASS_ID = 32
MAX_IO_CLASS_PRIORITY = 255
DEFAULT_IO_CLASS_ID = 0
DEFAULT_IO_CLASS_PRIORITY = 255
DEFAULT_IO_CLASS_RULE = "unclassified"
MAX_CLASSIFICATION_DELAY = timedelta(seconds=6)
IO_CLASS_CONFIG_HEADER = "IO class id,IO class name,Eviction priority,Allocation"
@ -28,7 +31,7 @@ IO_CLASS_CONFIG_HEADER = "IO class id,IO class name,Eviction priority,Allocation
@functools.total_ordering
class IoClass:
def __init__(self, class_id: int, rule: str = '', priority: int = None,
allocation: bool = True):
allocation: str = "1.00"):
self.id = class_id
self.rule = rule
self.priority = priority
@ -36,7 +39,7 @@ class IoClass:
def __str__(self):
return (f'{self.id},{self.rule},{"" if self.priority is None else self.priority}'
f',{int(self.allocation)}')
f',{self.allocation}')
def __eq__(self, other):
return ((self.id, self.rule, self.priority, self.allocation)
@ -53,7 +56,7 @@ class IoClass:
class_id=int(parts[0]),
rule=parts[1],
priority=int(parts[2]),
allocation=parts[3] in ['1', 'YES'])
allocation=parts[3])
@staticmethod
def list_to_csv(ioclass_list: [], add_default_rule: bool = True):
@ -81,8 +84,8 @@ class IoClass:
IoClass.list_to_csv(ioclass_list, add_default_rule))
@staticmethod
def default(priority: int = 255, allocation: bool = True):
return IoClass(0, 'unclassified', priority, allocation)
def default(priority=DEFAULT_IO_CLASS_PRIORITY, allocation="1.00"):
return IoClass(DEFAULT_IO_CLASS_ID, DEFAULT_IO_CLASS_RULE, priority, allocation)
@staticmethod
def compare_ioclass_lists(list1: [], list2: []):
@ -91,10 +94,10 @@ class IoClass:
@staticmethod
def generate_random_ioclass_list(count: int, max_priority: int = MAX_IO_CLASS_PRIORITY):
random_list = [IoClass.default(priority=random.randint(0, max_priority),
allocation=bool(random.randint(0, 1)))]
allocation=f"{random.randint(0,100)/100:0.2f}")]
for i in range(1, count):
random_list.append(IoClass(i, priority=random.randint(0, max_priority),
allocation=bool(random.randint(0, 1)))
allocation=f"{random.randint(0,100)/100:0.2f}")
.set_random_rule())
return random_list
@ -146,9 +149,11 @@ def create_ioclass_config(
"Failed to create ioclass config file. "
+ f"stdout: {output.stdout} \n stderr :{output.stderr}"
)
if add_default_rule:
output = TestRun.executor.run(
f'echo "0,unclassified,22,1" >> {ioclass_config_path}'
f'echo "{DEFAULT_IO_CLASS_ID},{DEFAULT_IO_CLASS_RULE},{DEFAULT_IO_CLASS_PRIORITY},"'
+ f'"1.00" >> {ioclass_config_path}'
)
if output.exit_code != 0:
raise Exception(
@ -171,10 +176,10 @@ def add_ioclass(
ioclass_id: int,
rule: str,
eviction_priority: int,
allocation: bool,
allocation,
ioclass_config_path: str = default_config_file_path,
):
new_ioclass = f"{ioclass_id},{rule},{eviction_priority},{int(allocation)}"
new_ioclass = f"{ioclass_id},{rule},{eviction_priority},{allocation}"
TestRun.LOGGER.info(
f"Adding rule {new_ioclass} " + f"to config file {ioclass_config_path}"
)

View File

@ -1,2 +1,3 @@
attotime>=0.2.0
schema==0.7.2
recordclass>=0.8.4

View File

@ -47,8 +47,8 @@ def pytest_runtest_setup(item):
try:
with open(item.config.getoption('--dut-config')) as cfg:
dut_config = yaml.safe_load(cfg)
except Exception:
raise Exception("You need to specify DUT config. See the example_dut_config.py file.")
except Exception as ex:
raise Exception(f"{ex}\nYou need to specify DUT config. See the example_dut_config.py file")
dut_config['plugins_dir'] = os.path.join(os.path.dirname(__file__), "../lib")
dut_config['opt_plugins'] = {"test_wrapper": {}, "serial_log": {}, "power_control": {}}

View File

@ -3,36 +3,56 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from datetime import timedelta
from api.cas import casadm
from api.cas import ioclass_config
from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
from api.cas.cache_config import (
CacheLineSize,
CacheMode,
CleaningPolicy,
SeqCutOffPolicy,
)
from core.test_run import TestRun
from test_utils.os_utils import Udev
from test_tools.dd import Dd
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.os_utils import Udev, sync
from test_utils.os_utils import drop_caches, DropCachesMode
from test_utils.size import Size, Unit
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"
def prepare():
def prepare(
cache_size=Size(500, Unit.MebiByte),
core_size=Size(10, Unit.GibiByte),
cache_mode=CacheMode.WB,
cache_line_size=CacheLineSize.LINE_4KiB,
):
ioclass_config.remove_ioclass_config()
cache_device = TestRun.disks['cache']
core_device = TestRun.disks['core']
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
core_device.create_partitions([Size(1, Unit.GibiByte)])
cache_device.create_partitions([cache_size])
core_device.create_partitions([core_size])
cache_device = cache_device.partitions[0]
core_device = core_device.partitions[0]
TestRun.LOGGER.info(f"Starting cache")
cache = casadm.start_cache(cache_device, cache_mode=CacheMode.WB, force=True)
cache = casadm.start_cache(
cache_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
)
Udev.disable()
TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
TestRun.LOGGER.info(f"Adding core device")
core = casadm.add_core(cache, core_dev=core_device)
TestRun.LOGGER.info(f"Setting seq cutoff policy to never")
core.set_seq_cutoff_policy(SeqCutOffPolicy.never)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
@ -40,10 +60,10 @@ def prepare():
# To make test more precise all workload except of tested ioclass should be
# put in pass-through mode
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
rule="unclassified",
ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID,
eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
allocation="0.00",
rule=ioclass_config.DEFAULT_IO_CLASS_RULE,
ioclass_config_path=ioclass_config_path,
)
@ -52,3 +72,53 @@ def prepare():
raise Exception(f"Failed to create mountpoint")
return cache, core
def get_io_class_occupancy(cache, io_class_id, percent=False):
    # Shortcut for the occupancy field of an IO class' usage statistics.
    usage_stats = get_io_class_usage(cache, io_class_id, percent)
    return usage_stats.occupancy
def get_io_class_dirty(cache, io_class_id):
    # Shortcut for the dirty field of an IO class' usage statistics.
    usage_stats = get_io_class_usage(cache, io_class_id)
    return usage_stats.dirty
def get_io_class_usage(cache, io_class_id, percent=False):
    # Fetch the usage-statistics block for a single IO class; `percent`
    # selects percentage-valued statistics instead of absolute sizes.
    stats = cache.get_io_class_statistics(
        io_class_id=io_class_id, percentage_val=percent
    )
    return stats.usage_stats
def run_io_dir(path, size_4k):
    # Write `size_4k` zero-filled 4KiB blocks to `path`, then flush the
    # page cache so later occupancy checks reflect CAS state only.
    writer = Dd() \
        .input("/dev/zero") \
        .output(f"{path}") \
        .count(size_4k) \
        .block_size(Size(1, Unit.Blocks4096))
    TestRun.LOGGER.info(f"{writer}")
    writer.run()
    sync()
    drop_caches(DropCachesMode.ALL)
def run_io_dir_read(path):
    # Read the whole file at `path` (discarding the data), then flush
    # caches so the read is not served from page cache next time.
    Dd().output("/dev/null").input(f"{path}").run()
    sync()
    drop_caches(DropCachesMode.ALL)
def run_fio_count(core, blocksize, num_ios):
    # Issue `num_ios` direct random reads of `blocksize` against `core`
    # over a 10 GiB working set.
    fio = (
        Fio()
        .create_command()
        .target(core)
        .io_engine(IoEngine.libaio)
        .read_write(ReadWrite.randread)
        .block_size(blocksize)
        .direct()
        .file_size(Size(10, Unit.GibiByte))
        .num_ios(num_ios)
    )
    fio.run()

View File

@ -49,14 +49,14 @@ def test_ioclass_core_id(filesystem):
ioclass_config.add_ioclass(
ioclass_id=cached_ioclass_id,
eviction_priority=22,
allocation=True,
allocation="1.00",
rule=f"core_id:eq:{core_1.core_id}&done",
ioclass_config_path=ioclass_config.default_config_file_path,
)
ioclass_config.add_ioclass(
ioclass_id=not_cached_ioclass_id,
eviction_priority=22,
allocation=False,
allocation="0.00",
rule=f"core_id:eq:{core_2.core_id}&done",
ioclass_config_path=ioclass_config.default_config_file_path,
)
@ -169,21 +169,21 @@ def prepare(filesystem, cores_number):
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=True,
allocation="1.00",
rule="unclassified",
ioclass_config_path=ioclass_config.default_config_file_path,
)
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=22,
allocation=True,
allocation="0.00",
rule="metadata",
ioclass_config_path=ioclass_config.default_config_file_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=22,
allocation=False,
allocation="0.00",
rule="direct",
ioclass_config_path=ioclass_config.default_config_file_path,
)

View File

@ -75,7 +75,7 @@ def test_ioclass_directory_depth(filesystem):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
@ -94,7 +94,7 @@ def test_ioclass_directory_depth(filesystem):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + test_file_1.size:
TestRun.LOGGER.error("Wrong occupancy after reading file!\n"
"Expected: {base_occupancy + test_file_1.size}, "
f"Expected: {base_occupancy + test_file_1.size}, "
f"actual: {new_occupancy}")
# Test classification in nested dir by creating a file
@ -104,8 +104,8 @@ def test_ioclass_directory_depth(filesystem):
dd = (
Dd().input("/dev/urandom")
.output(test_file_2.full_path)
.count(random.randint(1, 200))
.block_size(Size(1, Unit.MebiByte))
.count(random.randint(25600, 51200)) # 100MB to 200MB
.block_size(Size(1, Unit.Blocks4096))
)
dd.run()
sync()
@ -114,9 +114,10 @@ def test_ioclass_directory_depth(filesystem):
with TestRun.step("Check occupancy after creating the second file."):
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
expected_occpuancy = (base_occupancy + test_file_2.size).set_unit(Unit.Blocks4096)
if new_occupancy != base_occupancy + test_file_2.size:
TestRun.LOGGER.error("Wrong occupancy after creating file!\n"
f"Expected: {base_occupancy + test_file_2.size}, "
f"Expected: {expected_occpuancy}, "
f"actual: {new_occupancy}")
@ -149,7 +150,7 @@ def test_ioclass_directory_file_operations(filesystem):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"directory:{test_dir_path}",
ioclass_config_path=ioclass_config_path,
)
@ -200,7 +201,7 @@ def test_ioclass_directory_file_operations(filesystem):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
.block_size(Size(1, Unit.Blocks4096)).run())
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
@ -229,7 +230,7 @@ def test_ioclass_directory_file_operations(filesystem):
classified_before = classified_after
non_classified_before = non_classified_after
(Dd().input(test_file.full_path).output("/dev/null")
.block_size(Size(1, Unit.MebiByte)).run())
.block_size(Size(1, Unit.Blocks4096)).run())
with TestRun.step("Check classified occupancy."):
classified_after = cache.get_io_class_statistics(
@ -275,14 +276,14 @@ def test_ioclass_directory_dir_operations(filesystem):
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_1,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"directory:{classified_dir_path_1}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=ioclass_id_2,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"directory:{classified_dir_path_2}",
ioclass_config_path=ioclass_config_path,
)
@ -330,7 +331,7 @@ def test_ioclass_directory_dir_operations(filesystem):
with TestRun.step("Read files with reclassification check."):
read_files_with_reclassification_check(cache,
target_ioclass_id=0, source_ioclass_id=ioclass_id_1,
directory=dir_2, with_delay=False)
directory=dir_2, with_delay=True)
with TestRun.step(f"Remove {classified_dir_path_2}."):
fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
@ -377,7 +378,7 @@ def create_files_with_classification_delay_check(cache, directory: Directory, io
unclassified_files.append(file_path)
if len(unclassified_files) == file_counter:
pytest.xfail("No files were properly classified within max delay time!")
TestRun.LOGGER.error("No files were properly classified within max delay time!")
if len(unclassified_files):
TestRun.LOGGER.info("Rewriting unclassified test files...")
@ -393,42 +394,52 @@ def read_files_with_reclassification_check(cache, target_ioclass_id: int, source
io_class_id=target_ioclass_id).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
unclassified_files = []
files_to_reclassify = []
target_ioclass_is_enabled = ioclass_is_enabled(cache, target_ioclass_id)
for file in [item for item in directory.ls() if isinstance(item, File)]:
target_occupancy_before = target_occupancy_after
source_occupancy_before = source_occupancy_after
time_from_start = datetime.now() - start_time
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
dd = Dd().input(file.full_path).output("/dev/null").block_size(Size(1, Unit.Blocks4096))
dd.run()
target_occupancy_after = cache.get_io_class_statistics(
io_class_id=target_ioclass_id).usage_stats.occupancy
source_occupancy_after = cache.get_io_class_statistics(
io_class_id=source_ioclass_id).usage_stats.occupancy
if target_occupancy_after < target_occupancy_before:
pytest.xfail("Target IO class occupancy lowered!")
elif target_occupancy_after - target_occupancy_before < file.size:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Target IO class occupancy not changed properly!")
if source_occupancy_after >= source_occupancy_before:
if file not in unclassified_files:
unclassified_files.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
pytest.xfail("Source IO class occupancy not changed properly!")
if len(unclassified_files):
if target_ioclass_is_enabled:
if target_occupancy_after < target_occupancy_before:
TestRun.LOGGER.error("Target IO class occupancy lowered!")
elif target_occupancy_after - target_occupancy_before < file.size:
files_to_reclassify.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
TestRun.LOGGER.error("Target IO class occupancy not changed properly!")
elif target_occupancy_after > target_occupancy_before and with_delay:
files_to_reclassify.append(file)
if source_occupancy_after >= source_occupancy_before:
if file not in files_to_reclassify:
files_to_reclassify.append(file)
if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
continue
TestRun.LOGGER.error("Source IO class occupancy not changed properly!")
if len(files_to_reclassify):
TestRun.LOGGER.info("Rereading unclassified test files...")
sync()
drop_caches(DropCachesMode.ALL)
for file in unclassified_files:
for file in files_to_reclassify:
(Dd().input(file.full_path).output("/dev/null")
.block_size(Size(1, Unit.Blocks4096)).run())
def check_occupancy(expected: Size, actual: Size):
if expected != actual:
pytest.xfail("Occupancy check failed!\n"
TestRun.LOGGER.error("Occupancy check failed!\n"
f"Expected: {expected}, actual: {actual}")
def ioclass_is_enabled(cache, ioclass_id: int):
    # An IO class counts as enabled when its configured allocation is
    # above zero. Raises IndexError if the id is not present.
    matching = [
        entry["allocation"]
        for entry in cache.list_io_classes()
        if entry["id"] == ioclass_id
    ]
    return matching.pop() > 0.00

View File

@ -0,0 +1,160 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from collections import namedtuple
from math import isclose
import pytest
from .io_class_common import *
from api.cas.cache_config import CacheMode, CacheLineSize
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_eviction_priority(cache_line_size):
    """
    title: Check whether eviction priorities are respected.
    description: |
        Create ioclass for 4 different directories, each with different
        eviction priority configured. Saturate 3 of them and check if the
        partitions are evicted in a good order during IO to the fourth
    pass_criteria:
        - Partitions are evicted in specified order
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
    ):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple(
            "IoclassConfig", "id eviction_prio max_occupancy dir_path"
        )
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.40, f"{mountpoint}/C"),
            IoclassConfig(4, 1, 1.00, f"{mountpoint}/D"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Adding default ioclasses"):
        # Disable the default ioclass (allocation "0.00") so unclassified
        # IO does not consume cache lines during the test.
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Adding ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Resetting cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Checking initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step(
        f"To A, B and C directories perform IO with size of max io_class occupancy"
    ):
        for io_class in io_classes[0:3]:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )

    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
        for io_class in io_classes[0:3]:
            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = (
                (io_class.max_occupancy * cache_size)
                .align_down(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )

            if not isclose(actuall_occupancy.value, occupancy_limit.value, rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} does not match. "
                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
                )

        # Bug fix: report the fourth (untouched) ioclass' own occupancy.
        # Previously this logged the stale `actuall_occupancy` left over
        # from the loop above (i.e. ioclass 3's value, not ioclass 4's).
        part_4_occupancy = get_io_class_occupancy(cache, io_classes[3].id)
        if part_4_occupancy.value != 0:
            TestRun.LOGGER.error(
                f"Occupancy for ioclass {io_classes[3].id} should be 0. "
                f"Actuall: {part_4_occupancy}"
            )

    with TestRun.step(
        "Perform IO to the fourth directory and check "
        "if other partitions are evicted in a good order"
    ):
        target_io_class = io_classes[3]
        io_classes_to_evict = io_classes[:3][
            ::-1
        ]  # List is ordered by eviction priority
        io_classes_evicted = []
        for io_class in io_classes_to_evict:
            run_io_dir(
                f"{target_io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )
            part_to_evict_end_occupancy = get_io_class_occupancy(
                cache, io_class.id, percent=True
            )

            # Since number of evicted cachelines is always >= 128, occupancy is checked
            # with approximation
            if not isclose(part_to_evict_end_occupancy, 0, abs_tol=4):
                TestRun.LOGGER.error(
                    f"Wrong percent of cache lines evicted from part {io_class.id}. "
                    f"Meant to be evicted {io_class.max_occupancy*100}%, actaully evicted "
                    f"{io_class.max_occupancy*100-part_to_evict_end_occupancy}%"
                )

            io_classes_evicted.append(io_class)

            # Classes not yet targeted for eviction should still hold
            # their configured share of the cache.
            for i in io_classes_to_evict:
                if i in io_classes_evicted:
                    continue

                occupancy = get_io_class_occupancy(cache, i.id, percent=True)

                if not isclose(occupancy, i.max_occupancy * 100, abs_tol=4):
                    TestRun.LOGGER.error(f"Ioclass {i.id} evicted incorrectly")

View File

@ -47,7 +47,7 @@ def test_ioclass_file_extension():
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"extension:{tested_extension}&done",
ioclass_config_path=ioclass_config_path,
)
@ -114,7 +114,7 @@ def test_ioclass_file_name_prefix():
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=255,
allocation=False,
allocation="0.00",
rule=f"unclassified",
ioclass_config_path=ioclass_config_path,
)
@ -122,7 +122,7 @@ def test_ioclass_file_name_prefix():
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_name_prefix:test&done",
ioclass_config_path=ioclass_config_path,
)
@ -225,7 +225,7 @@ def test_ioclass_file_extension_preexisting_filesystem():
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"{rule}&done",
ioclass_config_path=ioclass_config_path,
)
@ -279,7 +279,7 @@ def test_ioclass_file_offset():
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
ioclass_config_path=ioclass_config_path,
)
@ -405,7 +405,7 @@ def test_ioclass_file_size(filesystem):
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
allocation="0.00",
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
@ -430,7 +430,7 @@ def test_ioclass_file_size(filesystem):
ioclass_config.add_ioclass(
ioclass_id=0,
eviction_priority=22,
allocation=False,
allocation="0.00",
rule="unclassified",
ioclass_config_path=ioclass_config_path,
)
@ -464,35 +464,35 @@ def load_file_size_io_classes(cache, base_size):
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_size:eq:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_size:lt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_size:gt:{base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_size:le:{int(base_size_bytes / 2)}",
ioclass_config_path=ioclass_config_path,
)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"file_size:ge:{2 * base_size_bytes}",
ioclass_config_path=ioclass_config_path,
)

View File

@ -0,0 +1,384 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from collections import namedtuple
from math import isclose
import pytest
from .io_class_common import *
from api.cas.cache_config import CacheMode, CacheLineSize
from api.cas.ioclass_config import IoClass
from api.cas.statistics import UsageStats
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("io_size_multiplication", [0.5, 2])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occupancy_directory_write(io_size_multiplication, cache_mode, cache_line_size):
    """
    title: Test for max occupancy set for ioclass based on directory
    description: |
        Create ioclass for 3 different directories, each with different
        max cache occupancy configured. Run IO against each directory and see
        if occupancy limit is respected.
    pass_criteria:
        - Max occupancy is set correctly for each ioclass
        - Each ioclass does not exceed max occupancy
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        # Disable the default ioclass (allocation "0.00") so unclassified
        # IO does not consume cache lines during the test.
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect inital occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step(
        f"To each directory perform IO with size of {io_size_multiplication} max io_class occupancy"
    ):
        for io_class in io_classes:
            # Snapshot the other classes' occupancy so we can verify the
            # IO below does not disturb them.
            original_occupacies = {}
            tmp_io_class_list = [i for i in io_classes if i != io_class]
            for i in tmp_io_class_list:
                original_occupacies[i.id] = get_io_class_occupancy(cache, i.id)

            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int(
                    (io_class.max_occupancy * cache_size) / Unit.Blocks4096 * io_size_multiplication
                ),
            )

            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            # Expected occupancy is the class limit, unless the IO was
            # smaller than the limit (multiplication < 1).
            io_size = io_class.max_occupancy * cache_size
            if io_size_multiplication < 1:
                io_size *= io_size_multiplication
            io_size.set_unit(Unit.Blocks4096)

            # Bug fix: report the ioclass under test (io_class.id); the
            # message previously used `i`, leaked from the snapshot loop.
            if not isclose(io_size.value, actuall_occupancy.value, rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} should be equal {io_size} "
                    f"but is {actuall_occupancy} instead!"
                )

            for i in tmp_io_class_list:
                actuall_occupancy = get_io_class_occupancy(cache, i.id)
                if original_occupacies[i.id] != actuall_occupancy:
                    TestRun.LOGGER.error(
                        f"Occupancy for ioclass {i.id} should not change "
                        f"during IO to ioclass {io_class.id}. Original value: "
                        f"{original_occupacies[i.id]}, actuall: {actuall_occupancy}"
                    )

    with TestRun.step("Check if none of ioclasses did not exceed specified occupancy"):
        for io_class in io_classes:
            actuall_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = (
                (io_class.max_occupancy * cache_size)
                .align_up(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )

            # Divergency may be casued be rounding max occupancy
            if actuall_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass id exceeded: {io_class.id}. "
                    f"Limit: {occupancy_limit}, actuall: {actuall_occupancy}"
                )
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("io_size_multiplication", [0.5, 2])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occupancy_directory_read(io_size_multiplication, cache_line_size, cache_mode):
    """
    title: Test for max occupancy set for ioclass based on directory - read
    description: |
        Set cache mode to pass-through and create files on mounted core
        device. Switch cache to write-through, and load ioclasses applying
        to different files. Read files and check if occupancy threshold is
        respected.
    pass_criteria:
      - Max occupancy is set correctly for each ioclass
      - Each ioclass does not exceed max occupancy
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step(
        f"In each directory create file with size of {io_size_multiplication} "
        f"max io_class occupancy for future read"
    ):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int(
                    (io_class.max_occupancy * cache_size) / Unit.Blocks4096 * io_size_multiplication
                ),
            )

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        # Default ioclass gets 0.00 allocation so only the directory classes cache data.
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step(f"Read each file and check if data was inserted to appropriate ioclass"):
        for io_class in io_classes:
            original_occupancies = {}
            other_io_classes = [i for i in io_classes if i != io_class]
            for i in other_io_classes:
                original_occupancies[i.id] = get_io_class_occupancy(cache, i.id)

            run_io_dir_read(f"{io_class.dir_path}/tmp_file")

            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            # When the file is smaller than the limit (multiplication < 1) the
            # whole file should be cached; otherwise occupancy is capped at the
            # ioclass max occupancy.
            io_size = io_class.max_occupancy * cache_size
            if io_size_multiplication < 1:
                io_size *= io_size_multiplication
            io_size.set_unit(Unit.Blocks4096)

            if not isclose(io_size.value, actual_occupancy.value, rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} should be equal {io_size} "
                    f"but is {actual_occupancy} instead!"
                )

            # Reading files of one ioclass must not disturb occupancy of the others.
            for i in other_io_classes:
                actual_occupancy = get_io_class_occupancy(cache, i.id)
                if original_occupancies[i.id] != actual_occupancy:
                    TestRun.LOGGER.error(
                        f"Occupancy for ioclass {i.id} should not change "
                        f"during IO to ioclass {io_class.id}. Original value: "
                        f"{original_occupancies[i.id]}, actual: {actual_occupancy}"
                    )

    with TestRun.step("Check if none of ioclasses exceeded specified occupancy"):
        for io_class in io_classes:
            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = (
                (io_class.max_occupancy * cache_size)
                .align_up(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )

            # Divergence may be caused by rounding max occupancy
            if actual_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass id exceeded: {io_class.id}. "
                    f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
                )
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_occupancy_sum_cache():
    """
    title: Test for ioclasses occupancy sum
    description: |
        Create ioclass for 3 different directories, each with different
        max cache occupancy configured. Trigger IO to each ioclass and check
        if sum of their Usage stats is equal to cache Usage stats.
    pass_criteria:
      - Max occupancy is set correctly for each ioclass
      - Sum of ioclassess stats is equal to cache stats
    """

    def verify_usage_stats_sum(header):
        # Sum Usage stats of every configured ioclass plus the default one and
        # compare against cache-wide Usage stats. `free` is zeroed because
        # per-ioclass stats do not account for free cache space.
        usage_stats_sum = UsageStats(Size(0), Size(0), Size(0), Size(0))
        for i in io_classes:
            usage_stats_sum += get_io_class_usage(cache, i.id)
        usage_stats_sum += get_io_class_usage(cache, default_ioclass_id)

        cache_stats = cache.get_statistics().usage_stats
        cache_stats.free = Size(0)

        if (
            cache_stats.occupancy != usage_stats_sum.occupancy
            or cache_stats.clean != usage_stats_sum.clean
            or cache_stats.dirty != usage_stats_sum.dirty
        ):
            TestRun.LOGGER.error(
                f"{header} doesn't match sum of ioclasses stats\n"
                f"cache stats: {cache_stats}, summed up stats {usage_stats_sum}\n"
                f"particular stats {[get_io_class_usage(cache, i.id) for i in io_classes]}"
            )

    with TestRun.step("Prepare CAS device"):
        cache, core = prepare()
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        default_ioclass_id = 0
        IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.10, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.20, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Purge cache"):
        cache.purge_cache()

    with TestRun.step("Verify stats before IO"):
        verify_usage_stats_sum("Initial cache usage stats")

    with TestRun.step(f"Trigger IO to each directory"):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file",
                int((io_class.max_occupancy * cache_size) / Unit.Blocks4096),
            )

    with TestRun.step("Verify stats after IO"):
        verify_usage_stats_sum("Cache usage stats")

View File

@ -0,0 +1,180 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from collections import namedtuple
from math import isclose
import pytest
from .io_class_common import *
from api.cas.cache_config import CacheMode, CacheLineSize
from api.cas.casadm_params import OutputFormat
from api.cas.ioclass_config import IoClass
from storage_devices.device import Device
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
def test_ioclass_occuppancy_load(cache_line_size):
    # NOTE: "occuppancy" typo kept in the function name on purpose - renaming
    # would change the pytest test id.
    """
    title: Load cache with occupancy limit specified
    description: |
        Load cache and verify if occupancy limits are loaded correctly and if
        each part has assigned appropriate number of
        dirty blocks.
    pass_criteria:
      - Occupancy thresholds have correct values for each ioclass after load
    """

    def verify_dirty_limits():
        # In WB mode each ioclass should hold dirty data approximately equal
        # to its occupancy limit (aligned down to whole 4KiB blocks).
        for io_class in io_classes:
            actual_dirty = get_io_class_dirty(cache, io_class.id)
            dirty_limit = (
                (io_class.max_occupancy * cache_size)
                .align_down(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )
            if not isclose(actual_dirty.get_value(), dirty_limit.get_value(), rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Dirty for ioclass id: {io_class.id} doesn't match expected."
                    f"Expected: {dirty_limit}, actual: {actual_dirty}"
                )

    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WB, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple(
            "IoclassConfig", "id eviction_prio max_occupancy dir_path"
        )
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 3, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step(f"Perform IO with size equal to cache size"):
        for io_class in io_classes:
            run_io_dir(
                f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096)
            )

    with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
        verify_dirty_limits()

    with TestRun.step("Stop cache without flushing the data"):
        original_usage_stats = {}
        for io_class in io_classes:
            original_usage_stats[io_class.id] = get_io_class_usage(cache, io_class.id)

        original_ioclass_list = cache.list_io_classes()
        cache_disk_path = cache.cache_device.system_path

        core.unmount()
        cache.stop(no_data_flush=True)

    with TestRun.step("Load cache"):
        cache = casadm.start_cache(Device(cache_disk_path), load=True)

    with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
        # Dirty stats must survive the stop/load cycle intact.
        verify_dirty_limits()

    with TestRun.step("Compare ioclass configs"):
        ioclass_list_after_load = cache.list_io_classes()

        if len(ioclass_list_after_load) != len(original_ioclass_list):
            TestRun.LOGGER.error(
                f"Ioclass occupancy limit doesn't match. Original list size: "
                f"{len(original_ioclass_list)}, loaded list size: "
                f"{len(ioclass_list_after_load)}"
            )

        original_sorted = sorted(original_ioclass_list, key=lambda k: k["id"])
        loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k["id"])

        for original, loaded in zip(original_sorted, loaded_sorted):
            original_allocation = original["allocation"]
            loaded_allocation = loaded["allocation"]
            ioclass_id = original["id"]
            if original_allocation != loaded_allocation:
                TestRun.LOGGER.error(
                    f"Occupancy limit doesn't match for ioclass {ioclass_id}: "
                    f"Original: {original_allocation}, loaded: {loaded_allocation}"
                )

    with TestRun.step("Compare usage stats before and after the load"):
        for io_class in io_classes:
            actual_usage_stats = get_io_class_usage(cache, io_class.id)
            if original_usage_stats[io_class.id] != actual_usage_stats:
                TestRun.LOGGER.error(
                    f"Usage stats doesn't match for ioclass {io_class.id}. "
                    f"Original: {original_usage_stats[io_class.id]}, "
                    f"loaded: {actual_usage_stats}"
                )

View File

@ -0,0 +1,123 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from collections import namedtuple
from math import isclose
import pytest
from .io_class_common import *
from api.cas.cache_config import CacheMode, CacheLineSize
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrize("cache_mode", [CacheMode.WB, CacheMode.WT])
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrize("ioclass_size_multiplicatior", [0.5, 1])
def test_ioclass_repart(cache_mode, cache_line_size, ioclass_size_multiplicatior):
    """
    title: Check whether occupancy limit is respected during repart
    description: |
        Create ioclass for 3 different directories, each with different max
        occupancy threshold. Create 3 files classified on default ioclass.
        Move files to directories created earlier and force repart by reading
        theirs contents.
    pass_criteria:
      - After repart each ioclass reaches its occupancy limit and does not
        exceed it
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=cache_mode, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig", "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.40, f"{mountpoint}/A"),
            IoclassConfig(2, 4, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 5, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        # Default ioclass allocation is 1.00 so the initial files fully
        # populate the cache before the repart is forced.
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="1.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy*ioclass_size_multiplicatior:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step(f"Create 3 files classified in default ioclass"):
        for i, io_class in enumerate(io_classes):
            run_io_dir(
                f"{mountpoint}/{i}", int((io_class.max_occupancy * cache_size) / Unit.Blocks4096)
            )

        # Combined max occupancies sum to 1.0, so the default ioclass should
        # now occupy approximately the whole cache.
        if not isclose(
            get_io_class_occupancy(cache, ioclass_config.DEFAULT_IO_CLASS_ID).value,
            cache_size.value,
            rel_tol=0.1,
        ):
            TestRun.fail(f"Failed to populate default ioclass")

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}"
                )

    with TestRun.step("Force repart - move files to created directories and read theirs contents"):
        for i, io_class in enumerate(io_classes):
            fs_utils.move(source=f"{mountpoint}/{i}", destination=io_class.dir_path)
            run_io_dir_read(f"{io_class.dir_path}/{i}")

    with TestRun.step("Check if each ioclass reached it's occupancy limit"):
        for io_class in io_classes:
            actual_occupancy = get_io_class_occupancy(cache, io_class.id)

            occupancy_limit = (
                (io_class.max_occupancy * cache_size * ioclass_size_multiplicatior)
                .align_down(Unit.Blocks4096.get_value())
                .set_unit(Unit.Blocks4096)
            )

            if not isclose(actual_occupancy.value, occupancy_limit.value, rel_tol=0.1):
                TestRun.LOGGER.error(
                    f"Occupancy for ioclass {io_class.id} does not match. "
                    f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
                )

View File

@ -0,0 +1,133 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from recordclass import recordclass
import pytest
from .io_class_common import *
from api.cas.cache_config import CacheMode, CacheLineSize
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_utils.os_utils import sync, Udev
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrize("new_occupancy", [0, 20, 70, 100])
def test_ioclass_resize(cache_line_size, new_occupancy):
    """
    title: Resize ioclass
    description: |
        Add ioclass, fill it with data, change its size and check if new
        limit is respected
    pass_criteria:
      - Occupancy threshold is respected
    """

    def check_occupancy_limit(tolerance):
        # Verify the ioclass occupancy stays within its configured limit.
        # `tolerance` absorbs divergence caused by rounding of max occupancy.
        actual_occupancy = get_io_class_occupancy(cache, io_class.id)
        occupancy_limit = (
            (io_class.max_occupancy * cache_size)
            .align_up(Unit.Blocks4096.get_value())
            .set_unit(Unit.Blocks4096)
        )
        if actual_occupancy > occupancy_limit + tolerance:
            TestRun.LOGGER.error(
                f"Occupancy for ioclass id exceeded: {io_class.id}. "
                f"Limit: {occupancy_limit}, actual: {actual_occupancy}"
            )

    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT, cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        # recordclass (mutable namedtuple) - max_occupancy is modified in the
        # resize step below.
        IoclassConfig = recordclass("IoclassConfig", "id eviction_prio max_occupancy dir_path")
        io_class = IoclassConfig(1, 3, 0.50, f"{mountpoint}/A")

        fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))

    with TestRun.step("Add directory for ioclass"):
        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        occupancy = get_io_class_occupancy(cache, io_class.id)
        if occupancy.get_value() != 0:
            TestRun.LOGGER.error(
                f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                f" Expected 0, got: {occupancy}"
            )

    with TestRun.step(f"Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096))

    with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
        check_occupancy_limit(tolerance=Size(0, Unit.Blocks4096))

    with TestRun.step(
        f"Resize ioclass from {io_class.max_occupancy*100}% to {new_occupancy}%" " cache occupancy"
    ):
        io_class.max_occupancy = new_occupancy / 100
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

        ioclass_config.add_ioclass(*str(IoClass.default(allocation="0.00")).split(","))
        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )

        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file", int((cache_size) / Unit.Blocks4096))

    with TestRun.step("Check if the ioclass did not exceed specified occupancy"):
        # Divergence may be caused by rounding max occupancy
        check_occupancy_limit(tolerance=Size(100, Unit.Blocks4096))

View File

@ -38,7 +38,7 @@ def test_ioclass_process_name():
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"process_name:dd&done",
ioclass_config_path=ioclass_config_path,
)
@ -113,7 +113,7 @@ def test_ioclass_pid():
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
allocation="1.00",
rule=f"pid:eq:{pid}&done",
ioclass_config_path=ioclass_config_path,
)

View File

@ -134,7 +134,7 @@ def add_io_class(class_id, eviction_prio, rule):
ioclass_config.add_ioclass(
ioclass_id=class_id,
eviction_priority=eviction_prio,
allocation=True,
allocation="1.00",
rule=rule,
ioclass_config_path=ioclass_config_path,
)