From aaedfb35dd9d6224785e95cd1a0b9e2f7910bba4 Mon Sep 17 00:00:00 2001
From: Jan Musial
Date: Wed, 23 Oct 2019 16:18:00 +0200
Subject: [PATCH 1/2] Change startup procedure

The current startup procedure works on the assumption that we handle
asynchronously appearing devices in an asynchronous way (udev rules) and
that synchronous events in the system (systemd units) won't interfere.
If anything broke (mounts), we would simply restart the affected units.
This tactic worked as long as restarting systemd units took a reasonable
amount of time. As hackish as it sounds, it worked on all systems the
software has been validated on. Unfortunately it stopped working because
*.mount units now take MUCH longer to restart even on mainstream OSes,
so it's time for a change.

This change implements an open-cas systemd service which waits,
synchronously with the systemd bootup process, for all required Open CAS
devices to start. If they don't, we fail the boot process just as failing
mounts would. We also make sure that this happens before any mounts
(aside from the root FS and other critical FSes) are even attempted.
The open-cas-mount-utility can now be discarded.

To override this behaviour on a per-core basis, specify the
lazy_startup=true option in opencas.conf.

Signed-off-by: Jan Musial
---
 utils/60-persistent-storage-cas-load.rules |   5 -
 utils/Makefile                             |  33 +++---
 utils/casctl                               |  71 ++++++++++--
 utils/casctl.8                             |  15 +++
 utils/open-cas-mount-utility               |  26 -----
 utils/open-cas.service                     |  22 ++++
 utils/opencas.conf.5                       |   1 +
 utils/opencas.py                           | 119 +++++++++++++++++++--
 8 files changed, 227 insertions(+), 65 deletions(-)
 delete mode 100755 utils/open-cas-mount-utility
 create mode 100644 utils/open-cas.service

diff --git a/utils/60-persistent-storage-cas-load.rules b/utils/60-persistent-storage-cas-load.rules
index 6933cfa..31a93eb 100644
--- a/utils/60-persistent-storage-cas-load.rules
+++ b/utils/60-persistent-storage-cas-load.rules
@@ -3,9 +3,4 @@ SUBSYSTEM!="block", GOTO="cas_loader_end"
 
 RUN+="/lib/opencas/open-cas-loader /dev/$name"
 
-# Work around systemd<->udev interaction, make sure filesystems with labels on
-# cas are mounted properly
-KERNEL!="cas*", GOTO="cas_loader_end"
-IMPORT{builtin}="blkid"
-ENV{ID_FS_USAGE}=="filesystem|other", ENV{ID_FS_LABEL_ENC}=="?*", RUN+="/lib/opencas/open-cas-mount-utility $env{ID_FS_LABEL_ENC}"
 LABEL="cas_loader_end"
diff --git a/utils/Makefile b/utils/Makefile
index 3ab00d6..fc2fa6f 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -9,25 +9,11 @@ UDEV:=$(shell which udevadm)
 SYSTEMCTL := $(shell which systemctl)
 PYTHON3 := $(shell which python3)
 
-ifeq (, $(shell which systemctl))
-define cas_install
-	install -m 755 open-cas-shutdown /etc/init.d/open-cas-shutdown
-	/sbin/chkconfig open-cas-shutdown on; service open-cas-shutdown start
-endef
-else
 ifneq "$(wildcard /usr/lib/systemd/system)" ""
 SYSTEMD_DIR=/usr/lib/systemd/system
 else
 SYSTEMD_DIR=/lib/systemd/system
 endif
-define cas_install
-	install -m 644 open-cas-shutdown.service $(SYSTEMD_DIR)/open-cas-shutdown.service
-	install -m 755 -d $(SYSTEMD_DIR)/../system-shutdown
-	install -m 755 open-cas.shutdown $(SYSTEMD_DIR)/../system-shutdown/open-cas.shutdown
-	$(SYSTEMCTL) daemon-reload
-	$(SYSTEMCTL) -q enable open-cas-shutdown
-endef
-endif
 
 # Just a placeholder when running make from parent dir without install/uninstall arg
 all: ;
@@ -42,7 +28,6 @@
 	@install -m 644 opencas.py $(CASCTL_DIR)/opencas.py
 	@install -m 755 casctl $(CASCTL_DIR)/casctl
 	@install -m 755 open-cas-loader $(CASCTL_DIR)/open-cas-loader
- @install -m 755 open-cas-mount-utility $(CASCTL_DIR)/open-cas-mount-utility @ln -fs $(CASCTL_DIR)/casctl /sbin/casctl @@ -55,14 +40,19 @@ else @install -m 644 casctl.8 /usr/share/man/man8/casctl.8 - $(cas_install) + @install -m 644 open-cas-shutdown.service $(SYSTEMD_DIR)/open-cas-shutdown.service + @install -m 644 open-cas.service $(SYSTEMD_DIR)/open-cas.service + @install -m 755 -d $(SYSTEMD_DIR)/../system-shutdown + @install -m 755 open-cas.shutdown $(SYSTEMD_DIR)/../system-shutdown/open-cas.shutdown + @$(SYSTEMCTL) daemon-reload + @$(SYSTEMCTL) -q enable open-cas-shutdown + @$(SYSTEMCTL) -q enable open-cas endif uninstall: @rm $(CASCTL_DIR)/opencas.py @rm $(CASCTL_DIR)/casctl @rm $(CASCTL_DIR)/open-cas-loader - @rm $(CASCTL_DIR)/open-cas-mount-utility @rm -rf $(CASCTL_DIR) @rm /sbin/casctl @@ -71,6 +61,15 @@ uninstall: @rm /lib/udev/rules.d/60-persistent-storage-cas-load.rules @rm /lib/udev/rules.d/60-persistent-storage-cas.rules + @$(UDEV) control --reload-rules + + @$(SYSTEMCTL) -q disable open-cas-shutdown + @$(SYSTEMCTL) -q disable open-cas + @$(SYSTEMCTL) daemon-reload + + @rm $(SYSTEMD_DIR)/open-cas-shutdown.service + @rm $(SYSTEMD_DIR)/open-cas.service + @rm $(SYSTEMD_DIR)/../system-shutdown/open-cas.shutdown .PHONY: install uninstall clean distclean diff --git a/utils/casctl b/utils/casctl index 27be7e6..6d18a18 100755 --- a/utils/casctl +++ b/utils/casctl @@ -100,6 +100,27 @@ def init(force): exit(exit_code) + +def settle(timeout, interval): + try: + not_initialized = opencas.wait_for_startup(timeout, interval) + except Exception as e: + eprint(e) + exit(1) + + if not_initialized: + eprint("Open CAS initialization failed. Couldn't set up all required devices") + for device in not_initialized: + eprint( + "Couldn't add device {} as core {} in cache {}".format( + device.device, device.core_id, device.cache_id + ) + ) + exit(1) + + exit(0) + + # Stop - detach cores and stop caches def stop(flush): try: @@ -107,30 +128,55 @@ def stop(flush): except Exception as e: eprint(e) + # Command line arguments parsing + class cas: def __init__(self): - parser = argparse.ArgumentParser(prog = 'cas') - subparsers = parser.add_subparsers(title = 'actions') + parser = argparse.ArgumentParser(prog="casctl") + subparsers = parser.add_subparsers(title="actions") - parser_init = subparsers.add_parser('init', help = 'Setup initial configuration') - parser_init.set_defaults(command='init') - parser_init.add_argument ('--force', action='store_true', help = 'Force cache start') + parser_init = subparsers.add_parser("init", help="Setup initial configuration") + parser_init.set_defaults(command="init") + parser_init.add_argument( + "--force", action="store_true", help="Force cache start" + ) - parser_start = subparsers.add_parser('start', help = 'Start cache configuration') - parser_start.set_defaults(command='start') + parser_start = subparsers.add_parser("start", help="Start cache configuration") + parser_start.set_defaults(command="start") - parser_stop = subparsers.add_parser('stop', help = 'Stop cache configuration') - parser_stop.set_defaults(command='stop') - parser_stop.add_argument ('--flush', action='store_true', help = 'Flush data before stopping') + parser_settle = subparsers.add_parser( + "settle", help="Wait for startup of devices" + ) + parser_settle.set_defaults(command="settle") + parser_settle.add_argument( + "--timeout", + action="store", + help="How long should command wait [s]", + default=270, + type=int, + ) + parser_settle.add_argument( + "--interval", + action="store", + 
            help="Polling interval [s]",
+            default=5,
+            type=int,
+        )
+
+        parser_stop = subparsers.add_parser("stop", help="Stop cache configuration")
+        parser_stop.set_defaults(command="stop")
+        parser_stop.add_argument(
+            "--flush", action="store_true", help="Flush data before stopping"
+        )
 
         if len(sys.argv[1:]) == 0:
             parser.print_help()
             return
 
         args = parser.parse_args(sys.argv[1:])
-        getattr(self, 'command_' + args.command)(args)
+        getattr(self, "command_" + args.command)(args)
 
     def command_init(self, args):
         init(args.force)
@@ -138,6 +184,9 @@ class cas:
     def command_start(self, args):
         start()
 
+    def command_settle(self, args):
+        settle(args.timeout, args.interval)
+
     def command_stop(self, args):
         stop(args.flush)
 
diff --git a/utils/casctl.8 b/utils/casctl.8
index 97d8d2f..6180a36 100644
--- a/utils/casctl.8
+++ b/utils/casctl.8
@@ -23,6 +23,10 @@ Stop all cache instances.
 
 .B init
 Initial configuration of caches and core devices.
+.TP
+.B settle
+Wait for all core devices to be added to their respective caches.
+
 .br
 .B CAUTION
 .br
@@ -51,6 +55,17 @@ Flush data before stopping.
 .B --force
 Force cache start even if cache device contains partitions or metadata from
 previously running cache instances.
 
+.TP
+.SH Options that are valid with settle are:
+
+.TP
+.B --timeout
+How long the command will block waiting for devices to start before timing out [s].
+
+.TP
+.B --interval
+How often the command will poll for status changes [s].
+
 .TP
 .SH Command --help (-h) does not accept any options.
diff --git a/utils/open-cas-mount-utility b/utils/open-cas-mount-utility
deleted file mode 100755
index 687197c..0000000
--- a/utils/open-cas-mount-utility
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-#
-# Copyright(c) 2012-2019 Intel Corporation
-# SPDX-License-Identifier: BSD-3-Clause-Clear
-#
-
-# Find all mount units, cut to remove list-units decorations
-logger "Open CAS Mount Utility checking for $1 FS label..."
-MOUNT_UNITS=`systemctl --plain list-units | grep \.mount | grep -v '\-\.mount' | awk '{print $1}'`
-
-for unit in $MOUNT_UNITS
-do
-	# Find BindsTo keyword, pry out FS label from the .device unit name
-	label=`systemctl show $unit | grep BindsTo | sed "s/.*label\-\(.*\)\.device/\1/;tx;d;:x"`
-	if [ "$label" == "" ]; then
-		continue
-	fi
-	label_unescaped=$(systemd-escape -u $(systemd-escape -u $label))
-	if [ "$label_unescaped" == "$1" ]; then
-		# If FS label matches restart unit
-		logger "Open CAS Mount Utility restarting $unit..."
- systemctl restart $unit &> /dev/null - exit 0 - fi -done - diff --git a/utils/open-cas.service b/utils/open-cas.service new file mode 100644 index 0000000..3afe8ea --- /dev/null +++ b/utils/open-cas.service @@ -0,0 +1,22 @@ +# +# Copyright(c) 2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +[Unit] +Description=opencas initialization service +After=systemd-remount-fs.service +Before=local-fs-pre.target local-fs.target +Wants=local-fs-pre.target local-fs.target +DefaultDependencies=no +OnFailure=emergency.target +OnFailureJobMode=isolate + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/sbin/casctl settle --timeout 150 --interval 5 +TimeoutStartSec=3min + +[Install] +RequiredBy=local-fs.target local-fs-pre.target diff --git a/utils/opencas.conf.5 b/utils/opencas.conf.5 index 4a993da..6c4defd 100644 --- a/utils/opencas.conf.5 +++ b/utils/opencas.conf.5 @@ -36,6 +36,7 @@ Core ID <0-4095> .br Core device .br +Extra fields (optional) lazy_startup= .RE .TP \fBNOTES\fR diff --git a/utils/opencas.py b/utils/opencas.py index 350ad80..9863791 100644 --- a/utils/opencas.py +++ b/utils/opencas.py @@ -8,6 +8,7 @@ import csv import re import os import stat +import time # Casadm functionality @@ -308,24 +309,35 @@ class cas_config(object): return ret class core_config(object): - def __init__(self, cache_id, core_id, path): + def __init__(self, cache_id, core_id, path, **params): self.cache_id = int(cache_id) self.core_id = int(core_id) self.device = path + self.params = params @classmethod def from_line(cls, line, allow_incomplete=False): values = line.split() - if len(values) > 3: - raise ValueError('Invalid core configuration (too many columns)') + if len(values) > 4: + raise ValueError("Invalid core configuration (too many columns)") elif len(values) < 3: - raise ValueError('Invalid core configuration (too few columns)') + raise ValueError("Invalid core configuration (too few columns)") cache_id = int(values[0]) core_id = int(values[1]) device = values[2] - core_config = cls(cache_id, core_id, device) + params = dict() + if len(values) > 3: + for param in values[3].lower().split(","): + param_name, param_value = param.split("=") + if param_name in params: + raise ValueError( + "Invalid core configuration (repeated parameter)" + ) + params[param_name] = param_value + + core_config = cls(cache_id, core_id, device, **params) core_config.validate_config(allow_incomplete) @@ -335,9 +347,24 @@ class cas_config(object): self.check_core_id_valid() self.check_recursive() cas_config.cache_config.check_cache_id_valid(self.cache_id) + + for param_name, param_value in self.params.items(): + self.validate_parameter(param_name, param_value) + if not allow_incomplete: cas_config.check_block_device(self.device) + def validate_parameter(self, param_name, param_value): + if param_name == "lazy_startup": + if param_value.lower() not in ["true", "false"]: + raise ValueError( + "{} is invalid value for '{}' core param".format( + param_value, param_name + ) + ) + else: + raise ValueError("'{}' is invalid core param name".format(param_name)) + def check_core_id_valid(self): if not 0 <= int(self.core_id) <= 4095: raise ValueError('{0} is invalid core id'.format(self.core_id)) @@ -353,7 +380,14 @@ class cas_config(object): raise ValueError('Recursive configuration detected') def to_line(self): - return '{0}\t{1}\t{2}\n'.format(self.cache_id, self.core_id, self.device) + ret = "{0}\t{1}\t{2}".format(self.cache_id, self.core_id, self.device) + for i, (param, value) in 
enumerate(self.params.items()): + ret += "," if i > 0 else "\t" + + ret += "{0}={1}".format(param, value) + ret += "\n" + + return ret def __init__(self, caches=None, cores=None, version_tag=None): self.caches = caches if caches else dict() @@ -494,6 +528,13 @@ class cas_config(object): except: raise Exception('Couldn\'t write config file') + def get_startup_cores(self): + return [ + core + for core in self.cores + if core.params.get("lazy_startup", "false") == "false" + ] + # Config helper functions @@ -689,3 +730,69 @@ def stop(flush): error.raise_nonempty() + +def get_devices_state(): + device_list = get_caches_list() + + devices = {"core_pool": [], "caches": {}, "cores": {}} + + core_pool = False + prev_cache_id = -1 + + for device in device_list: + if device["type"] == "core pool": + core_pool = True + continue + + if device["type"] == "cache": + core_pool = False + prev_cache_id = int(device["id"]) + devices["caches"].update( + { + int(device["id"]): { + "device": device["disk"], + "status": device["status"], + } + } + ) + elif device["type"] == "core": + core = {"device": device["disk"], "status": device["status"]} + if core_pool: + devices["core_pool"].append(core) + else: + core.update({"cache_id": prev_cache_id}) + devices["cores"].update( + {(prev_cache_id, int(device["id"])): core} + ) + + return devices + + +def wait_for_startup(timeout=300, interval=5): + try: + config = cas_config.from_file( + cas_config.default_location, allow_incomplete=True + ) + except Exception as e: + raise Exception("Unable to load opencas config. Reason: {0}".format(str(e))) + + stop_time = time.time() + int(timeout) + + not_initialized = None + target_core_state = config.get_startup_cores() + + while stop_time > time.time(): + not_initialized = [] + runtime_core_state = get_devices_state()["cores"] + + for core in target_core_state: + runtime_state = runtime_core_state.get((core.cache_id, core.core_id), None) + if not runtime_state or runtime_state["status"] != "Active": + not_initialized.append(core) + + if not not_initialized: + break + + time.sleep(interval) + + return not_initialized From df1ba933def9cbeeccf1415fd4e2c015bb72c9a0 Mon Sep 17 00:00:00 2001 From: Jan Musial Date: Thu, 24 Oct 2019 14:13:24 +0200 Subject: [PATCH 2/2] Update tests Signed-off-by: Jan Musial --- test/utils_tests/opencas-py-tests/helpers.py | 10 +- .../opencas-py-tests/test_cas_config_01.py | 8 + .../test_cas_config_core_01.py | 67 +- .../test_helper_functions_01.py | 583 ++++++++++++++++++ 4 files changed, 641 insertions(+), 27 deletions(-) create mode 100644 test/utils_tests/opencas-py-tests/test_helper_functions_01.py diff --git a/test/utils_tests/opencas-py-tests/helpers.py b/test/utils_tests/opencas-py-tests/helpers.py index 9f5ac23..a552931 100644 --- a/test/utils_tests/opencas-py-tests/helpers.py +++ b/test/utils_tests/opencas-py-tests/helpers.py @@ -52,7 +52,7 @@ def get_hashed_config_list(conf): def get_conf_line_hash(line): """ - Removes whitespace, lowercases, comments and sorts cache params if present. + Removes whitespace, lowercases, comments and sorts params if present. Returns empty line for comment-only lines We don't care about order of params and kinds of whitespace in config lines @@ -60,15 +60,15 @@ def get_conf_line_hash(line): testing we pretend we don't. 
""" - def sort_cache_params(params): + def sort_params(params): return ",".join(sorted(params.split(","))) line = line.split("#")[0] - cache_params_pattern = re.compile(r"(.*?\s)(\S+=\S+)") - match = cache_params_pattern.search(line) + params_pattern = re.compile(r"(.*?\s)(\S+=\S+)") + match = params_pattern.search(line) if match: - sorted_params = sort_cache_params(match.group(2)) + sorted_params = sort_params(match.group(2)) line = match.group(1) + sorted_params return "".join(line.lower().split()) diff --git a/test/utils_tests/opencas-py-tests/test_cas_config_01.py b/test/utils_tests/opencas-py-tests/test_cas_config_01.py index d7031ed..ea9e764 100644 --- a/test/utils_tests/opencas-py-tests/test_cas_config_01.py +++ b/test/utils_tests/opencas-py-tests/test_cas_config_01.py @@ -339,6 +339,14 @@ def test_cas_config_get_by_id_path_not_found(mock_listdir, mock_realpath): ], [], ), + ( + [ + "1 /dev/dummy0n1 WT cleaning_policy=acp", + ], + [ + "1 1 /dev/dummy1 lazy_startup=true" + ], + ), ], ) @patch("builtins.open", new_callable=h.MockConfigFile) diff --git a/test/utils_tests/opencas-py-tests/test_cas_config_core_01.py b/test/utils_tests/opencas-py-tests/test_cas_config_core_01.py index 9b93acc..f011bea 100644 --- a/test/utils_tests/opencas-py-tests/test_cas_config_core_01.py +++ b/test/utils_tests/opencas-py-tests/test_cas_config_core_01.py @@ -18,12 +18,15 @@ import opencas " ", "#", " # ", - ("TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2Npbmcg" + ( + "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2Npbmcg" "ZWxpdCwgc2VkIGRvIGVpdXNtb2QgdGVtcG9yIGluY2lkaWR1bnQgdXQgbGFib3JlI" - "GV0IGRvbG9yZSBtYWduYSBhbGlxdWEu"), + "GV0IGRvbG9yZSBtYWduYSBhbGlxdWEu" + ), " # ? } { ! ", - "1 1 /dev/sda /dev/sdb", - "1 2 1 /dev/sda ", + "1 1 /dev/not_a_real_device /dev/sdb", + "1 2 1 /dev/not_a_real_device ", + "1 2 1 /dev/not_a_real_device dinosaur=velociraptor", ], ) @mock.patch("opencas.cas_config.core_config.validate_config") @@ -32,17 +35,39 @@ def test_core_config_from_line_parsing_checks_01(mock_validate, line): opencas.cas_config.core_config.from_line(line) -@pytest.mark.parametrize("line", ["1 1 /dev/sda", "1 1 /dev/sda "]) -@mock.patch("opencas.cas_config.core_config.validate_config") -def test_core_config_from_line_parsing_checks_02(mock_validate, line): - opencas.cas_config.core_config.from_line(line) +@pytest.mark.parametrize( + "line", + [ + "1 1 /dev/not_a_real_device", + "1 1 /dev/not_a_real_device ", + "1 1 /dev/not_a_real_device lazy_startup=true", + "1 1 /dev/not_a_real_device lazy_startup=false", + "1 1 /dev/not_a_real_device lazy_startup=False", + "1 1 /dev/not_a_real_device lazy_startup=True", + ], +) +def test_core_config_from_line_parsing_checks_02(line): + opencas.cas_config.core_config.from_line(line, allow_incomplete=True) + + +@pytest.mark.parametrize( + "line", + [ + "1 1 /dev/not_a_real_device dinosaur=velociraptor", + "1 1 /dev/not_a_real_device lazy_startup=maybe", + "1 1 /dev/not_a_real_device lazy_saturday=definitely", + "1 1 /dev/not_a_real_device 00000=345", + "1 1 /dev/not_a_real_device eval(38+4)", + ], +) +def test_core_config_from_line_parsing_checks_params_01(line): + with pytest.raises(ValueError): + opencas.cas_config.core_config.from_line(line, allow_incomplete=True) @mock.patch("os.path.exists") @mock.patch("os.stat") -def test_core_config_from_line_device_is_directory( - mock_stat, mock_path_exists -): +def test_core_config_from_line_device_is_directory(mock_stat, mock_path_exists): mock_path_exists.side_effect = 
h.get_mock_os_exists(["/home/user/stuff"]) mock_stat.return_value = mock.Mock(st_mode=stat.S_IFDIR) @@ -57,7 +82,7 @@ def test_core_config_from_line_device_not_present(mock_stat, mock_path_exists): mock_stat.side_effect = ValueError() with pytest.raises(ValueError): - opencas.cas_config.core_config.from_line("1 1 /dev/sda") + opencas.cas_config.core_config.from_line("1 1 /dev/not_a_real_device") def test_core_config_from_line_recursive_multilevel(): @@ -72,7 +97,7 @@ def test_core_config_from_line_multilevel(): @mock.patch("opencas.cas_config.check_block_device") def test_core_config_from_line_allow_incomplete(mock_check_block,): opencas.cas_config.core_config.from_line( - "1 1 /dev/sda", allow_incomplete=True + "1 1 /dev/not_a_real_device", allow_incomplete=True ) assert not mock_check_block.called @@ -100,10 +125,10 @@ def test_core_config_from_line_allow_incomplete(mock_check_block,): def test_core_config_from_line_cache_id_validation_01( mock_stat, mock_path_exists, cache_id, core_id ): - mock_path_exists.side_effect = h.get_mock_os_exists(["/dev/sda"]) + mock_path_exists.side_effect = h.get_mock_os_exists(["/dev/not_a_real_device"]) mock_stat.return_value = mock.Mock(st_mode=stat.S_IFBLK) - line = "{0} {1} /dev/sda".format(cache_id, core_id) + line = "{0} {1} /dev/not_a_real_device".format(cache_id, core_id) with pytest.raises(ValueError): opencas.cas_config.core_config.from_line(line) @@ -117,10 +142,10 @@ def test_core_config_from_line_cache_id_validation_01( def test_core_config_from_line_cache_id_validation_02( mock_stat, mock_path_exists, cache_id, core_id ): - mock_path_exists.side_effect = h.get_mock_os_exists(["/dev/sda"]) + mock_path_exists.side_effect = h.get_mock_os_exists(["/dev/not_a_real_device"]) mock_stat.return_value = mock.Mock(st_mode=stat.S_IFBLK) - line = "{0} {1} /dev/sda".format(cache_id, core_id) + line = "{0} {1} /dev/not_a_real_device".format(cache_id, core_id) opencas.cas_config.core_config.from_line(line) @@ -128,8 +153,8 @@ def test_core_config_from_line_cache_id_validation_02( @pytest.mark.parametrize( "cache_id,core_id,device", [ - ("1", "1", "/dev/sda"), - ("16384", "4095", "/dev/sda1"), + ("1", "1", "/dev/not_a_real_device"), + ("16384", "4095", "/dev/not_a_real_device"), ("16384", "0", "/dev/nvme0n1p"), ("100", "5", "/dev/dm-10"), ], @@ -148,9 +173,7 @@ def test_core_config_from_line_cache_id_validation( core_reference.validate_config() - core_after = opencas.cas_config.core_config.from_line( - core_reference.to_line() - ) + core_after = opencas.cas_config.core_config.from_line(core_reference.to_line()) assert core_after.cache_id == core_reference.cache_id assert core_after.core_id == core_reference.core_id assert core_after.device == core_reference.device diff --git a/test/utils_tests/opencas-py-tests/test_helper_functions_01.py b/test/utils_tests/opencas-py-tests/test_helper_functions_01.py new file mode 100644 index 0000000..90983e2 --- /dev/null +++ b/test/utils_tests/opencas-py-tests/test_helper_functions_01.py @@ -0,0 +1,583 @@ +# +# Copyright(c) 2019 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +import pytest +from mock import patch +import time + +import opencas + + +@patch("opencas.cas_config.from_file") +def test_cas_settle_no_config(mock_config): + """ + Check if raises exception when no config is found + """ + + mock_config.side_effect = ValueError + + with pytest.raises(Exception): + opencas.wait_for_startup() + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def 
test_cas_settle_cores_didnt_start_01(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and waits for given time + + Single core in config, no devices in runtime config. + """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(42, 13, "/dev/dummy") + ] + + time_start = time.time() + + result = opencas.wait_for_startup(timeout=5, interval=1) + + time_stop = time.time() + + assert len(result) == 1, "didn't return single uninitialized core" + assert ( + result[0].cache_id == 42 + and result[0].core_id == 13 + and result[0].device == "/dev/dummy" + ) + assert 4.5 < time_stop - time_start < 5.5, "didn't wait the right amount of time" + assert mock_list.call_count == 5 + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_cores_didnt_start_02(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and waits for given time + + Single device in config, one device in runtime config, but not the configured core + """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy") + ] + + mock_list.return_value = [ + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Active", + "write policy": "wt", + "device": "-", + } + ] + + time_start = time.time() + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + time_stop = time.time() + + assert len(result) == 1, "didn't return uninitialized core" + assert 0.5 < time_stop - time_start < 1.5, "didn't wait the right amount of time" + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_cores_didnt_start_02(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and waits for given time + + The device waited for is in core pool. + """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy") + ] + + mock_list.return_value = [ + { + "type": "core pool", + "id": "-", + "disk": "-", + "status": "-", + "write policy": "-", + "device": "-", + }, + { + "type": "core", + "id": "-", + "disk": "/dev/dummy", + "status": "Detached", + "write policy": "-", + "device": "-", + }, + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache", + "status": "Running", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "42", + "disk": "/dev/other_core", + "status": "Active", + "write policy": "-", + "device": "/dev/cas2-42", + }, + ] + + time_start = time.time() + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + time_stop = time.time() + + assert len(result) == 1, "didn't return uninitialized core" + assert 0.5 < time_stop - time_start < 1.5, "didn't wait the right amount of time" + # Assert the call count is within some small range in case something freezes up for a second + assert 9 <= mock_list.call_count <= 11 + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_cores_didnt_start_03(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and waits for given time + + The device waited for is not present, but its cache device is already started. 
+ """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy") + ] + + mock_list.return_value = [ + { + "type": "core pool", + "id": "-", + "disk": "-", + "status": "-", + "write policy": "-", + "device": "-", + }, + { + "type": "core", + "id": "-", + "disk": "/dev/other_core", + "status": "Detached", + "write policy": "-", + "device": "-", + }, + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Incomplete", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "42", + "disk": "/dev/dummy", + "status": "Inactive", + "write policy": "-", + "device": "/dev/cas1-42", + }, + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache2", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "3", + "disk": "/dev/dummy2", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-42", + }, + ] + + time_start = time.time() + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + time_stop = time.time() + + assert len(result) == 1, "didn't return uninitialized core" + assert 0.5 < time_stop - time_start < 1.5, "didn't wait the right amount of time" + # Assert the call count is within some small range in case something freezes up for a second + assert 9 <= mock_list.call_count <= 11 + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_cores_didnt_start_04(mock_list, mock_config): + """ + Check if properly returns uninitialized cores + + Two devices configured, both not present. + """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy"), + opencas.cas_config.core_config(4, 44, "/dev/dosko"), + ] + + mock_list.return_value = [ + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Incomplete", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dummy", + "status": "Inactive", + "write policy": "-", + "device": "/dev/cas1-1", + }, + { + "type": "core", + "id": "2", + "disk": "/dev/dummy3", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-2", + }, + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache2", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "3", + "disk": "/dev/dummy2", + "status": "Active", + "write policy": "-", + "device": "/dev/cas2-3", + }, + ] + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + assert len(result) == 2, "didn't return uninitialized cores" + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_core_started_01(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and doesn't return initialized ones + + Two devices configured, one present, one not present. 
+ """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy"), + opencas.cas_config.core_config(4, 44, "/dev/dosko"), + ] + + mock_list.return_value = [ + { + "type": "core pool", + "id": "-", + "disk": "-", + "status": "-", + "write policy": "-", + "device": "-", + }, + { + "type": "core", + "id": "-", + "disk": "/dev/other_core", + "status": "Detached", + "write policy": "-", + "device": "-", + }, + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Incomplete", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dummy", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-1", + }, + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache2", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "3", + "disk": "/dev/dummy2", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-42", + }, + ] + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + assert len(result) == 1, "didn't return uninitialized core" + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_core_started_02(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and doesn't return initialized ones + + Two devices configured, both present and added. + """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy"), + opencas.cas_config.core_config(4, 44, "/dev/dosko"), + ] + + mock_list.return_value = [ + { + "type": "core pool", + "id": "-", + "disk": "-", + "status": "-", + "write policy": "-", + "device": "-", + }, + { + "type": "core", + "id": "-", + "disk": "/dev/other_core", + "status": "Detached", + "write policy": "-", + "device": "-", + }, + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Running", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dummy", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-42", + }, + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache2", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "3", + "disk": "/dev/dummy2", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-42", + }, + { + "type": "cache", + "id": "4", + "disk": "/dev/dummy_cache4", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "44", + "disk": "/dev/dosko", + "status": "Active", + "write policy": "-", + "device": "/dev/cas4-44", + }, + ] + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + assert len(result) == 0, "no cores should remain uninitialized" + + +@patch("opencas.cas_config.from_file") +@patch("opencas.get_caches_list") +def test_cas_settle_core_started_03(mock_list, mock_config): + """ + Check if properly returns uninitialized cores and doesn't return initialized ones + + Two devices configured, simulate them gradually showing up with each call to + get_caches_list() + """ + + mock_config.return_value.get_startup_cores.return_value = [ + opencas.cas_config.core_config(1, 1, "/dev/dummy"), + opencas.cas_config.core_config(2, 1, "/dev/dosko"), + ] + + mock_list.side_effect = [ + [], + [ + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache4", + "status": "Incomplete", + "write policy": "wb", + "device": "-", + }, + { + "type": 
"core", + "id": "1", + "disk": "/dev/dosko", + "status": "Inactive", + "write policy": "-", + "device": "/dev/cas2-1", + }, + ], + [ + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache4", + "status": "Incomplete", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dosko", + "status": "Inactive", + "write policy": "-", + "device": "/dev/cas2-1", + }, + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Incomplete", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dummy", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-1", + }, + ], + [ + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache4", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dosko", + "status": "Active", + "write policy": "-", + "device": "/dev/cas2-1", + }, + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Incomplete", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dummy", + "status": "Inactive", + "write policy": "-", + "device": "/dev/cas1-1", + }, + ], + [ + { + "type": "cache", + "id": "2", + "disk": "/dev/dummy_cache4", + "status": "Running", + "write policy": "wb", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dosko", + "status": "Active", + "write policy": "-", + "device": "/dev/cas2-1", + }, + { + "type": "cache", + "id": "1", + "disk": "/dev/dummy_cache", + "status": "Running", + "write policy": "wt", + "device": "-", + }, + { + "type": "core", + "id": "1", + "disk": "/dev/dummy", + "status": "Active", + "write policy": "-", + "device": "/dev/cas1-1", + }, + ], + ] + + result = opencas.wait_for_startup(timeout=1, interval=0.1) + + assert len(result) == 0, "no cores should remain uninitialized"