pyocf: format all .py files with black -l 100

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date: 2022-05-09 16:27:19 +02:00
Parent: 3a1b6fd718
Commit: 83bb7317bf
32 changed files with 284 additions and 565 deletions
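For reference, the change is purely mechanical: black was run with a 100-character line limit over the pyocf Python sources, so wrapped call sites that now fit within 100 columns are collapsed onto a single line (trailing commas from the old layout are preserved, which is why several calls end in ",)"). A minimal sketch of the invocation and of the kind of rewrite it produces, using the first hunk below as the example — the working directory is an assumption, only the "-l 100" flag is recorded in the commit message:

    # Assumed shell invocation, run from the directory holding the pyocf sources:
    #   python3 -m black -l 100 .

    # Before (old layout, excerpted from the first hunk below):
    lib = cdll.LoadLibrary(
        os.path.join(
            os.path.dirname(inspect.getfile(inspect.currentframe())),
            "libocf.so",
        )
    )

    # After (black -l 100 collapses the call onto one line, keeping the trailing comma):
    lib = cdll.LoadLibrary(
        os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), "libocf.so",)
    )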


@ -14,10 +14,7 @@ class OcfLib:
def getInstance(cls):
if cls.__lib__ is None:
lib = cdll.LoadLibrary(
os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
"libocf.so",
)
os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), "libocf.so",)
)
lib.ocf_volume_get_uuid.restype = c_void_p
lib.ocf_volume_get_uuid.argtypes = [c_void_p]


@ -154,12 +154,7 @@ class Rio:
data = Data(self.jobspec.bs) # TODO pattern and verify
io = self.jobspec.target.new_io(
self.queue,
next(iogen),
self.jobspec.bs,
iodir,
0,
0,
self.queue, next(iogen), self.jobspec.bs, iodir, 0, 0,
)
io.set_data(data)
io.callback = self.get_io_cb()


@ -41,6 +41,7 @@ from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
from .ctx import OcfCtx
from .volume import RamVolume, Volume
class Backfill(Structure):
_fields_ = [("_max_queue_size", c_uint32), ("_queue_unblock_size", c_uint32)]
@ -76,7 +77,7 @@ class CacheAttachConfig(Structure):
("_open_cores", c_bool),
("_force", c_bool),
("_discard_on_start", c_bool),
("_disable_cleaner", c_bool)
("_disable_cleaner", c_bool),
]
@ -214,8 +215,7 @@ class Cache:
_cache_line_size=self.cache_line_size,
_metadata_volatile=self.metadata_volatile,
_backfill=Backfill(
_max_queue_size=self.max_queue_size,
_queue_unblock_size=self.queue_unblock_size,
_max_queue_size=self.max_queue_size, _queue_unblock_size=self.queue_unblock_size,
),
_locked=locked,
_pt_unaligned_io=self.pt_unaligned_io,
@ -259,9 +259,7 @@ class Cache:
def standby_activate(self, device, open_cores=True):
device_cfg = Cache.generate_device_config(device)
activate_cfg = CacheStandbyActivateConfig(
_device=device_cfg, _open_cores=open_cores,
)
activate_cfg = CacheStandbyActivateConfig(_device=device_cfg, _open_cores=open_cores,)
self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
@ -297,9 +295,7 @@ class Cache:
if c.results["error"]:
raise OcfError("Error changing cleaning policy", c.results["error"])
def set_cleaning_policy_param(
self, cleaning_policy: CleaningPolicy, param_id, param_value
):
def set_cleaning_policy_param(self, cleaning_policy: CleaningPolicy, param_id, param_value):
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_cleaning_set_param(
@ -351,9 +347,7 @@ class Cache:
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
self.write_lock()
status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
self.cache_handle, policy
)
status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(self.cache_handle, policy)
self.write_unlock()
@ -382,9 +376,7 @@ class Cache:
self.write_unlock()
if status:
raise OcfError(
"Error setting cache seq cut off policy promotion count", status
)
raise OcfError("Error setting cache seq cut off policy promotion count", status)
def get_partition_info(self, part_id: int):
ioclass_info = IoClassInfo()
@ -410,13 +402,7 @@ class Cache:
}
def add_partition(
self,
part_id: int,
name: str,
min_size: int,
max_size: int,
priority: int,
valid: bool,
self, part_id: int, name: str, min_size: int, max_size: int, priority: int, valid: bool,
):
self.write_lock()
@ -432,12 +418,7 @@ class Cache:
raise OcfError("Error adding partition to cache", status)
def configure_partition(
self,
part_id: int,
name: str,
max_size: int,
priority: int,
cache_mode=CACHE_MODE_NONE,
self, part_id: int, name: str, max_size: int, priority: int, cache_mode=CACHE_MODE_NONE,
):
ioclasses_info = IoClassesInfo()
@ -491,12 +472,7 @@ class Cache:
return device_config
def attach_device(
self,
device,
force=False,
perform_test=False,
cache_line_size=None,
open_cores=False,
self, device, force=False, perform_test=False, cache_line_size=None, open_cores=False,
):
self.device = device
self.device_name = device.uuid
@ -505,9 +481,7 @@ class Cache:
attach_cfg = CacheAttachConfig(
_device=device_config,
_cache_line_size=cache_line_size
if cache_line_size
else self.cache_line_size,
_cache_line_size=cache_line_size if cache_line_size else self.cache_line_size,
_open_cores=open_cores,
_force=force,
_discard_on_start=False,
@ -517,17 +491,14 @@ class Cache:
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_attach(
self.cache_handle, byref(attach_cfg), c, None
)
self.owner.lib.ocf_mngt_cache_attach(self.cache_handle, byref(attach_cfg), c, None)
c.wait()
self.write_unlock()
if c.results["error"]:
raise OcfError(
f"Attaching cache device failed",
c.results["error"],
f"Attaching cache device failed", c.results["error"],
)
def standby_attach(self, device, force=False):
@ -548,17 +519,14 @@ class Cache:
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_standby_attach(
self.cache_handle, byref(attach_cfg), c, None
)
self.owner.lib.ocf_mngt_cache_standby_attach(self.cache_handle, byref(attach_cfg), c, None)
c.wait()
self.write_unlock()
if c.results["error"]:
raise OcfError(
f"Attaching to standby cache failed",
c.results["error"],
f"Attaching to standby cache failed", c.results["error"],
)
def standby_load(self, device, perform_test=True):
@ -577,9 +545,7 @@ class Cache:
self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_standby_load(
self.cache_handle, byref(attach_cfg), c, None
)
self.owner.lib.ocf_mngt_cache_standby_load(self.cache_handle, byref(attach_cfg), c, None)
c.wait()
self.write_unlock()
@ -616,9 +582,7 @@ class Cache:
self.write_lock()
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_load(
self.cache_handle, byref(attach_cfg), c, None
)
self.owner.lib.ocf_mngt_cache_load(self.cache_handle, byref(attach_cfg), c, None)
c.wait()
self.write_unlock()
@ -689,10 +653,7 @@ class Cache:
core_handle = c_void_p()
result = self.owner.lib.ocf_core_get_by_name(
self.cache_handle,
name.encode("ascii"),
len(name),
byref(core_handle),
self.cache_handle, name.encode("ascii"), len(name), byref(core_handle),
)
if result != 0:
raise OcfError("Failed getting core by name", result)
@ -714,12 +675,7 @@ class Cache:
self.write_lock()
c = OcfCompletion(
[
("cache", c_void_p),
("core", c_void_p),
("priv", c_void_p),
("error", c_int),
]
[("cache", c_void_p), ("core", c_void_p), ("priv", c_void_p), ("error", c_int),]
)
self.owner.lib.ocf_mngt_cache_add_core(self.cache_handle, byref(cfg), c, None)
@ -908,6 +864,7 @@ class Cache:
def settle(self):
Queue.settle_many(self.io_queues + [self.mngt_queue])
lib = OcfLib.getInstance()
lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
lib.ocf_mngt_cache_add_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p]


@ -80,10 +80,7 @@ class Core:
def get_config(self):
cfg = CoreConfig(
_uuid=Uuid(
_data=cast(
create_string_buffer(self.device.uuid.encode("ascii")),
c_char_p,
),
_data=cast(create_string_buffer(self.device.uuid.encode("ascii")), c_char_p,),
_size=len(self.device.uuid) + 1,
),
_name=self.name.encode("ascii"),
@ -123,9 +120,7 @@ class Core:
self.cache.read_unlock()
raise OcfError("Failed collecting core stats", status)
status = self.cache.owner.lib.ocf_core_get_info(
self.handle, byref(core_info)
)
status = self.cache.owner.lib.ocf_core_get_info(self.handle, byref(core_info))
if status:
self.cache.read_unlock()
raise OcfError("Failed getting core stats", status)
@ -145,9 +140,7 @@ class Core:
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
self.cache.write_lock()
status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(
self.handle, policy
)
status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(self.handle, policy)
self.cache.write_unlock()
if status:
raise OcfError("Error setting core seq cut off policy", status)
@ -155,9 +148,7 @@ class Core:
def set_seq_cut_off_threshold(self, threshold):
self.cache.write_lock()
status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_threshold(
self.handle, threshold
)
status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_threshold(self.handle, threshold)
self.cache.write_unlock()
if status:
raise OcfError("Error setting core seq cut off policy threshold", status)
@ -175,6 +166,7 @@ class Core:
def reset_stats(self):
self.cache.owner.lib.ocf_core_stats_initialize(self.handle)
lib = OcfLib.getInstance()
lib.ocf_core_get_uuid_wrapper.restype = POINTER(Uuid)
lib.ocf_core_get_uuid_wrapper.argtypes = [c_void_p]


@ -13,6 +13,7 @@ from .shared import OcfError
from ..ocf import OcfLib
from .queue import Queue
class OcfCtxOps(Structure):
_fields_ = [
("data", DataOps),
@ -41,9 +42,7 @@ class OcfCtx:
self.cfg = OcfCtxCfg(
name=name,
ops=OcfCtxOps(
data=self.data.get_ops(),
cleaner=self.cleaner.get_ops(),
logger=logger.get_ops(),
data=self.data.get_ops(), cleaner=self.cleaner.get_ops(), logger=logger.get_ops(),
),
logger_priv=cast(pointer(logger.get_priv()), c_void_p),
)
@ -57,13 +56,7 @@ class OcfCtx:
@classmethod
def with_defaults(cls, logger):
return cls(
OcfLib.getInstance(),
b"PyOCF default ctx",
logger,
Data,
Cleaner,
)
return cls(OcfLib.getInstance(), b"PyOCF default ctx", logger, Data, Cleaner,)
@classmethod
def get_default(cls):
@ -90,9 +83,7 @@ class OcfCtx:
if not vol_type.type_id:
raise Exception("Already unregistered")
self.lib.ocf_ctx_unregister_volume_type(
self.ctx_handle, vol_type.type_id
)
self.lib.ocf_ctx_unregister_volume_type(self.ctx_handle, vol_type.type_id)
del self.volume_types[vol_type.type_id]


@ -161,9 +161,7 @@ class Data:
@staticmethod
@DataOps.COPY
def _copy(dst, src, skip, seek, size):
return Data.get_instance(dst).copy(
Data.get_instance(src), skip, seek, size
)
return Data.get_instance(dst).copy(Data.get_instance(src), skip, seek, size)
@staticmethod
@DataOps.SECURE_ERASE


@ -53,9 +53,7 @@ class Io(Structure):
def from_pointer(cls, ref):
c = cls.from_address(ref)
cls._instances_[ref] = c
OcfLib.getInstance().ocf_io_set_cmpl_wrapper(
byref(c), None, None, c.c_end
)
OcfLib.getInstance().ocf_io_set_cmpl_wrapper(byref(c), None, None, c.c_end)
return c
@classmethod


@ -22,6 +22,7 @@ from ..ocf import OcfLib
logging.basicConfig(level=logging.DEBUG, handlers=[logging.NullHandler()])
class LogLevel(IntEnum):
EMERG = 0
ALERT = 1
@ -123,9 +124,7 @@ class DefaultLogger(Logger):
self.logger = logging.getLogger(name)
ch = logging.StreamHandler()
fmt = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(fmt)
ch.setLevel(LevelMapping[level])
self.logger.addHandler(ch)
@ -140,9 +139,7 @@ class DefaultLogger(Logger):
class FileLogger(Logger):
def __init__(self, f, console_level=None):
super().__init__()
fmt = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh = logging.FileHandler(f)
fh.setLevel(logging.DEBUG)


@ -69,9 +69,7 @@ class OcfCompletion:
class CompletionResult:
def __init__(self, completion_args):
self.completion_args = {
x[0]: i for i, x in enumerate(completion_args)
}
self.completion_args = {x[0]: i for i, x in enumerate(completion_args)}
self.results = None
self.arg_types = [x[1] for x in completion_args]
@ -131,9 +129,7 @@ class SharedOcfObject(Structure):
return cls._instances_[ref]
except: # noqa E722
logging.getLogger("pyocf").error(
"OcfSharedObject corruption. wanted: {} instances: {}".format(
ref, cls._instances_
)
"OcfSharedObject corruption. wanted: {} instances: {}".format(ref, cls._instances_)
)
return None


@ -1,4 +1,3 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause


@ -71,6 +71,7 @@ class VolumeProperties(Structure):
("_ops_", VolumeOps),
]
class VolumeIoPriv(Structure):
_fields_ = [("_data", c_void_p), ("_offset", c_uint64)]
@ -92,18 +93,14 @@ class Volume:
@VolumeOps.SUBMIT_IO
def _submit_io(io):
io_structure = cast(io, POINTER(Io))
volume = Volume.get_instance(
OcfLib.getInstance().ocf_io_get_volume(io_structure)
)
volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
volume.submit_io(io_structure)
@VolumeOps.SUBMIT_FLUSH
def _submit_flush(flush):
io_structure = cast(flush, POINTER(Io))
volume = Volume.get_instance(
OcfLib.getInstance().ocf_io_get_volume(io_structure)
)
volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
volume.submit_flush(io_structure)
@ -114,9 +111,7 @@ class Volume:
@VolumeOps.SUBMIT_DISCARD
def _submit_discard(discard):
io_structure = cast(discard, POINTER(Io))
volume = Volume.get_instance(
OcfLib.getInstance().ocf_io_get_volume(io_structure)
)
volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
volume.submit_discard(io_structure)
@ -126,9 +121,7 @@ class Volume:
@VolumeOps.OPEN
def _open(ref):
uuid_ptr = cast(
OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid)
)
uuid_ptr = cast(OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid))
uuid = str(uuid_ptr.contents._data, encoding="ascii")
try:
volume = Volume.get_by_uuid(uuid)
@ -215,9 +208,7 @@ class Volume:
@staticmethod
@IoOps.SET_DATA
def _io_set_data(io, data, offset):
io_priv = cast(
OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
)
io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
data = Data.get_instance(data)
io_priv.contents._offset = offset
io_priv.contents._data = data.handle
@ -227,17 +218,13 @@ class Volume:
@staticmethod
@IoOps.GET_DATA
def _io_get_data(io):
io_priv = cast(
OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
)
io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
return io_priv.contents._data
def __init__(self, uuid=None):
if uuid:
if uuid in type(self)._uuid_:
raise Exception(
"Volume with uuid {} already created".format(uuid)
)
raise Exception("Volume with uuid {} already created".format(uuid))
self.uuid = uuid
else:
self.uuid = str(id(self))
@ -314,13 +301,7 @@ class Volume:
self._reject_io(io)
def new_io(
self,
queue: Queue,
addr: int,
length: int,
direction: IoDir,
io_class: int,
flags: int,
self, queue: Queue, addr: int, length: int, direction: IoDir, io_class: int, flags: int,
):
lib = OcfLib.getInstance()
io = lib.ocf_volume_new_io(
@ -370,8 +351,7 @@ class RamVolume(Volume):
def do_submit_io(self, io):
try:
io_priv = cast(
OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
offset = io_priv.contents._offset
if io.contents._dir == IoDir.WRITE:
@ -407,12 +387,7 @@ class RamVolume(Volume):
class ErrorDevice(Volume):
def __init__(
self,
vol,
error_sectors: set = None,
error_seq_no: dict = None,
armed=True,
uuid=None,
self, vol, error_sectors: set = None, error_seq_no: dict = None, armed=True, uuid=None,
):
self.vol = vol
super().__init__(uuid)
@ -436,9 +411,7 @@ class ErrorDevice(Volume):
and direction in self.error_seq_no
and self.error_seq_no[direction] <= self.io_seq_no[direction]
)
sector_match = (
self.error_sectors is not None and io.contents._addr in self.error_sectors
)
sector_match = self.error_sectors is not None and io.contents._addr in self.error_sectors
self.io_seq_no[direction] += 1
@ -489,6 +462,7 @@ class ErrorDevice(Volume):
def get_copy(self):
return self.vol.get_copy()
lib = OcfLib.getInstance()
lib.ocf_io_get_priv.restype = POINTER(VolumeIoPriv)
lib.ocf_io_get_volume.argtypes = [c_void_p]


@ -21,12 +21,9 @@ class CacheVolume(ExpObjVolume):
self.open()
def open(self):
return Volume.open(
self.lib.ocf_cache_get_front_volume(self.cache.cache_handle),
self
)
return Volume.open(self.lib.ocf_cache_get_front_volume(self.cache.cache_handle), self)
def md5(self):
out = self.cache.get_conf()
cache_line_size = int(out['cache_line_size'])
cache_line_size = int(out["cache_line_size"])
return self._exp_obj_md5(cache_line_size)


@ -18,10 +18,7 @@ class CoreVolume(ExpObjVolume):
self.open()
def open(self):
return Volume.open(
self.lib.ocf_core_get_front_volume(self.core.handle),
self
)
return Volume.open(self.lib.ocf_core_get_front_volume(self.core.handle), self)
def md5(self):
return self._exp_obj_md5(4096)


@ -22,9 +22,7 @@ class ExpObjVolume(Volume):
def __alloc_io(self, addr, _bytes, _dir, _class, _flags):
vol = self.parent.get_front_volume()
queue = self.parent.get_default_queue() # TODO multiple queues?
return vol.new_io(
queue, addr, _bytes, _dir, _class, _flags
)
return vol.new_io(queue, addr, _bytes, _dir, _class, _flags)
def _alloc_io(self, io):
exp_obj_io = self.__alloc_io(
@ -99,8 +97,7 @@ class ExpObjVolume(Volume):
position = 0
while position < read_buffer_all.size:
io = self.new_io(self.parent.get_default_queue(), position,
read_size, IoDir.READ, 0, 0)
io = self.new_io(self.parent.get_default_queue(), position, read_size, IoDir.READ, 0, 0)
io.set_data(read_buffer)
cmpl = OcfCompletion([("err", c_int)])


@ -7,13 +7,7 @@ from ctypes import string_at
def print_buffer(
buf,
length,
offset=0,
width=16,
ignore=0,
stop_after_count_ignored=0,
print_fcn=print,
buf, length, offset=0, width=16, ignore=0, stop_after_count_ignored=0, print_fcn=print,
):
end = int(offset) + int(length)
offset = int(offset)
@ -27,10 +21,7 @@ def print_buffer(
byteline = ""
asciiline = ""
if not any(x != ignore for x in cur_line):
if (
stop_after_count_ignored
and ignored_lines > stop_after_count_ignored
):
if stop_after_count_ignored and ignored_lines > stop_after_count_ignored:
print_fcn(
"<{} bytes of '0x{:02X}' encountered, stopping>".format(
stop_after_count_ignored * width, ignore
@ -41,11 +32,7 @@ def print_buffer(
continue
if ignored_lines:
print_fcn(
"<{} of '0x{:02X}' bytes omitted>".format(
ignored_lines * width, ignore
)
)
print_fcn("<{} of '0x{:02X}' bytes omitted>".format(ignored_lines * width, ignore))
ignored_lines = 0
for byte in cur_line:
@ -76,10 +63,7 @@ class Size:
def __init__(self, b: int, sector_aligned: bool = False):
if sector_aligned:
self.bytes = int(
((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE)
* self._SECTOR_SIZE
)
self.bytes = int(((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE) * self._SECTOR_SIZE)
else:
self.bytes = int(b)


@ -94,8 +94,9 @@ def test_load_cache_with_cores(pyocf_ctx, open_cores):
vol = CoreVolume(core, open=True)
write_data = Data.from_string("This is test data")
io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B,
write_data.size, IoDir.WRITE, 0, 0)
io = vol.new_io(
cache.get_default_queue(), S.from_sector(3).B, write_data.size, IoDir.WRITE, 0, 0
)
io.set_data(write_data)
cmpl = OcfCompletion([("err", c_int)])
@ -114,8 +115,7 @@ def test_load_cache_with_cores(pyocf_ctx, open_cores):
vol = CoreVolume(core, open=True)
read_data = Data(write_data.size)
io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B,
read_data.size, IoDir.READ, 0, 0)
io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B, read_data.size, IoDir.READ, 0, 0)
io.set_data(read_data)
cmpl = OcfCompletion([("err", c_int)])


@ -18,6 +18,7 @@ from pyocf.types.ctx import OcfCtx
default_registered_volumes = [RamVolume, ErrorDevice, CacheVolume, CoreVolume, ReplicatedVolume]
def pytest_configure(config):
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))


@ -97,6 +97,7 @@ def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
r.abort()
assert r.error_count == 0, "No IO's should fail when turning NHIT policy off"
def fill_cache(cache, fill_ratio):
"""
Helper to fill cache from LBA 0.
@ -126,9 +127,7 @@ def fill_cache(cache, fill_ratio):
@pytest.mark.parametrize("fill_percentage", [0, 1, 50, 99])
@pytest.mark.parametrize("insertion_threshold", [2, 8])
def test_promoted_after_hits_various_thresholds(
pyocf_ctx, insertion_threshold, fill_percentage
):
def test_promoted_after_hits_various_thresholds(pyocf_ctx, insertion_threshold, fill_percentage):
"""
Check promotion policy behavior with various set thresholds
@ -195,8 +194,7 @@ def test_promoted_after_hits_various_thresholds(
cache.settle()
stats = cache.get_stats()
assert (
threshold_reached_occupancy
== stats["usage"]["occupancy"]["value"] - 1
threshold_reached_occupancy == stats["usage"]["occupancy"]["value"] - 1
), "Previous request should be promoted and occupancy should rise"
@ -232,12 +230,8 @@ def test_partial_hit_promotion(pyocf_ctx):
# Step 3
cache.set_promotion_policy(PromotionPolicy.NHIT)
cache.set_promotion_policy_param(
PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0
)
cache.set_promotion_policy_param(
PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100
)
cache.set_promotion_policy_param(PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0)
cache.set_promotion_policy_param(PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100)
# Step 4
req_size = Size(2 * cache_lines.line_size)
@ -245,6 +239,4 @@ def test_partial_hit_promotion(pyocf_ctx):
cache.settle()
stats = cache.get_stats()
assert (
stats["usage"]["occupancy"]["value"] == 2
), "Second cache line should be mapped"
assert stats["usage"]["occupancy"]["value"] == 2, "Second cache line should be mapped"


@ -61,11 +61,7 @@ def sector_to_region(sector, region_start):
def region_end(region_start, region_no, total_sectors):
num_regions = len(region_start)
return (
region_start[region_no + 1] - 1
if region_no < num_regions - 1
else total_sectors - 1
)
return region_start[region_no + 1] - 1 if region_no < num_regions - 1 else total_sectors - 1
class SectorStatus(IntEnum):
@ -281,9 +277,7 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
# add randomly generated sector statuses
for _ in range(ITRATION_COUNT - len(region_statuses)):
region_statuses.append(
[random.choice(list(SectorStatus)) for _ in range(num_regions)]
)
region_statuses.append([random.choice(list(SectorStatus)) for _ in range(num_regions)])
# iterate over generated status combinations and perform the test
for region_state in region_statuses:
@ -302,9 +296,7 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
# randomize cacheline insertion order to exercise different
# paths with regard to cache I/O physical addresses continuousness
random.shuffle(insert_order)
sectors = [
insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)
]
sectors = [insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)]
# insert clean sectors - iterate over cachelines in @insert_order order
cache.change_cache_mode(cache_mode=CacheMode.WT)


@ -112,9 +112,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
""" Verify if overflown pinned ioclass is evicted """
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(100))
cache = Cache.start_on_device(
cache_device, cache_mode=CacheMode.WT, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
@ -124,10 +122,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
pinned_ioclass_max_occupancy = 10
cache.configure_partition(
part_id=test_ioclass_id,
name="default_ioclass",
max_size=100,
priority=1,
part_id=test_ioclass_id, name="default_ioclass", max_size=100, priority=1,
)
cache.configure_partition(
part_id=pinned_ioclass_id,
@ -154,9 +149,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
), "Failed to populate the default partition"
# Repart - force overflow of second partition occupancy limit
pinned_double_size = ceil(
(cache_size.blocks_4k * pinned_ioclass_max_occupancy * 2) / 100
)
pinned_double_size = ceil((cache_size.blocks_4k * pinned_ioclass_max_occupancy * 2) / 100)
for i in range(pinned_double_size):
send_io(core, data, i * 4096, pinned_ioclass_id)
@ -175,21 +168,14 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
cache.get_partition_info(part_id=pinned_ioclass_id)["_curr_size"], cls
)
assert isclose(
part_current_size.blocks_4k,
ceil(cache_size.blocks_4k * 0.1),
abs_tol=Size(cls).blocks_4k,
part_current_size.blocks_4k, ceil(cache_size.blocks_4k * 0.1), abs_tol=Size(cls).blocks_4k,
), "Overflown part has not been evicted"
def send_io(core: Core, data: Data, addr: int = 0, target_ioclass: int = 0):
vol = core.get_front_volume()
io = vol.new_io(
core.cache.get_default_queue(),
addr,
data.size,
IoDir.WRITE,
target_ioclass,
0,
core.cache.get_default_queue(), addr, data.size, IoDir.WRITE, target_ioclass, 0,
)
io.set_data(data)


@ -23,9 +23,7 @@ from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
def test_adding_core(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
# Create core device
core_device = RamVolume(S.from_MiB(10))
@ -48,9 +46,7 @@ def test_adding_core(pyocf_ctx, cache_mode, cls):
def test_removing_core(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
# Create core device
core_device = RamVolume(S.from_MiB(10))
@ -72,9 +68,7 @@ def test_removing_core(pyocf_ctx, cache_mode, cls):
def test_remove_dirty_no_flush(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
# Create core device
core_device = RamVolume(S.from_MiB(10))
@ -133,8 +127,7 @@ def test_10add_remove_with_io(pyocf_ctx):
write_data = Data.from_string("Test data")
io = vol.new_io(
cache.get_default_queue(), S.from_sector(1).B, write_data.size,
IoDir.WRITE, 0, 0
cache.get_default_queue(), S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0
)
io.set_data(write_data)
@ -210,9 +203,7 @@ def test_adding_to_random_cache(pyocf_ctx):
def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
# Create core device
core_device = RamVolume(S.from_MiB(10))
@ -269,9 +260,7 @@ def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
core_devices = []
core_amount = 5
@ -310,8 +299,7 @@ def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
def _io_to_core(vol: Volume, queue: Queue, data: Data):
io = vol.new_io(queue, 0, data.size,
IoDir.WRITE, 0, 0)
io = vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
io.set_data(data)
completion = OcfCompletion([("err", c_int)])
@ -333,9 +321,7 @@ def test_try_add_core_with_changed_size(pyocf_ctx, cache_mode, cls):
"""
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
# Add core to cache
core_device = RamVolume(S.from_MiB(10))
@ -367,9 +353,7 @@ def test_load_with_changed_core_size(pyocf_ctx, cache_mode, cls):
"""
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cache_mode, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
# Add core to cache
core_device = RamVolume(S.from_MiB(10))


@ -37,9 +37,7 @@ logger = logging.getLogger(__name__)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.parametrize("mode", [CacheMode.WB, CacheMode.WT, CacheMode.WO])
@pytest.mark.parametrize("new_cache_size", [80, 120])
def test_attach_different_size(
pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize
):
def test_attach_different_size(pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize):
"""Start cache and add partition with limited occupancy. Fill partition with data,
attach cache with different size and trigger IO. Verify if occupancy threshold is
respected with both original and new cache device.
@ -53,9 +51,7 @@ def test_attach_different_size(
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()
cache.configure_partition(
part_id=1, name="test_part", max_size=50, priority=1
)
cache.configure_partition(part_id=1, name="test_part", max_size=50, priority=1)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
@ -67,9 +63,7 @@ def test_attach_different_size(
for i in range(cache_size.blocks_4k):
io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
part_current_size = CacheLines(
cache.get_partition_info(part_id=1)["_curr_size"], cls
)
part_current_size = CacheLines(cache.get_partition_info(part_id=1)["_curr_size"], cls)
assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
@ -82,9 +76,7 @@ def test_attach_different_size(
for i in range(cache_size.blocks_4k):
io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
part_current_size = CacheLines(
cache.get_partition_info(part_id=1)["_curr_size"], cls
)
part_current_size = CacheLines(cache.get_partition_info(part_id=1)["_curr_size"], cls)
assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5


@ -18,9 +18,7 @@ from pyocf.types.shared import CacheLineSize
def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=from_cm, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=from_cm, cache_line_size=cls)
# Change cache mode and check if stats are as expected
cache.change_cache_mode(to_cm)
@ -33,9 +31,7 @@ def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
def test_change_cleaning_policy(pyocf_ctx, cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
# Check all possible cleaning policy switches
for cp_from in CleaningPolicy:
@ -58,9 +54,7 @@ def test_change_cleaning_policy(pyocf_ctx, cm, cls):
def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
# Create 2 core devices
core_device1 = RamVolume(S.from_MiB(10))
@ -97,9 +91,7 @@ def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
def test_core_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
# Create 2 core devices
core_device1 = RamVolume(S.from_MiB(10))


@ -331,9 +331,7 @@ def test_failover_passive_first(pyocf_2_ctx):
cache1_cache_vol = ReplicatedVolume(prim_cache_backend_vol, cache2_exp_obj_vol)
# active cache
cache1 = Cache.start_on_device(
cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls
)
cache1 = Cache.start_on_device(cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls)
core = Core(core_backend_vol)
cache1.add_core(core)
core_vol = CoreVolume(core, open=True)
@ -550,9 +548,7 @@ def test_failover_passive_first(pyocf_2_ctx):
cache1_cache_vol = ReplicatedVolume(prim_cache_backend_vol, cache2_exp_obj_vol)
# active cache
cache1 = Cache.start_on_device(
cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls
)
cache1 = Cache.start_on_device(cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls)
core = Core(core_backend_vol)
cache1.add_core(core)
core_vol = CoreVolume(core, open=True)


@ -18,7 +18,7 @@ from pyocf.types.cache import (
CleaningPolicy,
CacheConfig,
PromotionPolicy,
Backfill
Backfill,
)
from pyocf.types.core import Core
from pyocf.types.ctx import OcfCtx
@ -148,7 +148,7 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
If possible check whether cache reports properly set parameters.
"""
cache_device = RamVolume(Size.from_MiB(50))
queue_size = randrange(60000, 2**32)
queue_size = randrange(60000, 2 ** 32)
unblock_size = randrange(1, queue_size)
volatile_metadata = randrange(2) == 1
unaligned_io = randrange(2) == 1
@ -165,7 +165,8 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
max_queue_size=queue_size,
queue_unblock_size=unblock_size,
pt_unaligned_io=unaligned_io,
use_submit_fast=submit_fast)
use_submit_fast=submit_fast,
)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
@ -198,8 +199,9 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
run_io_and_cache_data_if_possible(core, mode, cls, cls_no)
stats = cache.get_stats()
assert int(stats["conf"]["dirty"]) == (cls_no if mode.lazy_write() else 0),\
"Dirty data before MD5"
assert int(stats["conf"]["dirty"]) == (
cls_no if mode.lazy_write() else 0
), "Dirty data before MD5"
md5_exported_core = front_vol.md5()
@ -208,11 +210,13 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
cache.stop()
if mode.lazy_write() and not with_flush:
assert core_device.md5() != md5_exported_core, \
"MD5 check: core device vs exported object with dirty data"
assert (
core_device.md5() != md5_exported_core
), "MD5 check: core device vs exported object with dirty data"
else:
assert core_device.md5() == md5_exported_core, \
"MD5 check: core device vs exported object with clean data"
assert (
core_device.md5() == md5_exported_core
), "MD5 check: core device vs exported object with clean data"
def test_start_stop_multiple(pyocf_ctx):
@ -226,14 +230,12 @@ def test_start_stop_multiple(pyocf_ctx):
cache_device = RamVolume(Size.from_MiB(50))
cache_name = f"cache{i}"
cache_mode = CacheMode(randrange(0, len(CacheMode)))
size = 4096 * 2**randrange(0, len(CacheLineSize))
size = 4096 * 2 ** randrange(0, len(CacheLineSize))
cache_line_size = CacheLineSize(size)
cache = Cache.start_on_device(
cache_device,
name=cache_name,
cache_mode=cache_mode,
cache_line_size=cache_line_size)
cache_device, name=cache_name, cache_mode=cache_mode, cache_line_size=cache_line_size
)
caches.append(cache)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
@ -258,14 +260,12 @@ def test_100_start_stop(pyocf_ctx):
cache_device = RamVolume(Size.from_MiB(50))
cache_name = f"cache{i}"
cache_mode = CacheMode(randrange(0, len(CacheMode)))
size = 4096 * 2**randrange(0, len(CacheLineSize))
size = 4096 * 2 ** randrange(0, len(CacheLineSize))
cache_line_size = CacheLineSize(size)
cache = Cache.start_on_device(
cache_device,
name=cache_name,
cache_mode=cache_mode,
cache_line_size=cache_line_size)
cache_device, name=cache_name, cache_mode=cache_mode, cache_line_size=cache_line_size
)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
@ -293,14 +293,15 @@ def test_start_stop_incrementally(pyocf_ctx):
cache_device = RamVolume(Size.from_MiB(50))
cache_name = f"cache{next(counter)}"
cache_mode = CacheMode(randrange(0, len(CacheMode)))
size = 4096 * 2**randrange(0, len(CacheLineSize))
size = 4096 * 2 ** randrange(0, len(CacheLineSize))
cache_line_size = CacheLineSize(size)
cache = Cache.start_on_device(
cache_device,
name=cache_name,
cache_mode=cache_mode,
cache_line_size=cache_line_size)
cache_line_size=cache_line_size,
)
caches.append(cache)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
@ -318,8 +319,9 @@ def test_start_stop_incrementally(pyocf_ctx):
stats = cache.get_stats()
cache_name = stats["conf"]["cache_name"]
cache.stop()
assert get_cache_by_name(pyocf_ctx, cache_name) != 0, \
"Try getting cache after stopping it"
assert (
get_cache_by_name(pyocf_ctx, cache_name) != 0
), "Try getting cache after stopping it"
add = not add
@ -333,17 +335,15 @@ def test_start_cache_same_id(pyocf_ctx, mode, cls):
cache_device1 = RamVolume(Size.from_MiB(50))
cache_device2 = RamVolume(Size.from_MiB(50))
cache_name = "cache"
cache = Cache.start_on_device(cache_device1,
cache_mode=mode,
cache_line_size=cls,
name=cache_name)
cache = Cache.start_on_device(
cache_device1, cache_mode=mode, cache_line_size=cls, name=cache_name
)
cache.get_stats()
with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
cache = Cache.start_on_device(cache_device2,
cache_mode=mode,
cache_line_size=cls,
name=cache_name)
cache = Cache.start_on_device(
cache_device2, cache_mode=mode, cache_line_size=cls, name=cache_name
)
cache.get_stats()
@ -354,6 +354,7 @@ def test_start_cache_huge_device(pyocf_ctx_log_buffer, cls):
pass_criteria:
- Starting cache on device too big to handle should fail
"""
class HugeDevice(Volume):
def get_length(self):
return Size.from_B((cls * c_uint32(-1).value))
@ -373,7 +374,6 @@ def test_start_cache_huge_device(pyocf_ctx_log_buffer, cls):
), "Expected to find log notifying that max size was exceeded"
@pytest.mark.parametrize("mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_start_cache_same_device(pyocf_ctx, mode, cls):
@ -382,9 +382,7 @@ def test_start_cache_same_device(pyocf_ctx, mode, cls):
"""
cache_device = RamVolume(Size.from_MiB(50))
cache = Cache.start_on_device(
cache_device, cache_mode=mode, cache_line_size=cls, name="cache1"
)
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls, name="cache1")
cache.get_stats()
with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
@ -420,9 +418,7 @@ def test_start_stop_noqueue(pyocf_ctx):
assert not status, "Failed to start cache: {}".format(status)
# stop without creating mngmt queue
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
)
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
c.wait()
assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])
@ -445,8 +441,9 @@ def run_io_and_cache_data_if_possible(core, mode, cls, cls_no):
io_to_core(front_vol, queue, test_data, 0)
stats = core.cache.get_stats()
assert stats["usage"]["occupancy"]["value"] == \
((cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0), "Occupancy"
assert stats["usage"]["occupancy"]["value"] == (
(cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0
), "Occupancy"
def io_to_core(vol: Volume, queue: Queue, data: Data, offset: int):
@ -479,13 +476,16 @@ def check_stats_read_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
core.cache.settle()
stats = core.cache.get_stats()
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
assert core.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
"Writes to cache device"
assert core.cache.device.get_stats()[IoDir.WRITE] == (
1 if mode.read_insert() else 0
), "Writes to cache device"
assert core.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
"Read full misses"
assert stats["usage"]["occupancy"]["value"] == \
((cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0), "Occupancy"
assert stats["req"]["rd_full_misses"]["value"] == (
0 if mode == CacheMode.PT else 1
), "Read full misses"
assert stats["usage"]["occupancy"]["value"] == (
(cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0
), "Occupancy"
def check_stats_write_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
@ -493,75 +493,89 @@ def check_stats_write_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
stats = core.cache.get_stats()
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
# TODO(ajrutkow): why 1 for WT ??
assert core.cache.device.get_stats()[IoDir.WRITE] == \
(2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)), \
"Writes to cache device"
assert core.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
"Writes to core device"
assert stats["req"]["wr_full_misses"]["value"] == (1 if mode.write_insert() else 0), \
"Write full misses"
assert stats["usage"]["occupancy"]["value"] == \
((cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0), \
"Occupancy"
assert core.cache.device.get_stats()[IoDir.WRITE] == (
2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)
), "Writes to cache device"
assert core.device.get_stats()[IoDir.WRITE] == (
0 if mode.lazy_write() else 1
), "Writes to core device"
assert stats["req"]["wr_full_misses"]["value"] == (
1 if mode.write_insert() else 0
), "Write full misses"
assert stats["usage"]["occupancy"]["value"] == (
(cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0
), "Occupancy"
def check_stats_write_after_read(core: Core,
mode: CacheMode,
cls: CacheLineSize,
read_from_empty=False):
def check_stats_write_after_read(
core: Core, mode: CacheMode, cls: CacheLineSize, read_from_empty=False
):
core.cache.settle()
stats = core.cache.get_stats()
assert core.cache.device.get_stats()[IoDir.WRITE] == \
(0 if mode in {CacheMode.WI, CacheMode.PT} else
(2 if read_from_empty and mode.lazy_write() else 1)), \
"Writes to cache device"
assert core.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
"Writes to core device"
assert stats["req"]["wr_hits"]["value"] == \
(1 if (mode.read_insert() and mode != CacheMode.WI)
or (mode.write_insert() and not read_from_empty) else 0), \
"Write hits"
assert stats["usage"]["occupancy"]["value"] == \
(0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
"Occupancy"
assert core.cache.device.get_stats()[IoDir.WRITE] == (
0
if mode in {CacheMode.WI, CacheMode.PT}
else (2 if read_from_empty and mode.lazy_write() else 1)
), "Writes to cache device"
assert core.device.get_stats()[IoDir.WRITE] == (
0 if mode.lazy_write() else 1
), "Writes to core device"
assert stats["req"]["wr_hits"]["value"] == (
1
if (mode.read_insert() and mode != CacheMode.WI)
or (mode.write_insert() and not read_from_empty)
else 0
), "Write hits"
assert stats["usage"]["occupancy"]["value"] == (
0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)
), "Occupancy"
def check_stats_read_after_write(core, mode, cls, write_to_empty=False):
core.cache.settle()
stats = core.cache.get_stats()
assert core.cache.device.get_stats()[IoDir.WRITE] == \
(2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
"Writes to cache device"
assert core.cache.device.get_stats()[IoDir.READ] == \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty) else 0), \
"Reads from cache device"
assert core.device.get_stats()[IoDir.READ] == \
(0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
or (mode == CacheMode.WA and not write_to_empty) else 1), \
"Reads from core device"
assert stats["req"]["rd_full_misses"]["value"] == \
(1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
+ (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
"Read full misses"
assert stats["req"]["rd_hits"]["value"] == \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty) else 0), \
"Read hits"
assert stats["usage"]["occupancy"]["value"] == \
(0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
assert core.cache.device.get_stats()[IoDir.WRITE] == (
2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)
), "Writes to cache device"
assert core.cache.device.get_stats()[IoDir.READ] == (
1
if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty)
else 0
), "Reads from cache device"
assert core.device.get_stats()[IoDir.READ] == (
0
if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
or (mode == CacheMode.WA and not write_to_empty)
else 1
), "Reads from core device"
assert stats["req"]["rd_full_misses"]["value"] == (
1 if mode in {CacheMode.WA, CacheMode.WI} else 0
) + (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), "Read full misses"
assert stats["req"]["rd_hits"]["value"] == (
1
if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty)
else 0
), "Read hits"
assert stats["usage"]["occupancy"]["value"] == (
0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)
), "Occupancy"
def check_md5_sums(core: Core, mode: CacheMode):
if mode.lazy_write():
assert core.device.md5() != core.get_front_volume().md5(), \
"MD5 check: core device vs exported object without flush"
assert (
core.device.md5() != core.get_front_volume().md5()
), "MD5 check: core device vs exported object without flush"
core.cache.flush()
assert core.device.md5() == core.get_front_volume().md5(), \
"MD5 check: core device vs exported object after flush"
assert (
core.device.md5() == core.get_front_volume().md5()
), "MD5 check: core device vs exported object after flush"
else:
assert core.device.md5() == core.get_front_volume().md5(), \
"MD5 check: core device vs exported object"
assert (
core.device.md5() == core.get_front_volume().md5()
), "MD5 check: core device vs exported object"
def get_cache_by_name(ctx, cache_name):


@ -5,12 +5,7 @@
import os
import sys
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_int
)
from ctypes import c_uint64, c_uint32, c_uint16, c_int
from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
from pyocf.types.cache import CacheMode, MetadataLayout, PromotionPolicy
@ -63,9 +58,7 @@ def string_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode))
)
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode)))
def not_cache_mode_randomize(request):
return request.param


@ -199,9 +199,7 @@ def test_neg_core_set_seq_cut_off_promotion(pyocf_ctx, cm, cls):
for i in RandomGenerator(DefaultRanges.UINT32):
if i in ConfValidValues.seq_cutoff_promotion_range:
continue
with pytest.raises(
OcfError, match="Error setting core seq cut off policy promotion count"
):
with pytest.raises(OcfError, match="Error setting core seq cut off policy promotion count"):
core1.set_seq_cut_off_promotion(i)
print(f"\n{i}")
@ -235,9 +233,7 @@ def test_neg_cache_set_seq_cut_off_threshold(pyocf_ctx, cm, cls):
for i in RandomGenerator(DefaultRanges.UINT32):
if i in ConfValidValues.seq_cutoff_threshold_rage:
continue
with pytest.raises(
OcfError, match="Error setting cache seq cut off policy threshold"
):
with pytest.raises(OcfError, match="Error setting cache seq cut off policy threshold"):
cache.set_seq_cut_off_threshold(i)
print(f"\n{i}")
@ -268,9 +264,7 @@ def test_neg_core_set_seq_cut_off_threshold(pyocf_ctx, cm, cls):
for i in RandomGenerator(DefaultRanges.UINT32):
if i in ConfValidValues.seq_cutoff_threshold_rage:
continue
with pytest.raises(
OcfError, match="Error setting core seq cut off policy threshold"
):
with pytest.raises(OcfError, match="Error setting core seq cut off policy threshold"):
core.set_seq_cut_off_threshold(i)
print(f"\n{i}")
@ -468,10 +462,7 @@ def test_neg_set_nhit_promotion_policy_param(pyocf_ctx, cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device,
cache_mode=cm,
cache_line_size=cls,
promotion_policy=PromotionPolicy.NHIT,
cache_device, cache_mode=cm, cache_line_size=cls, promotion_policy=PromotionPolicy.NHIT,
)
# Set invalid promotion policy param id and check if failed
@ -498,10 +489,7 @@ def test_neg_set_nhit_promotion_policy_param_trigger(pyocf_ctx, cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device,
cache_mode=cm,
cache_line_size=cls,
promotion_policy=PromotionPolicy.NHIT,
cache_device, cache_mode=cm, cache_line_size=cls, promotion_policy=PromotionPolicy.NHIT,
)
# Set to invalid promotion policy trigger threshold and check if failed
@ -509,9 +497,7 @@ def test_neg_set_nhit_promotion_policy_param_trigger(pyocf_ctx, cm, cls):
if i in ConfValidValues.promotion_nhit_trigger_threshold_range:
continue
with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
cache.set_promotion_policy_param(
PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, i
)
cache.set_promotion_policy_param(PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, i)
print(f"\n{i}")
@ -530,10 +516,7 @@ def test_neg_set_nhit_promotion_policy_param_threshold(pyocf_ctx, cm, cls):
# Start cache device
cache_device = RamVolume(S.from_MiB(50))
cache = Cache.start_on_device(
cache_device,
cache_mode=cm,
cache_line_size=cls,
promotion_policy=PromotionPolicy.NHIT,
cache_device, cache_mode=cm, cache_line_size=cls, promotion_policy=PromotionPolicy.NHIT,
)
# Set to invalid promotion policy insertion threshold and check if failed
@ -568,11 +551,7 @@ def test_neg_set_ioclass_max_size(pyocf_ctx, cm, cls):
continue
with pytest.raises(OcfError, match="Error adding partition to cache"):
cache.configure_partition(
part_id=1,
name="unclassified",
max_size=i,
priority=0,
cache_mode=CACHE_MODE_NONE,
part_id=1, name="unclassified", max_size=i, priority=0, cache_mode=CACHE_MODE_NONE,
)
print(f"\n{i}")


@ -21,6 +21,7 @@ def try_start_cache(**config):
cache = Cache.start_on_device(cache_device, **config)
cache.stop()
@pytest.mark.security
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_cache_mode(pyocf_ctx, cls, not_cache_mode_randomize):
@ -59,14 +60,16 @@ def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
:param cls: cache line size value to start cache with
"""
cache_device = RamVolume(Size.from_MiB(50))
incorrect_values = ['']
incorrect_values = [""]
try:
cache = Cache.start_on_device(cache_device, name=string_randomize, cache_mode=cm,
cache_line_size=cls)
cache = Cache.start_on_device(
cache_device, name=string_randomize, cache_mode=cm, cache_line_size=cls
)
except OcfError:
if string_randomize not in incorrect_values:
logger.error(
f"Cache did not start properly with correct name value: '{string_randomize}'")
f"Cache did not start properly with correct name value: '{string_randomize}'"
)
return
if string_randomize in incorrect_values:
logger.error(f"Cache started with incorrect name value: '{string_randomize}'")
@ -75,7 +78,7 @@ def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
@pytest.mark.security
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.parametrize('max_wb_queue_size', RandomGenerator(DefaultRanges.UINT32, 10))
@pytest.mark.parametrize("max_wb_queue_size", RandomGenerator(DefaultRanges.UINT32, 10))
def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_randomize, cls):
"""
Test whether it is impossible to start cache with invalid dependence between max queue size
@ -91,11 +94,14 @@ def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_rando
max_queue_size=max_wb_queue_size,
queue_unblock_size=c_uint32_randomize,
cache_mode=CacheMode.WB,
cache_line_size=cls)
cache_line_size=cls,
)
else:
logger.warning(f"Test skipped for valid values: "
f"'max_queue_size={max_wb_queue_size}, "
f"queue_unblock_size={c_uint32_randomize}'.")
logger.warning(
f"Test skipped for valid values: "
f"'max_queue_size={max_wb_queue_size}, "
f"queue_unblock_size={c_uint32_randomize}'."
)
@pytest.mark.security
@ -111,7 +117,5 @@ def test_fuzzy_start_promotion_policy(pyocf_ctx, not_promotion_policy_randomize,
"""
with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
try_start_cache(
cache_mode=cm,
cache_line_size=cls,
promotion_policy=not_promotion_policy_randomize
cache_mode=cm, cache_line_size=cls, promotion_policy=not_promotion_policy_randomize
)


@ -132,8 +132,7 @@ def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
data = Data(int(Size.from_KiB(1)))
if c_int_randomize % 512 != 0:
with pytest.raises(Exception):
vol.new_io(queue, c_int_randomize, data.size,
IoDir.WRITE, 0, 0)
vol.new_io(queue, c_int_randomize, data.size, IoDir.WRITE, 0, 0)
@pytest.mark.security
@ -147,8 +146,7 @@ def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
data = Data(int(Size.from_B(c_uint16_randomize)))
if c_uint16_randomize % 512 != 0:
with pytest.raises(Exception):
vol.new_io(queue, 0, data.size,
IoDir.WRITE, 0, 0)
vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
@pytest.mark.security
@ -200,12 +198,7 @@ def prepare_cache_and_core(core_size: Size, cache_size: Size = Size.from_MiB(50)
def io_operation(
vol: Volume,
queue: Queue,
data: Data,
io_direction: int,
offset: int = 0,
io_class: int = 0,
vol: Volume, queue: Queue, data: Data, io_direction: int, offset: int = 0, io_class: int = 0,
):
io = vol.new_io(queue, offset, data.size, io_direction, io_class, 0)
io.set_data(data)


@ -59,9 +59,7 @@ class DataCopyTracer(Data):
@pytest.mark.security
@pytest.mark.parametrize(
"cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI]
)
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI])
def test_secure_erase_simple_io_read_misses(cache_mode):
"""
Perform simple IO which will trigger read misses, which in turn should
@ -88,14 +86,7 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
queue = cache.get_default_queue()
write_data = DataCopyTracer(S.from_sector(1))
io = vol.new_io(
queue,
S.from_sector(1).B,
write_data.size,
IoDir.WRITE,
0,
0,
)
io = vol.new_io(queue, S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0,)
io.set_data(write_data)
cmpl = OcfCompletion([("err", c_int)])
@ -106,14 +97,7 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
cmpls = []
for i in range(100):
read_data = DataCopyTracer(S.from_sector(1))
io = vol.new_io(
queue,
i * S.from_sector(1).B,
read_data.size,
IoDir.READ,
0,
0,
)
io = vol.new_io(queue, i * S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0,)
io.set_data(read_data)
cmpl = OcfCompletion([("err", c_int)])
@ -137,17 +121,13 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
ctx.exit()
assert len(DataCopyTracer.needs_erase) == 0, "Not all locked Data instances were secure erased!"
assert len(DataCopyTracer.locked_instances) == 0, "Not all locked Data instances were unlocked!"
assert (
len(DataCopyTracer.needs_erase) == 0
), "Not all locked Data instances were secure erased!"
assert (
len(DataCopyTracer.locked_instances) == 0
), "Not all locked Data instances were unlocked!"
assert (
stats["req"]["rd_partial_misses"]["value"]
+ stats["req"]["rd_full_misses"]["value"]
stats["req"]["rd_partial_misses"]["value"] + stats["req"]["rd_full_misses"]["value"]
) > 0
@pytest.mark.security
def test_secure_erase_simple_io_cleaning():
"""
@ -201,10 +181,6 @@ def test_secure_erase_simple_io_cleaning():
ctx.exit()
assert (
len(DataCopyTracer.needs_erase) == 0
), "Not all locked Data instances were secure erased!"
assert (
len(DataCopyTracer.locked_instances) == 0
), "Not all locked Data instances were unlocked!"
assert len(DataCopyTracer.needs_erase) == 0, "Not all locked Data instances were secure erased!"
assert len(DataCopyTracer.locked_instances) == 0, "Not all locked Data instances were unlocked!"
assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"


@ -101,9 +101,7 @@ def mngmt_op_surprise_shutdown_test(
pyocf_2_ctx, cache_backend_vol, error_io_seq_no
)
else:
cache, err_vol = prepare_normal(
pyocf_2_ctx, cache_backend_vol, error_io_seq_no
)
cache, err_vol = prepare_normal(pyocf_2_ctx, cache_backend_vol, error_io_seq_no)
if prepare_func:
prepare_func(cache)
@ -125,10 +123,7 @@ def mngmt_op_surprise_shutdown_test(
error_triggered = err_vol.error_triggered()
assert error_triggered == (status != 0)
if error_triggered:
assert (
status == OcfErrorCode.OCF_ERR_WRITE_CACHE
or status == OcfErrorCode.OCF_ERR_IO
)
assert status == OcfErrorCode.OCF_ERR_WRITE_CACHE or status == OcfErrorCode.OCF_ERR_IO
# stop cache with error injection still on
with pytest.raises(OcfError) as ex:
@ -174,9 +169,7 @@ def test_surprise_shutdown_add_core(pyocf_2_ctx, failover):
def check_func(cache, error_triggered):
check_core(cache, error_triggered)
mngmt_op_surprise_shutdown_test(
pyocf_2_ctx, failover, tested_func, None, check_func
)
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, None, check_func)
# power failure during core removal
@ -197,9 +190,7 @@ def test_surprise_shutdown_remove_core(pyocf_2_ctx, failover):
stats = cache.get_stats()
assert stats["conf"]["core_count"] == (1 if error_triggered else 0)
mngmt_op_surprise_shutdown_test(
pyocf_2_ctx, failover, tested_func, prepare_func, check_func
)
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare_func, check_func)
@pytest.mark.security
@ -228,9 +219,7 @@ def test_surprise_shutdown_remove_core_with_data(pyocf_2_ctx, failover):
vol = CoreVolume(core, open=True)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA
mngmt_op_surprise_shutdown_test(
pyocf_2_ctx, failover, tested_func, prepare_func, check_func
)
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare_func, check_func)
# power failure during core add after previous core removed
@ -266,9 +255,7 @@ def test_surprise_shutdown_swap_core(pyocf_2_ctx, failover):
core2 = cache.get_core_by_name("core2")
assert core2.device.uuid == "dev2"
mngmt_op_surprise_shutdown_test(
pyocf_2_ctx, failover, tested_func, prepare, check_func
)
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare, check_func)
# power failure during core add after previous core removed
@ -286,10 +273,7 @@ def test_surprise_shutdown_swap_core_with_data(pyocf_2_ctx, failover):
vol = CoreVolume(core1, open=True)
cache.save()
ocf_write(
vol,
cache.get_default_queue(),
0xAA,
mngmt_op_surprise_shutdown_test_io_offset,
vol, cache.get_default_queue(), 0xAA, mngmt_op_surprise_shutdown_test_io_offset,
)
cache.remove_core(core1)
cache.save()
@ -316,16 +300,12 @@ def test_surprise_shutdown_swap_core_with_data(pyocf_2_ctx, failover):
assert core2.device.uuid == "dev2"
assert (
ocf_read(
vol2,
cache.get_default_queue(),
mngmt_op_surprise_shutdown_test_io_offset,
vol2, cache.get_default_queue(), mngmt_op_surprise_shutdown_test_io_offset,
)
== VOLUME_POISON
)
mngmt_op_surprise_shutdown_test(
pyocf_2_ctx, failover, tested_func, prepare, check_func
)
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare, check_func)
# make sure there are no crashes when cache start is interrupted
@ -350,9 +330,7 @@ def test_surprise_shutdown_start_cache(pyocf_2_ctx, failover):
cache2.start_cache()
cache2.standby_attach(ramdisk)
cache2_exp_obj_vol = CacheVolume(cache2, open=True)
err_device = ErrorDevice(
cache2_exp_obj_vol, error_seq_no=error_io, armed=True
)
err_device = ErrorDevice(cache2_exp_obj_vol, error_seq_no=error_io, armed=True)
else:
err_device = ErrorDevice(ramdisk, error_seq_no=error_io, armed=True)
@ -415,9 +393,7 @@ def test_surprise_shutdown_stop_cache(pyocf_2_ctx, failover):
ramdisk = RamVolume(mngmt_op_surprise_shutdown_test_cache_size)
if failover:
cache, cache2, device = prepare_failover(
pyocf_2_ctx, ramdisk, error_io_seq_no
)
cache, cache2, device = prepare_failover(pyocf_2_ctx, ramdisk, error_io_seq_no)
else:
cache, device = prepare_normal(pyocf_2_ctx, ramdisk, error_io_seq_no)
@ -486,9 +462,7 @@ def test_surprise_shutdown_cache_reinit(pyocf_2_ctx, failover):
ramdisk = RamVolume(mngmt_op_surprise_shutdown_test_cache_size)
if failover:
cache, cache2, device = prepare_failover(
pyocf_2_ctx, ramdisk, error_io_seq_no
)
cache, cache2, device = prepare_failover(pyocf_2_ctx, ramdisk, error_io_seq_no)
else:
cache, device = prepare_normal(pyocf_2_ctx, ramdisk, error_io_seq_no)
@ -553,9 +527,7 @@ def test_surprise_shutdown_cache_reinit(pyocf_2_ctx, failover):
assert stats["usage"]["occupancy"]["value"] == 0
cache.add_core(core)
vol = CoreVolume(core, open=True)
assert (
ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
cache.stop()
cache = None
@ -591,9 +563,7 @@ def test_surprise_shutdown_change_cache_mode(pyocf_2_ctx, failover):
@pytest.mark.parametrize("failover", [False, True])
@pytest.mark.parametrize("start_clp", CleaningPolicy)
@pytest.mark.parametrize("end_clp", CleaningPolicy)
def test_surprise_shutdown_set_cleaning_policy(
pyocf_2_ctx, failover, start_clp, end_clp
):
def test_surprise_shutdown_set_cleaning_policy(pyocf_2_ctx, failover, start_clp, end_clp):
core_device = RamVolume(S.from_MiB(10))
core = Core(device=core_device)
@ -644,9 +614,7 @@ def test_surprise_shutdown_set_seq_cut_off_promotion(pyocf_2_ctx, failover):
@pytest.mark.parametrize("failover", [False, True])
def test_surprise_shutdown_set_seq_cut_off_threshold(pyocf_2_ctx, failover):
_test_surprise_shutdown_mngmt_generic(
pyocf_2_ctx,
failover,
lambda cache, core: cache.set_seq_cut_off_threshold(S.from_MiB(2).B),
pyocf_2_ctx, failover, lambda cache, core: cache.set_seq_cut_off_threshold(S.from_MiB(2).B),
)
@ -706,9 +674,7 @@ def test_surprise_shutdown_set_cleaning_policy_param(pyocf_2_ctx, failover, clp)
@pytest.mark.parametrize("failover", [False, True])
@pytest.mark.parametrize("start_pp", PromotionPolicy)
@pytest.mark.parametrize("end_pp", PromotionPolicy)
def test_surprise_shutdown_set_promotion_policy(
pyocf_2_ctx, failover, start_pp, end_pp
):
def test_surprise_shutdown_set_promotion_policy(pyocf_2_ctx, failover, start_pp, end_pp):
core_device = RamVolume(S.from_MiB(10))
core = Core(device=core_device)
@ -801,9 +767,7 @@ def test_surprise_shutdown_set_io_class_config(pyocf_2_ctx, failover):
ioclasses_info._config[i]._priority = desc[i]["_priority"]
ioclasses_info._config[i]._cache_mode = desc[i]["_cache_mode"]
ioclasses_info._config[i]._max_size = desc[i]["_max_size"]
OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(
cache, byref(ioclasses_info)
)
OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(cache, byref(ioclasses_info))
def prepare(cache):
cache.add_core(core)
@ -1041,9 +1005,7 @@ def test_surprise_shutdown_standby_init_force_1(pyocf_ctx):
core = Core(device=core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
assert (
ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
cache.stop()
@ -1128,9 +1090,7 @@ def test_surprise_shutdown_standby_init_force_2(pyocf_ctx):
core = Core(device=core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
assert (
ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
if cache:
cache.stop()
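All of the cases above funnel through mngmt_op_surprise_shutdown_test, which takes the management operation under test plus optional prepare and check callbacks. The shape below is inferred from the call sites in these hunks (prepare_func may be None; pyocf_2_ctx, failover and the helper itself come from this test module), so treat the exact signatures and module paths as assumptions rather than a definitive recipe:

import pytest

from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.utils import Size as S


@pytest.mark.security
@pytest.mark.parametrize("failover", [False, True])
def test_surprise_shutdown_example(pyocf_2_ctx, failover):
    core_device = RamVolume(S.from_MiB(10))
    core = Core(device=core_device)

    def prepare_func(cache):
        # Setup executed before error injection is armed.
        cache.add_core(core)

    def tested_func(cache):
        # The management operation interrupted by the simulated power failure.
        cache.set_seq_cut_off_threshold(S.from_MiB(2).B)

    def check_func(cache, error_triggered):
        # State must be readable and consistent whether or not the error fired.
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] in (0, 1)

    mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare_func, check_func)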

View File

@ -7,14 +7,7 @@ import random
import string
import enum
from functools import reduce
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_uint8,
c_int,
c_uint
)
from ctypes import c_uint64, c_uint32, c_uint16, c_uint8, c_int, c_uint
class Range:
@ -75,17 +68,20 @@ class RandomStringGenerator:
def __string_generator(self, len_range, extra_chars):
while True:
for t in [string.digits,
string.ascii_letters + string.digits,
string.ascii_lowercase,
string.ascii_uppercase,
string.printable,
string.punctuation,
string.hexdigits,
*extra_chars]:
yield ''.join(random.choice(t) for _ in range(
self.random.randint(len_range.min, len_range.max)
))
for t in [
string.digits,
string.ascii_letters + string.digits,
string.ascii_lowercase,
string.ascii_uppercase,
string.printable,
string.punctuation,
string.hexdigits,
*extra_chars,
]:
yield "".join(
random.choice(t)
for _ in range(self.random.randint(len_range.min, len_range.max))
)
def __iter__(self):
return self
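For readers without the surrounding class, an equivalent standalone sketch of the reformatted generator above (same cycling character classes, random length per yield; the function name and parameters here are illustrative only):

import random
import string


def random_strings(min_len, max_len, extra_chars=()):
    # Cycle through several character classes, yielding one random-length
    # string from each class per iteration, forever.
    charsets = [
        string.digits,
        string.ascii_letters + string.digits,
        string.ascii_lowercase,
        string.ascii_uppercase,
        string.printable,
        string.punctuation,
        string.hexdigits,
        *extra_chars,
    ]
    while True:
        for charset in charsets:
            length = random.randint(min_len, max_len)
            yield "".join(random.choice(charset) for _ in range(length))


# Example: take three strings of length 1..8.
gen = random_strings(1, 8)
samples = [next(gen) for _ in range(3)]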