Merge pull request #712 from arutk/black

pyocf: format all .py files with black -l 100

commit 9646df431f
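The change is mechanical: every pyocf .py file was rewritten by black with a 100-character line limit, so call argument lists that were only wrapped to satisfy the default 88-column limit are joined back onto one line, missing trailing commas are added, and operator spacing is normalized. Below is a minimal sketch of the same transformation applied programmatically; the snippet and its sample source string are illustrative assumptions, not part of this commit.

# Illustrative only: reformat one source fragment the way `black -l 100` would.
import black

src = (
    "cache = Cache.start_on_device(\n"
    "    cache_device, cache_mode=cache_mode, cache_line_size=cls\n"
    ")\n"
)

# black.Mode(line_length=100) mirrors the CLI flag `-l 100`; at the default 88
# columns this call stays wrapped, at 100 columns black joins it onto one line.
print(black.format_str(src, mode=black.Mode(line_length=100)))
# -> cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)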
@@ -14,10 +14,7 @@ class OcfLib:
             lib = cdll.LoadLibrary(
-                os.path.join(
-                    os.path.dirname(inspect.getfile(inspect.currentframe())),
-                    "libocf.so",
-                )
+                os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), "libocf.so",)
             )
@@ -154,12 +154,7 @@ class Rio:
         io = self.jobspec.target.new_io(
-            self.queue,
-            next(iogen),
-            self.jobspec.bs,
-            iodir,
-            0,
-            0,
+            self.queue, next(iogen), self.jobspec.bs, iodir, 0, 0,
         )
@@ -41,6 +41,7 @@ from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
 from .ctx import OcfCtx
 from .volume import RamVolume, Volume

+
 class Backfill(Structure):
     _fields_ = [("_max_queue_size", c_uint32), ("_queue_unblock_size", c_uint32)]
@@ -76,7 +77,7 @@ class CacheAttachConfig(Structure):
         ("_discard_on_start", c_bool),
-        ("_disable_cleaner", c_bool)
+        ("_disable_cleaner", c_bool),
     ]
@@ -214,8 +215,7 @@ class Cache:
             _backfill=Backfill(
-                _max_queue_size=self.max_queue_size,
-                _queue_unblock_size=self.queue_unblock_size,
+                _max_queue_size=self.max_queue_size, _queue_unblock_size=self.queue_unblock_size,
             ),
@@ -259,9 +259,7 @@ class Cache:
-        activate_cfg = CacheStandbyActivateConfig(
-            _device=device_cfg, _open_cores=open_cores,
-        )
+        activate_cfg = CacheStandbyActivateConfig(_device=device_cfg, _open_cores=open_cores,)
@@ -297,9 +295,7 @@ class Cache:
-    def set_cleaning_policy_param(
-        self, cleaning_policy: CleaningPolicy, param_id, param_value
-    ):
+    def set_cleaning_policy_param(self, cleaning_policy: CleaningPolicy, param_id, param_value):
@@ -351,9 +347,7 @@ class Cache:
-        status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
-            self.cache_handle, policy
-        )
+        status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(self.cache_handle, policy)
@@ -382,9 +376,7 @@ class Cache:
         if status:
-            raise OcfError(
-                "Error setting cache seq cut off policy promotion count", status
-            )
+            raise OcfError("Error setting cache seq cut off policy promotion count", status)
@@ -410,13 +402,7 @@ class Cache:
     def add_partition(
-        self,
-        part_id: int,
-        name: str,
-        min_size: int,
-        max_size: int,
-        priority: int,
-        valid: bool,
+        self, part_id: int, name: str, min_size: int, max_size: int, priority: int, valid: bool,
     ):
@@ -432,12 +418,7 @@ class Cache:
     def configure_partition(
-        self,
-        part_id: int,
-        name: str,
-        max_size: int,
-        priority: int,
-        cache_mode=CACHE_MODE_NONE,
+        self, part_id: int, name: str, max_size: int, priority: int, cache_mode=CACHE_MODE_NONE,
     ):
@@ -491,12 +472,7 @@ class Cache:
     def attach_device(
-        self,
-        device,
-        force=False,
-        perform_test=False,
-        cache_line_size=None,
-        open_cores=False,
+        self, device, force=False, perform_test=False, cache_line_size=None, open_cores=False,
     ):
@@ -505,9 +481,7 @@ class Cache:
         attach_cfg = CacheAttachConfig(
             _device=device_config,
-            _cache_line_size=cache_line_size
-            if cache_line_size
-            else self.cache_line_size,
+            _cache_line_size=cache_line_size if cache_line_size else self.cache_line_size,
             _open_cores=open_cores,
@@ -517,17 +491,14 @@ class Cache:
-        self.owner.lib.ocf_mngt_cache_attach(
-            self.cache_handle, byref(attach_cfg), c, None
-        )
+        self.owner.lib.ocf_mngt_cache_attach(self.cache_handle, byref(attach_cfg), c, None)
         if c.results["error"]:
             raise OcfError(
-                f"Attaching cache device failed",
-                c.results["error"],
+                f"Attaching cache device failed", c.results["error"],
             )
@@ -548,17 +519,14 @@ class Cache:
-        self.owner.lib.ocf_mngt_cache_standby_attach(
-            self.cache_handle, byref(attach_cfg), c, None
-        )
+        self.owner.lib.ocf_mngt_cache_standby_attach(self.cache_handle, byref(attach_cfg), c, None)
         if c.results["error"]:
             raise OcfError(
-                f"Attaching to standby cache failed",
-                c.results["error"],
+                f"Attaching to standby cache failed", c.results["error"],
             )
@@ -577,9 +545,7 @@ class Cache:
-        self.owner.lib.ocf_mngt_cache_standby_load(
-            self.cache_handle, byref(attach_cfg), c, None
-        )
+        self.owner.lib.ocf_mngt_cache_standby_load(self.cache_handle, byref(attach_cfg), c, None)
@@ -616,9 +582,7 @@ class Cache:
-        self.owner.lib.ocf_mngt_cache_load(
-            self.cache_handle, byref(attach_cfg), c, None
-        )
+        self.owner.lib.ocf_mngt_cache_load(self.cache_handle, byref(attach_cfg), c, None)
@@ -689,10 +653,7 @@ class Cache:
         result = self.owner.lib.ocf_core_get_by_name(
-            self.cache_handle,
-            name.encode("ascii"),
-            len(name),
-            byref(core_handle),
+            self.cache_handle, name.encode("ascii"), len(name), byref(core_handle),
         )
@@ -714,12 +675,7 @@ class Cache:
         c = OcfCompletion(
-            [
-                ("cache", c_void_p),
-                ("core", c_void_p),
-                ("priv", c_void_p),
-                ("error", c_int),
-            ]
+            [("cache", c_void_p), ("core", c_void_p), ("priv", c_void_p), ("error", c_int),]
         )
@@ -908,6 +864,7 @@ class Cache:
     def settle(self):
         Queue.settle_many(self.io_queues + [self.mngt_queue])

+
 lib = OcfLib.getInstance()
 lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
 lib.ocf_mngt_cache_add_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p]
@@ -80,10 +80,7 @@ class Core:
         cfg = CoreConfig(
             _uuid=Uuid(
-                _data=cast(
-                    create_string_buffer(self.device.uuid.encode("ascii")),
-                    c_char_p,
-                ),
+                _data=cast(create_string_buffer(self.device.uuid.encode("ascii")), c_char_p,),
                 _size=len(self.device.uuid) + 1,
             ),
@@ -123,9 +120,7 @@ class Core:
-        status = self.cache.owner.lib.ocf_core_get_info(
-            self.handle, byref(core_info)
-        )
+        status = self.cache.owner.lib.ocf_core_get_info(self.handle, byref(core_info))
@@ -145,9 +140,7 @@ class Core:
-        status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(
-            self.handle, policy
-        )
+        status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(self.handle, policy)
@@ -155,9 +148,7 @@ class Core:
-        status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_threshold(
-            self.handle, threshold
-        )
+        status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_threshold(self.handle, threshold)
@@ -175,6 +166,7 @@ class Core:
     def reset_stats(self):
         self.cache.owner.lib.ocf_core_stats_initialize(self.handle)

+
 lib = OcfLib.getInstance()
 lib.ocf_core_get_uuid_wrapper.restype = POINTER(Uuid)
 lib.ocf_core_get_uuid_wrapper.argtypes = [c_void_p]
@@ -13,6 +13,7 @@ from .shared import OcfError
 from ..ocf import OcfLib
 from .queue import Queue

+
 class OcfCtxOps(Structure):
     _fields_ = [
         ("data", DataOps),
@@ -41,9 +42,7 @@ class OcfCtx:
             ops=OcfCtxOps(
-                data=self.data.get_ops(),
-                cleaner=self.cleaner.get_ops(),
-                logger=logger.get_ops(),
+                data=self.data.get_ops(), cleaner=self.cleaner.get_ops(), logger=logger.get_ops(),
             ),
             logger_priv=cast(pointer(logger.get_priv()), c_void_p),
@@ -57,13 +56,7 @@ class OcfCtx:
     def with_defaults(cls, logger):
-        return cls(
-            OcfLib.getInstance(),
-            b"PyOCF default ctx",
-            logger,
-            Data,
-            Cleaner,
-        )
+        return cls(OcfLib.getInstance(), b"PyOCF default ctx", logger, Data, Cleaner,)
@@ -90,9 +83,7 @@ class OcfCtx:
-        self.lib.ocf_ctx_unregister_volume_type(
-            self.ctx_handle, vol_type.type_id
-        )
+        self.lib.ocf_ctx_unregister_volume_type(self.ctx_handle, vol_type.type_id)
         del self.volume_types[vol_type.type_id]
@@ -161,9 +161,7 @@ class Data:
     def _copy(dst, src, skip, seek, size):
-        return Data.get_instance(dst).copy(
-            Data.get_instance(src), skip, seek, size
-        )
+        return Data.get_instance(dst).copy(Data.get_instance(src), skip, seek, size)
@@ -53,9 +53,7 @@ class Io(Structure):
         cls._instances_[ref] = c
-        OcfLib.getInstance().ocf_io_set_cmpl_wrapper(
-            byref(c), None, None, c.c_end
-        )
+        OcfLib.getInstance().ocf_io_set_cmpl_wrapper(byref(c), None, None, c.c_end)
         return c
@@ -22,6 +22,7 @@ from ..ocf import OcfLib
 logging.basicConfig(level=logging.DEBUG, handlers=[logging.NullHandler()])

+
 class LogLevel(IntEnum):
     EMERG = 0
     ALERT = 1
@@ -123,9 +124,7 @@ class DefaultLogger(Logger):
         ch = logging.StreamHandler()
-        fmt = logging.Formatter(
-            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-        )
+        fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
         ch.setFormatter(fmt)
@@ -140,9 +139,7 @@ class DefaultLogger(Logger):
         super().__init__()
-        fmt = logging.Formatter(
-            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-        )
+        fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
         fh = logging.FileHandler(f)
@@ -69,9 +69,7 @@ class OcfCompletion:
         def __init__(self, completion_args):
-            self.completion_args = {
-                x[0]: i for i, x in enumerate(completion_args)
-            }
+            self.completion_args = {x[0]: i for i, x in enumerate(completion_args)}
             self.results = None
@@ -131,9 +129,7 @@ class SharedOcfObject(Structure):
             logging.getLogger("pyocf").error(
-                "OcfSharedObject corruption. wanted: {} instances: {}".format(
-                    ref, cls._instances_
-                )
+                "OcfSharedObject corruption. wanted: {} instances: {}".format(ref, cls._instances_)
             )
             return None
@@ -1,4 +1,3 @@
-
 #
 # Copyright(c) 2019-2021 Intel Corporation
 # SPDX-License-Identifier: BSD-3-Clause
@@ -71,6 +71,7 @@ class VolumeProperties(Structure):
         ("_ops_", VolumeOps),
     ]

+
 class VolumeIoPriv(Structure):
     _fields_ = [("_data", c_void_p), ("_offset", c_uint64)]
@@ -92,18 +93,14 @@ class Volume:
     def _submit_io(io):
         io_structure = cast(io, POINTER(Io))
-        volume = Volume.get_instance(
-            OcfLib.getInstance().ocf_io_get_volume(io_structure)
-        )
+        volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
         volume.submit_io(io_structure)
     def _submit_flush(flush):
         io_structure = cast(flush, POINTER(Io))
-        volume = Volume.get_instance(
-            OcfLib.getInstance().ocf_io_get_volume(io_structure)
-        )
+        volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
         volume.submit_flush(io_structure)
@@ -114,9 +111,7 @@ class Volume:
     def _submit_discard(discard):
         io_structure = cast(discard, POINTER(Io))
-        volume = Volume.get_instance(
-            OcfLib.getInstance().ocf_io_get_volume(io_structure)
-        )
+        volume = Volume.get_instance(OcfLib.getInstance().ocf_io_get_volume(io_structure))
         volume.submit_discard(io_structure)
@@ -126,9 +121,7 @@ class Volume:
     def _open(ref):
-        uuid_ptr = cast(
-            OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid)
-        )
+        uuid_ptr = cast(OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid))
         uuid = str(uuid_ptr.contents._data, encoding="ascii")
@@ -215,9 +208,7 @@ class Volume:
     def _io_set_data(io, data, offset):
-        io_priv = cast(
-            OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
-        )
+        io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
         data = Data.get_instance(data)
@@ -227,17 +218,13 @@ class Volume:
     def _io_get_data(io):
-        io_priv = cast(
-            OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
-        )
+        io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
         return io_priv.contents._data
             if uuid in type(self)._uuid_:
-                raise Exception(
-                    "Volume with uuid {} already created".format(uuid)
-                )
+                raise Exception("Volume with uuid {} already created".format(uuid))
@@ -314,13 +301,7 @@ class Volume:
     def new_io(
-        self,
-        queue: Queue,
-        addr: int,
-        length: int,
-        direction: IoDir,
-        io_class: int,
-        flags: int,
+        self, queue: Queue, addr: int, length: int, direction: IoDir, io_class: int, flags: int,
     ):
@@ -370,8 +351,7 @@ class RamVolume(Volume):
         try:
-            io_priv = cast(
-                OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
+            io_priv = cast(OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
             offset = io_priv.contents._offset
@@ -407,12 +387,7 @@ class RamVolume(Volume):
 class ErrorDevice(Volume):
     def __init__(
-        self,
-        vol,
-        error_sectors: set = None,
-        error_seq_no: dict = None,
-        armed=True,
-        uuid=None,
+        self, vol, error_sectors: set = None, error_seq_no: dict = None, armed=True, uuid=None,
     ):
@@ -436,9 +411,7 @@ class ErrorDevice(Volume):
-        sector_match = (
-            self.error_sectors is not None and io.contents._addr in self.error_sectors
-        )
+        sector_match = self.error_sectors is not None and io.contents._addr in self.error_sectors
@@ -489,6 +462,7 @@ class ErrorDevice(Volume):
     def get_copy(self):
         return self.vol.get_copy()

+
 lib = OcfLib.getInstance()
 lib.ocf_io_get_priv.restype = POINTER(VolumeIoPriv)
 lib.ocf_io_get_volume.argtypes = [c_void_p]
@@ -21,12 +21,9 @@ class CacheVolume(ExpObjVolume):
     def open(self):
-        return Volume.open(
-            self.lib.ocf_cache_get_front_volume(self.cache.cache_handle),
-            self
-        )
+        return Volume.open(self.lib.ocf_cache_get_front_volume(self.cache.cache_handle), self)
     def md5(self):
         out = self.cache.get_conf()
-        cache_line_size = int(out['cache_line_size'])
+        cache_line_size = int(out["cache_line_size"])
         return self._exp_obj_md5(cache_line_size)
@@ -18,10 +18,7 @@ class CoreVolume(ExpObjVolume):
     def open(self):
-        return Volume.open(
-            self.lib.ocf_core_get_front_volume(self.core.handle),
-            self
-        )
+        return Volume.open(self.lib.ocf_core_get_front_volume(self.core.handle), self)
     def md5(self):
         return self._exp_obj_md5(4096)
@@ -22,9 +22,7 @@ class ExpObjVolume(Volume):
     def __alloc_io(self, addr, _bytes, _dir, _class, _flags):
         queue = self.parent.get_default_queue()  # TODO multiple queues?
-        return vol.new_io(
-            queue, addr, _bytes, _dir, _class, _flags
-        )
+        return vol.new_io(queue, addr, _bytes, _dir, _class, _flags)
@@ -99,8 +97,7 @@ class ExpObjVolume(Volume):
         while position < read_buffer_all.size:
-            io = self.new_io(self.parent.get_default_queue(), position,
-                             read_size, IoDir.READ, 0, 0)
+            io = self.new_io(self.parent.get_default_queue(), position, read_size, IoDir.READ, 0, 0)
             io.set_data(read_buffer)
@@ -7,13 +7,7 @@ from ctypes import string_at
 def print_buffer(
-    buf,
-    length,
-    offset=0,
-    width=16,
-    ignore=0,
-    stop_after_count_ignored=0,
-    print_fcn=print,
+    buf, length, offset=0, width=16, ignore=0, stop_after_count_ignored=0, print_fcn=print,
 ):
     end = int(offset) + int(length)
@@ -27,10 +21,7 @@ def print_buffer(
         if not any(x != ignore for x in cur_line):
-            if (
-                stop_after_count_ignored
-                and ignored_lines > stop_after_count_ignored
-            ):
+            if stop_after_count_ignored and ignored_lines > stop_after_count_ignored:
                 print_fcn(
                     "<{} bytes of '0x{:02X}' encountered, stopping>".format(
                         stop_after_count_ignored * width, ignore
@@ -41,11 +32,7 @@ def print_buffer(
         if ignored_lines:
-            print_fcn(
-                "<{} of '0x{:02X}' bytes omitted>".format(
-                    ignored_lines * width, ignore
-                )
-            )
+            print_fcn("<{} of '0x{:02X}' bytes omitted>".format(ignored_lines * width, ignore))
             ignored_lines = 0
@@ -76,10 +63,7 @@ class Size:
         if sector_aligned:
-            self.bytes = int(
-                ((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE)
-                * self._SECTOR_SIZE
-            )
+            self.bytes = int(((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE) * self._SECTOR_SIZE)
         else:
             self.bytes = int(b)
@@ -94,8 +94,9 @@ def test_load_cache_with_cores(pyocf_ctx, open_cores):
     write_data = Data.from_string("This is test data")
-    io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B,
-                    write_data.size, IoDir.WRITE, 0, 0)
+    io = vol.new_io(
+        cache.get_default_queue(), S.from_sector(3).B, write_data.size, IoDir.WRITE, 0, 0
+    )
     io.set_data(write_data)
@@ -114,8 +115,7 @@ def test_load_cache_with_cores(pyocf_ctx, open_cores):
     read_data = Data(write_data.size)
-    io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B,
-                    read_data.size, IoDir.READ, 0, 0)
+    io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B, read_data.size, IoDir.READ, 0, 0)
     io.set_data(read_data)
@@ -18,6 +18,7 @@ from pyocf.types.ctx import OcfCtx
 default_registered_volumes = [RamVolume, ErrorDevice, CacheVolume, CoreVolume, ReplicatedVolume]

+
 def pytest_configure(config):
     sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
@@ -97,6 +97,7 @@ def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
     r.abort()
     assert r.error_count == 0, "No IO's should fail when turning NHIT policy off"

+
 def fill_cache(cache, fill_ratio):
     """
     Helper to fill cache from LBA 0.
@@ -126,9 +127,7 @@ def fill_cache(cache, fill_ratio):
 @pytest.mark.parametrize("fill_percentage", [0, 1, 50, 99])
 @pytest.mark.parametrize("insertion_threshold", [2, 8])
-def test_promoted_after_hits_various_thresholds(
-    pyocf_ctx, insertion_threshold, fill_percentage
-):
+def test_promoted_after_hits_various_thresholds(pyocf_ctx, insertion_threshold, fill_percentage):
     """
     Check promotion policy behavior with various set thresholds
@@ -195,8 +194,7 @@ def test_promoted_after_hits_various_thresholds(
     assert (
-        threshold_reached_occupancy
-        == stats["usage"]["occupancy"]["value"] - 1
+        threshold_reached_occupancy == stats["usage"]["occupancy"]["value"] - 1
     ), "Previous request should be promoted and occupancy should rise"
@@ -232,12 +230,8 @@ def test_partial_hit_promotion(pyocf_ctx):
     cache.set_promotion_policy(PromotionPolicy.NHIT)
-    cache.set_promotion_policy_param(
-        PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0
-    )
-    cache.set_promotion_policy_param(
-        PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100
-    )
+    cache.set_promotion_policy_param(PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0)
+    cache.set_promotion_policy_param(PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100)
@@ -245,6 +239,4 @@ def test_partial_hit_promotion(pyocf_ctx):
     stats = cache.get_stats()
-    assert (
-        stats["usage"]["occupancy"]["value"] == 2
-    ), "Second cache line should be mapped"
+    assert stats["usage"]["occupancy"]["value"] == 2, "Second cache line should be mapped"
@@ -61,11 +61,7 @@ def sector_to_region(sector, region_start):
 def region_end(region_start, region_no, total_sectors):
     num_regions = len(region_start)
-    return (
-        region_start[region_no + 1] - 1
-        if region_no < num_regions - 1
-        else total_sectors - 1
-    )
+    return region_start[region_no + 1] - 1 if region_no < num_regions - 1 else total_sectors - 1
@@ -281,9 +277,7 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
     # add randomly generated sector statuses
     for _ in range(ITRATION_COUNT - len(region_statuses)):
-        region_statuses.append(
-            [random.choice(list(SectorStatus)) for _ in range(num_regions)]
-        )
+        region_statuses.append([random.choice(list(SectorStatus)) for _ in range(num_regions)])
@@ -302,9 +296,7 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
         random.shuffle(insert_order)
-        sectors = [
-            insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)
-        ]
+        sectors = [insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)]
@@ -112,9 +112,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
     core_device = RamVolume(Size.from_MiB(100))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=CacheMode.WT, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT, cache_line_size=cls)
     core = Core.using_device(core_device)
@@ -124,10 +122,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
     cache.configure_partition(
-        part_id=test_ioclass_id,
-        name="default_ioclass",
-        max_size=100,
-        priority=1,
+        part_id=test_ioclass_id, name="default_ioclass", max_size=100, priority=1,
     )
@@ -154,9 +149,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
     # Repart - force overflow of second partition occupancy limit
-    pinned_double_size = ceil(
-        (cache_size.blocks_4k * pinned_ioclass_max_occupancy * 2) / 100
-    )
+    pinned_double_size = ceil((cache_size.blocks_4k * pinned_ioclass_max_occupancy * 2) / 100)
@@ -175,21 +168,14 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
     assert isclose(
-        part_current_size.blocks_4k,
-        ceil(cache_size.blocks_4k * 0.1),
-        abs_tol=Size(cls).blocks_4k,
+        part_current_size.blocks_4k, ceil(cache_size.blocks_4k * 0.1), abs_tol=Size(cls).blocks_4k,
     ), "Overflown part has not been evicted"
 def send_io(core: Core, data: Data, addr: int = 0, target_ioclass: int = 0):
     io = vol.new_io(
-        core.cache.get_default_queue(),
-        addr,
-        data.size,
-        IoDir.WRITE,
-        target_ioclass,
-        0,
+        core.cache.get_default_queue(), addr, data.size, IoDir.WRITE, target_ioclass, 0,
     )
@@ -23,9 +23,7 @@ from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
 def test_adding_core(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -48,9 +46,7 @@ def test_adding_core(pyocf_ctx, cache_mode, cls):
 def test_removing_core(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -72,9 +68,7 @@ def test_removing_core(pyocf_ctx, cache_mode, cls):
 def test_remove_dirty_no_flush(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -133,8 +127,7 @@ def test_10add_remove_with_io(pyocf_ctx):
     io = vol.new_io(
-        cache.get_default_queue(), S.from_sector(1).B, write_data.size,
-        IoDir.WRITE, 0, 0
+        cache.get_default_queue(), S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0
     )
@@ -210,9 +203,7 @@ def test_adding_to_random_cache(pyocf_ctx):
 def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -269,9 +260,7 @@ def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
 def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -310,8 +299,7 @@ def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
 def _io_to_core(vol: Volume, queue: Queue, data: Data):
-    io = vol.new_io(queue, 0, data.size,
-                    IoDir.WRITE, 0, 0)
+    io = vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
     io.set_data(data)
@@ -333,9 +321,7 @@ def test_try_add_core_with_changed_size(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -367,9 +353,7 @@ def test_load_with_changed_core_size(pyocf_ctx, cache_mode, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cache_mode, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
@@ -37,9 +37,7 @@ logger = logging.getLogger(__name__)
 @pytest.mark.parametrize("mode", [CacheMode.WB, CacheMode.WT, CacheMode.WO])
 @pytest.mark.parametrize("new_cache_size", [80, 120])
-def test_attach_different_size(
-    pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize
-):
+def test_attach_different_size(pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize):
     """Start cache and add partition with limited occupancy. Fill partition with data,
     attach cache with different size and trigger IO. Verify if occupancy thresold is
     respected with both original and new cache device.
@@ -53,9 +51,7 @@ def test_attach_different_size(
     queue = cache.get_default_queue()
-    cache.configure_partition(
-        part_id=1, name="test_part", max_size=50, priority=1
-    )
+    cache.configure_partition(part_id=1, name="test_part", max_size=50, priority=1)
@@ -67,9 +63,7 @@ def test_attach_different_size(
         io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
-    part_current_size = CacheLines(
-        cache.get_partition_info(part_id=1)["_curr_size"], cls
-    )
+    part_current_size = CacheLines(cache.get_partition_info(part_id=1)["_curr_size"], cls)
     assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
@@ -82,9 +76,7 @@ def test_attach_different_size(
         io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
-    part_current_size = CacheLines(
-        cache.get_partition_info(part_id=1)["_curr_size"], cls
-    )
+    part_current_size = CacheLines(cache.get_partition_info(part_id=1)["_curr_size"], cls)
     assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
@@ -18,9 +18,7 @@ from pyocf.types.shared import CacheLineSize
 def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=from_cm, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=from_cm, cache_line_size=cls)
     cache.change_cache_mode(to_cm)
@@ -33,9 +31,7 @@ def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
 def test_change_cleaning_policy(pyocf_ctx, cm, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cm, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
     for cp_from in CleaningPolicy:
@@ -58,9 +54,7 @@ def test_change_cleaning_policy(pyocf_ctx, cm, cls):
 def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cm, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
     core_device1 = RamVolume(S.from_MiB(10))
@@ -97,9 +91,7 @@ def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
 def test_core_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
     cache_device = RamVolume(S.from_MiB(50))
-    cache = Cache.start_on_device(
-        cache_device, cache_mode=cm, cache_line_size=cls
-    )
+    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)
     core_device1 = RamVolume(S.from_MiB(10))
@@ -331,9 +331,7 @@ def test_failover_passive_first(pyocf_2_ctx):
     cache1_cache_vol = ReplicatedVolume(prim_cache_backend_vol, cache2_exp_obj_vol)
     # active cache
-    cache1 = Cache.start_on_device(
-        cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls
-    )
+    cache1 = Cache.start_on_device(cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls)
     core = Core(core_backend_vol)
@@ -550,9 +548,7 @@ def test_failover_passive_first(pyocf_2_ctx):
     cache1_cache_vol = ReplicatedVolume(prim_cache_backend_vol, cache2_exp_obj_vol)
     # active cache
-    cache1 = Cache.start_on_device(
-        cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls
-    )
+    cache1 = Cache.start_on_device(cache1_cache_vol, ctx1, cache_mode=mode, cache_line_size=cls)
     core = Core(core_backend_vol)
@ -18,7 +18,7 @@ from pyocf.types.cache import (
|
|||||||
CleaningPolicy,
|
CleaningPolicy,
|
||||||
CacheConfig,
|
CacheConfig,
|
||||||
PromotionPolicy,
|
PromotionPolicy,
|
||||||
Backfill
|
Backfill,
|
||||||
)
|
)
|
||||||
from pyocf.types.core import Core
|
from pyocf.types.core import Core
|
||||||
from pyocf.types.ctx import OcfCtx
|
from pyocf.types.ctx import OcfCtx
|
||||||
@ -148,7 +148,7 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
|
|||||||
If possible check whether cache reports properly set parameters.
|
If possible check whether cache reports properly set parameters.
|
||||||
"""
|
"""
|
||||||
cache_device = RamVolume(Size.from_MiB(50))
|
cache_device = RamVolume(Size.from_MiB(50))
|
||||||
queue_size = randrange(60000, 2**32)
|
queue_size = randrange(60000, 2 ** 32)
|
||||||
unblock_size = randrange(1, queue_size)
|
unblock_size = randrange(1, queue_size)
|
||||||
volatile_metadata = randrange(2) == 1
|
volatile_metadata = randrange(2) == 1
|
||||||
unaligned_io = randrange(2) == 1
|
unaligned_io = randrange(2) == 1
|
||||||
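The 2**32 to 2 ** 32 change above reflects the power-operator spacing of the black release used for this run; later black releases (22.1.0 and newer) keep simple operands hugged, so the exact spelling depends on the formatter version. Illustration only:

    # Spacing emitted by older black releases:
    queue_size_limit = 2 ** 32
    # Newer releases would leave this as 2**32 when both operands are simple
    # literals or names; 2 ** randrange(...) keeps the spaces either way.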
@ -165,7 +165,8 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
|
|||||||
max_queue_size=queue_size,
|
max_queue_size=queue_size,
|
||||||
queue_unblock_size=unblock_size,
|
queue_unblock_size=unblock_size,
|
||||||
pt_unaligned_io=unaligned_io,
|
pt_unaligned_io=unaligned_io,
|
||||||
use_submit_fast=submit_fast)
|
use_submit_fast=submit_fast,
|
||||||
|
)
|
||||||
|
|
||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
|
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
|
||||||
@ -198,8 +199,9 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
|
|||||||
run_io_and_cache_data_if_possible(core, mode, cls, cls_no)
|
run_io_and_cache_data_if_possible(core, mode, cls, cls_no)
|
||||||
|
|
||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
assert int(stats["conf"]["dirty"]) == (cls_no if mode.lazy_write() else 0),\
|
assert int(stats["conf"]["dirty"]) == (
|
||||||
"Dirty data before MD5"
|
cls_no if mode.lazy_write() else 0
|
||||||
|
), "Dirty data before MD5"
|
||||||
|
|
||||||
md5_exported_core = front_vol.md5()
|
md5_exported_core = front_vol.md5()
|
||||||
|
|
||||||
@ -208,11 +210,13 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
|
|||||||
cache.stop()
|
cache.stop()
|
||||||
|
|
||||||
if mode.lazy_write() and not with_flush:
|
if mode.lazy_write() and not with_flush:
|
||||||
assert core_device.md5() != md5_exported_core, \
|
assert (
|
||||||
"MD5 check: core device vs exported object with dirty data"
|
core_device.md5() != md5_exported_core
|
||||||
|
), "MD5 check: core device vs exported object with dirty data"
|
||||||
else:
|
else:
|
||||||
assert core_device.md5() == md5_exported_core, \
|
assert (
|
||||||
"MD5 check: core device vs exported object with clean data"
|
core_device.md5() == md5_exported_core
|
||||||
|
), "MD5 check: core device vs exported object with clean data"
|
||||||
|
|
||||||
|
|
||||||
def test_start_stop_multiple(pyocf_ctx):
|
def test_start_stop_multiple(pyocf_ctx):
|
||||||
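The assert rewrites above drop the backslash continuations: black does not emit backslash line joins, so a condition that does not fit on one line is wrapped in parentheses and the message follows the closing parenthesis. A self-contained sketch with placeholder values:

    dirty = 0
    expected = 0

    # Old style: the statement continued with a backslash
    assert dirty == expected, \
        "Dirty data before MD5"

    # Black style: the condition wrapped in parentheses instead
    assert (
        dirty == expected
    ), "Dirty data before MD5"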
@ -226,14 +230,12 @@ def test_start_stop_multiple(pyocf_ctx):
|
|||||||
cache_device = RamVolume(Size.from_MiB(50))
|
cache_device = RamVolume(Size.from_MiB(50))
|
||||||
cache_name = f"cache{i}"
|
cache_name = f"cache{i}"
|
||||||
cache_mode = CacheMode(randrange(0, len(CacheMode)))
|
cache_mode = CacheMode(randrange(0, len(CacheMode)))
|
||||||
size = 4096 * 2**randrange(0, len(CacheLineSize))
|
size = 4096 * 2 ** randrange(0, len(CacheLineSize))
|
||||||
cache_line_size = CacheLineSize(size)
|
cache_line_size = CacheLineSize(size)
|
||||||
|
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(
|
||||||
cache_device,
|
cache_device, name=cache_name, cache_mode=cache_mode, cache_line_size=cache_line_size
|
||||||
name=cache_name,
|
)
|
||||||
cache_mode=cache_mode,
|
|
||||||
cache_line_size=cache_line_size)
|
|
||||||
caches.append(cache)
|
caches.append(cache)
|
||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
|
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
|
||||||
@ -258,14 +260,12 @@ def test_100_start_stop(pyocf_ctx):
|
|||||||
cache_device = RamVolume(Size.from_MiB(50))
|
cache_device = RamVolume(Size.from_MiB(50))
|
||||||
cache_name = f"cache{i}"
|
cache_name = f"cache{i}"
|
||||||
cache_mode = CacheMode(randrange(0, len(CacheMode)))
|
cache_mode = CacheMode(randrange(0, len(CacheMode)))
|
||||||
size = 4096 * 2**randrange(0, len(CacheLineSize))
|
size = 4096 * 2 ** randrange(0, len(CacheLineSize))
|
||||||
cache_line_size = CacheLineSize(size)
|
cache_line_size = CacheLineSize(size)
|
||||||
|
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(
|
||||||
cache_device,
|
cache_device, name=cache_name, cache_mode=cache_mode, cache_line_size=cache_line_size
|
||||||
name=cache_name,
|
)
|
||||||
cache_mode=cache_mode,
|
|
||||||
cache_line_size=cache_line_size)
|
|
||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
|
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
|
||||||
assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
|
assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
|
||||||
@ -293,14 +293,15 @@ def test_start_stop_incrementally(pyocf_ctx):
|
|||||||
cache_device = RamVolume(Size.from_MiB(50))
|
cache_device = RamVolume(Size.from_MiB(50))
|
||||||
cache_name = f"cache{next(counter)}"
|
cache_name = f"cache{next(counter)}"
|
||||||
cache_mode = CacheMode(randrange(0, len(CacheMode)))
|
cache_mode = CacheMode(randrange(0, len(CacheMode)))
|
||||||
size = 4096 * 2**randrange(0, len(CacheLineSize))
|
size = 4096 * 2 ** randrange(0, len(CacheLineSize))
|
||||||
cache_line_size = CacheLineSize(size)
|
cache_line_size = CacheLineSize(size)
|
||||||
|
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(
|
||||||
cache_device,
|
cache_device,
|
||||||
name=cache_name,
|
name=cache_name,
|
||||||
cache_mode=cache_mode,
|
cache_mode=cache_mode,
|
||||||
cache_line_size=cache_line_size)
|
cache_line_size=cache_line_size,
|
||||||
|
)
|
||||||
caches.append(cache)
|
caches.append(cache)
|
||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
|
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
|
||||||
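In the hunk above the call still does not fit on one line even when joined, so black keeps it exploded, one argument per line, adds a trailing comma, and puts the closing parenthesis on its own line. A minimal sketch using a placeholder function rather than the pyocf API:

    def start(device, name, cache_mode, cache_line_size):
        return (device, name, cache_mode, cache_line_size)

    cache = start(
        "ramdisk",
        name="cache0",
        cache_mode="WB",
        cache_line_size=4096,
    )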
@ -318,8 +319,9 @@ def test_start_stop_incrementally(pyocf_ctx):
|
|||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
cache_name = stats["conf"]["cache_name"]
|
cache_name = stats["conf"]["cache_name"]
|
||||||
cache.stop()
|
cache.stop()
|
||||||
assert get_cache_by_name(pyocf_ctx, cache_name) != 0, \
|
assert (
|
||||||
"Try getting cache after stopping it"
|
get_cache_by_name(pyocf_ctx, cache_name) != 0
|
||||||
|
), "Try getting cache after stopping it"
|
||||||
add = not add
|
add = not add
|
||||||
|
|
||||||
|
|
||||||
@ -333,17 +335,15 @@ def test_start_cache_same_id(pyocf_ctx, mode, cls):
|
|||||||
cache_device1 = RamVolume(Size.from_MiB(50))
|
cache_device1 = RamVolume(Size.from_MiB(50))
|
||||||
cache_device2 = RamVolume(Size.from_MiB(50))
|
cache_device2 = RamVolume(Size.from_MiB(50))
|
||||||
cache_name = "cache"
|
cache_name = "cache"
|
||||||
cache = Cache.start_on_device(cache_device1,
|
cache = Cache.start_on_device(
|
||||||
cache_mode=mode,
|
cache_device1, cache_mode=mode, cache_line_size=cls, name=cache_name
|
||||||
cache_line_size=cls,
|
)
|
||||||
name=cache_name)
|
|
||||||
cache.get_stats()
|
cache.get_stats()
|
||||||
|
|
||||||
with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
|
with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
|
||||||
cache = Cache.start_on_device(cache_device2,
|
cache = Cache.start_on_device(
|
||||||
cache_mode=mode,
|
cache_device2, cache_mode=mode, cache_line_size=cls, name=cache_name
|
||||||
cache_line_size=cls,
|
)
|
||||||
name=cache_name)
|
|
||||||
cache.get_stats()
|
cache.get_stats()
|
||||||
|
|
||||||
|
|
||||||
@ -354,6 +354,7 @@ def test_start_cache_huge_device(pyocf_ctx_log_buffer, cls):
|
|||||||
pass_criteria:
|
pass_criteria:
|
||||||
- Starting cache on device too big to handle should fail
|
- Starting cache on device too big to handle should fail
|
||||||
"""
|
"""
|
||||||
|
|
||||||
class HugeDevice(Volume):
|
class HugeDevice(Volume):
|
||||||
def get_length(self):
|
def get_length(self):
|
||||||
return Size.from_B((cls * c_uint32(-1).value))
|
return Size.from_B((cls * c_uint32(-1).value))
|
||||||
@ -373,7 +374,6 @@ def test_start_cache_huge_device(pyocf_ctx_log_buffer, cls):
|
|||||||
), "Expected to find log notifying that max size was exceeded"
|
), "Expected to find log notifying that max size was exceeded"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("mode", CacheMode)
|
@pytest.mark.parametrize("mode", CacheMode)
|
||||||
@pytest.mark.parametrize("cls", CacheLineSize)
|
@pytest.mark.parametrize("cls", CacheLineSize)
|
||||||
def test_start_cache_same_device(pyocf_ctx, mode, cls):
|
def test_start_cache_same_device(pyocf_ctx, mode, cls):
|
||||||
@ -382,9 +382,7 @@ def test_start_cache_same_device(pyocf_ctx, mode, cls):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
cache_device = RamVolume(Size.from_MiB(50))
|
cache_device = RamVolume(Size.from_MiB(50))
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls, name="cache1")
|
||||||
cache_device, cache_mode=mode, cache_line_size=cls, name="cache1"
|
|
||||||
)
|
|
||||||
cache.get_stats()
|
cache.get_stats()
|
||||||
|
|
||||||
with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
|
with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
|
||||||
@ -420,9 +418,7 @@ def test_start_stop_noqueue(pyocf_ctx):
|
|||||||
assert not status, "Failed to start cache: {}".format(status)
|
assert not status, "Failed to start cache: {}".format(status)
|
||||||
|
|
||||||
# stop without creating mngmt queue
|
# stop without creating mngmt queue
|
||||||
c = OcfCompletion(
|
c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
|
||||||
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
|
|
||||||
)
|
|
||||||
pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
|
pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
|
||||||
c.wait()
|
c.wait()
|
||||||
assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])
|
assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])
|
||||||
@ -445,8 +441,9 @@ def run_io_and_cache_data_if_possible(core, mode, cls, cls_no):
|
|||||||
io_to_core(front_vol, queue, test_data, 0)
|
io_to_core(front_vol, queue, test_data, 0)
|
||||||
|
|
||||||
stats = core.cache.get_stats()
|
stats = core.cache.get_stats()
|
||||||
assert stats["usage"]["occupancy"]["value"] == \
|
assert stats["usage"]["occupancy"]["value"] == (
|
||||||
((cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0), "Occupancy"
|
(cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0
|
||||||
|
), "Occupancy"
|
||||||
|
|
||||||
|
|
||||||
def io_to_core(vol: Volume, queue: Queue, data: Data, offset: int):
|
def io_to_core(vol: Volume, queue: Queue, data: Data, offset: int):
|
||||||
@ -479,13 +476,16 @@ def check_stats_read_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
|
|||||||
core.cache.settle()
|
core.cache.settle()
|
||||||
stats = core.cache.get_stats()
|
stats = core.cache.get_stats()
|
||||||
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
|
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
|
||||||
assert core.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
|
assert core.cache.device.get_stats()[IoDir.WRITE] == (
|
||||||
"Writes to cache device"
|
1 if mode.read_insert() else 0
|
||||||
|
), "Writes to cache device"
|
||||||
assert core.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
|
assert core.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
|
||||||
assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
|
assert stats["req"]["rd_full_misses"]["value"] == (
|
||||||
"Read full misses"
|
0 if mode == CacheMode.PT else 1
|
||||||
assert stats["usage"]["occupancy"]["value"] == \
|
), "Read full misses"
|
||||||
((cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0), "Occupancy"
|
assert stats["usage"]["occupancy"]["value"] == (
|
||||||
|
(cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0
|
||||||
|
), "Occupancy"
|
||||||
|
|
||||||
|
|
||||||
def check_stats_write_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
|
def check_stats_write_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
|
||||||
@ -493,75 +493,89 @@ def check_stats_write_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
|
|||||||
stats = core.cache.get_stats()
|
stats = core.cache.get_stats()
|
||||||
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
|
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
|
||||||
# TODO(ajrutkow): why 1 for WT ??
|
# TODO(ajrutkow): why 1 for WT ??
|
||||||
assert core.cache.device.get_stats()[IoDir.WRITE] == \
|
assert core.cache.device.get_stats()[IoDir.WRITE] == (
|
||||||
(2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)), \
|
2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)
|
||||||
"Writes to cache device"
|
), "Writes to cache device"
|
||||||
assert core.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
|
assert core.device.get_stats()[IoDir.WRITE] == (
|
||||||
"Writes to core device"
|
0 if mode.lazy_write() else 1
|
||||||
assert stats["req"]["wr_full_misses"]["value"] == (1 if mode.write_insert() else 0), \
|
), "Writes to core device"
|
||||||
"Write full misses"
|
assert stats["req"]["wr_full_misses"]["value"] == (
|
||||||
assert stats["usage"]["occupancy"]["value"] == \
|
1 if mode.write_insert() else 0
|
||||||
((cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0), \
|
), "Write full misses"
|
||||||
"Occupancy"
|
assert stats["usage"]["occupancy"]["value"] == (
|
||||||
|
(cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0
|
||||||
|
), "Occupancy"
|
||||||
|
|
||||||
|
|
||||||
def check_stats_write_after_read(core: Core,
|
def check_stats_write_after_read(
|
||||||
mode: CacheMode,
|
core: Core, mode: CacheMode, cls: CacheLineSize, read_from_empty=False
|
||||||
cls: CacheLineSize,
|
):
|
||||||
read_from_empty=False):
|
|
||||||
core.cache.settle()
|
core.cache.settle()
|
||||||
stats = core.cache.get_stats()
|
stats = core.cache.get_stats()
|
||||||
assert core.cache.device.get_stats()[IoDir.WRITE] == \
|
assert core.cache.device.get_stats()[IoDir.WRITE] == (
|
||||||
(0 if mode in {CacheMode.WI, CacheMode.PT} else
|
0
|
||||||
(2 if read_from_empty and mode.lazy_write() else 1)), \
|
if mode in {CacheMode.WI, CacheMode.PT}
|
||||||
"Writes to cache device"
|
else (2 if read_from_empty and mode.lazy_write() else 1)
|
||||||
assert core.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
|
), "Writes to cache device"
|
||||||
"Writes to core device"
|
assert core.device.get_stats()[IoDir.WRITE] == (
|
||||||
assert stats["req"]["wr_hits"]["value"] == \
|
0 if mode.lazy_write() else 1
|
||||||
(1 if (mode.read_insert() and mode != CacheMode.WI)
|
), "Writes to core device"
|
||||||
or (mode.write_insert() and not read_from_empty) else 0), \
|
assert stats["req"]["wr_hits"]["value"] == (
|
||||||
"Write hits"
|
1
|
||||||
assert stats["usage"]["occupancy"]["value"] == \
|
if (mode.read_insert() and mode != CacheMode.WI)
|
||||||
(0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
|
or (mode.write_insert() and not read_from_empty)
|
||||||
"Occupancy"
|
else 0
|
||||||
|
), "Write hits"
|
||||||
|
assert stats["usage"]["occupancy"]["value"] == (
|
||||||
|
0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)
|
||||||
|
), "Occupancy"
|
||||||
|
|
||||||
|
|
||||||
def check_stats_read_after_write(core, mode, cls, write_to_empty=False):
|
def check_stats_read_after_write(core, mode, cls, write_to_empty=False):
|
||||||
core.cache.settle()
|
core.cache.settle()
|
||||||
stats = core.cache.get_stats()
|
stats = core.cache.get_stats()
|
||||||
assert core.cache.device.get_stats()[IoDir.WRITE] == \
|
assert core.cache.device.get_stats()[IoDir.WRITE] == (
|
||||||
(2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
|
2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)
|
||||||
"Writes to cache device"
|
), "Writes to cache device"
|
||||||
assert core.cache.device.get_stats()[IoDir.READ] == \
|
assert core.cache.device.get_stats()[IoDir.READ] == (
|
||||||
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
|
1
|
||||||
or (mode == CacheMode.WA and not write_to_empty) else 0), \
|
if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
|
||||||
"Reads from cache device"
|
or (mode == CacheMode.WA and not write_to_empty)
|
||||||
assert core.device.get_stats()[IoDir.READ] == \
|
else 0
|
||||||
(0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
|
), "Reads from cache device"
|
||||||
or (mode == CacheMode.WA and not write_to_empty) else 1), \
|
assert core.device.get_stats()[IoDir.READ] == (
|
||||||
"Reads from core device"
|
0
|
||||||
assert stats["req"]["rd_full_misses"]["value"] == \
|
if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
|
||||||
(1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
|
or (mode == CacheMode.WA and not write_to_empty)
|
||||||
+ (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
|
else 1
|
||||||
"Read full misses"
|
), "Reads from core device"
|
||||||
assert stats["req"]["rd_hits"]["value"] == \
|
assert stats["req"]["rd_full_misses"]["value"] == (
|
||||||
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
|
1 if mode in {CacheMode.WA, CacheMode.WI} else 0
|
||||||
or (mode == CacheMode.WA and not write_to_empty) else 0), \
|
) + (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), "Read full misses"
|
||||||
"Read hits"
|
assert stats["req"]["rd_hits"]["value"] == (
|
||||||
assert stats["usage"]["occupancy"]["value"] == \
|
1
|
||||||
(0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
|
if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
|
||||||
|
or (mode == CacheMode.WA and not write_to_empty)
|
||||||
|
else 0
|
||||||
|
), "Read hits"
|
||||||
|
assert stats["usage"]["occupancy"]["value"] == (
|
||||||
|
0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)
|
||||||
|
), "Occupancy"
|
||||||
|
|
||||||
|
|
||||||
def check_md5_sums(core: Core, mode: CacheMode):
|
def check_md5_sums(core: Core, mode: CacheMode):
|
||||||
if mode.lazy_write():
|
if mode.lazy_write():
|
||||||
assert core.device.md5() != core.get_front_volume().md5(), \
|
assert (
|
||||||
"MD5 check: core device vs exported object without flush"
|
core.device.md5() != core.get_front_volume().md5()
|
||||||
|
), "MD5 check: core device vs exported object without flush"
|
||||||
core.cache.flush()
|
core.cache.flush()
|
||||||
assert core.device.md5() == core.get_front_volume().md5(), \
|
assert (
|
||||||
"MD5 check: core device vs exported object after flush"
|
core.device.md5() == core.get_front_volume().md5()
|
||||||
|
), "MD5 check: core device vs exported object after flush"
|
||||||
else:
|
else:
|
||||||
assert core.device.md5() == core.get_front_volume().md5(), \
|
assert (
|
||||||
"MD5 check: core device vs exported object"
|
core.device.md5() == core.get_front_volume().md5()
|
||||||
|
), "MD5 check: core device vs exported object"
|
||||||
|
|
||||||
|
|
||||||
def get_cache_by_name(ctx, cache_name):
|
def get_cache_by_name(ctx, cache_name):
|
||||||
|
@ -5,12 +5,7 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
from ctypes import (
|
from ctypes import c_uint64, c_uint32, c_uint16, c_int
|
||||||
c_uint64,
|
|
||||||
c_uint32,
|
|
||||||
c_uint16,
|
|
||||||
c_int
|
|
||||||
)
|
|
||||||
from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
|
from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
|
||||||
|
|
||||||
from pyocf.types.cache import CacheMode, MetadataLayout, PromotionPolicy
|
from pyocf.types.cache import CacheMode, MetadataLayout, PromotionPolicy
|
||||||
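The import hunk above is the inverse case: a parenthesized import list that fits within the line limit is joined onto a single line. Since these are standard-library names, the result runs as-is:

    from ctypes import c_uint64, c_uint32, c_uint16, c_int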
@ -63,9 +58,7 @@ def string_randomize(request):
|
|||||||
return request.param
|
return request.param
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(
|
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode)))
|
||||||
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode))
|
|
||||||
)
|
|
||||||
def not_cache_mode_randomize(request):
|
def not_cache_mode_randomize(request):
|
||||||
return request.param
|
return request.param
|
||||||
|
|
||||||
|
@ -199,9 +199,7 @@ def test_neg_core_set_seq_cut_off_promotion(pyocf_ctx, cm, cls):
|
|||||||
for i in RandomGenerator(DefaultRanges.UINT32):
|
for i in RandomGenerator(DefaultRanges.UINT32):
|
||||||
if i in ConfValidValues.seq_cutoff_promotion_range:
|
if i in ConfValidValues.seq_cutoff_promotion_range:
|
||||||
continue
|
continue
|
||||||
with pytest.raises(
|
with pytest.raises(OcfError, match="Error setting core seq cut off policy promotion count"):
|
||||||
OcfError, match="Error setting core seq cut off policy promotion count"
|
|
||||||
):
|
|
||||||
core1.set_seq_cut_off_promotion(i)
|
core1.set_seq_cut_off_promotion(i)
|
||||||
print(f"\n{i}")
|
print(f"\n{i}")
|
||||||
|
|
||||||
@ -235,9 +233,7 @@ def test_neg_cache_set_seq_cut_off_threshold(pyocf_ctx, cm, cls):
|
|||||||
for i in RandomGenerator(DefaultRanges.UINT32):
|
for i in RandomGenerator(DefaultRanges.UINT32):
|
||||||
if i in ConfValidValues.seq_cutoff_threshold_rage:
|
if i in ConfValidValues.seq_cutoff_threshold_rage:
|
||||||
continue
|
continue
|
||||||
with pytest.raises(
|
with pytest.raises(OcfError, match="Error setting cache seq cut off policy threshold"):
|
||||||
OcfError, match="Error setting cache seq cut off policy threshold"
|
|
||||||
):
|
|
||||||
cache.set_seq_cut_off_threshold(i)
|
cache.set_seq_cut_off_threshold(i)
|
||||||
print(f"\n{i}")
|
print(f"\n{i}")
|
||||||
|
|
||||||
@ -268,9 +264,7 @@ def test_neg_core_set_seq_cut_off_threshold(pyocf_ctx, cm, cls):
|
|||||||
for i in RandomGenerator(DefaultRanges.UINT32):
|
for i in RandomGenerator(DefaultRanges.UINT32):
|
||||||
if i in ConfValidValues.seq_cutoff_threshold_rage:
|
if i in ConfValidValues.seq_cutoff_threshold_rage:
|
||||||
continue
|
continue
|
||||||
with pytest.raises(
|
with pytest.raises(OcfError, match="Error setting core seq cut off policy threshold"):
|
||||||
OcfError, match="Error setting core seq cut off policy threshold"
|
|
||||||
):
|
|
||||||
core.set_seq_cut_off_threshold(i)
|
core.set_seq_cut_off_threshold(i)
|
||||||
print(f"\n{i}")
|
print(f"\n{i}")
|
||||||
|
|
||||||
@ -468,10 +462,7 @@ def test_neg_set_nhit_promotion_policy_param(pyocf_ctx, cm, cls):
|
|||||||
# Start cache device
|
# Start cache device
|
||||||
cache_device = RamVolume(S.from_MiB(50))
|
cache_device = RamVolume(S.from_MiB(50))
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(
|
||||||
cache_device,
|
cache_device, cache_mode=cm, cache_line_size=cls, promotion_policy=PromotionPolicy.NHIT,
|
||||||
cache_mode=cm,
|
|
||||||
cache_line_size=cls,
|
|
||||||
promotion_policy=PromotionPolicy.NHIT,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Set invalid promotion policy param id and check if failed
|
# Set invalid promotion policy param id and check if failed
|
||||||
@ -498,10 +489,7 @@ def test_neg_set_nhit_promotion_policy_param_trigger(pyocf_ctx, cm, cls):
|
|||||||
# Start cache device
|
# Start cache device
|
||||||
cache_device = RamVolume(S.from_MiB(50))
|
cache_device = RamVolume(S.from_MiB(50))
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(
|
||||||
cache_device,
|
cache_device, cache_mode=cm, cache_line_size=cls, promotion_policy=PromotionPolicy.NHIT,
|
||||||
cache_mode=cm,
|
|
||||||
cache_line_size=cls,
|
|
||||||
promotion_policy=PromotionPolicy.NHIT,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Set to invalid promotion policy trigger threshold and check if failed
|
# Set to invalid promotion policy trigger threshold and check if failed
|
||||||
@ -509,9 +497,7 @@ def test_neg_set_nhit_promotion_policy_param_trigger(pyocf_ctx, cm, cls):
|
|||||||
if i in ConfValidValues.promotion_nhit_trigger_threshold_range:
|
if i in ConfValidValues.promotion_nhit_trigger_threshold_range:
|
||||||
continue
|
continue
|
||||||
with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
|
with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
|
||||||
cache.set_promotion_policy_param(
|
cache.set_promotion_policy_param(PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, i)
|
||||||
PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, i
|
|
||||||
)
|
|
||||||
print(f"\n{i}")
|
print(f"\n{i}")
|
||||||
|
|
||||||
|
|
||||||
@ -530,10 +516,7 @@ def test_neg_set_nhit_promotion_policy_param_threshold(pyocf_ctx, cm, cls):
|
|||||||
# Start cache device
|
# Start cache device
|
||||||
cache_device = RamVolume(S.from_MiB(50))
|
cache_device = RamVolume(S.from_MiB(50))
|
||||||
cache = Cache.start_on_device(
|
cache = Cache.start_on_device(
|
||||||
cache_device,
|
cache_device, cache_mode=cm, cache_line_size=cls, promotion_policy=PromotionPolicy.NHIT,
|
||||||
cache_mode=cm,
|
|
||||||
cache_line_size=cls,
|
|
||||||
promotion_policy=PromotionPolicy.NHIT,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Set to invalid promotion policy insertion threshold and check if failed
|
# Set to invalid promotion policy insertion threshold and check if failed
|
||||||
@ -568,11 +551,7 @@ def test_neg_set_ioclass_max_size(pyocf_ctx, cm, cls):
|
|||||||
continue
|
continue
|
||||||
with pytest.raises(OcfError, match="Error adding partition to cache"):
|
with pytest.raises(OcfError, match="Error adding partition to cache"):
|
||||||
cache.configure_partition(
|
cache.configure_partition(
|
||||||
part_id=1,
|
part_id=1, name="unclassified", max_size=i, priority=0, cache_mode=CACHE_MODE_NONE,
|
||||||
name="unclassified",
|
|
||||||
max_size=i,
|
|
||||||
priority=0,
|
|
||||||
cache_mode=CACHE_MODE_NONE,
|
|
||||||
)
|
)
|
||||||
print(f"\n{i}")
|
print(f"\n{i}")
|
||||||
|
|
||||||
|
@ -21,6 +21,7 @@ def try_start_cache(**config):
|
|||||||
cache = Cache.start_on_device(cache_device, **config)
|
cache = Cache.start_on_device(cache_device, **config)
|
||||||
cache.stop()
|
cache.stop()
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@pytest.mark.parametrize("cls", CacheLineSize)
|
@pytest.mark.parametrize("cls", CacheLineSize)
|
||||||
def test_fuzzy_start_cache_mode(pyocf_ctx, cls, not_cache_mode_randomize):
|
def test_fuzzy_start_cache_mode(pyocf_ctx, cls, not_cache_mode_randomize):
|
||||||
@ -59,14 +60,16 @@ def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
|
|||||||
:param cls: cache line size value to start cache with
|
:param cls: cache line size value to start cache with
|
||||||
"""
|
"""
|
||||||
cache_device = RamVolume(Size.from_MiB(50))
|
cache_device = RamVolume(Size.from_MiB(50))
|
||||||
incorrect_values = ['']
|
incorrect_values = [""]
|
||||||
try:
|
try:
|
||||||
cache = Cache.start_on_device(cache_device, name=string_randomize, cache_mode=cm,
|
cache = Cache.start_on_device(
|
||||||
cache_line_size=cls)
|
cache_device, name=string_randomize, cache_mode=cm, cache_line_size=cls
|
||||||
|
)
|
||||||
except OcfError:
|
except OcfError:
|
||||||
if string_randomize not in incorrect_values:
|
if string_randomize not in incorrect_values:
|
||||||
logger.error(
|
logger.error(
|
||||||
f"Cache did not start properly with correct name value: '{string_randomize}'")
|
f"Cache did not start properly with correct name value: '{string_randomize}'"
|
||||||
|
)
|
||||||
return
|
return
|
||||||
if string_randomize in incorrect_values:
|
if string_randomize in incorrect_values:
|
||||||
logger.error(f"Cache started with incorrect name value: '{string_randomize}'")
|
logger.error(f"Cache started with incorrect name value: '{string_randomize}'")
|
||||||
@ -75,7 +78,7 @@ def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
|
|||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@pytest.mark.parametrize("cls", CacheLineSize)
|
@pytest.mark.parametrize("cls", CacheLineSize)
|
||||||
@pytest.mark.parametrize('max_wb_queue_size', RandomGenerator(DefaultRanges.UINT32, 10))
|
@pytest.mark.parametrize("max_wb_queue_size", RandomGenerator(DefaultRanges.UINT32, 10))
|
||||||
def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_randomize, cls):
|
def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_randomize, cls):
|
||||||
"""
|
"""
|
||||||
Test whether it is impossible to start cache with invalid dependence between max queue size
|
Test whether it is impossible to start cache with invalid dependence between max queue size
|
||||||
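Two of the hunks above change only quoting: black normalizes string literals to double quotes, so '' becomes "" and 'max_wb_queue_size' becomes "max_wb_queue_size", with the contents untouched. Illustration:

    incorrect_values = [""]
    param_name = "max_wb_queue_size"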
@ -91,11 +94,14 @@ def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_rando
|
|||||||
max_queue_size=max_wb_queue_size,
|
max_queue_size=max_wb_queue_size,
|
||||||
queue_unblock_size=c_uint32_randomize,
|
queue_unblock_size=c_uint32_randomize,
|
||||||
cache_mode=CacheMode.WB,
|
cache_mode=CacheMode.WB,
|
||||||
cache_line_size=cls)
|
cache_line_size=cls,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
logger.warning(f"Test skipped for valid values: "
|
logger.warning(
|
||||||
f"'max_queue_size={max_wb_queue_size}, "
|
f"Test skipped for valid values: "
|
||||||
f"queue_unblock_size={c_uint32_randomize}'.")
|
f"'max_queue_size={max_wb_queue_size}, "
|
||||||
|
f"queue_unblock_size={c_uint32_randomize}'."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@ -111,7 +117,5 @@ def test_fuzzy_start_promotion_policy(pyocf_ctx, not_promotion_policy_randomize,
|
|||||||
"""
|
"""
|
||||||
with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
|
with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
|
||||||
try_start_cache(
|
try_start_cache(
|
||||||
cache_mode=cm,
|
cache_mode=cm, cache_line_size=cls, promotion_policy=not_promotion_policy_randomize
|
||||||
cache_line_size=cls,
|
|
||||||
promotion_policy=not_promotion_policy_randomize
|
|
||||||
)
|
)
|
||||||
|
@ -132,8 +132,7 @@ def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
|
|||||||
data = Data(int(Size.from_KiB(1)))
|
data = Data(int(Size.from_KiB(1)))
|
||||||
if c_int_randomize % 512 != 0:
|
if c_int_randomize % 512 != 0:
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(Exception):
|
||||||
vol.new_io(queue, c_int_randomize, data.size,
|
vol.new_io(queue, c_int_randomize, data.size, IoDir.WRITE, 0, 0)
|
||||||
IoDir.WRITE, 0, 0)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@ -147,8 +146,7 @@ def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
|
|||||||
data = Data(int(Size.from_B(c_uint16_randomize)))
|
data = Data(int(Size.from_B(c_uint16_randomize)))
|
||||||
if c_uint16_randomize % 512 != 0:
|
if c_uint16_randomize % 512 != 0:
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(Exception):
|
||||||
vol.new_io(queue, 0, data.size,
|
vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
|
||||||
IoDir.WRITE, 0, 0)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@ -200,12 +198,7 @@ def prepare_cache_and_core(core_size: Size, cache_size: Size = Size.from_MiB(50)
|
|||||||
|
|
||||||
|
|
||||||
def io_operation(
|
def io_operation(
|
||||||
vol: Volume,
|
vol: Volume, queue: Queue, data: Data, io_direction: int, offset: int = 0, io_class: int = 0,
|
||||||
queue: Queue,
|
|
||||||
data: Data,
|
|
||||||
io_direction: int,
|
|
||||||
offset: int = 0,
|
|
||||||
io_class: int = 0,
|
|
||||||
):
|
):
|
||||||
io = vol.new_io(queue, offset, data.size, io_direction, io_class, 0)
|
io = vol.new_io(queue, offset, data.size, io_direction, io_class, 0)
|
||||||
io.set_data(data)
|
io.set_data(data)
|
||||||
|
@ -59,9 +59,7 @@ class DataCopyTracer(Data):
|
|||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI])
|
||||||
"cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI]
|
|
||||||
)
|
|
||||||
def test_secure_erase_simple_io_read_misses(cache_mode):
|
def test_secure_erase_simple_io_read_misses(cache_mode):
|
||||||
"""
|
"""
|
||||||
Perform simple IO which will trigger read misses, which in turn should
|
Perform simple IO which will trigger read misses, which in turn should
|
||||||
@ -88,14 +86,7 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
|
|||||||
queue = cache.get_default_queue()
|
queue = cache.get_default_queue()
|
||||||
|
|
||||||
write_data = DataCopyTracer(S.from_sector(1))
|
write_data = DataCopyTracer(S.from_sector(1))
|
||||||
io = vol.new_io(
|
io = vol.new_io(queue, S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0,)
|
||||||
queue,
|
|
||||||
S.from_sector(1).B,
|
|
||||||
write_data.size,
|
|
||||||
IoDir.WRITE,
|
|
||||||
0,
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
io.set_data(write_data)
|
io.set_data(write_data)
|
||||||
|
|
||||||
cmpl = OcfCompletion([("err", c_int)])
|
cmpl = OcfCompletion([("err", c_int)])
|
||||||
@ -106,14 +97,7 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
|
|||||||
cmpls = []
|
cmpls = []
|
||||||
for i in range(100):
|
for i in range(100):
|
||||||
read_data = DataCopyTracer(S.from_sector(1))
|
read_data = DataCopyTracer(S.from_sector(1))
|
||||||
io = vol.new_io(
|
io = vol.new_io(queue, i * S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0,)
|
||||||
queue,
|
|
||||||
i * S.from_sector(1).B,
|
|
||||||
read_data.size,
|
|
||||||
IoDir.READ,
|
|
||||||
0,
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
io.set_data(read_data)
|
io.set_data(read_data)
|
||||||
|
|
||||||
cmpl = OcfCompletion([("err", c_int)])
|
cmpl = OcfCompletion([("err", c_int)])
|
||||||
@ -137,17 +121,13 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
|
|||||||
|
|
||||||
ctx.exit()
|
ctx.exit()
|
||||||
|
|
||||||
|
assert len(DataCopyTracer.needs_erase) == 0, "Not all locked Data instances were secure erased!"
|
||||||
|
assert len(DataCopyTracer.locked_instances) == 0, "Not all locked Data instances were unlocked!"
|
||||||
assert (
|
assert (
|
||||||
len(DataCopyTracer.needs_erase) == 0
|
stats["req"]["rd_partial_misses"]["value"] + stats["req"]["rd_full_misses"]["value"]
|
||||||
), "Not all locked Data instances were secure erased!"
|
|
||||||
assert (
|
|
||||||
len(DataCopyTracer.locked_instances) == 0
|
|
||||||
), "Not all locked Data instances were unlocked!"
|
|
||||||
assert (
|
|
||||||
stats["req"]["rd_partial_misses"]["value"]
|
|
||||||
+ stats["req"]["rd_full_misses"]["value"]
|
|
||||||
) > 0
|
) > 0
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
def test_secure_erase_simple_io_cleaning():
|
def test_secure_erase_simple_io_cleaning():
|
||||||
"""
|
"""
|
||||||
@ -201,10 +181,6 @@ def test_secure_erase_simple_io_cleaning():
|
|||||||
|
|
||||||
ctx.exit()
|
ctx.exit()
|
||||||
|
|
||||||
assert (
|
assert len(DataCopyTracer.needs_erase) == 0, "Not all locked Data instances were secure erased!"
|
||||||
len(DataCopyTracer.needs_erase) == 0
|
assert len(DataCopyTracer.locked_instances) == 0, "Not all locked Data instances were unlocked!"
|
||||||
), "Not all locked Data instances were secure erased!"
|
|
||||||
assert (
|
|
||||||
len(DataCopyTracer.locked_instances) == 0
|
|
||||||
), "Not all locked Data instances were unlocked!"
|
|
||||||
assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
|
assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
|
||||||
|
@ -101,9 +101,7 @@ def mngmt_op_surprise_shutdown_test(
|
|||||||
pyocf_2_ctx, cache_backend_vol, error_io_seq_no
|
pyocf_2_ctx, cache_backend_vol, error_io_seq_no
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
cache, err_vol = prepare_normal(
|
cache, err_vol = prepare_normal(pyocf_2_ctx, cache_backend_vol, error_io_seq_no)
|
||||||
pyocf_2_ctx, cache_backend_vol, error_io_seq_no
|
|
||||||
)
|
|
||||||
|
|
||||||
if prepare_func:
|
if prepare_func:
|
||||||
prepare_func(cache)
|
prepare_func(cache)
|
||||||
@ -125,10 +123,7 @@ def mngmt_op_surprise_shutdown_test(
|
|||||||
error_triggered = err_vol.error_triggered()
|
error_triggered = err_vol.error_triggered()
|
||||||
assert error_triggered == (status != 0)
|
assert error_triggered == (status != 0)
|
||||||
if error_triggered:
|
if error_triggered:
|
||||||
assert (
|
assert status == OcfErrorCode.OCF_ERR_WRITE_CACHE or status == OcfErrorCode.OCF_ERR_IO
|
||||||
status == OcfErrorCode.OCF_ERR_WRITE_CACHE
|
|
||||||
or status == OcfErrorCode.OCF_ERR_IO
|
|
||||||
)
|
|
||||||
|
|
||||||
# stop cache with error injection still on
|
# stop cache with error injection still on
|
||||||
with pytest.raises(OcfError) as ex:
|
with pytest.raises(OcfError) as ex:
|
||||||
@ -174,9 +169,7 @@ def test_surprise_shutdown_add_core(pyocf_2_ctx, failover):
|
|||||||
def check_func(cache, error_triggered):
|
def check_func(cache, error_triggered):
|
||||||
check_core(cache, error_triggered)
|
check_core(cache, error_triggered)
|
||||||
|
|
||||||
mngmt_op_surprise_shutdown_test(
|
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, None, check_func)
|
||||||
pyocf_2_ctx, failover, tested_func, None, check_func
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# power failure during core removal
|
# power failure during core removal
|
||||||
@ -197,9 +190,7 @@ def test_surprise_shutdown_remove_core(pyocf_2_ctx, failover):
|
|||||||
stats = cache.get_stats()
|
stats = cache.get_stats()
|
||||||
assert stats["conf"]["core_count"] == (1 if error_triggered else 0)
|
assert stats["conf"]["core_count"] == (1 if error_triggered else 0)
|
||||||
|
|
||||||
mngmt_op_surprise_shutdown_test(
|
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare_func, check_func)
|
||||||
pyocf_2_ctx, failover, tested_func, prepare_func, check_func
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.security
|
@pytest.mark.security
|
||||||
@ -228,9 +219,7 @@ def test_surprise_shutdown_remove_core_with_data(pyocf_2_ctx, failover):
|
|||||||
vol = CoreVolume(core, open=True)
|
vol = CoreVolume(core, open=True)
|
||||||
assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA
|
assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA
|
||||||
|
|
||||||
mngmt_op_surprise_shutdown_test(
|
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare_func, check_func)
|
||||||
pyocf_2_ctx, failover, tested_func, prepare_func, check_func
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# power failure during core add after previous core removed
|
# power failure during core add after previous core removed
|
||||||
@ -266,9 +255,7 @@ def test_surprise_shutdown_swap_core(pyocf_2_ctx, failover):
|
|||||||
core2 = cache.get_core_by_name("core2")
|
core2 = cache.get_core_by_name("core2")
|
||||||
assert core2.device.uuid == "dev2"
|
assert core2.device.uuid == "dev2"
|
||||||
|
|
||||||
mngmt_op_surprise_shutdown_test(
|
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare, check_func)
|
||||||
pyocf_2_ctx, failover, tested_func, prepare, check_func
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# power failure during core add after previous core removed
|
# power failure during core add after previous core removed
|
||||||
@ -286,10 +273,7 @@ def test_surprise_shutdown_swap_core_with_data(pyocf_2_ctx, failover):
|
|||||||
vol = CoreVolume(core1, open=True)
|
vol = CoreVolume(core1, open=True)
|
||||||
cache.save()
|
cache.save()
|
||||||
ocf_write(
|
ocf_write(
|
||||||
vol,
|
vol, cache.get_default_queue(), 0xAA, mngmt_op_surprise_shutdown_test_io_offset,
|
||||||
cache.get_default_queue(),
|
|
||||||
0xAA,
|
|
||||||
mngmt_op_surprise_shutdown_test_io_offset,
|
|
||||||
)
|
)
|
||||||
cache.remove_core(core1)
|
cache.remove_core(core1)
|
||||||
cache.save()
|
cache.save()
|
||||||
@ -316,16 +300,12 @@ def test_surprise_shutdown_swap_core_with_data(pyocf_2_ctx, failover):
|
|||||||
assert core2.device.uuid == "dev2"
|
assert core2.device.uuid == "dev2"
|
||||||
assert (
|
assert (
|
||||||
ocf_read(
|
ocf_read(
|
||||||
vol2,
|
vol2, cache.get_default_queue(), mngmt_op_surprise_shutdown_test_io_offset,
|
||||||
cache.get_default_queue(),
|
|
||||||
mngmt_op_surprise_shutdown_test_io_offset,
|
|
||||||
)
|
)
|
||||||
== VOLUME_POISON
|
== VOLUME_POISON
|
||||||
)
|
)
|
||||||
|
|
||||||
mngmt_op_surprise_shutdown_test(
|
mngmt_op_surprise_shutdown_test(pyocf_2_ctx, failover, tested_func, prepare, check_func)
|
||||||
pyocf_2_ctx, failover, tested_func, prepare, check_func
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# make sure there are no crashes when cache start is interrupted
|
# make sure there are no crashes when cache start is interrupted
|
||||||
@ -350,9 +330,7 @@ def test_surprise_shutdown_start_cache(pyocf_2_ctx, failover):
|
|||||||
cache2.start_cache()
|
cache2.start_cache()
|
||||||
cache2.standby_attach(ramdisk)
|
cache2.standby_attach(ramdisk)
|
||||||
cache2_exp_obj_vol = CacheVolume(cache2, open=True)
|
cache2_exp_obj_vol = CacheVolume(cache2, open=True)
|
||||||
err_device = ErrorDevice(
|
err_device = ErrorDevice(cache2_exp_obj_vol, error_seq_no=error_io, armed=True)
|
||||||
cache2_exp_obj_vol, error_seq_no=error_io, armed=True
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
err_device = ErrorDevice(ramdisk, error_seq_no=error_io, armed=True)
|
err_device = ErrorDevice(ramdisk, error_seq_no=error_io, armed=True)
|
||||||
|
|
||||||
@ -415,9 +393,7 @@ def test_surprise_shutdown_stop_cache(pyocf_2_ctx, failover):
|
|||||||
ramdisk = RamVolume(mngmt_op_surprise_shutdown_test_cache_size)
|
ramdisk = RamVolume(mngmt_op_surprise_shutdown_test_cache_size)
|
||||||
|
|
||||||
if failover:
|
if failover:
|
||||||
cache, cache2, device = prepare_failover(
|
cache, cache2, device = prepare_failover(pyocf_2_ctx, ramdisk, error_io_seq_no)
|
||||||
pyocf_2_ctx, ramdisk, error_io_seq_no
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
cache, device = prepare_normal(pyocf_2_ctx, ramdisk, error_io_seq_no)
|
cache, device = prepare_normal(pyocf_2_ctx, ramdisk, error_io_seq_no)
|
||||||
|
|
||||||
@ -486,9 +462,7 @@ def test_surprise_shutdown_cache_reinit(pyocf_2_ctx, failover):
|
|||||||
ramdisk = RamVolume(mngmt_op_surprise_shutdown_test_cache_size)
|
ramdisk = RamVolume(mngmt_op_surprise_shutdown_test_cache_size)
|
||||||
|
|
||||||
if failover:
|
if failover:
|
||||||
cache, cache2, device = prepare_failover(
|
cache, cache2, device = prepare_failover(pyocf_2_ctx, ramdisk, error_io_seq_no)
|
||||||
pyocf_2_ctx, ramdisk, error_io_seq_no
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
cache, device = prepare_normal(pyocf_2_ctx, ramdisk, error_io_seq_no)
|
cache, device = prepare_normal(pyocf_2_ctx, ramdisk, error_io_seq_no)
|
||||||
|
|
||||||
@ -553,9 +527,7 @@ def test_surprise_shutdown_cache_reinit(pyocf_2_ctx, failover):
|
|||||||
assert stats["usage"]["occupancy"]["value"] == 0
|
assert stats["usage"]["occupancy"]["value"] == 0
|
||||||
cache.add_core(core)
|
cache.add_core(core)
|
||||||
vol = CoreVolume(core, open=True)
|
vol = CoreVolume(core, open=True)
|
||||||
assert (
|
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
|
||||||
ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
|
|
||||||
)
|
|
||||||
|
|
||||||
cache.stop()
|
cache.stop()
|
||||||
cache = None
|
cache = None
|
||||||
@ -591,9 +563,7 @@ def test_surprise_shutdown_change_cache_mode(pyocf_2_ctx, failover):
|
|||||||
@pytest.mark.parametrize("failover", [False, True])
|
@pytest.mark.parametrize("failover", [False, True])
|
||||||
@pytest.mark.parametrize("start_clp", CleaningPolicy)
|
@pytest.mark.parametrize("start_clp", CleaningPolicy)
|
||||||
@pytest.mark.parametrize("end_clp", CleaningPolicy)
|
@pytest.mark.parametrize("end_clp", CleaningPolicy)
|
||||||
def test_surprise_shutdown_set_cleaning_policy(
|
def test_surprise_shutdown_set_cleaning_policy(pyocf_2_ctx, failover, start_clp, end_clp):
|
||||||
pyocf_2_ctx, failover, start_clp, end_clp
|
|
||||||
):
|
|
||||||
core_device = RamVolume(S.from_MiB(10))
|
core_device = RamVolume(S.from_MiB(10))
|
||||||
core = Core(device=core_device)
|
core = Core(device=core_device)
|
||||||
|
|
||||||
@ -644,9 +614,7 @@ def test_surprise_shutdown_set_seq_cut_off_promotion(pyocf_2_ctx, failover):
|
|||||||
@pytest.mark.parametrize("failover", [False, True])
|
@pytest.mark.parametrize("failover", [False, True])
|
||||||
def test_surprise_shutdown_set_seq_cut_off_threshold(pyocf_2_ctx, failover):
|
def test_surprise_shutdown_set_seq_cut_off_threshold(pyocf_2_ctx, failover):
|
||||||
_test_surprise_shutdown_mngmt_generic(
|
_test_surprise_shutdown_mngmt_generic(
|
||||||
pyocf_2_ctx,
|
pyocf_2_ctx, failover, lambda cache, core: cache.set_seq_cut_off_threshold(S.from_MiB(2).B),
|
||||||
failover,
|
|
||||||
lambda cache, core: cache.set_seq_cut_off_threshold(S.from_MiB(2).B),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -706,9 +674,7 @@ def test_surprise_shutdown_set_cleaning_policy_param(pyocf_2_ctx, failover, clp)
|
|||||||
@pytest.mark.parametrize("failover", [False, True])
|
@pytest.mark.parametrize("failover", [False, True])
|
||||||
@pytest.mark.parametrize("start_pp", PromotionPolicy)
|
@pytest.mark.parametrize("start_pp", PromotionPolicy)
|
||||||
@pytest.mark.parametrize("end_pp", PromotionPolicy)
|
@pytest.mark.parametrize("end_pp", PromotionPolicy)
|
||||||
def test_surprise_shutdown_set_promotion_policy(
|
def test_surprise_shutdown_set_promotion_policy(pyocf_2_ctx, failover, start_pp, end_pp):
|
||||||
pyocf_2_ctx, failover, start_pp, end_pp
|
|
||||||
):
|
|
||||||
core_device = RamVolume(S.from_MiB(10))
|
core_device = RamVolume(S.from_MiB(10))
|
||||||
core = Core(device=core_device)
|
core = Core(device=core_device)
|
||||||
|
|
||||||
@ -801,9 +767,7 @@ def test_surprise_shutdown_set_io_class_config(pyocf_2_ctx, failover):
|
|||||||
ioclasses_info._config[i]._priority = desc[i]["_priority"]
|
ioclasses_info._config[i]._priority = desc[i]["_priority"]
|
||||||
ioclasses_info._config[i]._cache_mode = desc[i]["_cache_mode"]
|
ioclasses_info._config[i]._cache_mode = desc[i]["_cache_mode"]
|
||||||
ioclasses_info._config[i]._max_size = desc[i]["_max_size"]
|
ioclasses_info._config[i]._max_size = desc[i]["_max_size"]
|
||||||
OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(
|
OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(cache, byref(ioclasses_info))
|
||||||
cache, byref(ioclasses_info)
|
|
||||||
)
|
|
||||||
|
|
||||||
def prepare(cache):
|
def prepare(cache):
|
||||||
cache.add_core(core)
|
cache.add_core(core)
|
||||||
@ -1041,9 +1005,7 @@ def test_surprise_shutdown_standby_init_force_1(pyocf_ctx):
|
|||||||
core = Core(device=core_device)
|
core = Core(device=core_device)
|
||||||
cache.add_core(core)
|
cache.add_core(core)
|
||||||
vol = CoreVolume(core, open=True)
|
vol = CoreVolume(core, open=True)
|
||||||
assert (
|
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
|
||||||
ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
|
|
||||||
)
|
|
||||||
|
|
||||||
cache.stop()
|
cache.stop()
|
||||||
|
|
||||||
@ -1128,9 +1090,7 @@ def test_surprise_shutdown_standby_init_force_2(pyocf_ctx):
|
|||||||
core = Core(device=core_device)
|
core = Core(device=core_device)
|
||||||
cache.add_core(core)
|
cache.add_core(core)
|
||||||
vol = CoreVolume(core, open=True)
|
vol = CoreVolume(core, open=True)
|
||||||
assert (
|
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
|
||||||
ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON
|
|
||||||
)
|
|
||||||
|
|
||||||
if cache:
|
if cache:
|
||||||
cache.stop()
|
cache.stop()
|
||||||
|
@ -7,14 +7,7 @@ import random
|
|||||||
import string
|
import string
|
||||||
import enum
|
import enum
|
||||||
from functools import reduce
|
from functools import reduce
|
||||||
from ctypes import (
|
from ctypes import c_uint64, c_uint32, c_uint16, c_uint8, c_int, c_uint
|
||||||
c_uint64,
|
|
||||||
c_uint32,
|
|
||||||
c_uint16,
|
|
||||||
c_uint8,
|
|
||||||
c_int,
|
|
||||||
c_uint
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class Range:
|
class Range:
|
||||||
@ -75,17 +68,20 @@ class RandomStringGenerator:
|
|||||||
|
|
||||||
def __string_generator(self, len_range, extra_chars):
|
def __string_generator(self, len_range, extra_chars):
|
||||||
while True:
|
while True:
|
||||||
for t in [string.digits,
|
for t in [
|
||||||
string.ascii_letters + string.digits,
|
string.digits,
|
||||||
string.ascii_lowercase,
|
string.ascii_letters + string.digits,
|
||||||
string.ascii_uppercase,
|
string.ascii_lowercase,
|
||||||
string.printable,
|
string.ascii_uppercase,
|
||||||
string.punctuation,
|
string.printable,
|
||||||
string.hexdigits,
|
string.punctuation,
|
||||||
*extra_chars]:
|
string.hexdigits,
|
||||||
yield ''.join(random.choice(t) for _ in range(
|
*extra_chars,
|
||||||
self.random.randint(len_range.min, len_range.max)
|
]:
|
||||||
))
|
yield "".join(
|
||||||
|
random.choice(t)
|
||||||
|
for _ in range(self.random.randint(len_range.min, len_range.max))
|
||||||
|
)
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
return self
|
return self
|
||||||
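In the final hunk the long yield line is re-wrapped inside the join() call: the list of character classes gets one entry per line with a trailing comma, and the generator expression is split into one clause per line, all within the line limit. A self-contained sketch of the resulting shape, using only standard-library names rather than the RandomStringGenerator internals:

    import random
    import string

    def token(min_len=8, max_len=16):
        # one clause of the generator per line, as in the reformatted code
        return "".join(
            random.choice(string.ascii_lowercase)
            for _ in range(random.randint(min_len, max_len))
        )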